From b3b9f82085c9538a58786e8501222a1d2cdfe7d3 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 2 Apr 2019 12:46:30 +0100 Subject: [PATCH 001/383] docs: move confusing install step to noteworthy difference --- docs/ansible_detailed.rst | 17 +++++++++-------- docs/conf.py | 4 ++-- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/docs/ansible_detailed.rst b/docs/ansible_detailed.rst index 5b541a14..aeaa5e7c 100644 --- a/docs/ansible_detailed.rst +++ b/docs/ansible_detailed.rst @@ -75,13 +75,7 @@ Installation ``mitogen_host_pinned`` strategies exists to mimic the ``free`` and ``host_pinned`` strategies. -4. If targets have a restrictive ``sudoers`` file, add a rule like: - - :: - - deploy = (ALL) NOPASSWD:/usr/bin/python -c* - -5. +4. .. raw:: html @@ -103,7 +97,6 @@ Installation - Demo ~~~~ @@ -172,6 +165,14 @@ Noteworthy Differences * The ``doas``, ``su`` and ``sudo`` become methods are available. File bugs to register interest in more. +* The ``sudo`` comands executed differ slightly compared to Ansible. In some + cases where the target has a ``sudo`` configuration that restricts the exact + commands allowed to run, it may be necessary to add a ``sudoers`` rule like: + + :: + + your_ssh_username = (ALL) NOPASSWD:/usr/bin/python -c* + * The `docker `_, `jail `_, `kubectl `_, diff --git a/docs/conf.py b/docs/conf.py index 7f03e451..80973c36 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -5,8 +5,8 @@ sys.path.append('..') import mitogen VERSION = '%s.%s.%s' % mitogen.__version__ -author = u'David Wilson' -copyright = u'2019, David Wilson' +author = u'Network Genomics' +copyright = u'2019, Network Genomics' exclude_patterns = ['_build'] extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinxcontrib.programoutput'] html_show_sourcelink = False From cae96a022b3621b5c0a60da75a7df3a50749fe03 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 4 Apr 2019 13:18:39 +0100 Subject: [PATCH 002/383] Update README link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5ef2447f..da93a80b 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ # Mitogen -Please see the documentation. +Please see the documentation. ![](https://i.imgur.com/eBM6LhJ.gif) From 7bb2832302c2d2ba4ca785cee1b999577982eb43 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 10 Apr 2019 03:11:56 +0100 Subject: [PATCH 003/383] docs: Google site verification --- docs/_templates/layout.html | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html index 297ab9ef..d5157b62 100644 --- a/docs/_templates/layout.html +++ b/docs/_templates/layout.html @@ -1,6 +1,11 @@ {% extends "!layout.html" %} {% set css_files = css_files + ['_static/style.css'] %} +{% block extrahead %} + + {{ super() }} +{% endblock %} + {% block footer %} {{ super() }} From eb6d83e1a09e2ee79820d55947baab298d477c4f Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 20 Apr 2019 19:06:22 +0100 Subject: [PATCH 004/383] docs: update download link. --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 80973c36..11ef822a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -39,6 +39,6 @@ rst_epilog = """ .. |mitogen_version| replace:: %(VERSION)s -.. |mitogen_url| replace:: `mitogen-%(VERSION)s.tar.gz `__ +.. 
|mitogen_url| replace:: `mitogen-%(VERSION)s.tar.gz `__ """ % locals() From d002cbbff0156e980ecad43223bb21d0d499a3d8 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 20 Apr 2019 19:08:30 +0100 Subject: [PATCH 005/383] docs: update stats --- docs/_templates/layout.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html index d5157b62..c5770eda 100644 --- a/docs/_templates/layout.html +++ b/docs/_templates/layout.html @@ -12,13 +12,13 @@ - + {% endblock %} From 72d8973bc9d6e529ab8688e2d816468e61bf581a Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 22 Apr 2019 01:45:43 +0100 Subject: [PATCH 006/383] docs: Get rid of a ton of blocking resources from theme. --- docs/_templates/layout.html | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html index c5770eda..f5fe42b0 100644 --- a/docs/_templates/layout.html +++ b/docs/_templates/layout.html @@ -1,9 +1,14 @@ {% extends "!layout.html" %} {% set css_files = css_files + ['_static/style.css'] %} -{% block extrahead %} +{# We don't support Sphinx search, so don't let its JS either. #} +{% block scripts %} +{% endblock %} + +{# Alabaster ships a completely useless custom.css, suppress it. #} +{%- block extrahead %} - {{ super() }} + {% endblock %} {% block footer %} @@ -18,7 +23,13 @@ g.defer=true; g.async=true; g.src=u+'js'; s.parentNode.insertBefore(g,s); })(); - + + {% endblock %} From c616359a1767718fce78e659012f9b1695171f8c Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 11 May 2019 17:01:19 +0100 Subject: [PATCH 007/383] Import include processing bench --- tests/ansible/bench/_includes.yml | 1 + tests/ansible/bench/includes.yml | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 tests/ansible/bench/_includes.yml create mode 100644 tests/ansible/bench/includes.yml diff --git a/tests/ansible/bench/_includes.yml b/tests/ansible/bench/_includes.yml new file mode 100644 index 00000000..6501db21 --- /dev/null +++ b/tests/ansible/bench/_includes.yml @@ -0,0 +1 @@ +- meta: end_play diff --git a/tests/ansible/bench/includes.yml b/tests/ansible/bench/includes.yml new file mode 100644 index 00000000..4f50113a --- /dev/null +++ b/tests/ansible/bench/includes.yml @@ -0,0 +1,4 @@ +- hosts: test-targets + tasks: + - include_tasks: _includes.yml + with_sequence: start=1 end=1000 From f791e34d551d10f0958178de84dbe34bc51f652a Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 11 May 2019 17:02:52 +0100 Subject: [PATCH 008/383] Add extra/ to .gitignore. --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index e244ca12..cf9c084d 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ docs/_build/ htmlcov/ *.egg-info __pycache__/ +extra From 5cb1cb26129ff670066e01ba568eaed51ab49164 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 11 May 2019 17:04:42 +0100 Subject: [PATCH 009/383] Import debug helper macros. --- scripts/debug-helpers.sh | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 scripts/debug-helpers.sh diff --git a/scripts/debug-helpers.sh b/scripts/debug-helpers.sh new file mode 100644 index 00000000..7011c18c --- /dev/null +++ b/scripts/debug-helpers.sh @@ -0,0 +1,39 @@ +# +# Bash helpers for debugging. +# + +# Tell Ansible to write PID files for the mux and top-level process to CWD. 
+export MITOGEN_SAVE_PIDS=1 + + +# strace -ff -p $(muxpid) +muxpid() { + cat .ansible-mux.pid +} + +# gdb -p $(anspid) +anspid() { + cat .ansible-controller.pid +} + +# perf top -git $(muxtids) +# perf top -git $(muxtids) +muxtids() { + ls /proc/$(muxpid)/task | tr \\n , +} + +# perf top -git $(anstids) +anstids() { + ls /proc/$(anspid)/task | tr \\n , +} + +# ttrace $(muxpid) [.. options ..] +# strace only threads of PID, not children +ttrace() { + local pid=$1; shift; + local s="" + for i in $(ls /proc/$pid/task) ; do + s="-p $i $s" + done + strace $s "$@" +} From 4a614c3950cb52dd316041ee89d8efc184e50f29 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:06:14 +0100 Subject: [PATCH 010/383] issue #587: bump max Ansible version --- ansible_mitogen/strategy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible_mitogen/strategy.py b/ansible_mitogen/strategy.py index b9211fcc..3c7ea687 100644 --- a/ansible_mitogen/strategy.py +++ b/ansible_mitogen/strategy.py @@ -42,7 +42,7 @@ import ansible.executor.process.worker ANSIBLE_VERSION_MIN = '2.3' -ANSIBLE_VERSION_MAX = '2.7' +ANSIBLE_VERSION_MAX = '2.8' NEW_VERSION_MSG = ( "Your Ansible version (%s) is too recent. The most recent version\n" "supported by Mitogen for Ansible is %s.x. Please check the Mitogen\n" From 46dde959620524b76ec2723cafe3fa54ee170c75 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:06:14 +0100 Subject: [PATCH 011/383] issue #587: 2.8 PlayContext.connection no longer contains connection name Not clear what the intention is here. Either need to ferret it out of some other location, or just stop preloading the connection class in the top-level process. --- ansible_mitogen/strategy.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/ansible_mitogen/strategy.py b/ansible_mitogen/strategy.py index 3c7ea687..5521e127 100644 --- a/ansible_mitogen/strategy.py +++ b/ansible_mitogen/strategy.py @@ -40,6 +40,12 @@ import ansible_mitogen.process import ansible import ansible.executor.process.worker +try: + # 2.8+ has a standardized "unset" object. + from ansible.utils.sentinel import Sentinel +except ImportError: + Sentinel = None + ANSIBLE_VERSION_MIN = '2.3' ANSIBLE_VERSION_MAX = '2.8' @@ -261,14 +267,17 @@ class StrategyMixin(object): name=task.action, mod_type='', ) - ansible_mitogen.loaders.connection_loader.get( - name=play_context.connection, - class_only=True, - ) ansible_mitogen.loaders.action_loader.get( name=task.action, class_only=True, ) + if play_context.connection is not Sentinel: + # 2.8 appears to defer computing this value until it's inside the + # worker. TODO: figure out where this value has moved. + ansible_mitogen.loaders.connection_loader.get( + name=play_context.connection, + class_only=True, + ) return super(StrategyMixin, self)._queue_task( host=host, From e11b251c75b89e643cab764fd271007e739cf6ce Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:06:14 +0100 Subject: [PATCH 012/383] issue #587: 2.8 PluginLoader.get() introduced new collection_list kwarg --- ansible_mitogen/strategy.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ansible_mitogen/strategy.py b/ansible_mitogen/strategy.py index 5521e127..1060e3b5 100644 --- a/ansible_mitogen/strategy.py +++ b/ansible_mitogen/strategy.py @@ -121,7 +121,11 @@ def wrap_action_loader__get(name, *args, **kwargs): This is used instead of static subclassing as it generalizes to third party action modules outside the Ansible tree. 
""" - klass = action_loader__get(name, class_only=True) + get_kwargs = {'class_only': True} + if ansible.__version__ >= '2.8': + get_kwargs['collection_list'] = kwargs.pop('collection_list', None) + + klass = action_loader__get(name, **get_kwargs) if klass: bases = (ansible_mitogen.mixins.ActionModuleMixin, klass) adorned_klass = type(str(name), bases, {}) From c1c8d5c31e652dcc02c54e5109fb71ea48b676eb Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:06:14 +0100 Subject: [PATCH 013/383] issue #587: 2.8 PlayContext lacks sudo_flags attribute. This is a huge bodge. --- ansible_mitogen/strategy.py | 4 ++-- ansible_mitogen/transport_config.py | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/ansible_mitogen/strategy.py b/ansible_mitogen/strategy.py index 1060e3b5..b6b9aaf2 100644 --- a/ansible_mitogen/strategy.py +++ b/ansible_mitogen/strategy.py @@ -276,8 +276,8 @@ class StrategyMixin(object): class_only=True, ) if play_context.connection is not Sentinel: - # 2.8 appears to defer computing this value until it's inside the - # worker. TODO: figure out where this value has moved. + # 2.8 appears to defer computing this until inside the worker. + # TODO: figure out where it has moved. ansible_mitogen.loaders.connection_loader.get( name=play_context.connection, class_only=True, diff --git a/ansible_mitogen/transport_config.py b/ansible_mitogen/transport_config.py index ad1cab3e..d5f15b10 100644 --- a/ansible_mitogen/transport_config.py +++ b/ansible_mitogen/transport_config.py @@ -380,8 +380,9 @@ class PlayContextSpec(Spec): for term in ansible.utils.shlex.shlex_split( first_true(( self._play_context.become_flags, - self._play_context.sudo_flags, - # Ansible 2.3. + # Ansible <=2.7. + getattr(self._play_context, 'sudo_flags', ''), + # Ansible <=2.3. getattr(C, 'DEFAULT_BECOME_FLAGS', ''), getattr(C, 'DEFAULT_SUDO_FLAGS', '') ), default='') From b8e1b4df5153d00eacdd37e77d9a680a76c95ecc Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:06:14 +0100 Subject: [PATCH 014/383] issue #587: 2.8 whitespace handling was improved. https://github.com/ansible/ansible/commit/b72e989e1837ccad8dcdc926c43ccbc4d8cdfe44 --- tests/ansible/integration/async/runner_one_job.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/ansible/integration/async/runner_one_job.yml b/tests/ansible/integration/async/runner_one_job.yml index ca798a7f..3b14fa31 100644 --- a/tests/ansible/integration/async/runner_one_job.yml +++ b/tests/ansible/integration/async/runner_one_job.yml @@ -37,7 +37,12 @@ - result1.ansible_job_id == job1.ansible_job_id - result1.attempts <= 100000 - result1.changed == True - - result1.cmd == "sleep 1;\n echo alldone" + - | + # ansible/b72e989e1837ccad8dcdc926c43ccbc4d8cdfe44 + (ansible_version.full >= '2.8' and + result1.cmd == "sleep 1;\necho alldone\n") or + (ansible_version.full < '2.8' and + result1.cmd == "sleep 1;\n echo alldone") - result1.delta|length == 14 - result1.start|length == 26 - result1.finished == 1 From f35194fe0f3e85764ac20c3cc6e7b08bf276e3f2 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:06:14 +0100 Subject: [PATCH 015/383] issue #587: mitogen_doas should not become_exe for doas_path Looks like this has always been wrong - when used as a connection method, PlayContext.become_method/become_exe may hold totally unrelated data. 
--- ansible_mitogen/connection.py | 2 +- ansible_mitogen/transport_config.py | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/ansible_mitogen/connection.py b/ansible_mitogen/connection.py index b5f28d34..411b99f1 100644 --- a/ansible_mitogen/connection.py +++ b/ansible_mitogen/connection.py @@ -356,7 +356,7 @@ def _connect_mitogen_doas(spec): 'username': spec.remote_user(), 'password': spec.password(), 'python_path': spec.python_path(), - 'doas_path': spec.become_exe(), + 'doas_path': spec.ansible_doas_exe(), 'connect_timeout': spec.timeout(), 'remote_name': get_remote_name(spec), } diff --git a/ansible_mitogen/transport_config.py b/ansible_mitogen/transport_config.py index d5f15b10..0bdad4f3 100644 --- a/ansible_mitogen/transport_config.py +++ b/ansible_mitogen/transport_config.py @@ -294,6 +294,12 @@ class Spec(with_metaclass(abc.ABCMeta, object)): Connection-specific arguments. """ + @abc.abstractmethod + def ansible_doas_exe(self): + """ + Value of "ansible_doas_exe" variable. + """ + class PlayContextSpec(Spec): """ @@ -425,6 +431,12 @@ class PlayContextSpec(Spec): def extra_args(self): return self._connection.get_extra_args() + def ansible_doas_exe(self): + return ( + self._connection.get_task_var('ansible_doas_exe') or + os.environ.get('ANSIBLE_DOAS_EXE') + ) + class MitogenViaSpec(Spec): """ @@ -635,3 +647,9 @@ class MitogenViaSpec(Spec): def extra_args(self): return [] # TODO + + def ansible_doas_exe(self): + return ( + self._host_vars.get('ansible_doas_exe') or + os.environ.get('ANSIBLE_DOAS_EXE') + ) From ce80e326feda102d1ee0bf8cce41dddba8566c50 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:06:14 +0100 Subject: [PATCH 016/383] issue #587: disable deprecation_warnings for CI. Our use case justifies it --- tests/ansible/ansible.cfg | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/ansible/ansible.cfg b/tests/ansible/ansible.cfg index bec749f7..0135736b 100644 --- a/tests/ansible/ansible.cfg +++ b/tests/ansible/ansible.cfg @@ -13,6 +13,9 @@ retry_files_enabled = False display_args_to_stdout = True forks = 100 +# We use lots of deprecated functionality to support older versions. +deprecation_warnings = False + # issue #434; hosts/delegate_to; integration/delegate_to remote_user = ansible-cfg-remote-user From 504f1961ea782719b5761077222068f5823d5bd2 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:06:14 +0100 Subject: [PATCH 017/383] issue #587: update mitogen_doas doc to match varible change. --- docs/ansible_detailed.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/ansible_detailed.rst b/docs/ansible_detailed.rst index aeaa5e7c..f709f8f3 100644 --- a/docs/ansible_detailed.rst +++ b/docs/ansible_detailed.rst @@ -731,7 +731,7 @@ as a become method. When used as a become method: * ``ansible_python_interpreter`` -* ``ansible_become_exe``: path to ``doas`` binary. +* ``ansible_become_exe`` / ``ansible_doas_exe``: path to ``doas`` binary. * ``ansible_become_user`` (default: ``root``) * ``ansible_become_pass`` (default: assume passwordless) * ``mitogen_mask_remote_name``: if :data:`True`, mask the identity of the @@ -746,6 +746,7 @@ When used as the ``mitogen_doas`` connection method: * The inventory hostname has no special meaning. * ``ansible_user``: username to use. * ``ansible_password``: password to use. +* ``ansible_doas_exe``: path to ``doas`` binary. 
* ``ansible_python_interpreter`` From 54b5fdf761dea87d5d107f4293ab4caedfda22a3 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:06:14 +0100 Subject: [PATCH 018/383] issue #587: fix syntax error due to presence of comment --- tests/ansible/integration/async/runner_one_job.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ansible/integration/async/runner_one_job.yml b/tests/ansible/integration/async/runner_one_job.yml index 3b14fa31..a576d4ce 100644 --- a/tests/ansible/integration/async/runner_one_job.yml +++ b/tests/ansible/integration/async/runner_one_job.yml @@ -37,8 +37,8 @@ - result1.ansible_job_id == job1.ansible_job_id - result1.attempts <= 100000 - result1.changed == True + # ansible/b72e989e1837ccad8dcdc926c43ccbc4d8cdfe44 - | - # ansible/b72e989e1837ccad8dcdc926c43ccbc4d8cdfe44 (ansible_version.full >= '2.8' and result1.cmd == "sleep 1;\necho alldone\n") or (ansible_version.full < '2.8' and From a3be746865d2a102913451b117745fe8efdd19ad Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:06:14 +0100 Subject: [PATCH 019/383] issue #587: update MODULE FAILURE message format for post >2.7 --- .../integration/runner/crashy_new_style_module.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/ansible/integration/runner/crashy_new_style_module.yml b/tests/ansible/integration/runner/crashy_new_style_module.yml index 40ee7f88..a29493be 100644 --- a/tests/ansible/integration/runner/crashy_new_style_module.yml +++ b/tests/ansible/integration/runner/crashy_new_style_module.yml @@ -12,7 +12,14 @@ that: - not out.changed - out.rc == 1 - - out.msg == "MODULE FAILURE" + # ansible/62d8c8fde6a76d9c567ded381e9b34dad69afcd6 + - | + (ansible_version.full < '2.7' and out.msg == "MODULE FAILURE") or + (ansible_version.full >= '2.7' and + out.msg == ( + "MODULE FAILURE\n" + + "See stdout/stderr for the exact error" + )) - out.module_stdout == "" - "'Traceback (most recent call last)' in out.module_stderr" - "\"NameError: name 'kaboom' is not defined\" in out.module_stderr" From a25fa566a14f4521b1a236b5f52c2eab87879961 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:06:14 +0100 Subject: [PATCH 020/383] issue #587: update stub_connections/ test to use new doas var --- tests/ansible/integration/stub_connections/mitogen_doas.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ansible/integration/stub_connections/mitogen_doas.yml b/tests/ansible/integration/stub_connections/mitogen_doas.yml index 40d4f4b0..3c1459e9 100644 --- a/tests/ansible/integration/stub_connections/mitogen_doas.yml +++ b/tests/ansible/integration/stub_connections/mitogen_doas.yml @@ -10,7 +10,7 @@ - custom_python_detect_environment: vars: ansible_connection: mitogen_doas - ansible_become_exe: stub-doas.py + ansible_doas_exe: stub-doas.py ansible_user: someuser register: out From 92b4724010d1edd0e073c51ffb8acad9ccd971ff Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:06:14 +0100 Subject: [PATCH 021/383] issue #587: consistent become_exe() behaviour for older Ansibles. 
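A minimal sketch of the fallback this change adds, using an illustrative stand-in rather than a real ``PlayContext`` object (only ``become_method`` and ``become_exe`` are modelled)::

    class FakePlayContext(object):
        # Illustrative stand-in for ansible.playbook.play_context.PlayContext.
        def __init__(self, become_method, become_exe=None):
            self.become_method = become_method
            self.become_exe = become_exe

    def become_exe(play_context):
        # Ansible 2.8 always supplies a default via the new options
        # mechanism; older releases leave the attribute unset unless a value
        # was configured. Fall back to 'sudo' so old and new behave alike.
        exe = play_context.become_exe
        if exe is None and play_context.become_method == 'sudo':
            exe = 'sudo'
        return exe

    assert become_exe(FakePlayContext('sudo')) == 'sudo'
    assert become_exe(FakePlayContext('sudo', '/opt/bin/sudo')) == '/opt/bin/sudo'
    assert become_exe(FakePlayContext('doas')) is None  # non-sudo methods unchanged
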
--- ansible_mitogen/transport_config.py | 10 +++++++++- .../integration/connection_delegation/local_action.yml | 2 +- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/ansible_mitogen/transport_config.py b/ansible_mitogen/transport_config.py index 0bdad4f3..6fcfba04 100644 --- a/ansible_mitogen/transport_config.py +++ b/ansible_mitogen/transport_config.py @@ -378,7 +378,15 @@ class PlayContextSpec(Spec): ] def become_exe(self): - return self._play_context.become_exe + # In Ansible 2.8, PlayContext.become_exe always has a default value due + # to the new options mechanism. Previously it was only set if a value + # ("somewhere") had been specified for the task. + # For consistency in the tests, here we make older Ansibles behave like + # newer Ansibles. + exe = self._play_context.become_exe + if exe is None and self._play_context.become_method == 'sudo': + exe = 'sudo' + return exe def sudo_args(self): return [ diff --git a/tests/ansible/integration/connection_delegation/local_action.yml b/tests/ansible/integration/connection_delegation/local_action.yml index 91fb9739..05fc3db9 100644 --- a/tests/ansible/integration/connection_delegation/local_action.yml +++ b/tests/ansible/integration/connection_delegation/local_action.yml @@ -27,7 +27,7 @@ 'remote_name': null, 'password': null, 'username': 'root', - 'sudo_path': null, + 'sudo_path': 'sudo', 'sudo_args': ['-H', '-S', '-n'], }, 'method': 'sudo', From 8a18d1862e5950f34d4e5187fae10645faca4a48 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:06:14 +0100 Subject: [PATCH 022/383] issue #587: "state: absent" was removed in 2.8.0 ansible/cc9c72d6f845710b24e952670b534a57f6948513 --- .../regression/issue_332_ansiblemoduleerror_first_occurrence.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/ansible/regression/issue_332_ansiblemoduleerror_first_occurrence.yml b/tests/ansible/regression/issue_332_ansiblemoduleerror_first_occurrence.yml index 0162c210..6f32af19 100644 --- a/tests/ansible/regression/issue_332_ansiblemoduleerror_first_occurrence.yml +++ b/tests/ansible/regression/issue_332_ansiblemoduleerror_first_occurrence.yml @@ -10,5 +10,4 @@ - assert: that: - - out.state == 'absent' - out.msg == 'file (/usr/bin/does-not-exist) is absent, cannot continue' From 838742f000fd6d22912626d89ed740bd0843b231 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:06:14 +0100 Subject: [PATCH 023/383] issue #587: support pausing ansible_tests if flag file exists To support debugging on Azure --- .ci/ansible_tests.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/.ci/ansible_tests.py b/.ci/ansible_tests.py index 98e45ab8..8d2d8bba 100755 --- a/.ci/ansible_tests.py +++ b/.ci/ansible_tests.py @@ -3,6 +3,7 @@ import glob import os +import signal import sys import ci_lib @@ -13,6 +14,12 @@ TESTS_DIR = os.path.join(ci_lib.GIT_ROOT, 'tests/ansible') HOSTS_DIR = os.path.join(ci_lib.TMP, 'hosts') +def pause_if_interactive(): + if os.path.exists('/tmp/interactive'): + while True: + signal.pause() + + with ci_lib.Fold('unit_tests'): os.environ['SKIP_MITOGEN'] = '1' ci_lib.run('./run_tests -v') @@ -59,5 +66,11 @@ with ci_lib.Fold('job_setup'): with ci_lib.Fold('ansible'): playbook = os.environ.get('PLAYBOOK', 'all.yml') - run('./run_ansible_playbook.py %s -i "%s" %s', - playbook, HOSTS_DIR, ' '.join(sys.argv[1:])) + try: + run('./run_ansible_playbook.py %s -i "%s" %s', + playbook, HOSTS_DIR, ' '.join(sys.argv[1:])) + except: + pause_if_interactive() + raise + 
+pause_if_interactive() From fc9dabb45cd86eb408504313274649c04afa25a3 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:06:14 +0100 Subject: [PATCH 024/383] issue #587: import spawn_reverse_shell.py script. To support debugging on Azure --- .ci/spawn_reverse_shell.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100755 .ci/spawn_reverse_shell.py diff --git a/.ci/spawn_reverse_shell.py b/.ci/spawn_reverse_shell.py new file mode 100755 index 00000000..8a6b9500 --- /dev/null +++ b/.ci/spawn_reverse_shell.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python + +""" +Allow poking around Azure while the job is running. +""" + +import os +import pty +import socket +import subprocess +import sys +import time + + +if os.fork(): + sys.exit(0) + + +def try_once(): + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect(("k3.botanicus.net", 9494)) + open('/tmp/interactive', 'w').close() + + os.dup2(s.fileno(), 0) + os.dup2(s.fileno(), 1) + os.dup2(s.fileno(), 2) + p = pty.spawn("/bin/sh") + + +while True: + try: + try_once() + except: + time.sleep(5) + continue + From be23331bbe58bd76d4b4c4f63588ecb777453ded Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:06:14 +0100 Subject: [PATCH 025/383] issue #587: ensure Azure worker has a working SSH configuration --- .ci/prep_azure.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/.ci/prep_azure.py b/.ci/prep_azure.py index 5199a87e..5e445844 100755 --- a/.ci/prep_azure.py +++ b/.ci/prep_azure.py @@ -7,6 +7,28 @@ import ci_lib batches = [] +batches += [ + [ + "sudo chown `whoami`: ~", + "chmod u=rwx,g=rx,o= ~", + + "sudo mkdir /var/run/sshd", + "sudo /etc/init.d/ssh start", + + "mkdir -p ~/.ssh", + "chmod u=rwx,go= ~/.ssh", + + "ssh-keyscan -H localhost >> ~/.ssh/known_hosts", + "chmod u=rw,go= ~/.ssh/known_hosts", + + "cat tests/data/docker/mitogen__has_sudo_pubkey.key > ~/.ssh/id_rsa", + "chmod u=rw,go= ~/.ssh/id_rsa", + + "cat tests/data/docker/mitogen__has_sudo_pubkey.key.pub > ~/.ssh/authorized_keys", + "chmod u=rw,go=r ~/.ssh/authorized_keys", + ] +] + if ci_lib.have_apt(): batches.append([ 'echo force-unsafe-io | sudo tee /etc/dpkg/dpkg.cfg.d/nosync', From 429f5e7afb5a7a2ebccbcd4c292a0b9c03e59c96 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:06:14 +0100 Subject: [PATCH 026/383] issue #587: enable spawn_reverse_shell and tidy up Azure step names --- .ci/azure-pipelines-steps.yml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/.ci/azure-pipelines-steps.yml b/.ci/azure-pipelines-steps.yml index a377d795..d2d48528 100644 --- a/.ci/azure-pipelines-steps.yml +++ b/.ci/azure-pipelines-steps.yml @@ -10,11 +10,14 @@ steps: versionSpec: '$(python.version)' architecture: 'x64' +- script: .ci/spawn_reverse_shell.py + displayName: "Spawn reverse shell" + - script: .ci/prep_azure.py - displayName: "Install requirements." + displayName: "Run prep_azure.py" - script: .ci/$(MODE)_install.py - displayName: "Install requirements." + displayName: "Run $(MODE)_install.py" - script: .ci/$(MODE)_tests.py - displayName: Run tests. 
+ displayName: "Run $(MODE)_tests.py" From 61b651bdd250dddcd9dcda9b219668e85c3f5f06 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:06:14 +0100 Subject: [PATCH 027/383] issue #587: workaround for ansible/ansible#56629 --- tests/ansible/integration/action/synchronize.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/ansible/integration/action/synchronize.yml b/tests/ansible/integration/action/synchronize.yml index 25f86d6d..3e81ce6a 100644 --- a/tests/ansible/integration/action/synchronize.yml +++ b/tests/ansible/integration/action/synchronize.yml @@ -7,6 +7,10 @@ ansible_user: mitogen__has_sudo_pubkey ansible_become_pass: has_sudo_pubkey_password ansible_ssh_private_key_file: /tmp/synchronize-action-key + + # https://github.com/ansible/ansible/issues/56629 + ansible_ssh_pass: '' + ansible_password: '' tasks: # must copy git file to set proper file mode. - copy: From 4a6eba9653316649f33e06238b5f7d6fd6cc285b Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:06:14 +0100 Subject: [PATCH 028/383] issue #587: use deadsnakes PPA Python because VSTS version is junk --- .ci/azure-pipelines-steps.yml | 22 +++++++++++++++------- .ci/prep_azure.py | 11 ++++++----- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/.ci/azure-pipelines-steps.yml b/.ci/azure-pipelines-steps.yml index d2d48528..e880eded 100644 --- a/.ci/azure-pipelines-steps.yml +++ b/.ci/azure-pipelines-steps.yml @@ -5,17 +5,25 @@ parameters: sign: false steps: -- task: UsePythonVersion@0 - inputs: - versionSpec: '$(python.version)' - architecture: 'x64' +- script: "PYTHONVERSION=$(python.version) .ci/prep_azure.py" + displayName: "Run prep_azure.py" + +# The VSTS-shipped Pythons available via UsePythonVErsion are pure garbage, +# broken symlinks, incorrect permissions and missing codecs. So we use the +# deadsnakes PPA to get sane Pythons, and setup a virtualenv to install our +# stuff into. The virtualenv can probably be removed again, but this was a +# hard-fought battle and for now I am tired of this crap. 
+- script: | + sudo ln -fs /usr/bin/python$(python.version) /usr/bin/python + /usr/bin/python -m pip install -U virtualenv setuptools wheel + /usr/bin/python -m virtualenv /tmp/venv -p /usr/bin/python$(python.version) + echo "##vso[task.prependpath]/tmp/venv/bin" + + displayName: activate venv - script: .ci/spawn_reverse_shell.py displayName: "Spawn reverse shell" -- script: .ci/prep_azure.py - displayName: "Run prep_azure.py" - - script: .ci/$(MODE)_install.py displayName: "Run $(MODE)_install.py" diff --git a/.ci/prep_azure.py b/.ci/prep_azure.py index 5e445844..296355aa 100755 --- a/.ci/prep_azure.py +++ b/.ci/prep_azure.py @@ -34,14 +34,15 @@ if ci_lib.have_apt(): 'echo force-unsafe-io | sudo tee /etc/dpkg/dpkg.cfg.d/nosync', 'sudo add-apt-repository ppa:deadsnakes/ppa', 'sudo apt-get update', - 'sudo apt-get -y install python2.6 python2.6-dev libsasl2-dev libldap2-dev', + 'sudo apt-get -y install ' + 'python{pv} ' + 'python{pv}-dev ' + 'libsasl2-dev ' + 'libldap2-dev ' + .format(pv=os.environ['PYTHONVERSION']) ]) -#batches.append([ - #'pip install -r dev_requirements.txt', -#]) - if ci_lib.have_docker(): batches.extend( ['docker pull %s' % (ci_lib.image_for_distro(distro),)] From ea718081d822651fc80171129d9e3d39c12017d2 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:14:31 +0100 Subject: [PATCH 029/383] issue #587: Add 2.8.0/Py2.7 job to Azure --- .ci/azure-pipelines.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.ci/azure-pipelines.yml b/.ci/azure-pipelines.yml index dc5f7162..588a3b8f 100644 --- a/.ci/azure-pipelines.yml +++ b/.ci/azure-pipelines.yml @@ -87,3 +87,9 @@ jobs: #VER: 2.6.2 #DISTROS: debian #STRATEGY: linear + + Vanilla_280_27: + python.version: '2.7' + MODE: ansible + VER: 2.8.0 + DISTROS: debian From 45d88b1f3feafe90cdf0c743c87aa49aebd72720 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 26 May 2019 12:14:31 +0100 Subject: [PATCH 030/383] issue #587: attempt to fix Mac Azure job --- .ci/prep_azure.py | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/.ci/prep_azure.py b/.ci/prep_azure.py index 296355aa..a7d8bc67 100755 --- a/.ci/prep_azure.py +++ b/.ci/prep_azure.py @@ -7,27 +7,28 @@ import ci_lib batches = [] -batches += [ - [ - "sudo chown `whoami`: ~", - "chmod u=rwx,g=rx,o= ~", +if os.uname()[0] == 'Linux': + batches += [ + [ + "sudo chown `whoami`: ~", + "chmod u=rwx,g=rx,o= ~", - "sudo mkdir /var/run/sshd", - "sudo /etc/init.d/ssh start", + "sudo mkdir /var/run/sshd", + "sudo /etc/init.d/ssh start", - "mkdir -p ~/.ssh", - "chmod u=rwx,go= ~/.ssh", + "mkdir -p ~/.ssh", + "chmod u=rwx,go= ~/.ssh", - "ssh-keyscan -H localhost >> ~/.ssh/known_hosts", - "chmod u=rw,go= ~/.ssh/known_hosts", + "ssh-keyscan -H localhost >> ~/.ssh/known_hosts", + "chmod u=rw,go= ~/.ssh/known_hosts", - "cat tests/data/docker/mitogen__has_sudo_pubkey.key > ~/.ssh/id_rsa", - "chmod u=rw,go= ~/.ssh/id_rsa", + "cat tests/data/docker/mitogen__has_sudo_pubkey.key > ~/.ssh/id_rsa", + "chmod u=rw,go= ~/.ssh/id_rsa", - "cat tests/data/docker/mitogen__has_sudo_pubkey.key.pub > ~/.ssh/authorized_keys", - "chmod u=rw,go=r ~/.ssh/authorized_keys", + "cat tests/data/docker/mitogen__has_sudo_pubkey.key.pub > ~/.ssh/authorized_keys", + "chmod u=rw,go=r ~/.ssh/authorized_keys", + ] ] -] if ci_lib.have_apt(): batches.append([ From 8d766d3d329b10283c2579cad666293f521f1c43 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 27 May 2019 22:27:50 +0100 Subject: [PATCH 031/383] issue #587: disable SSH key setup, it 
breaks unit tests --- .ci/prep_azure.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/prep_azure.py b/.ci/prep_azure.py index a7d8bc67..344564e8 100755 --- a/.ci/prep_azure.py +++ b/.ci/prep_azure.py @@ -7,7 +7,7 @@ import ci_lib batches = [] -if os.uname()[0] == 'Linux': +if 0 and os.uname()[0] == 'Linux': batches += [ [ "sudo chown `whoami`: ~", From 2c15c1d4a5309ebc717a0f40b8f6b338597fd2e1 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 27 May 2019 22:27:50 +0100 Subject: [PATCH 032/383] issue #587: docs: update Changelog. --- docs/changelog.rst | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index 0b20a852..8e1ddc1a 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -21,6 +21,24 @@ v0.2.8 (unreleased) To avail of fixes in an unreleased version, please download a ZIP file `directly from GitHub `_. +Enhancements +^^^^^^^^^^^^ + +* `#587 `_: partial support for + Ansible 2.8 is now available. This implementation does not yet support the + new `become plugins + `_ + functionality, which will be addressed in a future release. + + +Thanks! +~~~~~~~ + +Mitogen would not be possible without the support of users. A huge thanks for +bug reports, testing, features and fixes in this release contributed by +`Orion Poplawski `_, and +`Ulrich Schreiner `_. + v0.2.7 (2019-05-19) ------------------- From 167d0eff32bcfc5315f7ba0a0898bb7615ed7e03 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 27 May 2019 23:57:47 +0100 Subject: [PATCH 033/383] azure: try enabling Ansible 2.8/Py3.5 job --- .ci/azure-pipelines.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.ci/azure-pipelines.yml b/.ci/azure-pipelines.yml index 588a3b8f..ef344bba 100644 --- a/.ci/azure-pipelines.yml +++ b/.ci/azure-pipelines.yml @@ -88,8 +88,14 @@ jobs: #DISTROS: debian #STRATEGY: linear - Vanilla_280_27: + Ansible_280_27: python.version: '2.7' MODE: ansible VER: 2.8.0 DISTROS: debian + + Ansible_280_35: + python.version: '3.5' + MODE: ansible + VER: 2.8.0 + DISTROS: debian From 1a92995a245e1decdda2cc1e0b2775a6a321eeec Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 30 May 2019 06:00:54 +0100 Subject: [PATCH 034/383] issue #590: include nasty workaround for sys.modules junk --- ansible_mitogen/runner.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ansible_mitogen/runner.py b/ansible_mitogen/runner.py index 30c36be7..a8dae8b1 100644 --- a/ansible_mitogen/runner.py +++ b/ansible_mitogen/runner.py @@ -760,7 +760,12 @@ class NewStyleRunner(ScriptRunner): for fullname, _, _ in self.module_map['custom']: mitogen.core.import_module(fullname) for fullname in self.module_map['builtin']: - mitogen.core.import_module(fullname) + try: + mitogen.core.import_module(fullname) + except ImportError: + # TODO: this is a huge hack to work around issue #590. 
+ if fullname != 'ansible.module_utils.distro._distro': + raise def _setup_excepthook(self): """ From 8f940e2ccb7df4aafdd9ecf815fceca3aef40ec2 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 2 Jun 2019 13:08:44 +0100 Subject: [PATCH 035/383] issue #590: teach importer to handle self-replacing modules --- docs/changelog.rst | 6 ++++++ mitogen/core.py | 5 ++++- tests/data/simple_pkg/imports_replaces_self.py | 6 ++++++ tests/data/simple_pkg/replaces_self.py | 4 ++++ tests/importer_test.py | 9 +++++++++ 5 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 tests/data/simple_pkg/imports_replaces_self.py create mode 100644 tests/data/simple_pkg/replaces_self.py diff --git a/docs/changelog.rst b/docs/changelog.rst index 8e1ddc1a..4a3a5612 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -30,6 +30,12 @@ Enhancements `_ functionality, which will be addressed in a future release. +Fixes +^^^^^ + +* `#590 `_: the importer can handle + modules that replace themselves in :mod:`sys.modules` during import. + Thanks! ~~~~~~~ diff --git a/mitogen/core.py b/mitogen/core.py index ff77bba9..0d88d7f0 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -1355,7 +1355,10 @@ class Importer(object): exec(code, vars(mod)) else: exec('exec code in vars(mod)') - return mod + + # #590: if a module replaces itself in sys.modules during import, below + # is necessary. This matches PyImport_ExecCodeModuleEx() + return sys.modules.get(fullname, mod) def get_filename(self, fullname): if fullname in self._cache: diff --git a/tests/data/simple_pkg/imports_replaces_self.py b/tests/data/simple_pkg/imports_replaces_self.py new file mode 100644 index 00000000..b1b43813 --- /dev/null +++ b/tests/data/simple_pkg/imports_replaces_self.py @@ -0,0 +1,6 @@ +# issue #590: this module imports a module that replaces itself in sys.modules +# during initialization. +import simple_pkg.replaces_self + +def subtract_one(n): + return simple_pkg.replaces_self.subtract_one(n) diff --git a/tests/data/simple_pkg/replaces_self.py b/tests/data/simple_pkg/replaces_self.py new file mode 100644 index 00000000..5d853ebf --- /dev/null +++ b/tests/data/simple_pkg/replaces_self.py @@ -0,0 +1,4 @@ +# issue #590: this module replaces itself in sys.modules during initialization. +import sys +import simple_pkg.b +sys.modules[__name__] = simple_pkg.b diff --git a/tests/importer_test.py b/tests/importer_test.py index fc6f4bd6..c796f7d0 100644 --- a/tests/importer_test.py +++ b/tests/importer_test.py @@ -12,6 +12,7 @@ import mitogen.utils from mitogen.core import b import testlib +import simple_pkg.imports_replaces_self class ImporterMixin(testlib.RouterMixin): @@ -214,5 +215,13 @@ class Python24LineCacheTest(testlib.TestCase): pass +class SelfReplacingModuleTest(testlib.RouterMixin, testlib.TestCase): + # issue #590 + def test_importer_handles_self_replacement(self): + c = self.router.local() + self.assertEquals(0, + c.call(simple_pkg.imports_replaces_self.subtract_one, 1)) + + if __name__ == '__main__': unittest2.main() From 7a5c436a39b66e197c964656e30733f7d4ace3b2 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 2 Jun 2019 13:08:44 +0100 Subject: [PATCH 036/383] issue #590: Ansible test for module_utils.distro use. 
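The new test simply checks that ``ansible.module_utils.distro`` imports and returns platform facts. A rough local equivalent of the assertion, assuming Ansible 2.8 or newer is importable on the controller (purely illustrative, not part of the test suite)::

    # Requires ansible >= 2.8 on the local interpreter.
    from ansible.module_utils import distro

    info = distro.info()
    assert 'id' in info, info   # same condition the regression playbook asserts
    print(info['id'])
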
--- .../lib/modules/custom_python_uses_distro.py | 13 +++++++++++++ .../regression/issue_590__sys_modules_crap.yml | 9 +++++++++ 2 files changed, 22 insertions(+) create mode 100644 tests/ansible/lib/modules/custom_python_uses_distro.py create mode 100644 tests/ansible/regression/issue_590__sys_modules_crap.yml diff --git a/tests/ansible/lib/modules/custom_python_uses_distro.py b/tests/ansible/lib/modules/custom_python_uses_distro.py new file mode 100644 index 00000000..c5ffbb84 --- /dev/null +++ b/tests/ansible/lib/modules/custom_python_uses_distro.py @@ -0,0 +1,13 @@ +#!/usr/bin/python +# issue #590: I am an Ansible new-style Python module that tries to use +# ansible.module_utils.distro. + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils import distro + +def main(): + module = AnsibleModule(argument_spec={}) + module.exit_json(info=distro.info()) + +if __name__ == '__main__': + main() diff --git a/tests/ansible/regression/issue_590__sys_modules_crap.yml b/tests/ansible/regression/issue_590__sys_modules_crap.yml new file mode 100644 index 00000000..83a9a286 --- /dev/null +++ b/tests/ansible/regression/issue_590__sys_modules_crap.yml @@ -0,0 +1,9 @@ + +- hosts: test-targets + tasks: + - custom_python_uses_distro: + register: out + + - assert: + that: + - "'id' in out.info" From 875ff5c06023e56385d0e47bbfbf215329ea21a4 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 2 Jun 2019 13:08:44 +0100 Subject: [PATCH 037/383] issue #590: refactor ModuleFinder and teach it a new special case. Now it's possible to find both packages and modules when the sys.modules[...] state for the package/module is junk. Previously only modules were possible. This also refactors things to make writing better tests for all these cases much simpler. --- mitogen/master.py | 235 ++++++++++++++++++++++++++++------------------ 1 file changed, 146 insertions(+), 89 deletions(-) diff --git a/mitogen/master.py b/mitogen/master.py index 1396f4e1..7188ba14 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -36,6 +36,7 @@ contexts. """ import dis +import errno import imp import inspect import itertools @@ -142,6 +143,41 @@ def get_child_modules(path): return [to_text(name) for _, name, _ in it] +def _looks_like_script(path): + """ + Return :data:`True` if the (possibly extensionless) file at `path` + resembles a Python script. For now we simply verify the file contains + ASCII text. + """ + try: + fp = open(path, 'rb') + except IOError: + e = sys.exc_info()[1] + if e.args[0] == errno.EISDIR: + return False + raise + + try: + sample = fp.read(512).decode('latin-1') + return not set(sample).difference(string.printable) + finally: + fp.close() + + +def _py_filename(path): + if not path: + return None + + if path[-4:] in ('.pyc', '.pyo'): + path = path.rstrip('co') + + if path.endswith('.py'): + return path + + if os.path.exists(path) and _looks_like_script(path): + return path + + def _get_core_source(): """ Master version of parent.get_core_source(). @@ -368,56 +404,22 @@ class LogForwarder(object): return 'LogForwarder(%r)' % (self._router,) -class ModuleFinder(object): - """ - Given the name of a loaded module, make a best-effort attempt at finding - related modules likely needed by a child context requesting the original - module. - """ - def __init__(self): - #: Import machinery is expensive, keep :py:meth`:get_module_source` - #: results around. - self._found_cache = {} - - #: Avoid repeated dependency scanning, which is expensive. 
- self._related_cache = {} - +class FinderMethod(object): def __repr__(self): - return 'ModuleFinder()' + return '%s()' % (type(self).__name__,) - def _looks_like_script(self, path): - """ - Return :data:`True` if the (possibly extensionless) file at `path` - resembles a Python script. For now we simply verify the file contains - ASCII text. - """ - fp = open(path, 'rb') - try: - sample = fp.read(512).decode('latin-1') - return not set(sample).difference(string.printable) - finally: - fp.close() + def find(self, fullname): + pass - def _py_filename(self, path): - if not path: - return None - if path[-4:] in ('.pyc', '.pyo'): - path = path.rstrip('co') - - if path.endswith('.py'): - return path - - if os.path.exists(path) and self._looks_like_script(path): - return path - - def _get_main_module_defective_python_3x(self, fullname): - """ - Recent versions of Python 3.x introduced an incomplete notion of - importer specs, and in doing so created permanent asymmetry in the - :mod:`pkgutil` interface handling for the `__main__` module. Therefore - we must handle `__main__` specially. - """ +class DefectivePython3xMainMethod(FinderMethod): + """ + Recent versions of Python 3.x introduced an incomplete notion of + importer specs, and in doing so created permanent asymmetry in the + :mod:`pkgutil` interface handling for the `__main__` module. Therefore + we must handle `__main__` specially. + """ + def find(self, fullname): if fullname != '__main__': return None @@ -426,7 +428,7 @@ class ModuleFinder(object): return None path = getattr(mod, '__file__', None) - if not (os.path.exists(path) and self._looks_like_script(path)): + if not (os.path.exists(path) and _looks_like_script(path)): return None fp = open(path, 'rb') @@ -437,11 +439,13 @@ class ModuleFinder(object): return path, source, False - def _get_module_via_pkgutil(self, fullname): - """ - Attempt to fetch source code via pkgutil. In an ideal world, this would - be the only required implementation of get_module(). - """ + +class PkgutilMethod(FinderMethod): + """ + Attempt to fetch source code via pkgutil. In an ideal world, this would + be the only required implementation of get_module(). + """ + def find(self, fullname): try: # Pre-'import spec' this returned None, in Python3.6 it raises # ImportError. @@ -458,7 +462,7 @@ class ModuleFinder(object): return try: - path = self._py_filename(loader.get_filename(fullname)) + path = _py_filename(loader.get_filename(fullname)) source = loader.get_source(fullname) is_pkg = loader.is_package(fullname) except (AttributeError, ImportError): @@ -484,19 +488,27 @@ class ModuleFinder(object): return path, source, is_pkg - def _get_module_via_sys_modules(self, fullname): - """ - Attempt to fetch source code via sys.modules. This is specifically to - support __main__, but it may catch a few more cases. - """ + +class SysModulesMethod(FinderMethod): + """ + Attempt to fetch source code via sys.modules. This is specifically to + support __main__, but it may catch a few more cases. 
+ """ + def find(self, fullname): module = sys.modules.get(fullname) LOG.debug('_get_module_via_sys_modules(%r) -> %r', fullname, module) + if getattr(module, '__name__', None) != fullname: + LOG.debug('sys.modules[%r].__name__ does not match %r, assuming ' + 'this is a hacky module alias and ignoring it', + fullname, fullname) + return + if not isinstance(module, types.ModuleType): LOG.debug('sys.modules[%r] absent or not a regular module', fullname) return - path = self._py_filename(getattr(module, '__file__', '')) + path = _py_filename(getattr(module, '__file__', '')) if not path: return @@ -517,12 +529,19 @@ class ModuleFinder(object): return path, source, is_pkg - def _get_module_via_parent_enumeration(self, fullname): - """ - Attempt to fetch source code by examining the module's (hopefully less - insane) parent package. Required for older versions of - ansible.compat.six and plumbum.colors. - """ + +class ParentEnumerationMethod(FinderMethod): + """ + Attempt to fetch source code by examining the module's (hopefully less + insane) parent package. Required for older versions of + ansible.compat.six and plumbum.colors, and Ansible 2.8 + ansible.module_utils.distro. + + For cases like module_utils.distro, this must handle cases where a package + transmuted itself into a totally unrelated module during import and vice + versa. + """ + def find(self, fullname): if fullname not in sys.modules: # Don't attempt this unless a module really exists in sys.modules, # else we could return junk. @@ -531,30 +550,68 @@ class ModuleFinder(object): pkgname, _, modname = str_rpartition(to_text(fullname), u'.') pkg = sys.modules.get(pkgname) if pkg is None or not hasattr(pkg, '__file__'): + LOG.debug('%r: %r is not a package or lacks __file__ attribute', + self, pkgname) return - pkg_path = os.path.dirname(pkg.__file__) + pkg_path = [os.path.dirname(pkg.__file__)] try: - fp, path, ext = imp.find_module(modname, [pkg_path]) - try: - path = self._py_filename(path) - if not path: - fp.close() - return - - source = fp.read() - finally: - if fp: - fp.close() - - if isinstance(source, mitogen.core.UnicodeType): - # get_source() returns "string" according to PEP-302, which was - # reinterpreted for Python 3 to mean a Unicode string. - source = source.encode('utf-8') - return path, source, False + fp, path, (suffix, _, kind) = imp.find_module(modname, pkg_path) except ImportError: e = sys.exc_info()[1] - LOG.debug('imp.find_module(%r, %r) -> %s', modname, [pkg_path], e) + LOG.debug('%r: imp.find_module(%r, %r) -> %s', + self, modname, [pkg_path], e) + return None + + if kind == imp.PKG_DIRECTORY: + return self._found_package(fullname, path) + else: + return self._found_module(fullname, path, fp) + + def _found_package(self, fullname, path): + path = os.path.join(path, '__init__.py') + LOG.debug('%r: %r is PKG_DIRECTORY: %r', self, fullname, path) + return self._found_module( + fullname=fullname, + path=path, + fp=open(path, 'rb'), + is_pkg=True, + ) + + def _found_module(self, fullname, path, fp, is_pkg=False): + try: + path = _py_filename(path) + if not path: + return + + source = fp.read() + finally: + if fp: + fp.close() + + if isinstance(source, mitogen.core.UnicodeType): + # get_source() returns "string" according to PEP-302, which was + # reinterpreted for Python 3 to mean a Unicode string. 
+ source = source.encode('utf-8') + return path, source, is_pkg + + +class ModuleFinder(object): + """ + Given the name of a loaded module, make a best-effort attempt at finding + related modules likely needed by a child context requesting the original + module. + """ + def __init__(self): + #: Import machinery is expensive, keep :py:meth`:get_module_source` + #: results around. + self._found_cache = {} + + #: Avoid repeated dependency scanning, which is expensive. + self._related_cache = {} + + def __repr__(self): + return 'ModuleFinder()' def add_source_override(self, fullname, path, source, is_pkg): """ @@ -576,10 +633,10 @@ class ModuleFinder(object): self._found_cache[fullname] = (path, source, is_pkg) get_module_methods = [ - _get_main_module_defective_python_3x, - _get_module_via_pkgutil, - _get_module_via_sys_modules, - _get_module_via_parent_enumeration, + DefectivePython3xMainMethod(), + PkgutilMethod(), + SysModulesMethod(), + ParentEnumerationMethod(), ] def get_module_source(self, fullname): @@ -595,7 +652,7 @@ class ModuleFinder(object): return tup for method in self.get_module_methods: - tup = method(self, fullname) + tup = method.find(fullname) if tup: #LOG.debug('%r returned %r', method, tup) break From c1db0d3858424ccaaab2e9dd0ee14fcbb4b91558 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 2 Jun 2019 13:08:44 +0100 Subject: [PATCH 038/383] issue #590: move example modules to module_finder/, fix/add tests --- .../module_finder_testmod/__init__.py | 0 .../module_finder_testmod/empty_mod.py | 0 .../module_finder_testmod/regular_mod.py | 0 .../sibling_dep_mod_abs_import.py | 0 .../sibling_dep_mod_py2_import.py | 0 .../sibling_dep_mod_rel_import.py | 0 .../pkg_like_plumbum/__init__.py | 0 .../pkg_like_plumbum/colors.py | 0 .../simple_pkg/__init__.py | 0 .../data/{ => module_finder}/simple_pkg/a.py | 0 .../data/{ => module_finder}/simple_pkg/b.py | 0 .../simple_pkg/imports_replaces_self.py | 0 .../{ => module_finder}/simple_pkg/ping.py | 0 .../simple_pkg/replaces_self.py | 0 .../six_brokenpkg/__init__.py | 0 .../{ => module_finder}/six_brokenpkg/_six.py | 0 .../{ => module_finder}/webproject/manage.py | 0 .../webproject/serve_django_app.py | 0 .../webproject/webapp/__init__.py | 0 .../webproject/webapp/admin.py | 0 .../webproject/webapp/apps.py | 0 .../webproject/webapp/migrations/__init__.py | 0 .../webproject/webapp/models.py | 0 .../webproject/webapp/tests.py | 0 .../webproject/webapp/views.py | 0 .../webproject/webproject/__init__.py | 0 .../webproject/webproject/settings.py | 0 .../webproject/webproject/urls.py | 0 .../webproject/webproject/wsgi.py | 0 tests/module_finder_test.py | 54 ++++++++++++------- tests/testlib.py | 1 - 31 files changed, 36 insertions(+), 19 deletions(-) rename tests/data/{ => module_finder}/module_finder_testmod/__init__.py (100%) rename tests/data/{ => module_finder}/module_finder_testmod/empty_mod.py (100%) rename tests/data/{ => module_finder}/module_finder_testmod/regular_mod.py (100%) rename tests/data/{ => module_finder}/module_finder_testmod/sibling_dep_mod_abs_import.py (100%) rename tests/data/{ => module_finder}/module_finder_testmod/sibling_dep_mod_py2_import.py (100%) rename tests/data/{ => module_finder}/module_finder_testmod/sibling_dep_mod_rel_import.py (100%) rename tests/data/{ => module_finder}/pkg_like_plumbum/__init__.py (100%) rename tests/data/{ => module_finder}/pkg_like_plumbum/colors.py (100%) rename tests/data/{ => module_finder}/simple_pkg/__init__.py (100%) rename tests/data/{ => module_finder}/simple_pkg/a.py 
(100%) rename tests/data/{ => module_finder}/simple_pkg/b.py (100%) rename tests/data/{ => module_finder}/simple_pkg/imports_replaces_self.py (100%) rename tests/data/{ => module_finder}/simple_pkg/ping.py (100%) rename tests/data/{ => module_finder}/simple_pkg/replaces_self.py (100%) rename tests/data/{ => module_finder}/six_brokenpkg/__init__.py (100%) rename tests/data/{ => module_finder}/six_brokenpkg/_six.py (100%) rename tests/data/{ => module_finder}/webproject/manage.py (100%) rename tests/data/{ => module_finder}/webproject/serve_django_app.py (100%) rename tests/data/{ => module_finder}/webproject/webapp/__init__.py (100%) rename tests/data/{ => module_finder}/webproject/webapp/admin.py (100%) rename tests/data/{ => module_finder}/webproject/webapp/apps.py (100%) rename tests/data/{ => module_finder}/webproject/webapp/migrations/__init__.py (100%) rename tests/data/{ => module_finder}/webproject/webapp/models.py (100%) rename tests/data/{ => module_finder}/webproject/webapp/tests.py (100%) rename tests/data/{ => module_finder}/webproject/webapp/views.py (100%) rename tests/data/{ => module_finder}/webproject/webproject/__init__.py (100%) rename tests/data/{ => module_finder}/webproject/webproject/settings.py (100%) rename tests/data/{ => module_finder}/webproject/webproject/urls.py (100%) rename tests/data/{ => module_finder}/webproject/webproject/wsgi.py (100%) diff --git a/tests/data/module_finder_testmod/__init__.py b/tests/data/module_finder/module_finder_testmod/__init__.py similarity index 100% rename from tests/data/module_finder_testmod/__init__.py rename to tests/data/module_finder/module_finder_testmod/__init__.py diff --git a/tests/data/module_finder_testmod/empty_mod.py b/tests/data/module_finder/module_finder_testmod/empty_mod.py similarity index 100% rename from tests/data/module_finder_testmod/empty_mod.py rename to tests/data/module_finder/module_finder_testmod/empty_mod.py diff --git a/tests/data/module_finder_testmod/regular_mod.py b/tests/data/module_finder/module_finder_testmod/regular_mod.py similarity index 100% rename from tests/data/module_finder_testmod/regular_mod.py rename to tests/data/module_finder/module_finder_testmod/regular_mod.py diff --git a/tests/data/module_finder_testmod/sibling_dep_mod_abs_import.py b/tests/data/module_finder/module_finder_testmod/sibling_dep_mod_abs_import.py similarity index 100% rename from tests/data/module_finder_testmod/sibling_dep_mod_abs_import.py rename to tests/data/module_finder/module_finder_testmod/sibling_dep_mod_abs_import.py diff --git a/tests/data/module_finder_testmod/sibling_dep_mod_py2_import.py b/tests/data/module_finder/module_finder_testmod/sibling_dep_mod_py2_import.py similarity index 100% rename from tests/data/module_finder_testmod/sibling_dep_mod_py2_import.py rename to tests/data/module_finder/module_finder_testmod/sibling_dep_mod_py2_import.py diff --git a/tests/data/module_finder_testmod/sibling_dep_mod_rel_import.py b/tests/data/module_finder/module_finder_testmod/sibling_dep_mod_rel_import.py similarity index 100% rename from tests/data/module_finder_testmod/sibling_dep_mod_rel_import.py rename to tests/data/module_finder/module_finder_testmod/sibling_dep_mod_rel_import.py diff --git a/tests/data/pkg_like_plumbum/__init__.py b/tests/data/module_finder/pkg_like_plumbum/__init__.py similarity index 100% rename from tests/data/pkg_like_plumbum/__init__.py rename to tests/data/module_finder/pkg_like_plumbum/__init__.py diff --git a/tests/data/pkg_like_plumbum/colors.py 
b/tests/data/module_finder/pkg_like_plumbum/colors.py similarity index 100% rename from tests/data/pkg_like_plumbum/colors.py rename to tests/data/module_finder/pkg_like_plumbum/colors.py diff --git a/tests/data/simple_pkg/__init__.py b/tests/data/module_finder/simple_pkg/__init__.py similarity index 100% rename from tests/data/simple_pkg/__init__.py rename to tests/data/module_finder/simple_pkg/__init__.py diff --git a/tests/data/simple_pkg/a.py b/tests/data/module_finder/simple_pkg/a.py similarity index 100% rename from tests/data/simple_pkg/a.py rename to tests/data/module_finder/simple_pkg/a.py diff --git a/tests/data/simple_pkg/b.py b/tests/data/module_finder/simple_pkg/b.py similarity index 100% rename from tests/data/simple_pkg/b.py rename to tests/data/module_finder/simple_pkg/b.py diff --git a/tests/data/simple_pkg/imports_replaces_self.py b/tests/data/module_finder/simple_pkg/imports_replaces_self.py similarity index 100% rename from tests/data/simple_pkg/imports_replaces_self.py rename to tests/data/module_finder/simple_pkg/imports_replaces_self.py diff --git a/tests/data/simple_pkg/ping.py b/tests/data/module_finder/simple_pkg/ping.py similarity index 100% rename from tests/data/simple_pkg/ping.py rename to tests/data/module_finder/simple_pkg/ping.py diff --git a/tests/data/simple_pkg/replaces_self.py b/tests/data/module_finder/simple_pkg/replaces_self.py similarity index 100% rename from tests/data/simple_pkg/replaces_self.py rename to tests/data/module_finder/simple_pkg/replaces_self.py diff --git a/tests/data/six_brokenpkg/__init__.py b/tests/data/module_finder/six_brokenpkg/__init__.py similarity index 100% rename from tests/data/six_brokenpkg/__init__.py rename to tests/data/module_finder/six_brokenpkg/__init__.py diff --git a/tests/data/six_brokenpkg/_six.py b/tests/data/module_finder/six_brokenpkg/_six.py similarity index 100% rename from tests/data/six_brokenpkg/_six.py rename to tests/data/module_finder/six_brokenpkg/_six.py diff --git a/tests/data/webproject/manage.py b/tests/data/module_finder/webproject/manage.py similarity index 100% rename from tests/data/webproject/manage.py rename to tests/data/module_finder/webproject/manage.py diff --git a/tests/data/webproject/serve_django_app.py b/tests/data/module_finder/webproject/serve_django_app.py similarity index 100% rename from tests/data/webproject/serve_django_app.py rename to tests/data/module_finder/webproject/serve_django_app.py diff --git a/tests/data/webproject/webapp/__init__.py b/tests/data/module_finder/webproject/webapp/__init__.py similarity index 100% rename from tests/data/webproject/webapp/__init__.py rename to tests/data/module_finder/webproject/webapp/__init__.py diff --git a/tests/data/webproject/webapp/admin.py b/tests/data/module_finder/webproject/webapp/admin.py similarity index 100% rename from tests/data/webproject/webapp/admin.py rename to tests/data/module_finder/webproject/webapp/admin.py diff --git a/tests/data/webproject/webapp/apps.py b/tests/data/module_finder/webproject/webapp/apps.py similarity index 100% rename from tests/data/webproject/webapp/apps.py rename to tests/data/module_finder/webproject/webapp/apps.py diff --git a/tests/data/webproject/webapp/migrations/__init__.py b/tests/data/module_finder/webproject/webapp/migrations/__init__.py similarity index 100% rename from tests/data/webproject/webapp/migrations/__init__.py rename to tests/data/module_finder/webproject/webapp/migrations/__init__.py diff --git a/tests/data/webproject/webapp/models.py 
b/tests/data/module_finder/webproject/webapp/models.py similarity index 100% rename from tests/data/webproject/webapp/models.py rename to tests/data/module_finder/webproject/webapp/models.py diff --git a/tests/data/webproject/webapp/tests.py b/tests/data/module_finder/webproject/webapp/tests.py similarity index 100% rename from tests/data/webproject/webapp/tests.py rename to tests/data/module_finder/webproject/webapp/tests.py diff --git a/tests/data/webproject/webapp/views.py b/tests/data/module_finder/webproject/webapp/views.py similarity index 100% rename from tests/data/webproject/webapp/views.py rename to tests/data/module_finder/webproject/webapp/views.py diff --git a/tests/data/webproject/webproject/__init__.py b/tests/data/module_finder/webproject/webproject/__init__.py similarity index 100% rename from tests/data/webproject/webproject/__init__.py rename to tests/data/module_finder/webproject/webproject/__init__.py diff --git a/tests/data/webproject/webproject/settings.py b/tests/data/module_finder/webproject/webproject/settings.py similarity index 100% rename from tests/data/webproject/webproject/settings.py rename to tests/data/module_finder/webproject/webproject/settings.py diff --git a/tests/data/webproject/webproject/urls.py b/tests/data/module_finder/webproject/webproject/urls.py similarity index 100% rename from tests/data/webproject/webproject/urls.py rename to tests/data/module_finder/webproject/webproject/urls.py diff --git a/tests/data/webproject/webproject/wsgi.py b/tests/data/module_finder/webproject/webproject/wsgi.py similarity index 100% rename from tests/data/webproject/webproject/wsgi.py rename to tests/data/module_finder/webproject/webproject/wsgi.py diff --git a/tests/module_finder_test.py b/tests/module_finder_test.py index 409adc6d..93755aa0 100644 --- a/tests/module_finder_test.py +++ b/tests/module_finder_test.py @@ -8,6 +8,9 @@ import mitogen.master import testlib +MODS_PATH = testlib.data_path('module_finder') +sys.path.append(MODS_PATH) + class ConstructorTest(testlib.TestCase): klass = mitogen.master.ModuleFinder @@ -51,10 +54,10 @@ class IsStdlibNameTest(testlib.TestCase): class GetMainModuleDefectivePython3x(testlib.TestCase): - klass = mitogen.master.ModuleFinder + klass = mitogen.master.DefectivePython3xMainMethod def call(self, fullname): - return self.klass()._get_main_module_defective_python_3x(fullname) + return self.klass().find(fullname) def test_builtin(self): self.assertEquals(None, self.call('sys')) @@ -77,23 +80,23 @@ class GetMainModuleDefectivePython3x(testlib.TestCase): self.assertFalse(is_pkg) -class GetModuleViaPkgutilTest(testlib.TestCase): - klass = mitogen.master.ModuleFinder +class PkgutilMethodTest(testlib.TestCase): + klass = mitogen.master.PkgutilMethod def call(self, fullname): - return self.klass()._get_module_via_pkgutil(fullname) + return self.klass().find(fullname) def test_empty_source_pkg(self): path, src, is_pkg = self.call('module_finder_testmod') self.assertEquals(path, - testlib.data_path('module_finder_testmod/__init__.py')) + os.path.join(MODS_PATH, 'module_finder_testmod/__init__.py')) self.assertEquals(mitogen.core.b(''), src) self.assertTrue(is_pkg) def test_empty_source_module(self): path, src, is_pkg = self.call('module_finder_testmod.empty_mod') self.assertEquals(path, - testlib.data_path('module_finder_testmod/empty_mod.py')) + os.path.join(MODS_PATH, 'module_finder_testmod/empty_mod.py')) self.assertEquals(mitogen.core.b(''), src) self.assertFalse(is_pkg) @@ -101,17 +104,17 @@ class 
GetModuleViaPkgutilTest(testlib.TestCase): from module_finder_testmod import regular_mod path, src, is_pkg = self.call('module_finder_testmod.regular_mod') self.assertEquals(path, - testlib.data_path('module_finder_testmod/regular_mod.py')) + os.path.join(MODS_PATH, 'module_finder_testmod/regular_mod.py')) self.assertEquals(mitogen.core.to_text(src), inspect.getsource(regular_mod)) self.assertFalse(is_pkg) -class GetModuleViaSysModulesTest(testlib.TestCase): - klass = mitogen.master.ModuleFinder +class SysModulesMethodTest(testlib.TestCase): + klass = mitogen.master.SysModulesMethod def call(self, fullname): - return self.klass()._get_module_via_sys_modules(fullname) + return self.klass().find(fullname) def test_main(self): import __main__ @@ -133,10 +136,10 @@ class GetModuleViaSysModulesTest(testlib.TestCase): class GetModuleViaParentEnumerationTest(testlib.TestCase): - klass = mitogen.master.ModuleFinder + klass = mitogen.master.ParentEnumerationMethod def call(self, fullname): - return self.klass()._get_module_via_parent_enumeration(fullname) + return self.klass().find(fullname) def test_main_fails(self): import __main__ @@ -157,13 +160,28 @@ class GetModuleViaParentEnumerationTest(testlib.TestCase): # plumbum has been eating too many rainbow-colored pills import pkg_like_plumbum.colors path, src, is_pkg = self.call('pkg_like_plumbum.colors') - self.assertEquals(path, - testlib.data_path('pkg_like_plumbum/colors.py')) + modpath = os.path.join(MODS_PATH, 'pkg_like_plumbum/colors.py') + self.assertEquals(path, modpath) - s = open(testlib.data_path('pkg_like_plumbum/colors.py'), 'rb').read() - self.assertEquals(src, s) + self.assertEquals(src, open(modpath, 'rb').read()) self.assertFalse(is_pkg) + def test_ansible_module_utils_distro_succeeds(self): + # #590: a package that turns itself into a module. + import pkg_like_ansible.module_utils.distro as d + self.assertEquals(d.I_AM, "the module that replaced the package") + self.assertEquals( + sys.modules['pkg_like_ansible.module_utils.distro'].__name__, + 'pkg_like_ansible.module_utils.distro._distro' + ) + + path, src, is_pkg = self.call('pkg_like_ansible.module_utils.distro') + modpath = os.path.join(MODS_PATH, + 'pkg_like_ansible/module_utils/distro/__init__.py') + self.assertEquals(path, modpath) + self.assertEquals(src, open(modpath, 'rb').read()) + self.assertEquals(is_pkg, True) + class ResolveRelPathTest(testlib.TestCase): klass = mitogen.master.ModuleFinder @@ -235,7 +253,7 @@ class FindRelatedTest(testlib.TestCase): if sys.version_info > (2, 6): class DjangoMixin(object): - WEBPROJECT_PATH = testlib.data_path('webproject') + WEBPROJECT_PATH = os.path.join(MODS_PATH, 'webproject') # TODO: rip out Django and replace with a static tree of weird imports # that don't depend on .. Django! 
The hack below is because the version diff --git a/tests/testlib.py b/tests/testlib.py index 37c3c654..2ee672be 100644 --- a/tests/testlib.py +++ b/tests/testlib.py @@ -41,7 +41,6 @@ except NameError: LOG = logging.getLogger(__name__) DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') -sys.path.append(DATA_DIR) if mitogen.is_master: mitogen.utils.log_to_file() From 4f23f0bec10b8d311a06ef0adeaf8ebe2bd023e9 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 2 Jun 2019 13:08:44 +0100 Subject: [PATCH 039/383] issue #590: update comment to indicate the hack is permanent --- ansible_mitogen/runner.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/ansible_mitogen/runner.py b/ansible_mitogen/runner.py index a8dae8b1..8dbddadd 100644 --- a/ansible_mitogen/runner.py +++ b/ansible_mitogen/runner.py @@ -763,7 +763,16 @@ class NewStyleRunner(ScriptRunner): try: mitogen.core.import_module(fullname) except ImportError: - # TODO: this is a huge hack to work around issue #590. + # #590: Ansible 2.8 module_utils.distro is a package that + # replaces itself in sys.modules with a non-package during + # import. Prior to replacement, it is a real package containing + # a '_distro' submodule which is used on 2.x. Given a 2.x + # controller and 3.x target, the import hook never needs to run + # again before this replacement occurs, and 'distro' is + # replaced with a module from the stdlib. In this case as this + # loop progresses to the next entry and attempts to preload + # 'distro._distro', the import mechanism will fail. So here we + # silently ignore any failure for it. if fullname != 'ansible.module_utils.distro._distro': raise From 72ab917c89835930579de5f8fa2c0a6f3a3221ab Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 2 Jun 2019 13:08:44 +0100 Subject: [PATCH 040/383] issue #590: add FinderMethod docstrings. --- mitogen/master.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/mitogen/master.py b/mitogen/master.py index 7188ba14..1eed8c60 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -405,11 +405,26 @@ class LogForwarder(object): class FinderMethod(object): + """ + Interface to a method for locating a Python module or package given its + name according to the running Python interpreter. You'd think this was a + simple task, right? Naive young fellow, welcome to the real world. + """ def __repr__(self): return '%s()' % (type(self).__name__,) def find(self, fullname): - pass + """ + Accept a canonical module name and return `(path, source, is_pkg)` + tuples, where: + + * `path`: Unicode string containing path to source file. + * `source`: Bytestring containing source file's content. + * `is_pkg`: :data:`True` if `fullname` is a package. + + :returns: + :data:`None` if not found, or tuple as described above. + """ class DefectivePython3xMainMethod(FinderMethod): From cf1e7129a744e2f4650b16a69ca8bd679d1b513e Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 2 Jun 2019 13:08:44 +0100 Subject: [PATCH 041/383] issue #590: add dummy package for new test. 
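The ``find()`` contract documented in the new FinderMethod docstrings above is easiest to see against a concrete method. The sketch below is illustrative only and assumes nothing beyond that docstring: a hit returns ``(path, source, is_pkg)`` and a miss returns :data:`None`; the class name and its fixed-directory behaviour are invented for the example and do not appear anywhere in this series::

    import os

    import mitogen.master


    class FixedDirectoryMethod(mitogen.master.FinderMethod):
        """
        Hypothetical finder: resolve top-level modules from a single fixed
        directory, following the (path, source, is_pkg) contract above.
        """
        def __init__(self, directory):
            self.directory = directory

        def find(self, fullname):
            path = os.path.join(self.directory, fullname + '.py')
            if not os.path.isfile(path):
                return None  # miss; another method may still locate it

            fp = open(path, 'rb')
            try:
                source = fp.read()  # bytestring, as the contract requires
            finally:
                fp.close()
            return path, source, False  # a plain module, not a package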
--- tests/data/module_finder/pkg_like_ansible/__init__.py | 0 .../module_finder/pkg_like_ansible/module_utils/__init__.py | 0 .../pkg_like_ansible/module_utils/distro/__init__.py | 5 +++++ .../pkg_like_ansible/module_utils/distro/_distro.py | 1 + 4 files changed, 6 insertions(+) create mode 100644 tests/data/module_finder/pkg_like_ansible/__init__.py create mode 100644 tests/data/module_finder/pkg_like_ansible/module_utils/__init__.py create mode 100644 tests/data/module_finder/pkg_like_ansible/module_utils/distro/__init__.py create mode 100644 tests/data/module_finder/pkg_like_ansible/module_utils/distro/_distro.py diff --git a/tests/data/module_finder/pkg_like_ansible/__init__.py b/tests/data/module_finder/pkg_like_ansible/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/data/module_finder/pkg_like_ansible/module_utils/__init__.py b/tests/data/module_finder/pkg_like_ansible/module_utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/data/module_finder/pkg_like_ansible/module_utils/distro/__init__.py b/tests/data/module_finder/pkg_like_ansible/module_utils/distro/__init__.py new file mode 100644 index 00000000..3a149657 --- /dev/null +++ b/tests/data/module_finder/pkg_like_ansible/module_utils/distro/__init__.py @@ -0,0 +1,5 @@ +# #590: a package that turns itself into a module. +I_AM = "the package that was replaced" +import sys +from pkg_like_ansible.module_utils.distro import _distro +sys.modules[__name__] = _distro diff --git a/tests/data/module_finder/pkg_like_ansible/module_utils/distro/_distro.py b/tests/data/module_finder/pkg_like_ansible/module_utils/distro/_distro.py new file mode 100644 index 00000000..9f113fef --- /dev/null +++ b/tests/data/module_finder/pkg_like_ansible/module_utils/distro/_distro.py @@ -0,0 +1 @@ +I_AM = "the module that replaced the package" From 5eb10aacef96135ab1b09228ba0f617b0bff1899 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 2 Jun 2019 18:31:21 +0100 Subject: [PATCH 042/383] master: fix _is_stdlib_path() failure on Ubuntu. --- mitogen/master.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/mitogen/master.py b/mitogen/master.py index 1eed8c60..681b870b 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -46,8 +46,9 @@ import pkgutil import re import string import sys -import time +import sysconfig import threading +import time import types import zlib @@ -93,10 +94,15 @@ def _stdlib_paths(): 'real_prefix', # virtualenv: only set inside a virtual environment. 'base_prefix', # venv: always set, equal to prefix if outside. ] - prefixes = (getattr(sys, a) for a in attr_candidates if hasattr(sys, a)) + prefixes = (getattr(sys, a, None) for a in attr_candidates) version = 'python%s.%s' % sys.version_info[0:2] - return set(os.path.abspath(os.path.join(p, 'lib', version)) - for p in prefixes) + s = set(os.path.abspath(os.path.join(p, 'lib', version)) + for p in prefixes if p is not None) + + # When running 'unit2 tests/module_finder_test.py' in a Py2 venv on Ubuntu + # 18.10, above is insufficient to catch the real directory. + s.add(sysconfig.get_config_var('DESTLIB')) + return s def is_stdlib_name(modname): @@ -425,6 +431,7 @@ class FinderMethod(object): :returns: :data:`None` if not found, or tuple as described above. 
""" + raise NotImplementedError() class DefectivePython3xMainMethod(FinderMethod): From 2f68a5a6608dbb716964599496494586853181ea Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 2 Jun 2019 18:55:42 +0100 Subject: [PATCH 043/383] module_finder_test: mask one more difference between unit2 vs. direct start --- tests/module_finder_test.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/module_finder_test.py b/tests/module_finder_test.py index 93755aa0..a77f1084 100644 --- a/tests/module_finder_test.py +++ b/tests/module_finder_test.py @@ -120,7 +120,13 @@ class SysModulesMethodTest(testlib.TestCase): import __main__ path, src, is_pkg = self.call('__main__') self.assertEquals(path, __main__.__file__) - self.assertEquals(src, open(path, 'rb').read()) + + # linecache adds a line ending to the final line if one is missing. + actual_src = open(path, 'rb').read() + if actual_src[-1] != '\n': + actual_src += '\n' + + self.assertEquals(src, actual_src) self.assertFalse(is_pkg) def test_dylib_fails(self): From 2f29f3e8e638a48d242d40cfda2533988d2fbbea Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 2 Jun 2019 18:56:56 +0100 Subject: [PATCH 044/383] tests: rearrange test modules again, they're used in multiple places They're back on sys.path. --- .../module_finder_testmod/__init__.py | 0 .../module_finder_testmod/empty_mod.py | 0 .../module_finder_testmod/regular_mod.py | 0 .../sibling_dep_mod_abs_import.py | 0 .../sibling_dep_mod_py2_import.py | 0 .../sibling_dep_mod_rel_import.py | 0 .../pkg_like_ansible/__init__.py | 0 .../pkg_like_ansible/module_utils/__init__.py | 0 .../module_utils/distro/__init__.py | 0 .../module_utils/distro/_distro.py | 0 .../pkg_like_plumbum/__init__.py | 0 .../pkg_like_plumbum/colors.py | 0 .../simple_pkg/__init__.py | 0 .../{module_finder => importer}/simple_pkg/a.py | 0 .../{module_finder => importer}/simple_pkg/b.py | 0 .../simple_pkg/imports_replaces_self.py | 0 .../simple_pkg/ping.py | 0 .../simple_pkg/replaces_self.py | 0 .../six_brokenpkg/__init__.py | 0 .../six_brokenpkg/_six.py | 0 .../webproject/manage.py | 0 .../webproject/serve_django_app.py | 0 .../webproject/webapp/__init__.py | 0 .../webproject/webapp/admin.py | 0 .../webproject/webapp/apps.py | 0 .../webproject/webapp/migrations/__init__.py | 0 .../webproject/webapp/models.py | 0 .../webproject/webapp/tests.py | 0 .../webproject/webapp/views.py | 0 .../webproject/webproject/__init__.py | 0 .../webproject/webproject/settings.py | 0 .../webproject/webproject/urls.py | 0 .../webproject/webproject/wsgi.py | 0 tests/module_finder_test.py | 16 +++++++--------- tests/testlib.py | 5 +++++ 35 files changed, 12 insertions(+), 9 deletions(-) rename tests/data/{module_finder => importer}/module_finder_testmod/__init__.py (100%) rename tests/data/{module_finder => importer}/module_finder_testmod/empty_mod.py (100%) rename tests/data/{module_finder => importer}/module_finder_testmod/regular_mod.py (100%) rename tests/data/{module_finder => importer}/module_finder_testmod/sibling_dep_mod_abs_import.py (100%) rename tests/data/{module_finder => importer}/module_finder_testmod/sibling_dep_mod_py2_import.py (100%) rename tests/data/{module_finder => importer}/module_finder_testmod/sibling_dep_mod_rel_import.py (100%) rename tests/data/{module_finder => importer}/pkg_like_ansible/__init__.py (100%) rename tests/data/{module_finder => importer}/pkg_like_ansible/module_utils/__init__.py (100%) rename tests/data/{module_finder => importer}/pkg_like_ansible/module_utils/distro/__init__.py 
(100%) rename tests/data/{module_finder => importer}/pkg_like_ansible/module_utils/distro/_distro.py (100%) rename tests/data/{module_finder => importer}/pkg_like_plumbum/__init__.py (100%) rename tests/data/{module_finder => importer}/pkg_like_plumbum/colors.py (100%) rename tests/data/{module_finder => importer}/simple_pkg/__init__.py (100%) rename tests/data/{module_finder => importer}/simple_pkg/a.py (100%) rename tests/data/{module_finder => importer}/simple_pkg/b.py (100%) rename tests/data/{module_finder => importer}/simple_pkg/imports_replaces_self.py (100%) rename tests/data/{module_finder => importer}/simple_pkg/ping.py (100%) rename tests/data/{module_finder => importer}/simple_pkg/replaces_self.py (100%) rename tests/data/{module_finder => importer}/six_brokenpkg/__init__.py (100%) rename tests/data/{module_finder => importer}/six_brokenpkg/_six.py (100%) rename tests/data/{module_finder => importer}/webproject/manage.py (100%) rename tests/data/{module_finder => importer}/webproject/serve_django_app.py (100%) rename tests/data/{module_finder => importer}/webproject/webapp/__init__.py (100%) rename tests/data/{module_finder => importer}/webproject/webapp/admin.py (100%) rename tests/data/{module_finder => importer}/webproject/webapp/apps.py (100%) rename tests/data/{module_finder => importer}/webproject/webapp/migrations/__init__.py (100%) rename tests/data/{module_finder => importer}/webproject/webapp/models.py (100%) rename tests/data/{module_finder => importer}/webproject/webapp/tests.py (100%) rename tests/data/{module_finder => importer}/webproject/webapp/views.py (100%) rename tests/data/{module_finder => importer}/webproject/webproject/__init__.py (100%) rename tests/data/{module_finder => importer}/webproject/webproject/settings.py (100%) rename tests/data/{module_finder => importer}/webproject/webproject/urls.py (100%) rename tests/data/{module_finder => importer}/webproject/webproject/wsgi.py (100%) diff --git a/tests/data/module_finder/module_finder_testmod/__init__.py b/tests/data/importer/module_finder_testmod/__init__.py similarity index 100% rename from tests/data/module_finder/module_finder_testmod/__init__.py rename to tests/data/importer/module_finder_testmod/__init__.py diff --git a/tests/data/module_finder/module_finder_testmod/empty_mod.py b/tests/data/importer/module_finder_testmod/empty_mod.py similarity index 100% rename from tests/data/module_finder/module_finder_testmod/empty_mod.py rename to tests/data/importer/module_finder_testmod/empty_mod.py diff --git a/tests/data/module_finder/module_finder_testmod/regular_mod.py b/tests/data/importer/module_finder_testmod/regular_mod.py similarity index 100% rename from tests/data/module_finder/module_finder_testmod/regular_mod.py rename to tests/data/importer/module_finder_testmod/regular_mod.py diff --git a/tests/data/module_finder/module_finder_testmod/sibling_dep_mod_abs_import.py b/tests/data/importer/module_finder_testmod/sibling_dep_mod_abs_import.py similarity index 100% rename from tests/data/module_finder/module_finder_testmod/sibling_dep_mod_abs_import.py rename to tests/data/importer/module_finder_testmod/sibling_dep_mod_abs_import.py diff --git a/tests/data/module_finder/module_finder_testmod/sibling_dep_mod_py2_import.py b/tests/data/importer/module_finder_testmod/sibling_dep_mod_py2_import.py similarity index 100% rename from tests/data/module_finder/module_finder_testmod/sibling_dep_mod_py2_import.py rename to tests/data/importer/module_finder_testmod/sibling_dep_mod_py2_import.py diff --git 
a/tests/data/module_finder/module_finder_testmod/sibling_dep_mod_rel_import.py b/tests/data/importer/module_finder_testmod/sibling_dep_mod_rel_import.py similarity index 100% rename from tests/data/module_finder/module_finder_testmod/sibling_dep_mod_rel_import.py rename to tests/data/importer/module_finder_testmod/sibling_dep_mod_rel_import.py diff --git a/tests/data/module_finder/pkg_like_ansible/__init__.py b/tests/data/importer/pkg_like_ansible/__init__.py similarity index 100% rename from tests/data/module_finder/pkg_like_ansible/__init__.py rename to tests/data/importer/pkg_like_ansible/__init__.py diff --git a/tests/data/module_finder/pkg_like_ansible/module_utils/__init__.py b/tests/data/importer/pkg_like_ansible/module_utils/__init__.py similarity index 100% rename from tests/data/module_finder/pkg_like_ansible/module_utils/__init__.py rename to tests/data/importer/pkg_like_ansible/module_utils/__init__.py diff --git a/tests/data/module_finder/pkg_like_ansible/module_utils/distro/__init__.py b/tests/data/importer/pkg_like_ansible/module_utils/distro/__init__.py similarity index 100% rename from tests/data/module_finder/pkg_like_ansible/module_utils/distro/__init__.py rename to tests/data/importer/pkg_like_ansible/module_utils/distro/__init__.py diff --git a/tests/data/module_finder/pkg_like_ansible/module_utils/distro/_distro.py b/tests/data/importer/pkg_like_ansible/module_utils/distro/_distro.py similarity index 100% rename from tests/data/module_finder/pkg_like_ansible/module_utils/distro/_distro.py rename to tests/data/importer/pkg_like_ansible/module_utils/distro/_distro.py diff --git a/tests/data/module_finder/pkg_like_plumbum/__init__.py b/tests/data/importer/pkg_like_plumbum/__init__.py similarity index 100% rename from tests/data/module_finder/pkg_like_plumbum/__init__.py rename to tests/data/importer/pkg_like_plumbum/__init__.py diff --git a/tests/data/module_finder/pkg_like_plumbum/colors.py b/tests/data/importer/pkg_like_plumbum/colors.py similarity index 100% rename from tests/data/module_finder/pkg_like_plumbum/colors.py rename to tests/data/importer/pkg_like_plumbum/colors.py diff --git a/tests/data/module_finder/simple_pkg/__init__.py b/tests/data/importer/simple_pkg/__init__.py similarity index 100% rename from tests/data/module_finder/simple_pkg/__init__.py rename to tests/data/importer/simple_pkg/__init__.py diff --git a/tests/data/module_finder/simple_pkg/a.py b/tests/data/importer/simple_pkg/a.py similarity index 100% rename from tests/data/module_finder/simple_pkg/a.py rename to tests/data/importer/simple_pkg/a.py diff --git a/tests/data/module_finder/simple_pkg/b.py b/tests/data/importer/simple_pkg/b.py similarity index 100% rename from tests/data/module_finder/simple_pkg/b.py rename to tests/data/importer/simple_pkg/b.py diff --git a/tests/data/module_finder/simple_pkg/imports_replaces_self.py b/tests/data/importer/simple_pkg/imports_replaces_self.py similarity index 100% rename from tests/data/module_finder/simple_pkg/imports_replaces_self.py rename to tests/data/importer/simple_pkg/imports_replaces_self.py diff --git a/tests/data/module_finder/simple_pkg/ping.py b/tests/data/importer/simple_pkg/ping.py similarity index 100% rename from tests/data/module_finder/simple_pkg/ping.py rename to tests/data/importer/simple_pkg/ping.py diff --git a/tests/data/module_finder/simple_pkg/replaces_self.py b/tests/data/importer/simple_pkg/replaces_self.py similarity index 100% rename from tests/data/module_finder/simple_pkg/replaces_self.py rename to 
tests/data/importer/simple_pkg/replaces_self.py diff --git a/tests/data/module_finder/six_brokenpkg/__init__.py b/tests/data/importer/six_brokenpkg/__init__.py similarity index 100% rename from tests/data/module_finder/six_brokenpkg/__init__.py rename to tests/data/importer/six_brokenpkg/__init__.py diff --git a/tests/data/module_finder/six_brokenpkg/_six.py b/tests/data/importer/six_brokenpkg/_six.py similarity index 100% rename from tests/data/module_finder/six_brokenpkg/_six.py rename to tests/data/importer/six_brokenpkg/_six.py diff --git a/tests/data/module_finder/webproject/manage.py b/tests/data/importer/webproject/manage.py similarity index 100% rename from tests/data/module_finder/webproject/manage.py rename to tests/data/importer/webproject/manage.py diff --git a/tests/data/module_finder/webproject/serve_django_app.py b/tests/data/importer/webproject/serve_django_app.py similarity index 100% rename from tests/data/module_finder/webproject/serve_django_app.py rename to tests/data/importer/webproject/serve_django_app.py diff --git a/tests/data/module_finder/webproject/webapp/__init__.py b/tests/data/importer/webproject/webapp/__init__.py similarity index 100% rename from tests/data/module_finder/webproject/webapp/__init__.py rename to tests/data/importer/webproject/webapp/__init__.py diff --git a/tests/data/module_finder/webproject/webapp/admin.py b/tests/data/importer/webproject/webapp/admin.py similarity index 100% rename from tests/data/module_finder/webproject/webapp/admin.py rename to tests/data/importer/webproject/webapp/admin.py diff --git a/tests/data/module_finder/webproject/webapp/apps.py b/tests/data/importer/webproject/webapp/apps.py similarity index 100% rename from tests/data/module_finder/webproject/webapp/apps.py rename to tests/data/importer/webproject/webapp/apps.py diff --git a/tests/data/module_finder/webproject/webapp/migrations/__init__.py b/tests/data/importer/webproject/webapp/migrations/__init__.py similarity index 100% rename from tests/data/module_finder/webproject/webapp/migrations/__init__.py rename to tests/data/importer/webproject/webapp/migrations/__init__.py diff --git a/tests/data/module_finder/webproject/webapp/models.py b/tests/data/importer/webproject/webapp/models.py similarity index 100% rename from tests/data/module_finder/webproject/webapp/models.py rename to tests/data/importer/webproject/webapp/models.py diff --git a/tests/data/module_finder/webproject/webapp/tests.py b/tests/data/importer/webproject/webapp/tests.py similarity index 100% rename from tests/data/module_finder/webproject/webapp/tests.py rename to tests/data/importer/webproject/webapp/tests.py diff --git a/tests/data/module_finder/webproject/webapp/views.py b/tests/data/importer/webproject/webapp/views.py similarity index 100% rename from tests/data/module_finder/webproject/webapp/views.py rename to tests/data/importer/webproject/webapp/views.py diff --git a/tests/data/module_finder/webproject/webproject/__init__.py b/tests/data/importer/webproject/webproject/__init__.py similarity index 100% rename from tests/data/module_finder/webproject/webproject/__init__.py rename to tests/data/importer/webproject/webproject/__init__.py diff --git a/tests/data/module_finder/webproject/webproject/settings.py b/tests/data/importer/webproject/webproject/settings.py similarity index 100% rename from tests/data/module_finder/webproject/webproject/settings.py rename to tests/data/importer/webproject/webproject/settings.py diff --git a/tests/data/module_finder/webproject/webproject/urls.py 
b/tests/data/importer/webproject/webproject/urls.py similarity index 100% rename from tests/data/module_finder/webproject/webproject/urls.py rename to tests/data/importer/webproject/webproject/urls.py diff --git a/tests/data/module_finder/webproject/webproject/wsgi.py b/tests/data/importer/webproject/webproject/wsgi.py similarity index 100% rename from tests/data/module_finder/webproject/webproject/wsgi.py rename to tests/data/importer/webproject/webproject/wsgi.py diff --git a/tests/module_finder_test.py b/tests/module_finder_test.py index a77f1084..d35fb382 100644 --- a/tests/module_finder_test.py +++ b/tests/module_finder_test.py @@ -7,9 +7,7 @@ import unittest2 import mitogen.master import testlib - -MODS_PATH = testlib.data_path('module_finder') -sys.path.append(MODS_PATH) +from testlib import MODS_DIR class ConstructorTest(testlib.TestCase): @@ -89,14 +87,14 @@ class PkgutilMethodTest(testlib.TestCase): def test_empty_source_pkg(self): path, src, is_pkg = self.call('module_finder_testmod') self.assertEquals(path, - os.path.join(MODS_PATH, 'module_finder_testmod/__init__.py')) + os.path.join(MODS_DIR, 'module_finder_testmod/__init__.py')) self.assertEquals(mitogen.core.b(''), src) self.assertTrue(is_pkg) def test_empty_source_module(self): path, src, is_pkg = self.call('module_finder_testmod.empty_mod') self.assertEquals(path, - os.path.join(MODS_PATH, 'module_finder_testmod/empty_mod.py')) + os.path.join(MODS_DIR, 'module_finder_testmod/empty_mod.py')) self.assertEquals(mitogen.core.b(''), src) self.assertFalse(is_pkg) @@ -104,7 +102,7 @@ class PkgutilMethodTest(testlib.TestCase): from module_finder_testmod import regular_mod path, src, is_pkg = self.call('module_finder_testmod.regular_mod') self.assertEquals(path, - os.path.join(MODS_PATH, 'module_finder_testmod/regular_mod.py')) + os.path.join(MODS_DIR, 'module_finder_testmod/regular_mod.py')) self.assertEquals(mitogen.core.to_text(src), inspect.getsource(regular_mod)) self.assertFalse(is_pkg) @@ -166,7 +164,7 @@ class GetModuleViaParentEnumerationTest(testlib.TestCase): # plumbum has been eating too many rainbow-colored pills import pkg_like_plumbum.colors path, src, is_pkg = self.call('pkg_like_plumbum.colors') - modpath = os.path.join(MODS_PATH, 'pkg_like_plumbum/colors.py') + modpath = os.path.join(MODS_DIR, 'pkg_like_plumbum/colors.py') self.assertEquals(path, modpath) self.assertEquals(src, open(modpath, 'rb').read()) @@ -182,7 +180,7 @@ class GetModuleViaParentEnumerationTest(testlib.TestCase): ) path, src, is_pkg = self.call('pkg_like_ansible.module_utils.distro') - modpath = os.path.join(MODS_PATH, + modpath = os.path.join(MODS_DIR, 'pkg_like_ansible/module_utils/distro/__init__.py') self.assertEquals(path, modpath) self.assertEquals(src, open(modpath, 'rb').read()) @@ -259,7 +257,7 @@ class FindRelatedTest(testlib.TestCase): if sys.version_info > (2, 6): class DjangoMixin(object): - WEBPROJECT_PATH = os.path.join(MODS_PATH, 'webproject') + WEBPROJECT_PATH = os.path.join(MODS_DIR, 'webproject') # TODO: rip out Django and replace with a static tree of weird imports # that don't depend on .. Django! 
The hack below is because the version diff --git a/tests/testlib.py b/tests/testlib.py index 2ee672be..04a48d84 100644 --- a/tests/testlib.py +++ b/tests/testlib.py @@ -41,6 +41,11 @@ except NameError: LOG = logging.getLogger(__name__) DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') +MODS_DIR = os.path.join(DATA_DIR, 'importer') + +sys.path.append(DATA_DIR) +sys.path.append(MODS_DIR) + if mitogen.is_master: mitogen.utils.log_to_file() From ed8acb5153576dc11486f7cd5ab5208b584ca7b3 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 2 Jun 2019 19:05:54 +0100 Subject: [PATCH 045/383] master: sysconfig did not exist until 2.7. --- mitogen/master.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/mitogen/master.py b/mitogen/master.py index 681b870b..fb4f505b 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -46,12 +46,16 @@ import pkgutil import re import string import sys -import sysconfig import threading import time import types import zlib +try: + import sysconfig +except ImportError: + sysconfig = None + if not hasattr(pkgutil, 'find_loader'): # find_loader() was new in >=2.5, but the modern pkgutil.py syntax has # been kept intentionally 2.3 compatible so we can reuse it. @@ -101,7 +105,8 @@ def _stdlib_paths(): # When running 'unit2 tests/module_finder_test.py' in a Py2 venv on Ubuntu # 18.10, above is insufficient to catch the real directory. - s.add(sysconfig.get_config_var('DESTLIB')) + if sysconfig is not None: + s.add(sysconfig.get_config_var('DESTLIB')) return s From 50cdf63c27177c75b815af3e01337126f5bf801f Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 2 Jun 2019 19:12:43 +0100 Subject: [PATCH 046/383] tests: Py3.x fix. --- tests/module_finder_test.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/module_finder_test.py b/tests/module_finder_test.py index d35fb382..3bc4088f 100644 --- a/tests/module_finder_test.py +++ b/tests/module_finder_test.py @@ -5,6 +5,7 @@ import sys import unittest2 import mitogen.master +from mitogen.core import b import testlib from testlib import MODS_DIR @@ -122,7 +123,7 @@ class SysModulesMethodTest(testlib.TestCase): # linecache adds a line ending to the final line if one is missing. actual_src = open(path, 'rb').read() if actual_src[-1] != '\n': - actual_src += '\n' + actual_src += b('\n') self.assertEquals(src, actual_src) self.assertFalse(is_pkg) From e94200aeb90a3120c62051c498133ea75d067528 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 2 Jun 2019 19:13:23 +0100 Subject: [PATCH 047/383] issue #590: actually run Ansible test. --- tests/ansible/regression/all.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/ansible/regression/all.yml b/tests/ansible/regression/all.yml index 123d87d9..62606b00 100644 --- a/tests/ansible/regression/all.yml +++ b/tests/ansible/regression/all.yml @@ -8,3 +8,4 @@ - include: issue_154__module_state_leaks.yml - include: issue_177__copy_module_failing.yml - include: issue_332_ansiblemoduleerror_first_occurrence.yml +- include: issue_590__sys_modules_crap.yml From 90105e20315d6fc9edbea2b640ed0c6543de03fa Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 2 Jun 2019 19:12:43 +0100 Subject: [PATCH 048/383] tests: Py3.x fix. 
--- tests/module_finder_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/module_finder_test.py b/tests/module_finder_test.py index 3bc4088f..11062512 100644 --- a/tests/module_finder_test.py +++ b/tests/module_finder_test.py @@ -122,7 +122,7 @@ class SysModulesMethodTest(testlib.TestCase): # linecache adds a line ending to the final line if one is missing. actual_src = open(path, 'rb').read() - if actual_src[-1] != '\n': + if actual_src[-1] != b('\n'): actual_src += b('\n') self.assertEquals(src, actual_src) From a1f2ec222d5b6904c2c73ad10bad1d5cbdae58ca Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 2 Jun 2019 19:38:42 +0100 Subject: [PATCH 049/383] issue #590: fix test for <2.8 Ansibles. --- .../ansible/lib/modules/custom_python_uses_distro.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/tests/ansible/lib/modules/custom_python_uses_distro.py b/tests/ansible/lib/modules/custom_python_uses_distro.py index c5ffbb84..1fc31b4e 100644 --- a/tests/ansible/lib/modules/custom_python_uses_distro.py +++ b/tests/ansible/lib/modules/custom_python_uses_distro.py @@ -2,12 +2,20 @@ # issue #590: I am an Ansible new-style Python module that tries to use # ansible.module_utils.distro. +import ansible from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils import distro + +if ansible.__version__ > '2.8': + from ansible.module_utils import distro +else: + distro = None def main(): module = AnsibleModule(argument_spec={}) - module.exit_json(info=distro.info()) + if ansible.__version__ > '2.8': + module.exit_json(info=distro.info()) + else: + module.exit_json(info={'id': None}) if __name__ == '__main__': main() From f1287e6e9af7397cfd6aa5dc6d49eec7fe916c95 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 2 Jun 2019 19:12:43 +0100 Subject: [PATCH 050/383] tests: Py3.x fix. --- tests/module_finder_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/module_finder_test.py b/tests/module_finder_test.py index 11062512..e61c768f 100644 --- a/tests/module_finder_test.py +++ b/tests/module_finder_test.py @@ -122,7 +122,7 @@ class SysModulesMethodTest(testlib.TestCase): # linecache adds a line ending to the final line if one is missing. actual_src = open(path, 'rb').read() - if actual_src[-1] != b('\n'): + if actual_src[-1:] != b('\n'): actual_src += b('\n') self.assertEquals(src, actual_src) From c1d763f92976c45f4cbfcf91f229fe1134cdac3d Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 2 Jun 2019 19:53:12 +0100 Subject: [PATCH 051/383] ci: Ansible 2.8 jobs aren't running against all host types. 
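The three successive "Py3.x fix" patches above (046, 048 and 050) all chase one Python 2/3 difference: indexing a byte string yields a one-character string on Python 2 but an integer on Python 3, so only the sliced comparison behaves identically under both interpreters. A standalone illustration, separate from the test code itself::

    data = b'source file ending in a newline\n'

    # Python 2: data[-1] is '\n', a length-1 str, so data[-1] != b('\n') is False.
    # Python 3: data[-1] is 10, an int, so the same comparison is always True,
    # and the test appended a second newline even though one was present.
    print(repr(data[-1]))

    # Slicing yields bytes on both major versions, so this form is portable:
    print(data[-1:] == b'\n')   # True on Python 2 and on Python 3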
--- .ci/azure-pipelines.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.ci/azure-pipelines.yml b/.ci/azure-pipelines.yml index ef344bba..95f239ff 100644 --- a/.ci/azure-pipelines.yml +++ b/.ci/azure-pipelines.yml @@ -92,10 +92,8 @@ jobs: python.version: '2.7' MODE: ansible VER: 2.8.0 - DISTROS: debian Ansible_280_35: python.version: '3.5' MODE: ansible VER: 2.8.0 - DISTROS: debian From fe7c3610ffa86a8fe1886ddf13bb59f3be290e04 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 2 Jun 2019 20:52:18 +0100 Subject: [PATCH 052/383] issue #590: disable distro test on vanilla --- tests/ansible/regression/issue_590__sys_modules_crap.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/ansible/regression/issue_590__sys_modules_crap.yml b/tests/ansible/regression/issue_590__sys_modules_crap.yml index 83a9a286..41130b68 100644 --- a/tests/ansible/regression/issue_590__sys_modules_crap.yml +++ b/tests/ansible/regression/issue_590__sys_modules_crap.yml @@ -1,6 +1,9 @@ - hosts: test-targets tasks: + - meta: end_play + when: ansible_version.full < '2.8' + - custom_python_uses_distro: register: out From 0b7fd3f2904f3f9af0293deefad7e9ca361f226f Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 3 Jun 2019 02:31:43 +0100 Subject: [PATCH 053/383] issue #591: ansible: restore CWD prior to AnsibleModule initialization. --- ansible_mitogen/runner.py | 11 ++++++++++ docs/changelog.rst | 11 ++++++++++ .../lib/modules/custom_python_os_getcwd.py | 14 +++++++++++++ tests/ansible/regression/all.yml | 1 + .../issue_591__setuptools_cwd_crash.yml | 20 +++++++++++++++++++ 5 files changed, 57 insertions(+) create mode 100644 tests/ansible/lib/modules/custom_python_os_getcwd.py create mode 100644 tests/ansible/regression/issue_591__setuptools_cwd_crash.yml diff --git a/ansible_mitogen/runner.py b/ansible_mitogen/runner.py index 8dbddadd..05bc55c0 100644 --- a/ansible_mitogen/runner.py +++ b/ansible_mitogen/runner.py @@ -344,11 +344,22 @@ class Runner(object): env.update(self.env) self._env = TemporaryEnvironment(env) + def _revert_cwd(self): + """ + #591: make a best-effort attempt to return to :attr:`good_temp_dir`. + """ + try: + os.chdir(self.good_temp_dir) + except OSError: + LOG.debug('%r: could not restore CWD to %r', + self, self.good_temp_dir) + def revert(self): """ Revert any changes made to the process after running a module. The base implementation simply restores the original environment. """ + self._revert_cwd() self._env.revert() self.revert_temp_dir() diff --git a/docs/changelog.rst b/docs/changelog.rst index 4a3a5612..1e4c7e18 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -30,18 +30,29 @@ Enhancements `_ functionality, which will be addressed in a future release. + Fixes ^^^^^ * `#590 `_: the importer can handle modules that replace themselves in :mod:`sys.modules` during import. +* `#591 `_: the target's current + working directory is restored to a known-existent directory between tasks to + ensure :func:`os.getcwd` will not fail when called, in the same way that + :class:`AnsibleModule` restores it during initialization. However this + restore happens before the module ever executes, ensuring any code that calls + :func:`os.getcwd` prior to :class:`AnsibleModule` initialization, such as the + Ansible 2.7 ``pip`` module, cannot fail due to the behavior of a prior task. + Thanks! ~~~~~~~ Mitogen would not be possible without the support of users. 
A huge thanks for bug reports, testing, features and fixes in this release contributed by +`Anton Markelov `_, +`Nigel Metheringham `_, `Orion Poplawski `_, and `Ulrich Schreiner `_. diff --git a/tests/ansible/lib/modules/custom_python_os_getcwd.py b/tests/ansible/lib/modules/custom_python_os_getcwd.py new file mode 100644 index 00000000..7fe3fd1b --- /dev/null +++ b/tests/ansible/lib/modules/custom_python_os_getcwd.py @@ -0,0 +1,14 @@ +#!/usr/bin/python +# #591: call os.getcwd() before AnsibleModule ever gets a chance to fix up the +# process environment. + +import os + +try: + import json +except ImportError: + import simplejson as json + +print(json.dumps({ + 'cwd': os.getcwd() +})) diff --git a/tests/ansible/regression/all.yml b/tests/ansible/regression/all.yml index 62606b00..32852942 100644 --- a/tests/ansible/regression/all.yml +++ b/tests/ansible/regression/all.yml @@ -9,3 +9,4 @@ - include: issue_177__copy_module_failing.yml - include: issue_332_ansiblemoduleerror_first_occurrence.yml - include: issue_590__sys_modules_crap.yml +- include: issue_591__setuptools_cwd_crash.yml diff --git a/tests/ansible/regression/issue_591__setuptools_cwd_crash.yml b/tests/ansible/regression/issue_591__setuptools_cwd_crash.yml new file mode 100644 index 00000000..cbd65193 --- /dev/null +++ b/tests/ansible/regression/issue_591__setuptools_cwd_crash.yml @@ -0,0 +1,20 @@ +# #591: process CWD is not reset before start of module execution. This is +# usually fine, except for modules importing setuptools early, which attempts +# to call getcwd() before AnsibleModule has had a chance to clean up the +# process environment. + +- hosts: test-targets + tasks: + - meta: end_play + when: not is_mitogen + + - custom_python_run_script: + script: | + import os + os.chdir(module.tmpdir) + + # Will crash if process has a nonexistent CWD. + - custom_python_os_getcwd: + script: | + import os + self._connection.get_chain().call(os.getcwd) From e90c05dc9d74e2f0084e2a8ecacaf0862a2f1e96 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 3 Jun 2019 02:31:43 +0100 Subject: [PATCH 054/383] issue #591: fix test for Ansible 2.3. --- .../ansible/regression/issue_591__setuptools_cwd_crash.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/ansible/regression/issue_591__setuptools_cwd_crash.yml b/tests/ansible/regression/issue_591__setuptools_cwd_crash.yml index cbd65193..fc73825c 100644 --- a/tests/ansible/regression/issue_591__setuptools_cwd_crash.yml +++ b/tests/ansible/regression/issue_591__setuptools_cwd_crash.yml @@ -11,7 +11,11 @@ - custom_python_run_script: script: | import os - os.chdir(module.tmpdir) + try: + os.chdir(module.tmpdir) + except: + # Ansible 2.3. + os.chdir(os.path.dirname(__file__)) # Will crash if process has a nonexistent CWD. 
- custom_python_os_getcwd: From 3620fce071e0993cc795275e09d632db11a44061 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 3 Jun 2019 17:40:58 +0100 Subject: [PATCH 055/383] issue #593: expose configurables for SSH keepalive and increase the default --- ansible_mitogen/connection.py | 6 +++++ ansible_mitogen/transport_config.py | 24 +++++++++++++++++++ docs/ansible_detailed.rst | 5 ++++ docs/changelog.rst | 10 ++++++-- .../delegate_to_template.yml | 4 ++++ .../stack_construction.yml | 14 +++++++++++ 6 files changed, 61 insertions(+), 2 deletions(-) diff --git a/ansible_mitogen/connection.py b/ansible_mitogen/connection.py index 411b99f1..1b6a2e9a 100644 --- a/ansible_mitogen/connection.py +++ b/ansible_mitogen/connection.py @@ -145,6 +145,12 @@ def _connect_ssh(spec): 'ssh_args': spec.ssh_args(), 'ssh_debug_level': spec.mitogen_ssh_debug_level(), 'remote_name': get_remote_name(spec), + 'keepalive_count': ( + spec.mitogen_ssh_keepalive_count() or 10 + ), + 'keepalive_interval': ( + spec.mitogen_ssh_keepalive_interval() or 30 + ), } } diff --git a/ansible_mitogen/transport_config.py b/ansible_mitogen/transport_config.py index 6fcfba04..27e368d4 100644 --- a/ansible_mitogen/transport_config.py +++ b/ansible_mitogen/transport_config.py @@ -276,6 +276,18 @@ class Spec(with_metaclass(abc.ABCMeta, object)): The path to the "machinectl" program for the 'setns' transport. """ + @abc.abstractmethod + def mitogen_ssh_keepalive_interval(self): + """ + The SSH ServerAliveInterval. + """ + + @abc.abstractmethod + def mitogen_ssh_keepalive_count(self): + """ + The SSH ServerAliveCount. + """ + @abc.abstractmethod def mitogen_ssh_debug_level(self): """ @@ -427,6 +439,12 @@ class PlayContextSpec(Spec): def mitogen_lxc_info_path(self): return self._connection.get_task_var('mitogen_lxc_info_path') + def mitogen_ssh_keepalive_interval(self): + return self._connection.get_task_var('mitogen_ssh_keepalive_interval') + + def mitogen_ssh_keepalive_count(self): + return self._connection.get_task_var('mitogen_ssh_keepalive_count') + def mitogen_machinectl_path(self): return self._connection.get_task_var('mitogen_machinectl_path') @@ -644,6 +662,12 @@ class MitogenViaSpec(Spec): def mitogen_lxc_info_path(self): return self._host_vars.get('mitogen_lxc_info_path') + def mitogen_ssh_keepalive_interval(self): + return self._host_vars.get('mitogen_ssh_keepalive_interval') + + def mitogen_ssh_keepalive_count(self): + return self._host_vars.get('mitogen_ssh_keepalive_count') + def mitogen_machinectl_path(self): return self._host_vars.get('mitogen_machinectl_path') diff --git a/docs/ansible_detailed.rst b/docs/ansible_detailed.rst index f709f8f3..e5bd0669 100644 --- a/docs/ansible_detailed.rst +++ b/docs/ansible_detailed.rst @@ -1007,6 +1007,11 @@ except connection delegation is supported. otherwise :data:`False`. This will change to off by default in a future release. If you are targetting many hosts on a fast network, please consider disabling SSH compression. +* ``mitogen_ssh_keepalive_count``: integer count of server keepalive messages to + which no reply is received before considering the SSH server dead. Defaults + to 10. +* ``mitogen_ssh_keepalive_count``: integer seconds delay between keepalive + messages. Defaults to 30. 
Debugging diff --git a/docs/changelog.rst b/docs/changelog.rst index 1e4c7e18..9f5ed993 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -45,6 +45,11 @@ Fixes :func:`os.getcwd` prior to :class:`AnsibleModule` initialization, such as the Ansible 2.7 ``pip`` module, cannot fail due to the behavior of a prior task. +* `#593 `_: the SSH connection method + exposes ``mitogen_ssh_keepalive_interval`` and + ``mitogen_ssh_keepalive_count`` variables, and the default timeout for an SSH + server has been increased from `15*3` seconds to `30*10` seconds. + Thanks! ~~~~~~~ @@ -53,8 +58,9 @@ Mitogen would not be possible without the support of users. A huge thanks for bug reports, testing, features and fixes in this release contributed by `Anton Markelov `_, `Nigel Metheringham `_, -`Orion Poplawski `_, and -`Ulrich Schreiner `_. +`Orion Poplawski `_, +`Ulrich Schreiner `_, and +`Yuki Nishida `_. v0.2.7 (2019-05-19) diff --git a/tests/ansible/integration/connection_delegation/delegate_to_template.yml b/tests/ansible/integration/connection_delegation/delegate_to_template.yml index d7af7f81..bfde1265 100644 --- a/tests/ansible/integration/connection_delegation/delegate_to_template.yml +++ b/tests/ansible/integration/connection_delegation/delegate_to_template.yml @@ -37,6 +37,8 @@ 'hostname': 'alias-host', 'identities_only': False, 'identity_file': null, + 'keepalive_interval': 30, + 'keepalive_count': 10, 'password': null, 'port': null, 'python_path': ["/usr/bin/python"], @@ -65,6 +67,8 @@ 'hostname': 'cd-normal-alias', 'identities_only': False, 'identity_file': null, + 'keepalive_interval': 30, + 'keepalive_count': 10, 'password': null, 'port': null, 'python_path': ["/usr/bin/python"], diff --git a/tests/ansible/integration/connection_delegation/stack_construction.yml b/tests/ansible/integration/connection_delegation/stack_construction.yml index 50029569..ed298599 100644 --- a/tests/ansible/integration/connection_delegation/stack_construction.yml +++ b/tests/ansible/integration/connection_delegation/stack_construction.yml @@ -71,6 +71,8 @@ 'hostname': 'alias-host', 'identities_only': False, 'identity_file': null, + 'keepalive_interval': 30, + 'keepalive_count': 10, 'password': null, 'port': null, "python_path": ["/usr/bin/python"], @@ -112,6 +114,8 @@ 'hostname': 'alias-host', 'identities_only': False, 'identity_file': null, + 'keepalive_interval': 30, + 'keepalive_count': 10, 'password': null, 'port': null, "python_path": ["/usr/bin/python"], @@ -164,6 +168,8 @@ 'hostname': 'cd-normal-normal', 'identities_only': False, 'identity_file': null, + 'keepalive_interval': 30, + 'keepalive_count': 10, 'password': null, 'port': null, "python_path": ["/usr/bin/python"], @@ -205,6 +211,8 @@ 'hostname': 'alias-host', 'identities_only': False, 'identity_file': null, + 'keepalive_interval': 30, + 'keepalive_count': 10, 'password': null, 'port': null, "python_path": ["/usr/bin/python"], @@ -233,6 +241,8 @@ 'hostname': 'cd-normal-alias', 'identities_only': False, 'identity_file': null, + 'keepalive_interval': 30, + 'keepalive_count': 10, 'password': null, 'port': null, "python_path": ["/usr/bin/python"], @@ -285,6 +295,8 @@ 'hostname': 'cd-newuser-normal-normal', 'identities_only': False, 'identity_file': null, + 'keepalive_interval': 30, + 'keepalive_count': 10, 'password': null, 'port': null, "python_path": ["/usr/bin/python"], @@ -327,6 +339,8 @@ 'hostname': 'alias-host', 'identities_only': False, 'identity_file': null, + 'keepalive_interval': 30, + 'keepalive_count': 10, 'password': null, 'port': null, 
"python_path": ["/usr/bin/python"], From 06690901e498b6a254f9c33977e8d3e3ffa57d07 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 3 Jun 2019 15:55:50 +0100 Subject: [PATCH 056/383] issue #589: split services example out and make it run. --- docs/services.rst | 50 +--------------------------- examples/service/self_contained.py | 52 ++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 49 deletions(-) create mode 100644 examples/service/self_contained.py diff --git a/docs/services.rst b/docs/services.rst index ef402214..e5f0571e 100644 --- a/docs/services.rst +++ b/docs/services.rst @@ -61,55 +61,7 @@ Pool Example ------- -.. code-block:: python - - import mitogen - import mitogen.service - - - class FileService(mitogen.service.Service): - """ - Simple file server, for demonstration purposes only! Use of this in - real code would be a security vulnerability as it would permit children - to read arbitrary files from the master's disk. - """ - handle = 500 - required_args = { - 'path': str - } - - def dispatch(self, args, msg): - with open(args['path'], 'r') as fp: - return fp.read() - - - def download_file(context, path): - s = mitogen.service.call(context, FileService.handle, { - 'path': path - }) - - with open(path, 'w') as fp: - fp.write(s) - - - @mitogen.core.takes_econtext - def download_some_files(paths, econtext): - for path in paths: - download_file(econtext.master, path) - - - @mitogen.main() - def main(router): - pool = mitogen.service.Pool(router, size=1, services=[ - FileService(router), - ]) - - remote = router.ssh(hostname='k3') - remote.call(download_some_files, [ - '/etc/passwd', - '/etc/hosts', - ]) - pool.stop() +.. literalinclude:: ../examples/service/self_contained.py Reference diff --git a/examples/service/self_contained.py b/examples/service/self_contained.py new file mode 100644 index 00000000..332aa24e --- /dev/null +++ b/examples/service/self_contained.py @@ -0,0 +1,52 @@ +import mitogen +import mitogen.service + + +class FileService(mitogen.service.Service): + """ + Simple file server, for demonstration purposes only! Use of this in + real code would be a security vulnerability as it would permit children + to read any file from the master's disk. 
+ """ + + @mitogen.service.expose(policy=mitogen.service.AllowAny()) + @mitogen.service.arg_spec(spec={ + 'path': str + }) + def read_file(self, path): + with open(path, 'rb') as fp: + return fp.read() + + +def download_file(source_context, path): + s = source_context.call_service( + service_name=FileService, # may also be string 'pkg.mod.FileService' + method_name='read_file', + path=path, + ) + + with open(path, 'w') as fp: + fp.write(s) + + +def download_some_files(source_context, paths): + for path in paths: + download_file(source_context, path) + + +@mitogen.main() +def main(router): + pool = mitogen.service.Pool(router, services=[ + FileService(router), + ]) + + remote = router.ssh(hostname='k3') + remote.call(download_some_files, + source_context=router.myself(), + paths=[ + '/etc/passwd', + '/etc/hosts', + ] + ) + pool.stop() + From 2d083d19df8e08f5ff274d37fe8200272c47ae55 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 3 Jun 2019 15:55:50 +0100 Subject: [PATCH 057/383] issue #589: remove outdated/incomplete examples --- docs/changelog.rst | 5 +++-- examples/service/client.py | 15 --------------- examples/service/server.py | 20 -------------------- 3 files changed, 3 insertions(+), 37 deletions(-) delete mode 100644 examples/service/client.py delete mode 100644 examples/service/server.py diff --git a/docs/changelog.rst b/docs/changelog.rst index 9f5ed993..6f1b24f5 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -59,8 +59,9 @@ bug reports, testing, features and fixes in this release contributed by `Anton Markelov `_, `Nigel Metheringham `_, `Orion Poplawski `_, -`Ulrich Schreiner `_, and -`Yuki Nishida `_. +`Ulrich Schreiner `_, +`Yuki Nishida `_, and +`@ghp-rr `_. v0.2.7 (2019-05-19) diff --git a/examples/service/client.py b/examples/service/client.py deleted file mode 100644 index fc2d8427..00000000 --- a/examples/service/client.py +++ /dev/null @@ -1,15 +0,0 @@ - -import mitogen.master -import mitogen.unix -import mitogen.service -import mitogen.utils - - -PING = 500 - - -mitogen.utils.log_to_file() - -router, parent = mitogen.unix.connect('/tmp/mitosock') -with router: - print(mitogen.service.call(parent, CONNECT_BY_ID, {})) diff --git a/examples/service/server.py b/examples/service/server.py deleted file mode 100644 index 1f8c1475..00000000 --- a/examples/service/server.py +++ /dev/null @@ -1,20 +0,0 @@ - -# The service framework will fundamentally change (i.e. become much nicer, and -# hopefully lose those hard-coded magic numbers somehow), but meanwhile this is -# a taster of how it looks today. - -import mitogen -import mitogen.service -import mitogen.unix - - -class PingService(mitogen.service.Service): - def dispatch(self, dct, msg): - return 'Hello, world' - - -@mitogen.main() -def main(router): - listener = mitogen.unix.Listener(router, path='/tmp/mitosock') - service = PingService(router) - service.run() From 687d4033d5edc32235141d7829a8aacc1a5bb6c2 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 27 May 2019 23:11:17 +0100 Subject: [PATCH 058/383] docs: add new contributor entry --- docs/contributors.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/contributors.rst b/docs/contributors.rst index dcfb50fa..584c4cd4 100644 --- a/docs/contributors.rst +++ b/docs/contributors.rst @@ -88,6 +88,9 @@ sponsorship and outstanding future-thinking of its early adopters.

Private Sponsors

+  • SkunkWerks —
+    Mitogen on FreeBSD runs like a kid in a candy store: fast &
+    sweet.
  • Donald Clark Jackson — Mitogen is an exciting project, and I am happy to support its development.
  • From 874e75276f6275f037599bb02ae6042d278b15a7 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 3 Jun 2019 16:17:50 +0100 Subject: [PATCH 059/383] issue #589: ensure real FileService/PushFileService are in the docs --- docs/services.rst | 5 +++++ mitogen/service.py | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/services.rst b/docs/services.rst index e5f0571e..bcf89740 100644 --- a/docs/services.rst +++ b/docs/services.rst @@ -83,6 +83,11 @@ Reference .. autoclass:: mitogen.service.Service :members: +.. autoclass:: mitogen.service.FileService + :members: +.. autoclass:: mitogen.service.PushFileService + :members: + .. autoclass:: mitogen.service.Pool :members: diff --git a/mitogen/service.py b/mitogen/service.py index 302e81ab..942ed4f7 100644 --- a/mitogen/service.py +++ b/mitogen/service.py @@ -625,7 +625,7 @@ class PushFileService(Service): """ Push-based file service. Files are delivered and cached in RAM, sent recursively from parent to child. A child that requests a file via - :meth:`get` will block until it has ben delivered by a parent. + :meth:`get` will block until it has been delivered by a parent. This service will eventually be merged into FileService. """ From 8fc491ac4309b6e18b5bf68e6569b18cbd37e040 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 3 Jun 2019 16:17:50 +0100 Subject: [PATCH 060/383] issue #589: ensure real FileService/PushFileService are in the docs --- docs/services.rst | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/docs/services.rst b/docs/services.rst index bcf89740..85b08e6d 100644 --- a/docs/services.rst +++ b/docs/services.rst @@ -83,11 +83,15 @@ Reference .. autoclass:: mitogen.service.Service :members: -.. autoclass:: mitogen.service.FileService - :members: -.. autoclass:: mitogen.service.PushFileService +.. autoclass:: mitogen.service.Pool :members: -.. autoclass:: mitogen.service.Pool + +Built-in Services +----------------- + +.. autoclass:: mitogen.service.FileService :members: +.. autoclass:: mitogen.service.PushFileService + :members: From 95fd9b815c67300188001c82fb68ed22af6ac94b Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 3 Jun 2019 18:05:40 +0100 Subject: [PATCH 061/383] travis: exclude docs-master from CI --- .travis.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.travis.yml b/.travis.yml index 921ad12b..75b0752d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,6 +6,10 @@ notifications: language: python +branches: + except: + - docs-master + cache: - pip - directories: From 1a32a79fa6343a5c655a0e0cd9d84c79aa11f359 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 3 Jun 2019 20:42:59 +0100 Subject: [PATCH 062/383] issue #578: update Changelog. --- docs/changelog.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index 6f1b24f5..9135c48f 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -34,6 +34,9 @@ Enhancements Fixes ^^^^^ +* `#578 `_: the extension could crash + while rendering an error message, due to an incorrect format string. + * `#590 `_: the importer can handle modules that replace themselves in :mod:`sys.modules` during import. @@ -59,6 +62,7 @@ bug reports, testing, features and fixes in this release contributed by `Anton Markelov `_, `Nigel Metheringham `_, `Orion Poplawski `_, +`Szabó Dániel Ernő `_, `Ulrich Schreiner `_, `Yuki Nishida `_, and `@ghp-rr `_. 
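The #578 changelog entry above records a crash while rendering an error message, caused by an incorrect format string. That failure mode is worth spelling out, since the formatting error replaces the exception it was meant to report; the snippet below only illustrates the general pattern and is not the code that was fixed::

    def render_error(hostname, exc):
        # Two placeholders but only one value: the % operation itself raises
        # TypeError, hiding the original exception entirely.
        return 'error connecting to %s: %s' % (hostname,)

    try:
        raise ValueError('the real problem')
    except ValueError as exc:
        try:
            print(render_error('web1', exc))
        except TypeError as format_exc:
            # The user sees the formatting bug instead of the real failure.
            print('could not render error: %s' % (format_exc,))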
From 418fc15e8017ad2a2a7a3c209c3cdbfb27b9c440 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 3 Jun 2019 20:45:48 +0100 Subject: [PATCH 063/383] tests: allow running without hdrhistograms library. --- tests/ansible/lib/callback/fork_histogram.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/tests/ansible/lib/callback/fork_histogram.py b/tests/ansible/lib/callback/fork_histogram.py index 9ce50e13..15260cb5 100644 --- a/tests/ansible/lib/callback/fork_histogram.py +++ b/tests/ansible/lib/callback/fork_histogram.py @@ -10,7 +10,11 @@ import sys import time import ansible.plugins.callback -import hdrh.histogram + +try: + import hdrh.histogram +except ImportError: + hdrh = None def get_fault_count(who=resource.RUSAGE_CHILDREN): @@ -25,9 +29,9 @@ class CallbackModule(ansible.plugins.callback.CallbackBase): if self.hist is not None: return - self.hist = hdrh.histogram.HdrHistogram(1, int(1e6*60), 3) - self.fork_latency_sum_usec = 0.0 - if 'FORK_HISTOGRAM' in os.environ: + if hdrh and 'FORK_HISTOGRAM' in os.environ: + self.hist = hdrh.histogram.HdrHistogram(1, int(1e6*60), 3) + self.fork_latency_sum_usec = 0.0 self.install() def install(self): @@ -54,7 +58,7 @@ class CallbackModule(ansible.plugins.callback.CallbackBase): self.hist.record_value(latency_usec) def playbook_on_stats(self, stats): - if 'FORK_HISTOGRAM' not in os.environ: + if hdrh is None or 'FORK_HISTOGRAM' not in os.environ: return self_faults = get_fault_count(resource.RUSAGE_SELF) - self.faults_at_start From a766fd3be5f1e58cfc4a6f7486baf9fe75e09e39 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 3 Jun 2019 20:48:46 +0100 Subject: [PATCH 064/383] add .*.pid to gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index cf9c084d..be62d308 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,4 @@ htmlcov/ *.egg-info __pycache__/ extra +tests/ansible/.*.pid From 73a87d425d6712053f81ee0416129f5e6d7ab55a Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 3 Jun 2019 20:49:27 +0100 Subject: [PATCH 065/383] ci: try bumping more Travis jobs to Ansible 2.8. --- .travis.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index 75b0752d..0f6e7d1c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -49,7 +49,7 @@ matrix: env: MODE=debops_common VER=2.4.6.0 # 2.5.7; 3.6 -> 2.7 - python: "3.6" - env: MODE=debops_common VER=2.6.2 + env: MODE=debops_common VER=2.8.0 # ansible_mitogen tests. @@ -61,14 +61,14 @@ matrix: - python: "2.6" env: MODE=ansible VER=2.4.6.0 - python: "2.6" - env: MODE=ansible VER=2.6.2 + env: MODE=ansible VER=2.8.0 # 3.6 -> {debian, centos6, centos7} - python: "3.6" env: MODE=ansible VER=2.4.6.0 - python: "3.6" - env: MODE=ansible VER=2.6.2 + env: MODE=ansible VER=2.8.0 # Sanity check against vanilla Ansible. One job suffices. 
- python: "2.7" - env: MODE=ansible VER=2.6.2 DISTROS=debian STRATEGY=linear + env: MODE=ansible VER=2.8.0 DISTROS=debian STRATEGY=linear From 2f05b93a087f1f1a5d3117fbe6ddfaba4b540f71 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 3 Jun 2019 23:42:25 +0100 Subject: [PATCH 066/383] update gitignore again --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index be62d308..55f37f29 100644 --- a/.gitignore +++ b/.gitignore @@ -14,4 +14,4 @@ htmlcov/ *.egg-info __pycache__/ extra -tests/ansible/.*.pid +**/.*.pid From 7ae926b325fa1de35552bde506420a624b295ff7 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 3 Jun 2019 23:43:15 +0100 Subject: [PATCH 067/383] ansible: prevent tempfile.mkstemp() leaks. This avoids a leak present in Ansible 2.7.0..current HEAD, and all similar leaks. See ansible/ansible#57327. --- ansible_mitogen/runner.py | 41 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/ansible_mitogen/runner.py b/ansible_mitogen/runner.py index 05bc55c0..843ffe19 100644 --- a/ansible_mitogen/runner.py +++ b/ansible_mitogen/runner.py @@ -112,6 +112,45 @@ else: for token in shlex.split(str(s), comments=comments)] +class TempFileWatcher(object): + """ + Since Ansible 2.7.0, lineinfile leaks file descriptors returned by + :func:`tempfile.mkstemp` (ansible/ansible#57327). Handle this and all + similar cases by recording descriptors produced by mkstemp during module + execution, and cleaning up any leaked descriptors on completion. + """ + def __init__(self): + self._real_mkstemp = tempfile.mkstemp + # (fd, st.st_dev, st.st_ino) + self._fd_dev_inode = [] + tempfile.mkstemp = self._wrap_mkstemp + + def _wrap_mkstemp(self, *args, **kwargs): + fd, path = self._real_mkstemp(*args, **kwargs) + st = os.fstat(fd) + self._fd_dev_inode.append((fd, st.st_dev, st.st_ino)) + return fd, path + + def revert(self): + tempfile.mkstemp = self._real_mkstemp + for tup in self._fd_dev_inode: + self._revert_one(*tup) + + def _revert_one(self, fd, st_dev, st_ino): + try: + st = os.fstat(fd) + except OSError: + # FD no longer exists. + return + + if not (st.st_dev == st_dev and st.st_ino == st_ino): + # FD reused. + return + + LOG.info("a tempfile.mkstemp() FD was leaked during the last task") + os.close(fd) + + class EnvironmentFileWatcher(object): """ Usually Ansible edits to /etc/environment and ~/.pam_environment are @@ -803,6 +842,7 @@ class NewStyleRunner(ScriptRunner): # module, but this has never been a bug report. Instead act like an # interpreter that had its script piped on stdin. 
self._argv = TemporaryArgv(['']) + self._temp_watcher = TempFileWatcher() self._importer = ModuleUtilsImporter( context=self.service_context, module_utils=self.module_map['custom'], @@ -818,6 +858,7 @@ class NewStyleRunner(ScriptRunner): def revert(self): self.atexit_wrapper.revert() + self._temp_watcher.revert() self._argv.revert() self._stdio.revert() self._revert_excepthook() From 08e7fe4f80e31f529034c6d9453e6ad3acdcc4fc Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 4 Jun 2019 00:47:15 +0100 Subject: [PATCH 068/383] tests: add 2.8 format async error timeout message --- .../integration/async/runner_with_polling_and_timeout.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/ansible/integration/async/runner_with_polling_and_timeout.yml b/tests/ansible/integration/async/runner_with_polling_and_timeout.yml index 6d87fe6c..dcfa186f 100644 --- a/tests/ansible/integration/async/runner_with_polling_and_timeout.yml +++ b/tests/ansible/integration/async/runner_with_polling_and_timeout.yml @@ -20,5 +20,6 @@ - job1.failed == True - | job1.msg == "async task did not complete within the requested time" or + job1.msg == "async task did not complete within the requested time - 1s" or job1.msg == "Job reached maximum time limit of 1 seconds." From ab9a80cfd43dc22fba1f950387db2fb58f0287be Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 4 Jun 2019 00:50:06 +0100 Subject: [PATCH 069/383] ci: Ansible 2.8 requires Python 2.7. --- .travis.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 0f6e7d1c..206499c1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -47,7 +47,7 @@ matrix: # 2.4.6.0; 2.7 -> 2.7 - python: "2.7" env: MODE=debops_common VER=2.4.6.0 - # 2.5.7; 3.6 -> 2.7 + # 2.8.0; 3.6 -> 2.7 - python: "3.6" env: MODE=debops_common VER=2.8.0 @@ -60,7 +60,8 @@ matrix: # 2.6 -> {debian, centos6, centos7} - python: "2.6" env: MODE=ansible VER=2.4.6.0 - - python: "2.6" + # 2.7 -> {debian, centos6, centos7} + - python: "2.7" env: MODE=ansible VER=2.8.0 # 3.6 -> {debian, centos6, centos7} From ad5a80f20097449ea56e39d75d9b7c1609d6ab2a Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 4 Jun 2019 13:23:16 +0100 Subject: [PATCH 070/383] Use virtualenv Python for stub connections to workaround problem ../data/stubs/stub-kubectl.py exec -it localhost -- /usr/bin/python -c "...": Traceback (most recent call last): File "", line 1, in LookupError: unknown encoding: base64 It's not clear why this is happening. "stub-kubectl.py" is executed with the 2.7 virtualenv, while the exec() that happens inside stub-kubectl was for "/usr/bin/python". 
That second Python can't find chunks of its stdlib: stat("/usr/lib/python2.7/encodings/base64", 0x7ffde8744c60) = -1 ENOENT (No such file or directory) open("/usr/lib/python2.7/encodings/base64.so", O_RDONLY) = -1 ENOENT (No such file or directory) open("/usr/lib/python2.7/encodings/base64module.so", O_RDONLY) = -1 ENOENT (No such file or directory) open("/usr/lib/python2.7/encodings/base64.py", O_RDONLY) = -1 ENOENT (No such file or directory) open("/usr/lib/python2.7/encodings/base64.pyc", O_RDONLY) = -1 ENOENT (No such file or directory) write(2, "Traceback (most recent call last):\n", 35) = 35 write(2, " File \"\", line 1, in \n", 39) = 39 --- tests/ansible/integration/stub_connections/kubectl.yml | 1 + tests/ansible/integration/stub_connections/lxc.yml | 1 + tests/ansible/integration/stub_connections/lxd.yml | 1 + tests/ansible/integration/stub_connections/mitogen_doas.yml | 1 + tests/ansible/integration/stub_connections/mitogen_sudo.yml | 1 + tests/ansible/integration/stub_connections/setns_lxc.yml | 1 + tests/ansible/integration/stub_connections/setns_lxd.yml | 1 + 7 files changed, 7 insertions(+) diff --git a/tests/ansible/integration/stub_connections/kubectl.yml b/tests/ansible/integration/stub_connections/kubectl.yml index ba53d1e0..867a8c17 100644 --- a/tests/ansible/integration/stub_connections/kubectl.yml +++ b/tests/ansible/integration/stub_connections/kubectl.yml @@ -13,6 +13,7 @@ - custom_python_detect_environment: vars: ansible_connection: kubectl + ansible_python_interpreter: python # avoid Travis virtualenv breakage mitogen_kubectl_path: stub-kubectl.py register: out diff --git a/tests/ansible/integration/stub_connections/lxc.yml b/tests/ansible/integration/stub_connections/lxc.yml index 7a2cd81c..1dbe2a48 100644 --- a/tests/ansible/integration/stub_connections/lxc.yml +++ b/tests/ansible/integration/stub_connections/lxc.yml @@ -10,6 +10,7 @@ - custom_python_detect_environment: vars: ansible_connection: lxc + ansible_python_interpreter: python # avoid Travis virtualenv breakage mitogen_lxc_attach_path: stub-lxc-attach.py register: out diff --git a/tests/ansible/integration/stub_connections/lxd.yml b/tests/ansible/integration/stub_connections/lxd.yml index 86f4b185..7839a35f 100644 --- a/tests/ansible/integration/stub_connections/lxd.yml +++ b/tests/ansible/integration/stub_connections/lxd.yml @@ -10,6 +10,7 @@ - custom_python_detect_environment: vars: ansible_connection: lxd + ansible_python_interpreter: python # avoid Travis virtualenv breakage mitogen_lxc_path: stub-lxc.py register: out diff --git a/tests/ansible/integration/stub_connections/mitogen_doas.yml b/tests/ansible/integration/stub_connections/mitogen_doas.yml index 3c1459e9..5387744e 100644 --- a/tests/ansible/integration/stub_connections/mitogen_doas.yml +++ b/tests/ansible/integration/stub_connections/mitogen_doas.yml @@ -10,6 +10,7 @@ - custom_python_detect_environment: vars: ansible_connection: mitogen_doas + ansible_python_interpreter: python # avoid Travis virtualenv breakage ansible_doas_exe: stub-doas.py ansible_user: someuser register: out diff --git a/tests/ansible/integration/stub_connections/mitogen_sudo.yml b/tests/ansible/integration/stub_connections/mitogen_sudo.yml index b7ca3d26..e78afebc 100644 --- a/tests/ansible/integration/stub_connections/mitogen_sudo.yml +++ b/tests/ansible/integration/stub_connections/mitogen_sudo.yml @@ -10,6 +10,7 @@ - custom_python_detect_environment: vars: ansible_connection: mitogen_sudo + ansible_python_interpreter: python # avoid Travis virtualenv breakage 
ansible_user: root ansible_become_exe: stub-sudo.py ansible_become_flags: -H --type=sometype --role=somerole diff --git a/tests/ansible/integration/stub_connections/setns_lxc.yml b/tests/ansible/integration/stub_connections/setns_lxc.yml index c57a8c5c..efef3761 100644 --- a/tests/ansible/integration/stub_connections/setns_lxc.yml +++ b/tests/ansible/integration/stub_connections/setns_lxc.yml @@ -18,6 +18,7 @@ -i localhost, -c setns -e mitogen_kind=lxc + -e ansible_python_interpreter=python -e mitogen_lxc_info_path={{git_basedir}}/tests/data/stubs/stub-lxc-info.py -m shell -a "echo hi" diff --git a/tests/ansible/integration/stub_connections/setns_lxd.yml b/tests/ansible/integration/stub_connections/setns_lxd.yml index 7db47661..adee0b14 100644 --- a/tests/ansible/integration/stub_connections/setns_lxd.yml +++ b/tests/ansible/integration/stub_connections/setns_lxd.yml @@ -18,6 +18,7 @@ -i localhost, -c setns -e mitogen_kind=lxd + -e ansible_python_interpreter=python -e mitogen_lxc_path={{git_basedir}}/tests/data/stubs/stub-lxc.py -m shell -a "echo hi" From d981a382c97ee86f276d9f2944da82347135e5b6 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 4 Jun 2019 01:34:42 +0100 Subject: [PATCH 071/383] ci: work around various broken aspects of Travis VM image - Symlink broken Ubuntu Python package pieces back together. See many Google hits about this issue. - Remove apt sources that can no longer be updated. --- .ci/ansible_tests.py | 2 ++ .travis.yml | 1 + 2 files changed, 3 insertions(+) diff --git a/.ci/ansible_tests.py b/.ci/ansible_tests.py index 8d2d8bba..51eab874 100755 --- a/.ci/ansible_tests.py +++ b/.ci/ansible_tests.py @@ -63,6 +63,8 @@ with ci_lib.Fold('job_setup'): run("sudo apt-get update") run("sudo apt-get install -y sshpass") + run("bash -c 'sudo ln -vfs /usr/lib/python2.7/plat-x86_64-linux-gnu/_sysconfigdata_nd.py /usr/lib/python2.7 || true'") + run("bash -c 'sudo ln -vfs /usr/lib/python2.7/plat-x86_64-linux-gnu/_sysconfigdata_nd.py $VIRTUAL_ENV/lib/python2.7 || true'") with ci_lib.Fold('ansible'): playbook = os.environ.get('PLAYBOOK', 'all.yml') diff --git a/.travis.yml b/.travis.yml index 206499c1..eae04cb0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,6 +16,7 @@ cache: - /home/travis/virtualenv install: +- grep -Erl git-lfs\|couchdb /etc/apt | sudo xargs rm -v - .ci/${MODE}_install.py script: From ee7dae75146be984abf68279151311b1b8e3dc73 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 4 Jun 2019 14:19:08 +0100 Subject: [PATCH 072/383] ci: Another round of fixes for random Ansible UI breakage in 2.7/2.8 --- .../runner/custom_binary_single_null.yml | 4 ++-- ...custom_python_new_style_missing_interpreter.yml | 14 +++++++------- .../runner/custom_python_new_style_module.yml | 14 +++++++------- .../custom_python_new_style_missing_interpreter.py | 4 ++++ .../lib/modules/custom_python_new_style_module.py | 4 ++++ 5 files changed, 24 insertions(+), 16 deletions(-) diff --git a/tests/ansible/integration/runner/custom_binary_single_null.yml b/tests/ansible/integration/runner/custom_binary_single_null.yml index d8a1af0c..8e215bf3 100644 --- a/tests/ansible/integration/runner/custom_binary_single_null.yml +++ b/tests/ansible/integration/runner/custom_binary_single_null.yml @@ -17,8 +17,8 @@ - "out.results[0].msg.startswith('MODULE FAILURE')" - "out.results[0].module_stdout.startswith('/bin/sh: ')" - | - out.results[0].module_stdout.endswith('/custom_binary_single_null: cannot execute binary file\r\n') or - out.results[0].module_stdout.endswith('/custom_binary_single_null: 
Exec format error\r\n') + out.results[0].module_stdout.endswith('custom_binary_single_null: cannot execute binary file\r\n') or + out.results[0].module_stdout.endswith('custom_binary_single_null: Exec format error\r\n') # Can't test this: Mitogen returns 126, 2.5.x returns 126, 2.4.x discarded the diff --git a/tests/ansible/integration/runner/custom_python_new_style_missing_interpreter.yml b/tests/ansible/integration/runner/custom_python_new_style_missing_interpreter.yml index 9f7d08ba..77f2cb5c 100644 --- a/tests/ansible/integration/runner/custom_python_new_style_missing_interpreter.yml +++ b/tests/ansible/integration/runner/custom_python_new_style_missing_interpreter.yml @@ -5,13 +5,13 @@ tasks: - custom_python_new_style_missing_interpreter: foo: true - with_sequence: start=1 end={{end|default(1)}} + with_sequence: start=0 end={{end|default(1)}} register: out - assert: - that: | - (not out.changed) and - (not out.results[0].changed) and - out.results[0].input[0].ANSIBLE_MODULE_ARGS.foo and - out.results[0].msg == 'Here is my input' - + that: + - "not out.changed" + - "not out.results[0].changed" + # Random breaking interface change since 2.7.x + #- "out.results[0].input[0].ANSIBLE_MODULE_ARGS.foo" + - "out.results[0].msg == 'Here is my input'" diff --git a/tests/ansible/integration/runner/custom_python_new_style_module.yml b/tests/ansible/integration/runner/custom_python_new_style_module.yml index d86bff4a..0d29d0ac 100644 --- a/tests/ansible/integration/runner/custom_python_new_style_module.yml +++ b/tests/ansible/integration/runner/custom_python_new_style_module.yml @@ -4,16 +4,16 @@ tasks: - custom_python_new_style_module: foo: true - with_sequence: start=1 end={{end|default(1)}} + with_sequence: start=0 end={{end|default(1)}} register: out - assert: - that: | - (not out.changed) and - (not out.results[0].changed) and - out.results[0].input[0].ANSIBLE_MODULE_ARGS.foo and - out.results[0].msg == 'Here is my input' - + that: + - "not out.changed" + - "not out.results[0].changed" + # Random breaking interface change since 2.7.x + #- "out.results[0].input[0].ANSIBLE_MODULE_ARGS.foo" + - "out.results[0].msg == 'Here is my input'" # Verify sys.argv is not Unicode. - custom_python_detect_environment: diff --git a/tests/ansible/lib/modules/custom_python_new_style_missing_interpreter.py b/tests/ansible/lib/modules/custom_python_new_style_missing_interpreter.py index 66264010..eea4baa4 100644 --- a/tests/ansible/lib/modules/custom_python_new_style_missing_interpreter.py +++ b/tests/ansible/lib/modules/custom_python_new_style_missing_interpreter.py @@ -17,3 +17,7 @@ print(" \"changed\": false,") print(" \"msg\": \"Here is my input\",") print(" \"input\": [%s]" % (input_json,)) print("}") + +# Ansible since 2.7.0/52449cc01a7 broke __file__ and *requires* the module +# process to exit itself. So needless. +sys.exit(0) diff --git a/tests/ansible/lib/modules/custom_python_new_style_module.py b/tests/ansible/lib/modules/custom_python_new_style_module.py index 70ee062d..f9c176c1 100755 --- a/tests/ansible/lib/modules/custom_python_new_style_module.py +++ b/tests/ansible/lib/modules/custom_python_new_style_module.py @@ -23,3 +23,7 @@ print(" \"__package__\": \"%s\"," % (__package__,)) print(" \"msg\": \"Here is my input\",") print(" \"input\": [%s]" % (input_json,)) print("}") + +# Ansible since 2.7.0/52449cc01a7 broke __file__ and *requires* the module +# process to exit itself. So needless. 
+sys.exit(0) From 1a02a863313fd99ea1d27d11190efa008cec8353 Mon Sep 17 00:00:00 2001 From: Jordan Webb Date: Sat, 8 Jun 2019 18:06:30 -0500 Subject: [PATCH 073/383] Add buildah transport --- ansible_mitogen/connection.py | 15 ++++ .../plugins/connection/mitogen_buildah.py | 44 +++++++++++ ansible_mitogen/strategy.py | 4 +- ansible_mitogen/transport_config.py | 12 +++ mitogen/buildah.py | 73 +++++++++++++++++++ mitogen/core.py | 1 + mitogen/parent.py | 3 + 7 files changed, 150 insertions(+), 2 deletions(-) create mode 100644 ansible_mitogen/plugins/connection/mitogen_buildah.py create mode 100644 mitogen/buildah.py diff --git a/ansible_mitogen/connection.py b/ansible_mitogen/connection.py index 1b6a2e9a..42fa2ef8 100644 --- a/ansible_mitogen/connection.py +++ b/ansible_mitogen/connection.py @@ -154,6 +154,20 @@ def _connect_ssh(spec): } } +def _connect_buildah(spec): + """ + Return ContextService arguments for a Buildah connection. + """ + return { + 'method': 'buildah', + 'kwargs': { + 'username': spec.remote_user(), + 'container': spec.remote_addr(), + 'python_path': spec.python_path(), + 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(), + 'remote_name': get_remote_name(spec), + } + } def _connect_docker(spec): """ @@ -373,6 +387,7 @@ def _connect_mitogen_doas(spec): #: generating ContextService keyword arguments matching a connection #: specification. CONNECTION_METHOD = { + 'buildah': _connect_buildah, 'docker': _connect_docker, 'kubectl': _connect_kubectl, 'jail': _connect_jail, diff --git a/ansible_mitogen/plugins/connection/mitogen_buildah.py b/ansible_mitogen/plugins/connection/mitogen_buildah.py new file mode 100644 index 00000000..017214b2 --- /dev/null +++ b/ansible_mitogen/plugins/connection/mitogen_buildah.py @@ -0,0 +1,44 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +from __future__ import absolute_import +import os.path +import sys + +try: + import ansible_mitogen +except ImportError: + base_dir = os.path.dirname(__file__) + sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) + del base_dir + +import ansible_mitogen.connection + + +class Connection(ansible_mitogen.connection.Connection): + transport = 'buildah' diff --git a/ansible_mitogen/strategy.py b/ansible_mitogen/strategy.py index b6b9aaf2..01dff285 100644 --- a/ansible_mitogen/strategy.py +++ b/ansible_mitogen/strategy.py @@ -139,8 +139,8 @@ def wrap_connection_loader__get(name, *args, **kwargs): While the strategy is active, rewrite connection_loader.get() calls for some transports into requests for a compatible Mitogen transport. """ - if name in ('docker', 'kubectl', 'jail', 'local', 'lxc', - 'lxd', 'machinectl', 'setns', 'ssh'): + if name in ('buildah', 'docker', 'kubectl', 'jail', 'local', + 'lxc', 'lxd', 'machinectl', 'setns', 'ssh'): name = 'mitogen_' + name return connection_loader__get(name, *args, **kwargs) diff --git a/ansible_mitogen/transport_config.py b/ansible_mitogen/transport_config.py index 27e368d4..aa4a16d0 100644 --- a/ansible_mitogen/transport_config.py +++ b/ansible_mitogen/transport_config.py @@ -240,6 +240,12 @@ class Spec(with_metaclass(abc.ABCMeta, object)): undesirable in some circumstances. """ + @abc.abstractmethod + def mitogen_buildah_path(self): + """ + The path to the "buildah" program for the 'buildah' transport. + """ + @abc.abstractmethod def mitogen_docker_path(self): """ @@ -424,6 +430,9 @@ class PlayContextSpec(Spec): def mitogen_mask_remote_name(self): return self._connection.get_task_var('mitogen_mask_remote_name') + def mitogen_buildah_path(self): + return self._connection.get_task_var('mitogen_buildah_path') + def mitogen_docker_path(self): return self._connection.get_task_var('mitogen_docker_path') @@ -647,6 +656,9 @@ class MitogenViaSpec(Spec): def mitogen_mask_remote_name(self): return self._host_vars.get('mitogen_mask_remote_name') + def mitogen_buildah_path(self): + return self._host_vars.get('mitogen_buildah_path') + def mitogen_docker_path(self): return self._host_vars.get('mitogen_docker_path') diff --git a/mitogen/buildah.py b/mitogen/buildah.py new file mode 100644 index 00000000..eec415f3 --- /dev/null +++ b/mitogen/buildah.py @@ -0,0 +1,73 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +import logging + +import mitogen.core +import mitogen.parent + + +LOG = logging.getLogger(__name__) + + +class Stream(mitogen.parent.Stream): + child_is_immediate_subprocess = False + + container = None + username = None + buildah_path = 'buildah' + + # TODO: better way of capturing errors such as "No such container." + create_child_args = { + 'merge_stdio': True + } + + def construct(self, container=None, + buildah_path=None, username=None, + **kwargs): + assert container or image + super(Stream, self).construct(**kwargs) + if container: + self.container = container + if buildah_path: + self.buildah_path = buildah_path + if username: + self.username = username + + def _get_name(self): + return u'buildah.' + self.container + + def get_boot_command(self): + args = [] + if self.username: + args += ['--user=' + self.username] + bits = [self.buildah_path, 'run'] + args + ['--', self.container] + + return bits + super(Stream, self).get_boot_command() diff --git a/mitogen/core.py b/mitogen/core.py index 0d88d7f0..ea83f961 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -1089,6 +1089,7 @@ class Importer(object): # The Mitogen package is handled specially, since the child context must # construct it manually during startup. 
MITOGEN_PKG_CONTENT = [ + 'buildah', 'compat', 'debug', 'doas', diff --git a/mitogen/parent.py b/mitogen/parent.py index 3d02bc43..113fdc2e 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -2163,6 +2163,9 @@ class Router(mitogen.core.Router): self._write_lock.release() return context + def buildah(self, **kwargs): + return self.connect(u'buildah', **kwargs) + def doas(self, **kwargs): return self.connect(u'doas', **kwargs) From d71bdd1694dae37299b9fe1273a1eeaf9edb9259 Mon Sep 17 00:00:00 2001 From: Jordan Webb Date: Mon, 10 Jun 2019 13:17:54 -0500 Subject: [PATCH 074/383] Add buildah test and stub --- tests/buildah_test.py | 28 ++++++++++++++++++++++++++++ tests/data/stubs/stub-buildah.py | 8 ++++++++ 2 files changed, 36 insertions(+) create mode 100644 tests/buildah_test.py create mode 100755 tests/data/stubs/stub-buildah.py diff --git a/tests/buildah_test.py b/tests/buildah_test.py new file mode 100644 index 00000000..dad2534f --- /dev/null +++ b/tests/buildah_test.py @@ -0,0 +1,28 @@ +import os + +import mitogen + +import unittest2 + +import testlib + + +class ConstructorTest(testlib.RouterMixin, testlib.TestCase): + def test_okay(self): + buildah_path = testlib.data_path('stubs/stub-buildah.py') + context = self.router.buildah( + container='container_name', + buildah_path=buildah_path, + ) + stream = self.router.stream_by_id(context.context_id) + + argv = eval(context.call(os.getenv, 'ORIGINAL_ARGV')) + self.assertEquals(argv[0], buildah_path) + self.assertEquals(argv[1], 'run') + self.assertEquals(argv[2], '--') + self.assertEquals(argv[3], 'container_name') + self.assertEquals(argv[4], stream.python_path) + + +if __name__ == '__main__': + unittest2.main() diff --git a/tests/data/stubs/stub-buildah.py b/tests/data/stubs/stub-buildah.py new file mode 100755 index 00000000..558f117a --- /dev/null +++ b/tests/data/stubs/stub-buildah.py @@ -0,0 +1,8 @@ +#!/usr/bin/env python + +import sys +import os + +os.environ['ORIGINAL_ARGV'] = repr(sys.argv) +os.environ['THIS_IS_STUB_BUILDAH'] = '1' +os.execv(sys.executable, sys.argv[sys.argv.index('--') + 2:]) From 2fbc77a1559e1f9c2b6efcac0289e1a90bf39264 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 9 Mar 2019 00:21:25 +0000 Subject: [PATCH 075/383] issue #170: implement timers. --- mitogen/core.py | 18 +++++ mitogen/master.py | 1 + mitogen/parent.py | 70 ++++++++++++++--- tests/broker_test.py | 1 + tests/timer_test.py | 174 +++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 254 insertions(+), 10 deletions(-) create mode 100644 tests/timer_test.py diff --git a/mitogen/core.py b/mitogen/core.py index ea83f961..36b4c74c 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -2836,6 +2836,11 @@ class Router(object): self.broker.defer(self._async_route, msg) +class NullTimerList(object): + def get_timeout(self): + return None + + class Broker(object): """ Responsible for handling I/O multiplexing in a private thread. @@ -2847,6 +2852,10 @@ class Broker(object): _waker = None _thread = None + # :func:`mitogen.parent._upgrade_broker` replaces this with + # :class:`mitogen.parent.TimerList` during upgrade. + timers = NullTimerList() + #: Seconds grace to allow :class:`streams ` to shutdown gracefully #: before force-disconnecting them during :meth:`shutdown`. 
shutdown_timeout = 3.0 @@ -2975,10 +2984,19 @@ class Broker(object): """ _vv and IOLOG.debug('%r._loop_once(%r, %r)', self, timeout, self.poller) + + timer_to = self.timers.get_timeout() + if timeout is None: + timeout = timer_to + elif timer_to is not None and timer_to < timeout: + timeout = timer_to + #IOLOG.debug('readers =\n%s', pformat(self.poller.readers)) #IOLOG.debug('writers =\n%s', pformat(self.poller.writers)) for side, func in self.poller.poll(timeout): self._call(side.stream, func) + if timer_to is not None: + self.timers.expire() def _broker_exit(self): """ diff --git a/mitogen/master.py b/mitogen/master.py index fb4f505b..2db78ba0 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -1061,6 +1061,7 @@ class Broker(mitogen.core.Broker): on_join=self.shutdown, ) super(Broker, self).__init__() + self.timers = mitogen.parent.TimerList() def shutdown(self): super(Broker, self).shutdown() diff --git a/mitogen/parent.py b/mitogen/parent.py index 113fdc2e..2c8eab8a 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -38,6 +38,7 @@ import codecs import errno import fcntl import getpass +import heapq import inspect import logging import os @@ -577,6 +578,52 @@ def write_all(fd, s, deadline=None): poller.close() +class Timer(object): + """ + Represents an unexpired timed callback. + """ + def __init__(self, timer_list, when, func): + self.timer_list = timer_list + self.when = when + self.func = func + self.cancelled = False + + def __eq__(self, other): + return self.when == other.when + + def __lt__(self, other): + return self.when < other.when + + def cancel(self): + self.cancelled = True + + +class TimerList(object): + """ + Represent a series of future events. + """ + _now = time.time + + def __init__(self): + self._lst = [] + + def get_timeout(self): + if self._lst: + return max(0, self._lst[0].when - self._now()) + + def schedule(self, when, func): + timer = Timer(self, when, func) + heapq.heappush(self._lst, timer) + return timer + + def expire(self): + now = self._now() + while self._lst and self._lst[0].when <= now: + timer = heapq.heappop(self._lst) + if not timer.cancelled: + timer.func() + + class PartialZlib(object): """ Because the mitogen.core source has a line appended to it during bootstrap, @@ -726,17 +773,20 @@ def _upgrade_broker(broker): root = logging.getLogger() old_level = root.level root.setLevel(logging.CRITICAL) + try: + old = broker.poller + new = PREFERRED_POLLER() + for fd, data in old.readers: + new.start_receive(fd, data) + for fd, data in old.writers: + new.start_transmit(fd, data) + + old.close() + broker.poller = new + finally: + root.setLevel(old_level) - old = broker.poller - new = PREFERRED_POLLER() - for fd, data in old.readers: - new.start_receive(fd, data) - for fd, data in old.writers: - new.start_transmit(fd, data) - - old.close() - broker.poller = new - root.setLevel(old_level) + broker.timer_list = TimerList() LOG.debug('replaced %r with %r (new: %d readers, %d writers; ' 'old: %d readers, %d writers)', old, new, len(new.readers), len(new.writers), diff --git a/tests/broker_test.py b/tests/broker_test.py index 23839a54..2212d8aa 100644 --- a/tests/broker_test.py +++ b/tests/broker_test.py @@ -1,4 +1,5 @@ +import time import threading import mock diff --git a/tests/timer_test.py b/tests/timer_test.py new file mode 100644 index 00000000..e4637ed5 --- /dev/null +++ b/tests/timer_test.py @@ -0,0 +1,174 @@ + +import time + +import mock +import unittest2 + +import mitogen.core +import mitogen.parent + +import testlib + + +class 
TimerListMixin(object): + klass = mitogen.parent.TimerList + + def setUp(self): + self.list = self.klass() + + +class GetTimeoutTest(TimerListMixin, testlib.TestCase): + def test_empty(self): + self.assertEquals(None, self.list.get_timeout()) + + def test_one_event(self): + self.list.schedule(2, lambda: None) + self.list._now = lambda: 1 + self.assertEquals(1, self.list.get_timeout()) + + def test_two_events_same_moment(self): + self.list.schedule(2, lambda: None) + self.list.schedule(2, lambda: None) + self.list._now = lambda: 1 + self.assertEquals(1, self.list.get_timeout()) + + def test_two_events(self): + self.list.schedule(2, lambda: None) + self.list.schedule(3, lambda: None) + self.list._now = lambda: 1 + self.assertEquals(1, self.list.get_timeout()) + + def test_two_events_expired(self): + self.list.schedule(2, lambda: None) + self.list.schedule(3, lambda: None) + self.list._now = lambda: 3 + self.assertEquals(0, self.list.get_timeout()) + + def test_two_events_in_past(self): + self.list.schedule(2, lambda: None) + self.list.schedule(3, lambda: None) + self.list._now = lambda: 30 + self.assertEquals(0, self.list.get_timeout()) + + def test_two_events_in_past(self): + self.list.schedule(2, lambda: None) + self.list.schedule(3, lambda: None) + self.list._now = lambda: 30 + self.assertEquals(0, self.list.get_timeout()) + + +class ScheduleTest(TimerListMixin, testlib.TestCase): + def test_in_past(self): + self.list._now = lambda: 30 + timer = self.list.schedule(29, lambda: None) + self.assertEquals(29, timer.when) + self.assertEquals(0, self.list.get_timeout()) + + def test_in_future(self): + self.list._now = lambda: 30 + timer = self.list.schedule(31, lambda: None) + self.assertEquals(31, timer.when) + self.assertEquals(1, self.list.get_timeout()) + + def test_same_moment(self): + self.list._now = lambda: 30 + timer = self.list.schedule(31, lambda: None) + timer2 = self.list.schedule(31, lambda: None) + self.assertEquals(31, timer.when) + self.assertEquals(31, timer2.when) + self.assertTrue(timer is not timer2) + self.assertEquals(1, self.list.get_timeout()) + + +class ExpireTest(TimerListMixin, testlib.TestCase): + def test_in_past(self): + timer = self.list.schedule(29, mock.Mock()) + self.list._now = lambda: 30 + self.list.expire() + self.assertEquals(1, len(timer.func.mock_calls)) + + def test_in_future(self): + timer = self.list.schedule(29, mock.Mock()) + self.list._now = lambda: 28 + self.list.expire() + self.assertEquals(0, len(timer.func.mock_calls)) + + def test_same_moment(self): + timer = self.list.schedule(29, mock.Mock()) + timer2 = self.list.schedule(29, mock.Mock()) + self.list._now = lambda: 29 + self.list.expire() + self.assertEquals(1, len(timer.func.mock_calls)) + self.assertEquals(1, len(timer2.func.mock_calls)) + + def test_cancelled(self): + self.list._now = lambda: 29 + timer = self.list.schedule(29, mock.Mock()) + timer.cancel() + self.assertEquals(0, self.list.get_timeout()) + self.list._now = lambda: 29 + self.list.expire() + self.assertEquals(0, len(timer.func.mock_calls)) + self.assertEquals(None, self.list.get_timeout()) + + +class CancelTest(TimerListMixin, testlib.TestCase): + def test_single_cancel(self): + self.list._now = lambda: 29 + timer = self.list.schedule(29, mock.Mock()) + timer.cancel() + self.list.expire() + self.assertEquals(0, len(timer.func.mock_calls)) + + def test_double_cancel(self): + self.list._now = lambda: 29 + timer = self.list.schedule(29, mock.Mock()) + timer.cancel() + timer.cancel() + self.list.expire() + self.assertEquals(0, 
len(timer.func.mock_calls)) + + +@mitogen.core.takes_econtext +def do_timer_test_econtext(econtext): + do_timer_test(econtext.broker) + + +def do_timer_test(broker): + now = time.time() + latch = mitogen.core.Latch() + broker.defer(lambda: + broker.timers.schedule( + now + 0.250, + lambda: latch.put('hi'), + ) + ) + + assert 'hi' == latch.get() + assert time.time() > (now + 0.250) + + +class BrokerTimerTest(testlib.TestCase): + klass = mitogen.master.Broker + + def test_call_later(self): + broker = self.klass() + try: + do_timer_test(broker) + finally: + broker.shutdown() + broker.join() + + def test_child_upgrade(self): + router = mitogen.master.Router() + try: + c = router.local() + c.call(mitogen.parent.upgrade_router) + c.call(do_timer_test_econtext) + finally: + router.broker.shutdown() + router.broker.join() + + +if __name__ == '__main__': + unittest2.main() From 870e0b6e2d6834a10e6d298a7db91f28e3a87ac0 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 9 Mar 2019 00:21:25 +0000 Subject: [PATCH 076/383] issue #170: add timers to internals.rst. --- docs/internals.rst | 102 +++++++++++++++++++++++---------------------- 1 file changed, 53 insertions(+), 49 deletions(-) diff --git a/docs/internals.rst b/docs/internals.rst index e1dd4a41..96f9269c 100644 --- a/docs/internals.rst +++ b/docs/internals.rst @@ -15,46 +15,49 @@ Constants .. autodata:: CHUNK_SIZE -Poller Classes -============== +Pollers +======= .. currentmodule:: mitogen.core .. autoclass:: Poller - :members: + :members: + +.. currentmodule:: mitogen.parent +.. autoclass:: KqueuePoller .. currentmodule:: mitogen.parent .. autoclass:: EpollPoller .. currentmodule:: mitogen.parent -.. autoclass:: KqueuePoller +.. autoclass:: PollPoller -Latch Class -=========== +Latch +===== .. currentmodule:: mitogen.core .. autoclass:: Latch :members: -PidfulStreamHandler Class -========================= +PidfulStreamHandler +=================== .. currentmodule:: mitogen.core .. autoclass:: PidfulStreamHandler :members: -Side Class -========== +Side +==== .. currentmodule:: mitogen.core .. autoclass:: Side :members: -Stream Classes -============== +Stream +====== .. currentmodule:: mitogen.core .. autoclass:: BasicStream @@ -79,42 +82,24 @@ Stream Classes .. autoclass:: Stream :members: - -Other Stream Subclasses -======================= - .. currentmodule:: mitogen.core - .. autoclass:: IoLogger :members: +.. currentmodule:: mitogen.core .. autoclass:: Waker :members: -Poller Class -============ - -.. currentmodule:: mitogen.core -.. autoclass:: Poller - :members: - -.. currentmodule:: mitogen.parent -.. autoclass:: KqueuePoller - -.. currentmodule:: mitogen.parent -.. autoclass:: EpollPoller - - -Importer Class -============== +Importer +======== .. currentmodule:: mitogen.core .. autoclass:: Importer :members: -Responder Class +ModuleResponder =============== .. currentmodule:: mitogen.master @@ -122,40 +107,59 @@ Responder Class :members: -RouteMonitor Class -================== +RouteMonitor +============ .. currentmodule:: mitogen.parent .. autoclass:: RouteMonitor :members: -Forwarder Class -=============== +TimerList +========= + +.. currentmodule:: mitogen.parent +.. autoclass:: TimerList + :members: + + +Timer +===== + +.. currentmodule:: mitogen.parent +.. autoclass:: Timer + :members: + + +Forwarder +========= .. currentmodule:: mitogen.parent .. autoclass:: ModuleForwarder :members: -ExternalContext Class -===================== +ExternalContext +=============== .. currentmodule:: mitogen.core .. 
autoclass:: ExternalContext :members: -mitogen.master -============== +Process +======= .. currentmodule:: mitogen.parent -.. autoclass:: ProcessMonitor +.. autoclass:: Process :members: -Blocking I/O Functions -====================== +Helpers +======= + +Blocking I/O +------------ These functions exist to support the blocking phase of setting up a new context. They will eventually be replaced with asynchronous equivalents. @@ -167,8 +171,8 @@ context. They will eventually be replaced with asynchronous equivalents. .. autofunction:: write_all -Subprocess Creation Functions -============================= +Subprocess Functions +------------ .. currentmodule:: mitogen.parent .. autofunction:: create_child @@ -176,8 +180,8 @@ Subprocess Creation Functions .. autofunction:: tty_create_child -Helper Functions -================ +Helpers +------- .. currentmodule:: mitogen.core .. autofunction:: to_text From 77564fdfe2cc9fb6cd794fc62fab4dd5310ef508 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 9 Mar 2019 00:21:25 +0000 Subject: [PATCH 077/383] issue #170: update Changelog; closes #170. --- docs/changelog.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index 9135c48f..43d30456 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -102,6 +102,14 @@ Fixes potential influx of 2.8-related bug reports. +Core Library +~~~~~~~~~~~~ + +* `#170 `_: to better support child + process management and a future asynchronous connect implementation, a + :class:`mitogen.parent.TimerList` API is available. + + Thanks! ~~~~~~~ From a5536c3514517af3bc10260e880188cdedcbcacd Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 9 Mar 2019 00:53:16 +0000 Subject: [PATCH 078/383] core: eliminate some quadratric behaviour from IoLogger This is the same problem as used to afflict Stream: large input buffers containing many small messages cause intense string copying. This eliminates the worst of it. --- mitogen/core.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index 36b4c74c..fc578c0b 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -2405,7 +2405,7 @@ class IoLogger(BasicStream): :class:`BasicStream` subclass that sets up redirection of a standard UNIX file descriptor back into the Python :mod:`logging` package. """ - _buf = '' + _trailer = u'' def __init__(self, broker, name, dest_fd): self._broker = broker @@ -2427,10 +2427,17 @@ class IoLogger(BasicStream): def __repr__(self): return '' % (self._name,) - def _log_lines(self): - while self._buf.find('\n') != -1: - line, _, self._buf = str_partition(self._buf, '\n') - self._log.info('%s', line.rstrip('\n')) + def _log_lines(self, buf): + start = 0 + while True: + nl = min(buf.find('\n', start), start+1024) + if nl == -1: + break + self._log.info('%s', buf[start:nl]) + start = nl + 1 + + if start: + self._trailer = buf[start:] def on_shutdown(self, broker): """Shut down the write end of the logging socket.""" @@ -2447,8 +2454,7 @@ class IoLogger(BasicStream): if not buf: return self.on_disconnect(broker) - self._buf += buf.decode('latin1') - self._log_lines() + self._log_lines(self._trailer + buf.decode('latin1')) class Router(object): From 3aded0ca95c79d24dab3316b19d39d4b0231fdc4 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 9 Mar 2019 00:21:25 +0000 Subject: [PATCH 079/383] issue #170: add TimerList docstrings. 
--- mitogen/parent.py | 47 ++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 44 insertions(+), 3 deletions(-) diff --git a/mitogen/parent.py b/mitogen/parent.py index 2c8eab8a..780fe98b 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -580,7 +580,7 @@ def write_all(fd, s, deadline=None): class Timer(object): """ - Represents an unexpired timed callback. + Represents a future event. """ def __init__(self, timer_list, when, func): self.timer_list = timer_list @@ -595,12 +595,32 @@ class Timer(object): return self.when < other.when def cancel(self): + """ + Cancel this event. If it has not yet executed, it will not execute + during any subsequent :meth:`TimerList.expire` call. + """ self.cancelled = True class TimerList(object): """ - Represent a series of future events. + Efficiently manage a list of cancellable future events relative to wall + clock time. An instance of this class is installed as + :attr:`mitogen.master.Broker.timers` by default, and installed as + :attr:`mitogen.core.Broker.timers` in a child context after a call to + :func:`mitogen.parent.upgrade_router`. + + You can use :class:`TimerList` to cause the broker to wake at arbitrary + future moments, useful for implementing timeouts and polling in an + asynchronous context. + + :class:`TimerList` methods can only be called from asynchronous context, + for example via :meth:`mitogen.core.Broker.defer`. + + The broker automatically adjusts its sleep delay according to the installed + timer list, and arranges for timers to expire via automatic calls to + :meth:`expire`. The main user interface to :class:`TimerList` is + :meth:`schedule`. """ _now = time.time @@ -608,15 +628,36 @@ class TimerList(object): self._lst = [] def get_timeout(self): + """ + Return the floating point seconds until the next event is due. + + :returns: + Floating point delay, or 0.0, or :data:`None` if no events are + scheduled. + """ if self._lst: return max(0, self._lst[0].when - self._now()) def schedule(self, when, func): + """ + Schedule a new future event. + + :param float when: + UNIX time in seconds when event should occur. + :param callable func: + Callable to invoke on expiry. + :returns: + A :class:`Timer` instance, exposing :meth:`Timer.cancel`, which may + be used to cancel the future invocation. + """ timer = Timer(self, when, func) heapq.heappush(self._lst, timer) return timer def expire(self): + """ + Invoke callbacks for any events in the past. + """ now = self._now() while self._lst and self._lst[0].when <= now: timer = heapq.heappop(self._lst) @@ -786,7 +827,7 @@ def _upgrade_broker(broker): finally: root.setLevel(old_level) - broker.timer_list = TimerList() + broker.timers = TimerList() LOG.debug('replaced %r with %r (new: %d readers, %d writers; ' 'old: %d readers, %d writers)', old, new, len(new.readers), len(new.writers), From f43f886c37026f6644a802efc5e310fb7414266c Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 9 Mar 2019 01:12:14 +0000 Subject: [PATCH 080/383] parent: various style cleanups, remove unused function. 
--- mitogen/parent.py | 87 ++++++++++++++++++++++------------------------- 1 file changed, 40 insertions(+), 47 deletions(-) diff --git a/mitogen/parent.py b/mitogen/parent.py index 780fe98b..12d71e4f 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -137,6 +137,9 @@ SIGNAL_BY_NUM = dict( if name.startswith('SIG') and not name.startswith('SIG_') ) +_core_source_lock = threading.Lock() +_core_source_partial = None + def get_log_level(): return (LOG.level or logging.getLogger().level or logging.INFO) @@ -158,10 +161,6 @@ def get_sys_executable(): return '/usr/bin/python' -_core_source_lock = threading.Lock() -_core_source_partial = None - - def _get_core_source(): """ In non-masters, simply fetch the cached mitogen.core source code via the @@ -213,8 +212,10 @@ def is_immediate_child(msg, stream): def flags(names): - """Return the result of ORing a set of (space separated) :py:mod:`termios` - module constants together.""" + """ + Return the result of ORing a set of (space separated) :py:mod:`termios` + module constants together. + """ return sum(getattr(termios, name, 0) for name in names.split()) @@ -223,13 +224,15 @@ def cfmakeraw(tflags): """Given a list returned by :py:func:`termios.tcgetattr`, return a list modified in a manner similar to the `cfmakeraw()` C library function, but additionally disabling local echo.""" - # BSD: https://github.com/freebsd/freebsd/blob/master/lib/libc/gen/termios.c#L162 - # Linux: https://github.com/lattera/glibc/blob/master/termios/cfmakeraw.c#L20 + # BSD: github.com/freebsd/freebsd/blob/master/lib/libc/gen/termios.c#L162 + # Linux: github.com/lattera/glibc/blob/master/termios/cfmakeraw.c#L20 iflag, oflag, cflag, lflag, ispeed, ospeed, cc = tflags - iflag &= ~flags('IMAXBEL IXOFF INPCK BRKINT PARMRK ISTRIP INLCR ICRNL IXON IGNPAR') + iflag &= ~flags('IMAXBEL IXOFF INPCK BRKINT PARMRK ' + 'ISTRIP INLCR ICRNL IXON IGNPAR') iflag &= ~flags('IGNBRK BRKINT PARMRK') oflag &= ~flags('OPOST') - lflag &= ~flags('ECHO ECHOE ECHOK ECHONL ICANON ISIG IEXTEN NOFLSH TOSTOP PENDIN') + lflag &= ~flags('ECHO ECHOE ECHOK ECHONL ICANON ISIG' + 'IEXTEN NOFLSH TOSTOP PENDIN') cflag &= ~flags('CSIZE PARENB') cflag |= flags('CS8 CREAD') return [iflag, oflag, cflag, lflag, ispeed, ospeed, cc] @@ -246,14 +249,6 @@ def disable_echo(fd): termios.tcsetattr(fd, flags, new) -def close_nonstandard_fds(): - for fd in xrange(3, SC_OPEN_MAX): - try: - os.close(fd) - except OSError: - pass - - def create_socketpair(size=None): """ Create a :func:`socket.socketpair` to use for use as a child process's UNIX @@ -309,8 +304,8 @@ def create_child(args, merge_stdio=False, stderr_pipe=False, preexec_fn=None): """ Create a child process whose stdin/stdout is connected to a socket. - :param args: - Argument vector for execv() call. + :param list args: + Program argument vector. :param bool merge_stdio: If :data:`True`, arrange for `stderr` to be connected to the `stdout` socketpair, rather than inherited from the parent process. This may be @@ -326,9 +321,9 @@ def create_child(args, merge_stdio=False, stderr_pipe=False, preexec_fn=None): """ parentfp, childfp = create_socketpair() # When running under a monkey patches-enabled gevent, the socket module - # yields file descriptors who already have O_NONBLOCK, which is - # persisted across fork, totally breaking Python. Therefore, drop - # O_NONBLOCK from Python's future stdin fd. + # yields descriptors who already have O_NONBLOCK, which is persisted across + # fork, totally breaking Python. 
Therefore, drop O_NONBLOCK from Python's + # future stdin fd. mitogen.core.set_block(childfp.fileno()) stderr_r = None @@ -452,8 +447,7 @@ def tty_create_child(args): slave end. :param list args: - :py:func:`os.execl` argument list. - + Program argument vector. :returns: `(pid, tty_fd, None)` """ @@ -489,8 +483,7 @@ def hybrid_tty_create_child(args): attached to a TTY. :param list args: - :py:func:`os.execl` argument list. - + Program argument vector. :returns: `(pid, socketpair_fd, tty_fd)` """ @@ -1187,9 +1180,9 @@ for _klass in mitogen.core.Poller, PollPoller, KqueuePoller, EpollPoller: if _klass.SUPPORTED: PREFERRED_POLLER = _klass -# For apps that start threads dynamically, it's possible Latch will also get -# very high-numbered wait fds when there are many connections, and so select() -# becomes useless there too. So swap in our favourite poller. +# For processes that start many threads or connections, it's possible Latch +# will also get high-numbered FDs, and so select() becomes useless there too. +# So swap in our favourite poller. if PollPoller.SUPPORTED: mitogen.core.Latch.poller_class = PollPoller else: @@ -1198,16 +1191,14 @@ else: class DiagLogStream(mitogen.core.BasicStream): """ - For "hybrid TTY/socketpair" mode, after a connection has been setup, a - spare TTY file descriptor will exist that cannot be closed, and to which - SSH or sudo may continue writing log messages. + For "hybrid TTY/socketpair" mode, after connection setup a spare TTY master + FD exists that cannot be closed, and to which SSH or sudo may continue + writing log messages. - The descriptor cannot be closed since the UNIX TTY layer will send a - termination signal to any processes whose controlling TTY is the TTY that - has been closed. - - DiagLogStream takes over this descriptor and creates corresponding log - messages for anything written to it. + The descriptor cannot be closed since the UNIX TTY layer sends SIGHUP to + processes whose controlling TTY is the slave TTY whose master side has been + closed. LogProtocol takes over this FD and creates log messages for + anything written to it. 
""" def __init__(self, fd, stream): @@ -1573,7 +1564,7 @@ class ChildIdAllocator(object): for id_ in self.it: return id_ - master = mitogen.core.Context(self.router, 0) + master = self.router.context_by_id(0) start, end = master.send_await( mitogen.core.Message(dst_id=0, handle=mitogen.core.ALLOCATE_ID) ) @@ -1830,9 +1821,11 @@ class Context(mitogen.core.Context): return not (self == other) def __eq__(self, other): - return (isinstance(other, mitogen.core.Context) and - (other.context_id == self.context_id) and - (other.router == self.router)) + return ( + isinstance(other, mitogen.core.Context) and + (other.context_id == self.context_id) and + (other.router == self.router) + ) def __hash__(self): return hash((self.router, self.context_id)) @@ -2227,13 +2220,13 @@ class Router(mitogen.core.Router): kwargs.setdefault(u'debug', self.debug) kwargs.setdefault(u'profiling', self.profiling) kwargs.setdefault(u'unidirectional', self.unidirectional) + kwargs.setdefault(u'name', name) via = kwargs.pop(u'via', None) if via is not None: - return self.proxy_connect(via, method_name, name=name, - **mitogen.core.Kwargs(kwargs)) - return self._connect(klass, name=name, - **mitogen.core.Kwargs(kwargs)) + return self.proxy_connect(via, method_name, + **mitogen.core.Kwargs(kwargs)) + return self._connect(klass, **mitogen.core.Kwargs(kwargs)) def proxy_connect(self, via_context, method_name, name=None, **kwargs): resp = via_context.call(_proxy_connect, From 5aca9d6c3f65b58b4aba6de92b9f593f55c0f77d Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 9 Mar 2019 16:52:42 +0000 Subject: [PATCH 081/383] core: split out iter_split() for use in parent.py. --- mitogen/core.py | 35 +++++++++++++++++++++------------- tests/iter_split_test.py | 41 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 13 deletions(-) create mode 100644 tests/iter_split_test.py diff --git a/mitogen/core.py b/mitogen/core.py index fc578c0b..ed18f6ad 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -596,6 +596,23 @@ def import_module(modname): return __import__(modname, None, None, ['']) +def iter_split(buf, delim, func): + """ + Invoke `func(s)` for each `delim`-delimited chunk in the potentially large + `buf`, avoiding intermediate lists and quadratic copies. Return the + trailing undelimited portion of `buf`. + """ + start = 0 + while True: + nl = buf.find(delim, start) + if nl == -1: + break + func(buf[start:nl]) + start = nl + 1 + + return buf[start:] + + class Py24Pickler(py_pickle.Pickler): """ Exceptions were classic classes until Python 2.5. 
Sadly for 2.4, cPickle @@ -2427,18 +2444,6 @@ class IoLogger(BasicStream): def __repr__(self): return '' % (self._name,) - def _log_lines(self, buf): - start = 0 - while True: - nl = min(buf.find('\n', start), start+1024) - if nl == -1: - break - self._log.info('%s', buf[start:nl]) - start = nl + 1 - - if start: - self._trailer = buf[start:] - def on_shutdown(self, broker): """Shut down the write end of the logging socket.""" _v and LOG.debug('%r.on_shutdown()', self) @@ -2454,7 +2459,11 @@ class IoLogger(BasicStream): if not buf: return self.on_disconnect(broker) - self._log_lines(self._trailer + buf.decode('latin1')) + self._trailer = iter_split( + buf=self._trailer + buf.decode('latin1'), + delim='\n', + func=lambda s: self._log.info('%s', s) + ) class Router(object): diff --git a/tests/iter_split_test.py b/tests/iter_split_test.py new file mode 100644 index 00000000..a385b2f1 --- /dev/null +++ b/tests/iter_split_test.py @@ -0,0 +1,41 @@ + +import mock +import unittest2 + +import mitogen.core + +import testlib + + +class IterSplitTest(unittest2.TestCase): + func = staticmethod(mitogen.core.iter_split) + + def test_empty_buffer(self): + lst = [] + trailer = self.func(buf='', delim='\n', func=lst.append) + self.assertEquals('', trailer) + self.assertEquals([], lst) + + def test_empty_line(self): + lst = [] + trailer = self.func(buf='\n', delim='\n', func=lst.append) + self.assertEquals('', trailer) + self.assertEquals([''], lst) + + def test_one_line(self): + buf = 'xxxx\n' + lst = [] + trailer = self.func(buf=buf, delim='\n', func=lst.append) + self.assertEquals('', trailer) + self.assertEquals(lst, ['xxxx']) + + def test_one_incomplete(self): + buf = 'xxxx\nyy' + lst = [] + trailer = self.func(buf=buf, delim='\n', func=lst.append) + self.assertEquals('yy', trailer) + self.assertEquals(lst, ['xxxx']) + + +if __name__ == '__main__': + unittest2.main() From 70ff4b674c089a14201bb4598b071777bc6acc30 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 9 Mar 2019 19:16:40 +0000 Subject: [PATCH 082/383] parent: discard cancelled events in TimerList.get_timeout(). Otherwise get_timeout() keeps broker alive via keep_alive() for a cancelled timer during shutdown. --- mitogen/parent.py | 2 ++ tests/timer_test.py | 17 ++++++++++++++++- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/mitogen/parent.py b/mitogen/parent.py index 12d71e4f..a7dbfbd8 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -628,6 +628,8 @@ class TimerList(object): Floating point delay, or 0.0, or :data:`None` if no events are scheduled. 
""" + while self._lst and self._lst[0].cancelled: + heapq.heappop(self._lst) if self._lst: return max(0, self._lst[0].when - self._now()) diff --git a/tests/timer_test.py b/tests/timer_test.py index e4637ed5..14a9c080 100644 --- a/tests/timer_test.py +++ b/tests/timer_test.py @@ -56,6 +56,21 @@ class GetTimeoutTest(TimerListMixin, testlib.TestCase): self.list._now = lambda: 30 self.assertEquals(0, self.list.get_timeout()) + def test_one_cancelled(self): + t1 = self.list.schedule(2, lambda: None) + t2 = self.list.schedule(3, lambda: None) + self.list._now = lambda: 0 + t1.cancel() + self.assertEquals(3, self.list.get_timeout()) + + def test_two_cancelled(self): + t1 = self.list.schedule(2, lambda: None) + t2 = self.list.schedule(3, lambda: None) + self.list._now = lambda: 0 + t1.cancel() + t2.cancel() + self.assertEquals(None, self.list.get_timeout()) + class ScheduleTest(TimerListMixin, testlib.TestCase): def test_in_past(self): @@ -105,7 +120,7 @@ class ExpireTest(TimerListMixin, testlib.TestCase): self.list._now = lambda: 29 timer = self.list.schedule(29, mock.Mock()) timer.cancel() - self.assertEquals(0, self.list.get_timeout()) + self.assertEquals(None, self.list.get_timeout()) self.list._now = lambda: 29 self.list.expire() self.assertEquals(0, len(timer.func.mock_calls)) From f6d26c5acbef477cf4de3e76d504cc849e7194ff Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 01:01:28 +0000 Subject: [PATCH 083/383] testlib: disable lsof warnings due to Docker crap --- tests/testlib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/testlib.py b/tests/testlib.py index 04a48d84..a0b02a39 100644 --- a/tests/testlib.py +++ b/tests/testlib.py @@ -332,7 +332,7 @@ class TestCase(unittest2.TestCase): def _teardown_check_fds(self): mitogen.core.Latch._on_fork() if get_fd_count() != self._fd_count_before: - import os; os.system('lsof -p %s' % (os.getpid(),)) + import os; os.system('lsof -w -p %s' % (os.getpid(),)) assert 0, "%s leaked FDs. Count before: %s, after: %s" % ( self, self._fd_count_before, get_fd_count(), ) From 3f90030c1edac7b38be608d2d0c54fa517d20747 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 17:55:29 +0000 Subject: [PATCH 084/383] core: docstring style cleanups, dead code. --- mitogen/core.py | 84 ++++++++++++++++++++++++++++++------------------- 1 file changed, 51 insertions(+), 33 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index ed18f6ad..f9c4f4c2 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -214,7 +214,8 @@ else: class Error(Exception): - """Base for all exceptions raised by Mitogen. + """ + Base for all exceptions raised by Mitogen. :param str fmt: Exception text, or format string if `args` is non-empty. @@ -230,14 +231,18 @@ class Error(Exception): class LatchError(Error): - """Raised when an attempt is made to use a :class:`mitogen.core.Latch` - that has been marked closed.""" + """ + Raised when an attempt is made to use a :class:`mitogen.core.Latch` that + has been marked closed. + """ pass class Blob(BytesType): - """A serializable bytes subclass whose content is summarized in repr() - output, making it suitable for logging binary data.""" + """ + A serializable bytes subclass whose content is summarized in repr() output, + making it suitable for logging binary data. 
+ """ def __repr__(self): return '[blob: %d bytes]' % len(self) @@ -246,8 +251,10 @@ class Blob(BytesType): class Secret(UnicodeType): - """A serializable unicode subclass whose content is masked in repr() - output, making it suitable for logging passwords.""" + """ + A serializable unicode subclass whose content is masked in repr() output, + making it suitable for logging passwords. + """ def __repr__(self): return '[secret]' @@ -321,7 +328,9 @@ def _unpickle_call_error(s): class ChannelError(Error): - """Raised when a channel dies or has been closed.""" + """ + Raised when a channel dies or has been closed. + """ remote_msg = 'Channel closed by remote end.' local_msg = 'Channel closed by local end.' @@ -379,11 +388,13 @@ else: def has_parent_authority(msg, _stream=None): - """Policy function for use with :class:`Receiver` and + """ + Policy function for use with :class:`Receiver` and :meth:`Router.add_handler` that requires incoming messages to originate from a parent context, or on a :class:`Stream` whose :attr:`auth_id ` has been set to that of a parent context or the current - context.""" + context. + """ return (msg.auth_id == mitogen.context_id or msg.auth_id in mitogen.parent_ids) @@ -432,35 +443,42 @@ def is_blacklisted_import(importer, fullname): def set_cloexec(fd): - """Set the file descriptor `fd` to automatically close on - :func:`os.execve`. This has no effect on file descriptors inherited across - :func:`os.fork`, they must be explicitly closed through some other means, - such as :func:`mitogen.fork.on_fork`.""" + """ + Set the file descriptor `fd` to automatically close on :func:`os.execve`. + This has no effect on file descriptors inherited across :func:`os.fork`, + they must be explicitly closed through some other means, such as + :func:`mitogen.fork.on_fork`. + """ flags = fcntl.fcntl(fd, fcntl.F_GETFD) assert fd > 2 fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) def set_nonblock(fd): - """Set the file descriptor `fd` to non-blocking mode. For most underlying - file types, this causes :func:`os.read` or :func:`os.write` to raise + """ + Set the file descriptor `fd` to non-blocking mode. For most underlying file + types, this causes :func:`os.read` or :func:`os.write` to raise :class:`OSError` with :data:`errno.EAGAIN` rather than block the thread - when the underlying kernel buffer is exhausted.""" + when the underlying kernel buffer is exhausted. + """ flags = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) def set_block(fd): - """Inverse of :func:`set_nonblock`, i.e. cause `fd` to block the thread - when the underlying kernel buffer is exhausted.""" + """ + Inverse of :func:`set_nonblock`, i.e. cause `fd` to block the thread when + the underlying kernel buffer is exhausted. + """ flags = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK) def io_op(func, *args): - """Wrap `func(*args)` that may raise :class:`select.error`, - :class:`IOError`, or :class:`OSError`, trapping UNIX error codes relating - to disconnection and retry events in various subsystems: + """ + Wrap `func(*args)` that may raise :class:`select.error`, :class:`IOError`, + or :class:`OSError`, trapping UNIX error codes relating to disconnection + and retry events in various subsystems: * When a signal is delivered to the process on Python 2, system call retry is signalled through :data:`errno.EINTR`. 
The invocation is automatically @@ -491,7 +509,8 @@ def io_op(func, *args): class PidfulStreamHandler(logging.StreamHandler): - """A :class:`logging.StreamHandler` subclass used when + """ + A :class:`logging.StreamHandler` subclass used when :meth:`Router.enable_debug() ` has been called, or the `debug` parameter was specified during context construction. Verifies the process ID has not changed on each call to :meth:`emit`, @@ -599,8 +618,8 @@ def import_module(modname): def iter_split(buf, delim, func): """ Invoke `func(s)` for each `delim`-delimited chunk in the potentially large - `buf`, avoiding intermediate lists and quadratic copies. Return the - trailing undelimited portion of `buf`. + `buf`, avoiding intermediate lists and quadratic string operations. Return + the trailing undelimited portion of `buf`. """ start = 0 while True: @@ -725,8 +744,10 @@ class Message(object): return s def _find_global(self, module, func): - """Return the class implementing `module_name.class_name` or raise - `StreamError` if the module is not whitelisted.""" + """ + Return the class implementing `module_name.class_name` or raise + `StreamError` if the module is not whitelisted. + """ if module == __name__: if func == '_unpickle_call_error' or func == 'CallError': return _unpickle_call_error @@ -2046,9 +2067,6 @@ class Poller(object): if gen and gen < self._generation: yield data - if timeout: - timeout *= 1000 - def poll(self, timeout=None): """ Block the calling thread until one or more FDs are ready for IO. @@ -2715,7 +2733,7 @@ class Router(object): return handle - duplicate_handle_msg = 'cannot register a handle that is already exists' + duplicate_handle_msg = 'cannot register a handle that already exists' refused_msg = 'refused by policy' invalid_handle_msg = 'invalid handle' too_large_msg = 'message too large (max %d bytes)' @@ -2860,8 +2878,8 @@ class Broker(object): """ Responsible for handling I/O multiplexing in a private thread. - **Note:** This is the somewhat limited core version of the Broker class - used by child contexts. The master subclass is documented below. + **Note:** This somewhat limited core version is used by children. The + master subclass is documented below. """ poller_class = Poller _waker = None From 1305420aa5e841788049b021513adb68284f06d6 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 18:50:40 +0000 Subject: [PATCH 085/383] docs: remove bytearray from supported types list. --- docs/getting_started.rst | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 020760bc..945e243f 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -341,15 +341,13 @@ The following built-in types may be used as parameters or return values in remote procedure calls: * :class:`bool` -* :class:`bytearray` -* :func:`bytes` +* :func:`bytes` (:class:`str` on Python 2.x) * :class:`dict` * :class:`int` * :func:`list` * :class:`long` -* :class:`str` * :func:`tuple` -* :func:`unicode` +* :func:`unicode` (:class:`str` on Python 3.x) User-defined types may not be used, except for: From dfefc4c05c8ca06c7790dc061c0c8a99891db482 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 18:53:10 +0000 Subject: [PATCH 086/383] core: replace UTF8_CODEC with encodings.utf_8.encode() function. 
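For reference, the module-level codec function has a different signature from
str.encode(): it returns an (encoded_bytes, consumed_length) tuple, which is
why call sites unpack the pair and discard the second element. A minimal
sketch of the call shape (illustration only, not part of this patch):

    import encodings.utf_8

    # encodings.utf_8.encode() behaves like codecs.utf_8_encode(): it
    # returns a (bytes, length) pair rather than a plain byte string.
    data, _ = encodings.utf_8.encode(u'caf\xe9')
    assert isinstance(data, bytes)

Calling the already-imported module function directly also sidesteps the
codec registry lookup made by str.encode(), which may take the import lock.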
--- mitogen/core.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index f9c4f4c2..c1f5ea38 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -37,6 +37,7 @@ bootstrap implementation sent to every new slave context. import binascii import collections import encodings.latin_1 +import encodings.utf_8 import errno import fcntl import itertools @@ -102,10 +103,9 @@ LOG = logging.getLogger('mitogen') IOLOG = logging.getLogger('mitogen.io') IOLOG.setLevel(logging.INFO) -LATIN1_CODEC = encodings.latin_1.Codec() # str.encode() may take import lock. Deadlock possible if broker calls # .encode() on behalf of thread currently waiting for module. -UTF8_CODEC = encodings.latin_1.Codec() +LATIN1_CODEC = encodings.latin_1.Codec() _v = False _vv = False @@ -288,7 +288,7 @@ class Kwargs(dict): def __init__(self, dct): for k, v in dct.iteritems(): if type(k) is unicode: - k, _ = UTF8_CODEC.encode(k) + k, _ = encodings.utf_8.encode(k) self[k] = v def __repr__(self): @@ -782,7 +782,7 @@ class Message(object): """ Syntax helper to construct a dead message. """ - kwargs['data'], _ = UTF8_CODEC.encode(reason or u'') + kwargs['data'], _ = encodings.utf_8.encode(reason or u'') return cls(reply_to=IS_DEAD, **kwargs) @classmethod @@ -1381,7 +1381,7 @@ class Importer(object): if mod.__package__ and not PY3: # 2.x requires __package__ to be exactly a string. - mod.__package__, _ = UTF8_CODEC.encode(mod.__package__) + mod.__package__, _ = encodings.utf_8.encode(mod.__package__) source = self.get_source(fullname) try: From 237a3babafecd94ed5e9765a5aa49cc9f8d73031 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 18:54:32 +0000 Subject: [PATCH 087/383] core: more succinct iter_split(). --- mitogen/core.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index c1f5ea38..1507a2a3 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -621,15 +621,14 @@ def iter_split(buf, delim, func): `buf`, avoiding intermediate lists and quadratic string operations. Return the trailing undelimited portion of `buf`. """ + dlen = len(delim) start = 0 while True: nl = buf.find(delim, start) if nl == -1: - break + return buf[start:] func(buf[start:nl]) - start = nl + 1 - - return buf[start:] + start = nl + dlen class Py24Pickler(py_pickle.Pickler): From 4f0a946f309fe54cc26b4d52994e0725535d8b92 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 18:55:43 +0000 Subject: [PATCH 088/383] core: pending timers should keep broker alive. --- mitogen/core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mitogen/core.py b/mitogen/core.py index 1507a2a3..0935abcb 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -2973,7 +2973,7 @@ class Broker(object): progress (e.g. log draining). """ it = (side.keep_alive for (_, (side, _)) in self.poller.readers) - return sum(it, 0) + return sum(it, 0) > 0 or self.timers.get_timeout() is not None def defer_sync(self, func): """ From aba396d4c56136483722c9ed67e86c3325330a80 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 18:56:40 +0000 Subject: [PATCH 089/383] core: bootstrap FD management improvements - open fd 0/1/2 with correct file mode - trap reserve_tty_fd in a file object, since all other FD manual FD management is going away. 
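Roughly, the new bootstrap sequence opens /dev/null once per standard
descriptor, using a mode that matches the direction of that descriptor
(read-only for stdin, read-write for stdout and stderr), rather than sharing
a single O_RDWR descriptor. An illustrative sketch of the pattern (see the
hunk below for the real change):

    import os

    # Replace each standard descriptor with /dev/null, then drop the
    # temporary descriptor once it has been duplicated into place.
    for stdfd, mode in ((0, os.O_RDONLY), (1, os.O_RDWR), (2, os.O_RDWR)):
        fd = os.open('/dev/null', mode)
        if fd != stdfd:
            os.dup2(fd, stdfd)
            os.close(fd)

The reserved TTY descriptor is similarly wrapped via os.fdopen(os.dup(2),
'r+b', 0), tying its lifetime to a file object rather than a bare integer fd.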
--- mitogen/core.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index 0935abcb..dacdb444 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -3381,16 +3381,13 @@ class ExternalContext(object): def _nullify_stdio(self): """ - Open /dev/null to replace stdin, and stdout/stderr temporarily. In case - of odd startup, assume we may be allocated a standard handle. + Open /dev/null to replace stdio temporarily. In case of odd startup, + assume we may be allocated a standard handle. """ - fd = os.open('/dev/null', os.O_RDWR) - try: - for stdfd in (0, 1, 2): - if fd != stdfd: - os.dup2(fd, stdfd) - finally: - if fd not in (0, 1, 2): + for stdfd, mode in ((0, os.O_RDONLY), (1, os.O_RDWR), (2, os.O_RDWR)): + fd = os.open('/dev/null', mode) + if fd != stdfd: + os.dup2(fd, stdfd) os.close(fd) def _setup_stdio(self): @@ -3402,10 +3399,11 @@ class ExternalContext(object): # around a permanent dup() to avoid receiving SIGHUP. try: if os.isatty(2): - self.reserve_tty_fd = os.dup(2) - set_cloexec(self.reserve_tty_fd) + self.reserve_tty_fp = os.fdopen(os.dup(2), 'r+b', 0) + set_cloexec(self.reserve_tty_fp) except OSError: pass + # When sys.stdout was opened by the runtime, overwriting it will not # close FD 1. However when forking from a child that previously used # fdopen(), overwriting it /will/ close FD 1. So we must swallow the From 3ab7998861855c18b23493c5b56b403c29fc7afe Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 18:58:34 +0000 Subject: [PATCH 090/383] tests: add some UTF-8 to ssh_login_banner to encourage breakage. --- tests/data/docker/ssh_login_banner.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/data/docker/ssh_login_banner.txt b/tests/data/docker/ssh_login_banner.txt index 1ae4cd03..8a03fbe4 100644 --- a/tests/data/docker/ssh_login_banner.txt +++ b/tests/data/docker/ssh_login_banner.txt @@ -19,3 +19,5 @@ incidents to law enforcement officials. ************************************************************** NOTE: This system is connected to DOMAIN.COM, please use your password. + +ستتم محاكمة المعتدين. هذا يختبر التدويل From 3f1ef6e24395e39bbcf8462ef33bc64fa538d64e Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 18:58:55 +0000 Subject: [PATCH 091/383] master: expect forwarded logs to be in UTF-8. latin1 was causing corruption of internationalized messages. --- mitogen/master.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mitogen/master.py b/mitogen/master.py index 2db78ba0..ff6f3922 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -394,7 +394,7 @@ class LogForwarder(object): name = '%s.%s' % (RLOG.name, context.name) self._cache[msg.src_id] = logger = logging.getLogger(name) - name, level_s, s = msg.data.decode('latin1').split('\x00', 2) + name, level_s, s = msg.data.decode('utf-8', 'replace').split('\x00', 2) # See logging.Handler.makeRecord() record = logging.LogRecord( From d1f5e0663d2c9e4edaaed070ebabd645348ee4d5 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 20:00:50 +0000 Subject: [PATCH 092/383] core: move message encoding to Message.pack(), add+refactor tests. The old inline pack is still present in the old location but will be removed in a followup commit. 
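The wire format emitted by the new Message.pack() is a fixed 26-byte header
followed by the payload: a 16-bit magic (0x4d49, 'MI'), then dst_id, src_id,
auth_id, handle, reply_to and the payload length as big-endian 32-bit fields.
A sketch of the matching decode, for illustration only (unpack_header is a
hypothetical helper, not part of the patch):

    import struct

    HEADER_FMT = '>hLLLLLL'
    HEADER_LEN = struct.calcsize(HEADER_FMT)   # 2 + (6 * 4) == 26 bytes

    def unpack_header(buf):
        # Split one framed message into its routing fields and payload.
        magic, dst_id, src_id, auth_id, handle, reply_to, length = (
            struct.unpack(HEADER_FMT, buf[:HEADER_LEN]))
        assert magic == 0x4d49
        return (dst_id, src_id, auth_id, handle, reply_to,
                buf[HEADER_LEN:HEADER_LEN + length])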
--- mitogen/core.py | 14 +- ...{serialization_test.py => context_test.py} | 35 +- tests/message_test.py | 545 ++++++++++++++++++ 3 files changed, 559 insertions(+), 35 deletions(-) rename tests/{serialization_test.py => context_test.py} (50%) create mode 100644 tests/message_test.py diff --git a/mitogen/core.py b/mitogen/core.py index dacdb444..76baf4d3 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -722,6 +722,10 @@ class Message(object): #: the :class:`mitogen.select.Select` interface. Defaults to :data:`None`. receiver = None + HEADER_FMT = '>hLLLLLL' + HEADER_LEN = struct.calcsize(HEADER_FMT) + HEADER_MAGIC = 0x4d49 # 'MI' + def __init__(self, **kwargs): """ Construct a message from from the supplied `kwargs`. :attr:`src_id` and @@ -732,6 +736,14 @@ class Message(object): vars(self).update(kwargs) assert isinstance(self.data, BytesType) + def pack(self): + return ( + struct.pack(self.HEADER_FMT, self.HEADER_MAGIC, self.dst_id, + self.src_id, self.auth_id, self.handle, + self.reply_to or 0, len(self.data)) + + self.data + ) + def _unpickle_context(self, context_id, name): return _unpickle_context(context_id, name, router=self.router) @@ -930,7 +942,7 @@ def _unpickle_sender(router, context_id, dst_handle): if not (isinstance(router, Router) and isinstance(context_id, (int, long)) and context_id >= 0 and isinstance(dst_handle, (int, long)) and dst_handle > 0): - raise TypeError('cannot unpickle Sender: bad input') + raise TypeError('cannot unpickle Sender: bad input or missing router') return Sender(Context(router, context_id), dst_handle) diff --git a/tests/serialization_test.py b/tests/context_test.py similarity index 50% rename from tests/serialization_test.py rename to tests/context_test.py index 6cf5f8b7..4bc4bd2e 100644 --- a/tests/serialization_test.py +++ b/tests/context_test.py @@ -8,40 +8,7 @@ from mitogen.core import b import testlib -class EvilObject(object): - pass - - -def roundtrip(v): - msg = mitogen.core.Message.pickled(v) - return mitogen.core.Message(data=msg.data).unpickle() - - -class EvilObjectTest(testlib.TestCase): - def test_deserialization_fails(self): - msg = mitogen.core.Message.pickled(EvilObject()) - e = self.assertRaises(mitogen.core.StreamError, - lambda: msg.unpickle() - ) - - -class BlobTest(testlib.TestCase): - klass = mitogen.core.Blob - - # Python 3 pickle protocol 2 does weird stuff depending on whether an empty - # or nonempty bytes is being serialized. For non-empty, it yields a - # _codecs.encode() call. For empty, it yields a bytes() call. 
- - def test_nonempty_bytes(self): - v = mitogen.core.Blob(b('dave')) - self.assertEquals(b('dave'), roundtrip(v)) - - def test_empty_bytes(self): - v = mitogen.core.Blob(b('')) - self.assertEquals(b(''), roundtrip(v)) - - -class ContextTest(testlib.RouterMixin, testlib.TestCase): +class PickleTest(testlib.RouterMixin, testlib.TestCase): klass = mitogen.core.Context # Ensure Context can be round-tripped by regular pickle in addition to diff --git a/tests/message_test.py b/tests/message_test.py new file mode 100644 index 00000000..79deb2c6 --- /dev/null +++ b/tests/message_test.py @@ -0,0 +1,545 @@ + +import sys +import struct + +import mock +import unittest2 + +import mitogen.core +import mitogen.master +import testlib + +from mitogen.core import b + + +class ConstructorTest(testlib.TestCase): + klass = mitogen.core.Message + + def test_dst_id_default(self): + self.assertEquals(self.klass().dst_id, None) + + def test_dst_id_explicit(self): + self.assertEquals(self.klass(dst_id=1111).dst_id, 1111) + + @mock.patch('mitogen.context_id', 1234) + def test_src_id_default(self): + self.assertEquals(self.klass().src_id, 1234) + + def test_src_id_explicit(self): + self.assertEquals(self.klass(src_id=4321).src_id, 4321) + + @mock.patch('mitogen.context_id', 5555) + def test_auth_id_default(self): + self.assertEquals(self.klass().auth_id, 5555) + + def test_auth_id_explicit(self): + self.assertEquals(self.klass(auth_id=2222).auth_id, 2222) + + def test_handle_default(self): + self.assertEquals(self.klass().handle, None) + + def test_handle_explicit(self): + self.assertEquals(self.klass(handle=1234).handle, 1234) + + def test_reply_to_default(self): + self.assertEquals(self.klass().reply_to, None) + + def test_reply_to_explicit(self): + self.assertEquals(self.klass(reply_to=8888).reply_to, 8888) + + def test_data_default(self): + m = self.klass() + self.assertEquals(m.data, b('')) + self.assertTrue(isinstance(m.data, mitogen.core.BytesType)) + + def test_data_explicit(self): + m = self.klass(data=b('asdf')) + self.assertEquals(m.data, b('asdf')) + self.assertTrue(isinstance(m.data, mitogen.core.BytesType)) + + def test_data_hates_unicode(self): + self.assertRaises(Exception, + lambda: self.klass(data=u'asdf')) + + +class PackTest(testlib.TestCase): + klass = mitogen.core.Message + + def test_header_format_sanity(self): + self.assertEquals(self.klass.HEADER_LEN, + struct.calcsize(self.klass.HEADER_FMT)) + + def test_header_length_correct(self): + s = self.klass(dst_id=123, handle=123).pack() + self.assertEquals(len(s), self.klass.HEADER_LEN) + + def test_magic(self): + s = self.klass(dst_id=123, handle=123).pack() + magic, = struct.unpack('>h', s[:2]) + self.assertEquals(self.klass.HEADER_MAGIC, magic) + + def test_dst_id(self): + s = self.klass(dst_id=123, handle=123).pack() + dst_id, = struct.unpack('>L', s[2:6]) + self.assertEquals(123, dst_id) + + def test_src_id(self): + s = self.klass(src_id=5432, dst_id=123, handle=123).pack() + src_id, = struct.unpack('>L', s[6:10]) + self.assertEquals(5432, src_id) + + def test_auth_id(self): + s = self.klass(auth_id=1919, src_id=5432, dst_id=123, handle=123).pack() + auth_id, = struct.unpack('>L', s[10:14]) + self.assertEquals(1919, auth_id) + + def test_handle(self): + s = self.klass(dst_id=123, handle=9999).pack() + handle, = struct.unpack('>L', s[14:18]) + self.assertEquals(9999, handle) + + def test_reply_to(self): + s = self.klass(dst_id=1231, handle=7777, reply_to=9132).pack() + reply_to, = struct.unpack('>L', s[18:22]) + self.assertEquals(9132, 
reply_to) + + def test_data_length_empty(self): + s = self.klass(dst_id=1231, handle=7777).pack() + data_length, = struct.unpack('>L', s[22:26]) + self.assertEquals(0, data_length) + + def test_data_length_present(self): + s = self.klass(dst_id=1231, handle=7777, data=b('hello')).pack() + data_length, = struct.unpack('>L', s[22:26]) + self.assertEquals(5, data_length) + + def test_data_empty(self): + s = self.klass(dst_id=1231, handle=7777).pack() + data = s[26:] + self.assertEquals(b(''), data) + + def test_data_present(self): + s = self.klass(dst_id=11, handle=77, data=b('hello')).pack() + data = s[26:] + self.assertEquals(b('hello'), data) + + +class IsDeadTest(testlib.TestCase): + klass = mitogen.core.Message + + def test_is_dead(self): + msg = self.klass(reply_to=mitogen.core.IS_DEAD) + self.assertTrue(msg.is_dead) + + def test_is_not_dead(self): + msg = self.klass(reply_to=5555) + self.assertFalse(msg.is_dead) + + +class DeadTest(testlib.TestCase): + klass = mitogen.core.Message + + def test_no_reason(self): + msg = self.klass.dead() + self.assertEquals(msg.reply_to, mitogen.core.IS_DEAD) + self.assertTrue(msg.is_dead) + self.assertEquals(msg.data, b('')) + + def test_with_reason(self): + msg = self.klass.dead(reason=u'oh no') + self.assertEquals(msg.reply_to, mitogen.core.IS_DEAD) + self.assertTrue(msg.is_dead) + self.assertEquals(msg.data, b('oh no')) + + +class EvilObject(object): + pass + + +class PickledTest(testlib.TestCase): + # getting_started.html#rpc-serialization-rules + klass = mitogen.core.Message + + def roundtrip(self, v, router=None): + msg = self.klass.pickled(v) + msg2 = self.klass(data=msg.data) + msg2.router = router + return msg2.unpickle() + + def test_bool(self): + for b in True, False: + self.assertEquals(b, self.roundtrip(b)) + + @unittest2.skipIf(condition=sys.version_info < (2, 6), + reason='bytearray missing on <2.6') + def test_bytearray(self): + ba = bytearray(b('123')) + self.assertRaises(mitogen.core.StreamError, + lambda: self.roundtrip(ba) + ) + + def test_bytes(self): + by = b('123') + self.assertEquals(by, self.roundtrip(by)) + + def test_dict(self): + d = {1: 2, u'a': 3, b('b'): 4, 'c': {}} + roundtrip = self.roundtrip(d) + self.assertEquals(d, roundtrip) + self.assertTrue(isinstance(roundtrip, dict)) + for k in d: + self.assertTrue(isinstance(roundtrip[k], type(d[k]))) + + def test_int(self): + self.assertEquals(123, self.klass.pickled(123).unpickle()) + + def test_list(self): + l = [1, u'b', b('c')] + roundtrip = self.roundtrip(l) + self.assertTrue(isinstance(roundtrip, list)) + self.assertEquals(l, roundtrip) + for k in range(len(l)): + self.assertTrue(isinstance(roundtrip[k], type(l[k]))) + + @unittest2.skipIf(condition=sys.version_info > (3, 0), + reason='long missing in >3.x') + def test_long(self): + l = long(0xffffffffffff) + roundtrip = self.roundtrip(l) + self.assertEquals(l, roundtrip) + self.assertTrue(isinstance(roundtrip, long)) + + def test_tuple(self): + l = (1, u'b', b('c')) + roundtrip = self.roundtrip(l) + self.assertEquals(l, roundtrip) + self.assertTrue(isinstance(roundtrip, tuple)) + for k in range(len(l)): + self.assertTrue(isinstance(roundtrip[k], type(l[k]))) + + def test_unicode(self): + u = u'abcd' + roundtrip = self.roundtrip(u) + self.assertEquals(u, roundtrip) + self.assertTrue(isinstance(roundtrip, mitogen.core.UnicodeType)) + + #### custom types. see also: types_test.py, call_error_test.py + + # Python 3 pickle protocol 2 does weird stuff depending on whether an empty + # or nonempty bytes is being serialized. 
For non-empty, it yields a + # _codecs.encode() call. For empty, it yields a bytes() call. + + def test_blob_nonempty(self): + v = mitogen.core.Blob(b('dave')) + roundtrip = self.roundtrip(v) + self.assertTrue(isinstance(roundtrip, mitogen.core.Blob)) + self.assertEquals(b('dave'), roundtrip) + + def test_blob_empty(self): + v = mitogen.core.Blob(b('')) + roundtrip = self.roundtrip(v) + self.assertTrue(isinstance(roundtrip, mitogen.core.Blob)) + self.assertEquals(b(''), v) + + def test_secret_nonempty(self): + s = mitogen.core.Secret(u'dave') + roundtrip = self.roundtrip(s) + self.assertTrue(isinstance(roundtrip, mitogen.core.Secret)) + self.assertEquals(u'dave', roundtrip) + + def test_secret_empty(self): + s = mitogen.core.Secret(u'') + roundtrip = self.roundtrip(s) + self.assertTrue(isinstance(roundtrip, mitogen.core.Secret)) + self.assertEquals(u'', roundtrip) + + def test_call_error(self): + ce = mitogen.core.CallError('nope') + ce2 = self.assertRaises(mitogen.core.CallError, + lambda: self.roundtrip(ce)) + self.assertEquals(ce.args[0], ce2.args[0]) + + def test_context(self): + router = mitogen.master.Router() + try: + c = router.context_by_id(1234) + roundtrip = self.roundtrip(c) + self.assertTrue(isinstance(roundtrip, mitogen.core.Context)) + self.assertEquals(c.context_id, 1234) + finally: + router.broker.shutdown() + router.broker.join() + + def test_sender(self): + router = mitogen.master.Router() + try: + recv = mitogen.core.Receiver(router) + sender = recv.to_sender() + roundtrip = self.roundtrip(sender, router=router) + self.assertTrue(isinstance(roundtrip, mitogen.core.Sender)) + self.assertEquals(roundtrip.context.context_id, mitogen.context_id) + self.assertEquals(roundtrip.dst_handle, sender.dst_handle) + finally: + router.broker.shutdown() + router.broker.join() + + #### + + def test_custom_object_deserialization_fails(self): + self.assertRaises(mitogen.core.StreamError, + lambda: self.roundtrip(EvilObject()) + ) + + +class ReplyTest(testlib.TestCase): + # getting_started.html#rpc-serialization-rules + klass = mitogen.core.Message + + def test_reply_calls_router_route(self): + msg = self.klass(src_id=1234, reply_to=9191) + router = mock.Mock() + msg.reply(123, router=router) + self.assertEquals(1, router.route.call_count) + + def test_reply_pickles_object(self): + msg = self.klass(src_id=1234, reply_to=9191) + router = mock.Mock() + msg.reply(123, router=router) + _, (reply,), _ = router.route.mock_calls[0] + self.assertEquals(reply.dst_id, 1234) + self.assertEquals(reply.unpickle(), 123) + + def test_reply_uses_preformatted_message(self): + msg = self.klass(src_id=1234, reply_to=9191) + router = mock.Mock() + my_reply = mitogen.core.Message.pickled(4444) + msg.reply(my_reply, router=router) + _, (reply,), _ = router.route.mock_calls[0] + self.assertTrue(my_reply is reply) + self.assertEquals(reply.dst_id, 1234) + self.assertEquals(reply.unpickle(), 4444) + + def test_reply_sets_dst_id(self): + msg = self.klass(src_id=1234, reply_to=9191) + router = mock.Mock() + msg.reply(123, router=router) + _, (reply,), _ = router.route.mock_calls[0] + self.assertEquals(reply.dst_id, 1234) + + def test_reply_sets_handle(self): + msg = self.klass(src_id=1234, reply_to=9191) + router = mock.Mock() + msg.reply(123, router=router) + _, (reply,), _ = router.route.mock_calls[0] + self.assertEquals(reply.handle, 9191) + + +class UnpickleTest(testlib.TestCase): + # mostly done by PickleTest, just check behaviour of parameters + klass = mitogen.core.Message + + def test_throw(self): + ce = 
mitogen.core.CallError('nope') + m = self.klass.pickled(ce) + ce2 = self.assertRaises(mitogen.core.CallError, + lambda: m.unpickle()) + self.assertEquals(ce.args[0], ce2.args[0]) + + def test_no_throw(self): + ce = mitogen.core.CallError('nope') + m = self.klass.pickled(ce) + ce2 = m.unpickle(throw=False) + self.assertEquals(ce.args[0], ce2.args[0]) + + def test_throw_dead(self): + m = self.klass.pickled('derp', reply_to=mitogen.core.IS_DEAD) + self.assertRaises(mitogen.core.ChannelError, + lambda: m.unpickle()) + + def test_no_throw_dead(self): + m = self.klass.pickled('derp', reply_to=mitogen.core.IS_DEAD) + self.assertEquals('derp', m.unpickle(throw_dead=False)) + + +class UnpickleCompatTest(testlib.TestCase): + # try weird variations of pickles from different Python versions. + klass = mitogen.core.Message + + def check(self, value, encoded, **kwargs): + if isinstance(encoded, mitogen.core.UnicodeType): + encoded = encoded.encode('latin1') + m = self.klass(data=encoded) + m.router = mitogen.master.Router() + try: + return m.unpickle(**kwargs) + finally: + m.router.broker.shutdown() + m.router.broker.join() + + def test_py24_bytes(self): + self.check('test', + ('\x80\x02U\x04testq\x00.')) + + def test_py24_unicode(self): + self.check(u'test', + ('\x80\x02X\x04\x00\x00\x00testq\x00.')) + + def test_py24_int(self): + self.check(123, + ('\x80\x02K{.')) + + def test_py24_long(self): + self.check(17592186044415, + ('\x80\x02\x8a\x06\xff\xff\xff\xff\xff\x0f.')) + + def test_py24_dict(self): + self.check({}, + ('\x80\x02}q\x00.')) + + def test_py24_tuple(self): + self.check((1, 2, u'b'), + ('\x80\x02K\x01K\x02X\x01\x00\x00\x00bq\x00\x87q\x01.')) + + def test_py24_bool(self): + self.check(True, + ('\x80\x02\x88.')) + + def test_py24_list(self): + self.check([1, 2, u'b'], + ('\x80\x02]q\x00(K\x01K\x02X\x01\x00\x00\x00bq\x01e.')) + + def test_py24_blob(self): + self.check(mitogen.core.mitogen.core.Blob(b('bigblob')), + ('\x80\x02cmitogen.core\nBlob\nq\x00U\x07bigblobq\x01\x85q\x02Rq\x03.')) + + def test_py24_secret(self): + self.check(mitogen.core.Secret(u'mypassword'), + ('\x80\x02cmitogen.core\nSecret\nq\x00X\n\x00\x00\x00mypasswordq\x01\x85q\x02Rq\x03.')) + + def test_py24_call_error(self): + self.check(mitogen.core.CallError('big error'), + ('\x80\x02cmitogen.core\n_unpickle_call_error\nq\x00X\t\x00\x00\x00big errorq\x01\x85q\x02R.'), throw=False) + + def test_py24_context(self): + self.check(mitogen.core.Context(1234, None), + ('\x80\x02cmitogen.core\n_unpickle_context\nq\x00M\xd2\x04N\x86q\x01Rq\x02.')) + + def test_py24_sender(self): + self.check(mitogen.core.Sender(mitogen.core.Context(55555, None), 4444), + ('\x80\x02cmitogen.core\n_unpickle_sender\nq\x00M\x03\xd9M\\\x11\x86q\x01Rq\x02.')) + + def test_py27_bytes(self): + self.check(b('test'), + ('\x80\x02U\x04testq\x01.')) + + def test_py27_unicode(self): + self.check(u'test', + ('\x80\x02X\x04\x00\x00\x00testq\x01.')) + + def test_py27_int(self): + self.check(123, + ('\x80\x02K{.')) + + def test_py27_long(self): + self.check(17592186044415, + ('\x80\x02\x8a\x06\xff\xff\xff\xff\xff\x0f.')) + + def test_py27_dict(self): + self.check({}, + ('\x80\x02}q\x01.')) + + def test_py27_tuple(self): + self.check((1, 2, u'b'), + ('\x80\x02K\x01K\x02X\x01\x00\x00\x00b\x87q\x01.')) + + def test_py27_bool(self): + self.check(True, + ('\x80\x02\x88.')) + + def test_py27_list(self): + self.check([1, 2, u'b'], + ('\x80\x02]q\x01(K\x01K\x02X\x01\x00\x00\x00be.')) + + def test_py27_blob(self): + self.check(mitogen.core.mitogen.core.Blob(b('bigblob')), + 
('\x80\x02cmitogen.core\nBlob\nq\x01U\x07bigblob\x85Rq\x02.')) + + def test_py27_secret(self): + self.check(mitogen.core.Secret(u'mypassword'), + ('\x80\x02cmitogen.core\nSecret\nq\x01X\n\x00\x00\x00mypassword\x85Rq\x02.')) + + def test_py27_call_error(self): + self.check(mitogen.core.CallError(u'big error',), + ('\x80\x02cmitogen.core\n_unpickle_call_error\nq\x01X\t\x00\x00\x00big errorq\x02\x85Rq\x03.'), throw=False) + + def test_py27_context(self): + self.check(mitogen.core.Context(1234, None), + ('\x80\x02cmitogen.core\n_unpickle_context\nq\x01M\xd2\x04N\x86Rq\x02.')) + + def test_py27_sender(self): + self.check(mitogen.core.Sender(mitogen.core.Context(55555, None), 4444), + ('\x80\x02cmitogen.core\n_unpickle_sender\nq\x01M\x03\xd9M\\\x11\x86Rq\x02.')) + + def test_py36_bytes(self): + self.check(b('test'), + ('\x80\x02c_codecs\nencode\nq\x00X\x04\x00\x00\x00testq\x01X\x06\x00\x00\x00latin1q\x02\x86q\x03Rq\x04.')) + + def test_py36_unicode(self): + self.check('test', + ('\x80\x02X\x04\x00\x00\x00testq\x00.')) + + def test_py36_int(self): + self.check(123, + ('\x80\x02K{.')) + + def test_py36_long(self): + self.check(17592186044415, + ('\x80\x02\x8a\x06\xff\xff\xff\xff\xff\x0f.')) + + def test_py36_dict(self): + self.check({}, + ('\x80\x02}q\x00.')) + + def test_py36_tuple(self): + self.check((1, 2, u'b'), + ('\x80\x02K\x01K\x02X\x01\x00\x00\x00bq\x00\x87q\x01.')) + + def test_py36_bool(self): + self.check(True, + ('\x80\x02\x88.')) + + def test_py36_list(self): + self.check([1, 2, u'b'], + ('\x80\x02]q\x00(K\x01K\x02X\x01\x00\x00\x00bq\x01e.')) + + def test_py36_blob(self): + self.check(mitogen.core.mitogen.core.Blob(b('bigblob')), + ('\x80\x02cmitogen.core\nBlob\nq\x00c_codecs\nencode\nq\x01X\x07\x00\x00\x00bigblobq\x02X\x06\x00\x00\x00latin1q\x03\x86q\x04Rq\x05\x85q\x06Rq\x07.')) + + def test_py36_secret(self): + self.check(mitogen.core.Secret('mypassword'), + ('\x80\x02cmitogen.core\nSecret\nq\x00X\n\x00\x00\x00mypasswordq\x01\x85q\x02Rq\x03.')) + + def test_py36_call_error(self): + self.check(mitogen.core.CallError('big error'), + ('\x80\x02cmitogen.core\n_unpickle_call_error\nq\x00X\t\x00\x00\x00big errorq\x01\x85q\x02Rq\x03.'), throw=False) + + def test_py36_context(self): + self.check(mitogen.core.Context(1234, None), + ('\x80\x02cmitogen.core\n_unpickle_context\nq\x00M\xd2\x04N\x86q\x01Rq\x02.')) + + def test_py36_sender(self): + self.check(mitogen.core.Sender(mitogen.core.Context(55555, None), 4444), + ('\x80\x02cmitogen.core\n_unpickle_sender\nq\x00M\x03\xd9M\\\x11\x86q\x01Rq\x02.')) + + +class ReprTest(testlib.TestCase): + klass = mitogen.core.Message + + def test_repr(self): + # doesn't crash + repr(self.klass.pickled('test')) + + +if __name__ == '__main__': + unittest2.main() From bbf0b22493c80af08f1220c68cc3785159815f8b Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 20:01:54 +0000 Subject: [PATCH 093/383] Import minimal jail_test. 
--- tests/data/stubs/stub-jexec.py | 17 +++++++++++++++++ tests/jail_test.py | 31 +++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) create mode 100755 tests/data/stubs/stub-jexec.py create mode 100644 tests/jail_test.py diff --git a/tests/data/stubs/stub-jexec.py b/tests/data/stubs/stub-jexec.py new file mode 100755 index 00000000..22028cf7 --- /dev/null +++ b/tests/data/stubs/stub-jexec.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python + +import json +import os +import subprocess +import sys + +os.environ['ORIGINAL_ARGV'] = json.dumps(sys.argv) +os.environ['THIS_IS_STUB_JEXEC'] = '1' + +# This must be a child process and not exec() since Mitogen replaces its stderr +# descriptor, causing the last user of the slave PTY to close it, resulting in +# the master side indicating EIO. +print sys.argv +woeifj +subprocess.check_call(sys.argv[sys.argv.index('somejail') + 1:]) +os._exit(0) diff --git a/tests/jail_test.py b/tests/jail_test.py new file mode 100644 index 00000000..0a0a21f8 --- /dev/null +++ b/tests/jail_test.py @@ -0,0 +1,31 @@ + +import os + +import mitogen +import mitogen.parent + +import unittest2 + +import testlib + + +class ConstructorTest(testlib.RouterMixin, testlib.TestCase): + jexec_path = testlib.data_path('stubs/stub-jexec.py') + + def test_okay(self): + context = self.router.jail( + jexec_path=self.jexec_path, + container='somejail', + ) + argv = eval(context.call(os.getenv, 'ORIGINAL_ARGV')) + self.assertEquals(argv[:4], [ + self.jexec_path, + '-u', + 'someuser', + '--', + ]) + self.assertEquals('1', context.call(os.getenv, 'THIS_IS_STUB_jail')) + + +if __name__ == '__main__': + unittest2.main() From de9a8b2a80fe4cb78d231cf0559a4b2771b47ce7 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 20:02:41 +0000 Subject: [PATCH 094/383] tests: add setns_test that works if password localhost sudo works. --- tests/setns_test.py | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 tests/setns_test.py diff --git a/tests/setns_test.py b/tests/setns_test.py new file mode 100644 index 00000000..b8a75788 --- /dev/null +++ b/tests/setns_test.py @@ -0,0 +1,38 @@ + +import os +import socket + +import mitogen +import mitogen.parent + +import unittest2 + +import testlib + + +class DockerTest(testlib.DockerMixin, testlib.TestCase): + def test_okay(self): + # Magic calls must happen as root. + try: + root = self.router.sudo() + except mitogen.core.StreamError: + raise unittest2.SkipTest("requires sudo to localhost root") + + via_ssh = self.docker_ssh( + username='mitogen__has_sudo', + password='has_sudo_password', + ) + + via_setns = self.router.setns( + kind='docker', + container=self.dockerized_ssh.container_name, + via=root, + ) + + self.assertEquals( + via_ssh.call(socket.gethostname), + via_setns.call(socket.gethostname), + ) + +if __name__ == '__main__': + unittest2.main() From d8dc5420ceba82abe8d6a926d3e288fa230a67ac Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 20:40:51 +0000 Subject: [PATCH 095/383] tests: install OpenBSD doas port in Debian image. To allow fancy new improved doas_test. 
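For context, the doas_test added in a following commit drives this image
through the usual connection methods. A sketch of the call shape it relies
on (container_host and account_password are placeholders; the username and
password literals match the test fixtures):

    ssh = router.ssh(hostname=container_host,
                     username='mitogen__has_sudo',
                     password='has_sudo_password')
    # doas prompts on the slave PTY; a missing or wrong password surfaces
    # as a mitogen.core.StreamError during connection.
    ctx = router.doas(via=ssh, password=account_password)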
--- tests/data/docker/README.md | 7 +++++++ tests/data/docker/doas-debian.tar.gz | Bin 0 -> 15371 bytes tests/image_prep/_container_setup.yml | 19 +++++++++++++++++++ 3 files changed, 26 insertions(+) create mode 100644 tests/data/docker/README.md create mode 100644 tests/data/docker/doas-debian.tar.gz diff --git a/tests/data/docker/README.md b/tests/data/docker/README.md new file mode 100644 index 00000000..d3d37d52 --- /dev/null +++ b/tests/data/docker/README.md @@ -0,0 +1,7 @@ + + +# doas-debian.tar.gz + +A dynamically linked copy of the OpenBSD ``doas`` tool for Debian, port is from +https://github.com/multiplexd/doas (the slicer69 port is broken, it reads the +password from stdin). diff --git a/tests/data/docker/doas-debian.tar.gz b/tests/data/docker/doas-debian.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..9211fff25ca6e8a5edb6dac0eac5a4a1188b9bb6 GIT binary patch literal 15371 zcma)j1ydYd*DdZIT$AALE(5_G9^5T>a2q^8@WI_(g1fs68r&TQcX#=C?=QGjU8lPD z+Gn4&R@bTSuIfz@i3Bz57pxBjdzy|8R5MQ=+PsBD39Ho34EzfH3Wa+}%WT{@C1qbb zAEBGs1ie3=hV(uq9ltnEAuqdl^|Ac%CW&pz#2JH!x*7`;jtqNw~X&okO36<;nH{~ezMr<$MrF!OG)lj( zS0AH&(*1^0ZN1XC&)G~4@hU$V$D@`iI3x~$lg z?}1&ncdvFDe}CO`o&6+xG}Xpm=yN0V+(PT+kL+kN@g2V8h>T!15s+*o_y>6DFd{jj;T7Sr6m4ab4umaXYeEwV_kLKjhGP zrN11PgsSvU_&g`}W8sMXBP054Ou#1~I;Y$Xm^L9K%oMWUy>L};NK39U^zK6T-lEhAO(vo{ zG)b4$u^*l{v?@gguynoFEi*H!GV?Ep_KhS=-zmV*MI}P3asBg!^q+Fw+Y+udYqs3(r0si9y6_L_1_-_ohlr3{!j_GN#xXnxbiJ*?-aJ7g^)?ay@*CI<|SSBoj%OKaGRMC@GCqT^su_CXE$Nx{^Wt){T(} z^Lw>=$Li&dd>R>@hAkQ?V|AnO(0m5bQr-sj8eF;>2f8tphQL!tasz`B6#0aHJ4s8P zc11f^ej%l1M=y0=%^5g{jzdW|E^g$T7Chfl`N&JIAxV%{}=QPiutIfRFMPAMyW-d(uN$f zK{p7UxF-OZ)L$v}GFaDS$W`ICocochdd!r_vGB+Io9>tmC*3^4!WQz2MkANa8T5AJ zTpG!*`!<@@eoYjLHhfcSF}>z_WXe+|PHv!h)}X0d`!>6mb@2qOIHEwWNpaNdzCeb7 z=ocN{mo_tp{a2(lrVS7i^IM~S> z{$Rc7fBQ>eS9LvOpv2k;fUwMgH<##eD;uh1X-aEirR96Jt=L$Ql)*Z~@AL8i4sB&QZ^Gxy3z+fJvdQ=ZY~N&2`MI8`hW-!ZcvP|4-tft z$f);`4y?uQM+8v;*|SF;5y16?7sjtBU|!CN$(SsV(a0k{xSnVe{HP6X z=JdmZ7=ZjkugJkJgcs&Mq>xZTib)^^M2nOBrwun$N8aUr@e z@gawFf+j6ru^_yh8$&>Ph#1(xL}>i6^3yz4GyPwn8+bHJwyxMKCh!>1q)A8Mr&~1t zqeb#bi`BH_k5lj!2bhsai_mBshz@Dy>;hfc{ErplC#&uRC)!U3o7wybAa)>D6CYTJ z2j@mFkQtIe)E#=I1pc%H72*$W*&!QNc|H1~QJ}KYK9|^QIQXIo&M>W^qnO%q3%(R| z`HyHV5A0wjS?Yi#viMIlO3%MEC>bDGy_Y zzSOB4voTKO1R-iWB(pH409+~D5O}LP*HRQ}BSRq-rRP}+x|J??20jCn`(`M4~-Q6>2-#;KxQ{3@#~CLH{Ukl(3~=G+p_Eq*3T=XXV4 z(`E%edwqs<2o6a;7aLMlE_jutkQC0kru8MfL;FOeOC1V7LlVAU_LQH=<~Tn~zNk#* zzsg>S3TIpcl;2_mQa+iX91DFWqn6%$;(#9pqE87MVQsOW0O9fXPvAjy@>3-M)RuZp zd-lWN$ESR(JN-{UZtmuD-G{tv<);M0z-Nokkm+2}r=s9ehtC<4;=YV~{?X4AZbYA2 z5su+M5&BYsPx?azEG4h9F=7T{PYLAVD-ur$N@BhwpBzuca|u-6Kkty{XJKhot3`<&l?8?Y==CS%@n$G%Mz0;x%~A}?PBtW9=qs_ zEqm-6-Po#(FzI&Q3QUKcxaP7JN{Ec)D@VK{_>nQo-a<@Oh6+VHk>4bMzhL`({4&9ss zq!s~I1K-~m#JyaGG^-8*fKAuk|5$*#lUXm_(p_65(uVuCWIejRjbcW3LmDD#4oTH* zh@+ydb)fOZ!GAYGn36>}rR;RjNF&WgSH6V$NbenSVv{n?M125KkBkX5tAuY_=Jhzk z_9d}ju?DA3CIde##{80gEe=}`6JkpQV9S?l24!EvoVabuyc&fAFDvW^-YAwEr5t3EsDa3yyM1KnA zH68XTW4yUzVUCnW6tEt+(1mB-@~$NJV|;hVtvMwtPfpk#;pB^(F&dDZ$a(M3KM{J` zFtB2o!>;K&N1T1%cD%7=f^15V`@C3t<`D*NM)hc?P;)E-#1%8U;0(mz&9;cfo1pGs zafAL|(f{I%ZDl7$7h&-t&g4wtm8x0f0lt9se=f&cd2ZqAzRycqR7uop^)NkdE8?!6 z{NrdkskQP_OTg|YqF)@twTg{?_D`CKci}@ zpeNKb(KQM5)Qdry92Qo807wjPsc1kYBOh1cP^*w#Eq0-r6%ZT%KvD4ANnM5q>sX$wttTn;(V9$beZTICJzOPay+XQA95MP*j$v|sUcj`SW9rD9N;cwd}^56xIQ||)yX;w z6zA<|W(*L;_gs<4hB*Tc{Jp*FnW=&U-}lg4cw{iK^tU*Ew}8)(&_?c9kRDVtdG*&b zMfBs2aIL=HvjHca2r=1Wz@Ga9`QW@c4DQpvQRqd=%G0>l8`*3<47LBI zp9^h{mCR}zp=}$ZwHIjkd!a^4Xb$BM2)zGTP>s_bvK*qXg;UPx`EvSYZl5#P0CC>- 
zZO;7iH1x6$>kBij)we%_FX4}b{;s)9kzlb&c49<>AuBkET_iqwf^ql4c1v4>TVW7P ze0s6)Z$2XfCJLP`IFxq7{bx1f6YeaoKL|CkDIKWFnwJuUU~JzzvgagM=9A#JJ>1oq z-s`iLy)ozckji;ix?4JQfmDfr{QoK!bZGyUo=VT>Kf>bTyG!MEJ`j>H7UT`s#|?r< zd#fFX$pelAN|hcmq7yOt{dkT!nhFXJ?KxJQ|h*+g{%9nP+5ZIQEt%Ej3a&Zis%YT-@j5|P4UU{ha z@;ss8J9A)PIS%Tk3mU87~=FlD#vTt&NbtRE|Av;{U3r3bfMlyB;$jZRck6Z zu##r!-`=*=|CWyJ)&)|RH(a0}iYql88+4Xe5laHy8Is?ba*yiR&>yBZ7Gi=0CXL6F&kQ@1j<* zhbq`wH(8lrc6iSI&Z-{p#mkgA5ha^AH*Wzs&JD#47p+VwoitmBziYJEr2OUKj9q5MsE!G7Y}QZQ-xL&k8;Ux!wixDy`f6;D|53YKYhCD(<{ABs+a#KGWc0xBlL+Ez#4RZ2bj#bH82DcW#6cCmS(> zyjzh42i@F}nT!-ee%N$vaXC-T3tYdhGGPfuweZfvp%coP@%^dhuZzgccr>XEGDWn< z=CG%8Ge{i-)tl89m%+$|{37EE{ecx=?s!tY4x{(2F~ORajQ_ms{>)%2@^?vt+Vq#u z64DSr2tkGf;x0zxd}59G+e=ES7#Hz^_xxZY%Y12;o)BtBW$2%Xg7SZf#y)#c=2)Cc z{k)ufR?;WM;MS~s`g9|u&N~GCKudxt8wJ>=@6+awk*V@$SyEYuuz9wjzqo~1qk?dH zw^W}(c(5r(aYL!So<|5={~TBMTsfH=AH>8m&ct*l#*uI{1YIaFkrK<$ooekCF=Z?e ze@y4;!{Hdb!s$O+)88TD>g})aUnP4s3-!8h{MJu1 zzLerU+&Ymy%H1YH*Aw@IKTGU$2-*3T${oQ1PTp`v4e zDZ0`hFN@!ont60CQD6{JlvBIPFBb`qeg{;qJG1FSM-^u6`BYwx9v-2SDa}vFKjJ}= z|NdL3vY+ui?JQ^P;f>AL5?hdK8x6VdS3ru)BvZ!)%NX`bEu$Yvh+dF;P0IKZV&jIq@it)tBo(}vRS&}DiC6(FU!)C(ZTA!cf*^E0-o745r zv^0U~pm)`4gGh3DBkdSXMLRalB2ZllMvgl7+DXY7!BqoYPEEdV3{|&ESO|KIO}{SL zwtNv@bO~N@L*IyXa5qb3%|gSrm0_MTdieIczrd4!sW<*%X#`Vyh7c0;bssGP4Cr4D zk_=S8bePnTyLVE{^zx~frqi3Ci#y?ZBgv(x!qPdu*XjaY1{qj2)Xh>#Q<8(>Tdru3 z3?kB0uBrM!OQ@6KYH6jEHT1}avPP$JkDGsXL1;qN!?8=b{kqY*_etw0bMxHZni(yN z!s~s{;6uw`{P_EU_i3R4YM}Sj`9sTyR#^ns)u2aYYf_!hf0-dxW<1F38T2RGz8Y z@@d-g*b4)wJ7#_9d%^Iw_&mx^?B;o*Lki*lp-K`Zqo9KPC?3Z7R93#i{|7z#M6}WTLy5D*0{~c&b($oL&AHU9%Y)0~rAYLA2kRZ_6 zN@BW^yVvjdP8^>pR5BkY+&*s-x4RjjQ>SDxvNC0Zy@-!y>1}$qsy!HDPg3wIS}M&U zXMuKVKyqO~G7A>fQS`E8NBBNYgON0t4P=`MLIz|l)X=^<)BXIKqT^mjl2(Xg+8-oY zN3FSGTM2r`A>X4W!!nc)q;PjwykH| zMJXO8kEXMfEc2@evRSsfT&;4B`vKPZL?HQ}7Ic>(5aUKILO=)~S^waLV;*>o27`IQT^i*@vh0lR+w5Y z&%e_izDA?!vHPQ2Heav>SbA23W)Mlyo;K%&=H!>?;=`K@zE0!*UEFgcTQ^=iX8f*? 
z3DQ%%YETLgTP<^E}E^0k8)>02GRFs0uCr?Y~ex+V2U zlIM#07N21v17~heZ;6j*;(J3e@QhfDpR)@e4zEnU6~^T!Y(j&(pR7t)84I7H(%Srh ztf=NSG@X3!Tz^b$-I*hNF&01PFC6I96K)=DDR1%W1gTB&KrDoEJgTUwpoNYGoB!oq?`GrW#0_WUCzHc_Y89`%;6Nq%DCU_b8^)GR*w~PW1fn(e=y@xDLVEtNdqNHSI8MO>U=~#s2moT_W>?)k{s% zCI~*!BW2^Hmq)SsC!40vFWY6KHhm6}kGXFgKeo6<*Bp_)*tV&6YfQ09t*fkjwL-gQ z&DcS^_V-btfv7fz-@*I>_?w7k~<)2zNHTq)o$a zkt~0+A7FZzp;LoZKgnMXBtPVK!YIj4G#O_^l;7oZd=NADfT!5*JlJa{-3>NNHCO>%O9hhjJt}EOlWz5Kczyi1fe*Dz#*PqU@h}p$RF2RyUi1R#@FqjA;&6`mO`ZMWQ(g zoncmUb?zs%eYk~%JG|ZtEwoQ@i6v9)t0im9#q@KXDmJ}A2mEbp^ln&Pshh31GkWP{ zmxrL}a$BbKT*gCX7r5ai$@Jq7{x^nBW|H!tMy$%d#8^LeF5+?#XXfE*+xDQ47}LT` z0Eb$Y1U+{Ec9FWa{(`urbA;Ca9+zx?6xw(TLV^P6Y?z zWc&jp76lPq!~(*`*cubBJ<9$J9iF7G<BNsXV}3-FbwIUNHLk>c^`MYNT{uWC-mKC z$sqo%$zz1^_Ory}J}nEbW7iM)g8M&uY>BUQ$yV5lfT%my!F`XywafLgw?b1wBLv9F zc`b?&g3v}m$XoD>-qn0f=2dXkewmUBpLJ!#z3U=RN1)ljYAwV!zp`ot!*QJC`QN)b zQv8X`sJ@j?jTTBg1LR-lf3ZN0SmtAAzoh364vOjQWQTTG|alpU0JHdu^rW@HO zgPJk8>( zFVZ${o(<<=fM|As%!qBm*f`$xW34bw zlsQ=3n~L~@hBI@o_6HGhFN_hV=iq3>Q9uPHr~=lz;(BR)X_!OD$&=IN>27?ZV7ZE_ zzIS4S`elzVq7z_Y?adiyOORk|n)rQ@1-RC=C@?NXbnhaC$CIDAoyW%|n+$!KJG<;%?nlwEX=1Mx z37xOfVEl(uT>2>}jmT?FJOFiq$oXPl<8*`AXE3eKIX}H(FU!zhWE)%OrvIb)ZF8q#NRVWh(b?p%39FmtuXx(zp^ghVHxCjiU3>TzijCdwwIx`g z-5V)pMY~|NYZF|2{_S$vfr|jo#9~%%TRCVI=}#M7K}J2hEI|b+3h<7bNBu$;)g3e?#R}gKe7ici?p#X_h93PZ0ZlwBu)+t<0ww zoBFlxR&)Ind}wz99rG?WtHu1OOga!eI4he3)aGV!gdb%l6X>S8&^8)ras-PH z(N*y8o}|hg^k`pwNdo?W5nz4iPEzs1t=|9MWw*1VLMJjR>F~@(s7!NiPgqGqx1SsO zQHbq6$kg4`BophJ28X!0n#m=uzxa(MTq1)wHcmBHGDFiDdy~i6iH^LlG>p7rIK7~u zC3adck)*V`g=R7?^5u|Fp2Z%HoQ}Z5w$)XgP_Y_2hZ`n`+ZnuaR$!+DVsuB*QW@6% zvE3|%t=w~vF61Q!xCKwfRZDG`_Iq%XHrnFmQTcFB`%v#-s?S@2g>8Oz@@l?L>r-_O zvEH}spCWzw6=MjJAUw^P>r?4N%}DD@Z9*G8mrzI52UYqzvCkM~0E*x6y-<0v$uu{l z0?6lgE{Sa@dMF6d2n+$EO_$|HgTa_BS-G3Y{#V8t9Z-TbSJ)Ig`?6j$7oKdjB)m$0 zm<6|F$fN$mjQ)vYSZYQ(*L#Ma$MyUsYp1RApD_L`rssslm&>p)gs*Y6F|p6ZOf^WI zD25=0A6dF|CEE;)M%Sb8N@wV>U7PYDG6M#+KLSO4ufB#Eyc@Xjqu*xvO{_>(pl}v@ zg6)Su9;N9*(*JZdAWl`Ac6!~qN%DbuNa#)bjNg^cM88L~K`t@9l&f}c@45_EyEfX_ zBk2CS)&LfaJQ$BZ7}ITkt5coVobhyK!zVKibM^D@LYJ6!pZ~7;lG_Fc{uTXI_T1@aQSKmY7$2ijs=W?u*Q**(!L!NVyENfKVdd_lO^9+?tEd3l4=fX$Gb&diG1H_BMvV~VyQQa8#b!fDq#?C%IqK6q`#!6sxR>=JZY?-w{(kH)eY*&S4NsCJF@M~M?^2d%k zXbqyAAJfaEY&ov}_QkV`wLDZyFMyf23M&l5Mt66;h6p?c6r*dvvSMnOy41GFe3#`l z9H}XNCSWKmbEr}&^D_wk?`~3^PUC}%pz{}N;YV1r;6P`lf)3@kf+!4RO1c1m;nS02 zs}4nM)Ov_F=`2{C&0L>oHbhgKuD~d`5cCj|z0MT4eybj~{`%$!VwobVzGw?dLkUMg z;H+<;eaGLak?(fgiMb|h&cL@$X8KRyvBeO~-;k!nMj+Tr_q%S}>pAaBHGo(eF!oBv zZ@4cF0mh@#!WedrFXpCb@E)5t+TPZ||MOZVL+<=#BFBpAer$eNHJhCk-Llz5)E)M{ z+6iswuVFKKHPb{$+_kLPM9_cy^Dio48gBI&kv-C{uC$u%$cT}=|B6faloNkGhExKP zEmzV0JD92oB-Z@J&~^#Bx-$$+hgcXoUkaLj^%9f;tiuwja+tr?8gZ!a`~4txXc5 z`Y#cGw6TQJ=lqI7fZ7;WzX|(RQPXf~AoKlNtxhN#&-BJDYcL#Tu{0rmvA65$^IDiO zB|ielrBH=^g@xA%TcF?PQvNs>++BL!+JMNx{Wf0s!p!siIk{pv<+J_%1B-?N>9_a^ zV!B<S;|!RV#5 z&F_6}onDEs#-VhZg_D9&k!fxv{EAk#&aGH;F_PyzwK#Qx!gl9Kg3Jr=(P?vy%Owk- z3_+gA>qsFw8rqHIIA%!}Ps0u9#z}~@HcIxs`k}ucA?2xqZ7ryq5jk(6<;*}pgB}I- zn-RCixdX9@>T=0mrW<52(wJjrh`$ zY{xT{0~v#lOxtOiiimbYY`mrYlcc2O7!YP70V@ z!R3cd+^-ty^W%FB{^%a7my(+jvA9u_1Yr=R*uuLWa$49!%F=0bh$6>SDSU~SK_y)m z=U&wDO0qu##L^x0DGdR}l$ALyOHgS=%_W(lnjYq;YcEN;y*)}$k4-f0IfIz=j@?j(>p>2B6F$&5!u^t_j|imM9kvRq z%jOii`GwPMv(v^HhZJ5`HwJag{U;cg@Y_0!cB%v#t zgR5k*w_8hB;zc;TG!;>XP21S40Yq%Xq`}VBtNeYeyQ^{LtvB!Mt4=u0oN#1@QBjcI z@j=2os_T5{F&BTuS1uh$RDeA2ohz+y`WLcLiEhoRvr91$M$VHGR>@O0&8=lxo9zYl7Blx0vDT2?r;kdsS2{9%(%w=89)p zQg@@E)PWk;bMZ1l_fHga8{Q}aK^xy7FUM3Ay~yac?LOU)xkZM9h!mce9qeWo%1pPn zs}+P)xJvwDr*8_d7?Dm!{T8pL$tzR+tWza?KkT07Lia5lu5g;d#)qn9L@60W1z&eE 
z0Exr2{`-+u-QPUJE7PR*v6{nqTPIS5^V^%Y5syI18Gjj^Rx~9o-*{V40O1yY`^*V5 zT1M|0D^a?EmO>*MhA+QeJ4FUG&`W-4Jdz1PO7!PFXEcLz8aT^Z%SH_~6mI)8Y)@xJ z>$v68v2|z~qlPf0cC`~f+H{6vfYDuv@er)BC5vln!rL=UIhSwv-~1iqM7F8D-sMo2 zujqww8>hJaTnj}VuFlZi7OHR0xLGJ!f?dSU3_JH?H_<=-bNo5QO9Rd;ipZghGq_Rd zG|}P`iU)s6??gL|I6 zfD(4bW<)k4*f{Vfhy16yiJiahbK7G44vo4U5%@iSlXEgb)Qp}sOk3ts<6mLnHeHji z(2DGtt8;=r9g*=UDyfj*B;id58aMtxLnwGQ<>x&lKt)f4e9sFGS#zKVu2mEJ7Ad~G z?5k;351vd#S)fc~EMvz4xkQvvRRKz&iG~=i&mYEE1r2!YqjLQKi86*TiJa{S23vUS zLrVQxFiOy{%ZNb3uu?k=d_`GEmDaLF8e=Am`y*dnGn60%6uP+03&(aIW(50eEIf!T z4vtlssBgiP(P`E|X^!ohpGnC&-H$;>V_N`7H&EP=g?q(PwZZzrTd(Gi=PsX7tVGx< zn#qn+{+7~1VPK^DDi&e1fhj`6Ud9Rr`@U;hmh+LV7^C^IMy}e2)thI?jAHF~rUMQ4<}0qCpy*im1?qpJ#s) z7e~XA6n3advUg%V^Uf~AV2pU{ed&Lji6w$7 zH#%qBD5zp1tdMKCsD}W}8;Va%xH$eF1Fg8=*9jPgT0^KL%9UzP|JBjQYkf(JiZ4w7 zKfJ_pPYgJ26}sX;dD(AzYSvsv0!Ng^b27HO0Rz7P?h2egTd2=YfBlwyxAfMcAeHn4 zZ?;pVV8)NpV|@o7jS`BKQGHD}olB;K3;F^;zye^WtkLkdu4Q?9f2$31RhP>QY#LXo zoQ^9CYix{mD_v-8I^i=vF!W#z6wA096I58Pjv71(lzL86>d^p6}a6JZf!0&P%LXx=J%U!Y{7APoqA8n?ny0BK^QZ(1X8l+%mJ?? zE9Y+}@6EVJDR>13(kT)zyee9`Q*E?-q2(yY1rK1nurZYxpi5)`uRQDMp<8;0(YyZh@vi_ zmn}yhf1MWcw!Di&@cH5Sdg=gFZnW*@gq{QjJ zNBTWwHs=3Dsv+83z;5+RMfQbH8$BVgs1T@ z9B>5#8P&WhRI%ww4_EM(TcXQWr|H^C7!o6;iheYdA9^aN)=QNz;SPGr#TZ!!C#s&p z@ zaY@UaFh9)|c}jDTw<@!LSeB`*`A%9XV~SB3Ngu66l0odWTpzWbuc)ZRj@wY;_&ZmU z`Cnyaz9HvI#c-dSCLfukQjk8vy#g1x5J{`&7)^U4@JA+z!P@r`sR*w!m1K@09?oD> z+(EU7Y|Uw9+hG*Rxc}G^xkf##2#bAtO|?xzHf*`&r=z$Sog>><6nM}z2~f%estZAq zHR;YN4kzjfWL#Nth&253_9^^{A}3RV?eb)JYIb0*1#M3(COrei{e2W!CUKvplfNwK z?CE@e3(M!TM`_IR7b12@`p54{9cy8bSpM#ik##nNwQLPQq%x$%64r0=8 z$hc{gl!JW8QHjYhpu{L#DY}gNSIK}G9KO|^KFVlPHN$VC11+Pq->J&JZ;xJmVY(i+ zyfx9VM3|n7lhy#0Hx>AU5KyG0BY$j<4r9ZcNe|&t7>MLq_{q|btG4OPi3h0vyl$CXK)B#PC!bqX{ollpX zAYk#bG8%O)3b(ldAFG61Bz!n)OL8)^gMq4<7 zY^fqmWIG*_#-kCVYQ#^QC09bfT;x_{ywQ-Y#GS!zY?)zvoUq)G080?3o9F(Yep{yI z_2Mb_KVeq0FIN%PJnhi0Orycmy|T?@XPD;Yjm~Q~Ny{{7wDpH;-UyU)SEh?Xd2o z3F)f}38wdk&Y{*5ur$9fL>K)_tYXZlt*!6fFrCU6yGKJW6xp$;_riQOuulfW@u&Ds%I#lEq01P%#J_Vjb-WCn#IFo7yZGEf(B>Y?-aY>z#NK*g@>kk}XW(48>WZi-W1XGeSxA_XIr! z7YxZGBt0u|Cp?!pkVf z6mIsnZEWuVk|B$~A6zOfT3OzY_}{IE{eh@en`hS>ffNCP4cBa@9NQ@Li6n@W-v{lAYy8P|n_d%qbXeRgv>3eG{mpe7~!DSnYh| zeZ5HWOuQ7-uK&F1;#CL8=oVFWx4zD)jd4Ai961^>G=O_!W_c@qy&F=v2zXa7=@JBV z|J=M6_s=?ct{gD{)OjED`uQG~eE^=W!~y8{zd*Kr;;m2Ag8UfwOnW;<*%W|VyQvsf z$7@RXcxCrHn5E8embb^u?9;O!OT8rqcNgO^6ekwA&j|*Qj5wo@sTkh{u1I~axX*bx zLMN>skf$d~Z}Px5;fcRS$%_eDtU-Qet@ZEddv$(wA6h0Cjy>}qF*lzoP;0AG_jEtp zp6+5^2Trtes@{iO-e>{W!vKYwjsCUGU8D4E#HO~DZp!ZWJb#2|xsrEUWBk z;%Bd<{C@TQ(8%e?=u^bA&13677GHnAH*tT?+j4`O&5UOMDgW1rydhY%l_^rq1%e55@uO8N77r(I4{B7cng_%#6vkCFU*X76iE&D0ALuPDC#2=%VVkhj?^~K! 
z`qz@|^aX+swgU+gA`(5YV{nG>9lGH`eu5FePat{1RibvHMGkw@wZ4`pCq&3shzQsO z9NTxssbulXICHY)Pi_h~P&6ISGY{ zPvU3G!z^f$9fPB5?q={zw$(Z*3jUY2!=7vGtcP>k=it&7Hv8Z#FmUw3Z!mte>C*OR zHpaOvaaNGu0DQ!mo=%+f_oiXbiTf|t+`qC6cHjL|h}p`uS{-M}GSO%_r-h#bc!ZPW zPwAMe1=_kwN@N?K@S##<+t1Od<9rta1>QLfB{!;ix3T&wiC|^tu5|=VR1%zgb{eWEgTIKM^jZo!tJ2}Ap=-=pXN|46 zHOlKP5Y67uXS6BIyEQIa4Bw2cx|RcMv^o@38r|jH&Br*2RHgly=Hs(9uJ3|hzbJM# zJk$r-S6n4x0%vwZBFJ6`#dsOAuYKaq%a0DbZ|iY#E^h&c zy^+CZ{pPyQPT?F_zry%8u8det`Tsq=gpurt^a=Vqzare2pSriQ#?d{nOO9V%YoU58 z?TW~qL#RgIqH!g?=NCMiR5Z^XrW#Z$iY* z4n~JBt;(B??^Ap7LNeV;T1Q)BzOU!i!~W4PtWT_LAHl5K@^&o4&9Q9|(fN5c%hivQXA;>0y!&zk~ik@yk%5*Ru}H<#*2Wl7nZ)?DyA< zlhG)D$_M{~tc_>tSIidQ3qET9HrfxBO+;auh4{%d|1CD3&)xq2w*3Fcu(z7>0YgUr K;14wn1@(W&x0d1n literal 0 HcmV?d00001 diff --git a/tests/image_prep/_container_setup.yml b/tests/image_prep/_container_setup.yml index dc0bbf53..f2a5c5ff 100644 --- a/tests/image_prep/_container_setup.yml +++ b/tests/image_prep/_container_setup.yml @@ -79,6 +79,25 @@ - shell: locale-gen when: distro == "Debian" + - unarchive: + dest: / + src: ../data/docker/doas-debian.tar.gz + when: distro == "Debian" + + - file: + path: /usr/local/bin/doas + mode: 'u=rwxs,go=rx' + owner: root + group: root + when: distro == "Debian" + + - copy: + dest: /etc/doas.conf + content: | + permit :mitogen__group + permit :root + when: distro == "Debian" + # Vanilla Ansible needs simplejson on CentOS 5. - shell: mkdir -p /usr/lib/python2.4/site-packages/simplejson/ when: distro == "CentOS" and ver == "5" From eff81795911b4af3e7b906cbfeede8c832b64693 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 21:23:10 +0000 Subject: [PATCH 096/383] tests: add real test for doas. --- tests/doas_test.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/tests/doas_test.py b/tests/doas_test.py index 0e27c2ab..43284367 100644 --- a/tests/doas_test.py +++ b/tests/doas_test.py @@ -1,7 +1,9 @@ +import getpass import os import mitogen +import mitogen.doas import mitogen.parent import unittest2 @@ -27,5 +29,38 @@ class ConstructorTest(testlib.RouterMixin, testlib.TestCase): self.assertEquals('1', context.call(os.getenv, 'THIS_IS_STUB_DOAS')) +class DoasTest(testlib.DockerMixin, testlib.TestCase): + # Only mitogen/debian-test has doas. + mitogen_test_distro = 'debian' + + def test_password_required(self): + ssh = self.docker_ssh( + username='mitogen__has_sudo', + password='has_sudo_password', + ) + e = self.assertRaises(mitogen.core.StreamError, + lambda: self.router.doas(via=ssh) + ) + self.assertTrue(mitogen.doas.password_required_msg in str(e)) + + def test_password_incorrect(self): + ssh = self.docker_ssh( + username='mitogen__has_sudo', + password='has_sudo_password', + ) + e = self.assertRaises(mitogen.core.StreamError, + lambda: self.router.doas(via=ssh, password='x') + ) + self.assertTrue(mitogen.doas.password_incorrect_msg in str(e)) + + def test_password_okay(self): + ssh = self.docker_ssh( + username='mitogen__has_sudo', + password='has_sudo_password', + ) + context = self.router.su(via=ssh, password='rootpassword') + self.assertEquals('root', context.call(getpass.getuser)) + + if __name__ == '__main__': unittest2.main() From bb0c98f4514303b179e5d2473caee146a6f3abc7 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 21:24:45 +0000 Subject: [PATCH 097/383] tests: note location of related tests. 
--- tests/types_test.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/types_test.py b/tests/types_test.py index 8f120931..8e441c65 100644 --- a/tests/types_test.py +++ b/tests/types_test.py @@ -16,6 +16,11 @@ from mitogen.core import b import testlib +#### +#### see also message_test.py / PickledTest +#### + + class BlobTest(testlib.TestCase): klass = mitogen.core.Blob From 12569ad70a9f97a99bfd5ec81153f3cb122ea6a9 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 21:26:32 +0000 Subject: [PATCH 098/383] tests/bench: import ssh-roundtrip.py. --- tests/bench/ssh-roundtrip.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 tests/bench/ssh-roundtrip.py diff --git a/tests/bench/ssh-roundtrip.py b/tests/bench/ssh-roundtrip.py new file mode 100644 index 00000000..8745505d --- /dev/null +++ b/tests/bench/ssh-roundtrip.py @@ -0,0 +1,35 @@ +""" +Measure latency of SSH RPC. +""" + +import sys +import time + +import mitogen +import mitogen.utils +import ansible_mitogen.affinity + +mitogen.utils.setup_gil() +ansible_mitogen.affinity.policy.assign_worker() + +try: + xrange +except NameError: + xrange = range + +def do_nothing(): + pass + +@mitogen.main() +def main(router): + f = router.ssh(hostname=sys.argv[1]) + f.call(do_nothing) + t0 = time.time() + end = time.time() + 5.0 + i = 0 + while time.time() < end: + f.call(do_nothing) + i += 1 + t1 = time.time() + + print('++', float(1e3 * (t1 - t0) / (1.0+i)), 'ms') From d368971749e1dd5cfa08e4859b5df579b7aff633 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 21:27:15 +0000 Subject: [PATCH 099/383] core: introduce mitogen.core.pipe() It's used in later commit. This is an os.pipe() wrapper that traps the file descriptors in a file object, to ensure leaked objects will eventually be collected, and a central place exists to track open/closed status. --- mitogen/core.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/mitogen/core.py b/mitogen/core.py index 76baf4d3..4e7b347b 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -615,6 +615,20 @@ def import_module(modname): return __import__(modname, None, None, ['']) +def pipe(): + """ + Create a UNIX pipe pair using :func:`os.pipe`, wrapping the returned + descriptors in Python file objects in order to manage their lifetime and + ensure they are closed when their last reference is discarded and they have + not been closed explicitly. + """ + rfd, wfd = os.pipe() + return ( + os.fdopen(rfd, 'rb', 0), + os.fdopen(wfd, 'wb', 0) + ) + + def iter_split(buf, delim, func): """ Invoke `func(s)` for each `delim`-delimited chunk in the potentially large From c7ebb39ad4a44bcd13d0886c6eee0623e3482d7b Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 21:29:47 +0000 Subject: [PATCH 100/383] core: introduce Protocol, DelimitedProtocol and BufferedWriter. They aren't wired in yet as of this commit, and continue duplicating other code. --- mitogen/core.py | 137 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 137 insertions(+) diff --git a/mitogen/core.py b/mitogen/core.py index 4e7b347b..398b80ed 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -1485,6 +1485,143 @@ class LogHandler(logging.Handler): self.local.in_emit = False +class Protocol(object): + """ + Implement the program behaviour associated with activity on a + :class:`Stream`. 
The protocol in use may vary over a stream's life, for + example to allow :class:`mitogen.parent.BootstrapProtocol` to initialize + the connected child before handing it off to :class:`MitogenProtocol`. A + stream's active protocol is tracked in the :attr:`Stream.protocol` + attribute, and modified via :meth:`Stream.set_protocol`. + + Protocols do not handle IO, they are entirely reliant on the interface + provided by :class:`Stream` and :class:`Side`, allowing the underlying IO + implementation to be replaced without modifying behavioural logic. + """ + stream = None + + @classmethod + def build_stream(cls, *args, **kwargs): + stream = Stream() + stream.set_protocol(cls(*args, **kwargs)) + return stream + + def __repr__(self): + return '%s.%s(%s)' % ( + self.__class__.__module__, + self.__class__.__name__, + self.stream and self.stream.name, + ) + + def on_shutdown(self, broker): + _v and LOG.debug('%r.on_shutdown()', self) + self.stream.on_disconnect(broker) + + def on_disconnect(self, broker): + LOG.debug('%r.on_disconnect()', self) + if self.stream.receive_side: + broker.stop_receive(self.stream) + self.stream.receive_side.close() + if self.stream.transmit_side: + broker._stop_transmit(self.stream) + self.stream.transmit_side.close() + + +class DelimitedProtocol(Protocol): + """ + Provide a :meth:`Protocol.on_receive` implementation for protocols that are + delimited by a fixed string, like text based protocols. Each message is + passed to :meth:`on_line_received` as it arrives, with incomplete messages + passed to :meth:`on_partial_line_received`. + + When emulating user input it is often necessary to respond to incomplete + lines, such as when a "Password: " prompt is sent. + :meth:`on_partial_line_received` may be called repeatedly with an + increasingly complete message. When a complete message is finally received, + :meth:`on_line_received` will be called once for it before the buffer is + discarded. + """ + #: The delimiter. Defaults to newline. + delimiter = b('\n') + _trailer = b('') + + def on_receive(self, broker): + IOLOG.debug('%r.on_receive()', self) + buf = self.stream.receive_side.read() + if not buf: + return self.stream.on_disconnect(broker) + + self._trailer = mitogen.core.iter_split( + buf=self._trailer + buf, + delim=self.delimiter, + func=self.on_line_received, + ) + if self._trailer: + self.on_partial_line_received(self._trailer) + + def on_line_received(self, line): + pass + + def on_partial_line_received(self, line): + pass + + +class BufferedWriter(object): + """ + Implement buffered output while avoiding quadratic string operations. This + is currently constructed by each protocol, in future it may become fixed + for each stream instead. + """ + def __init__(self, broker, protocol): + self._broker = broker + self._protocol = protocol + self._buf = collections.deque() + self._len = 0 + + def write(self, s): + """ + Transmit `s` immediately, falling back to enqueuing it and marking the + stream writeable if no OS buffer space is available. + """ + if not self._len: + # Modifying epoll/Kqueue state is expensive, as are needless broker + # loops. Rather than wait for writeability, just write immediately, + # and fall back to the broker loop on error or full buffer. 
+ try: + n = self._protocol.stream.transmit_side.write(s) + if n: + if n == len(s): + return + s = s[n:] + except OSError: + pass + + self._broker._start_transmit(self._protocol.stream) + self._buf.append(s) + self._len += len(s) + + def on_transmit(self, broker): + """ + Respond to stream writeability by retrying previously buffered + :meth:`write` calls. + """ + if self._buf: + buf = self._buf.popleft() + written = self._protocol.stream.transmit_side.write(buf) + if not written: + _v and LOG.debug('%r.on_transmit(): disconnection detected', self) + self._protocol.on_disconnect(broker) + return + elif written != len(buf): + self._buf.appendleft(BufferType(buf, written)) + + _vv and IOLOG.debug('%r.on_transmit() -> len %d', self, written) + self._len -= written + + if not self._buf: + broker._stop_transmit(self._protocol.stream) + + class Side(object): """ Represent a single side of a :class:`BasicStream`. This exists to allow From f66611dc83c6a9bbd4c40a42e14985466e7d392a Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 21:52:27 +0000 Subject: [PATCH 101/383] parent: docstring improvements, cfmakeraw() regression. --- mitogen/parent.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/mitogen/parent.py b/mitogen/parent.py index a7dbfbd8..63794cfe 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -221,9 +221,11 @@ def flags(names): def cfmakeraw(tflags): - """Given a list returned by :py:func:`termios.tcgetattr`, return a list + """ + Given a list returned by :py:func:`termios.tcgetattr`, return a list modified in a manner similar to the `cfmakeraw()` C library function, but - additionally disabling local echo.""" + additionally disabling local echo. + """ # BSD: github.com/freebsd/freebsd/blob/master/lib/libc/gen/termios.c#L162 # Linux: github.com/lattera/glibc/blob/master/termios/cfmakeraw.c#L20 iflag, oflag, cflag, lflag, ispeed, ospeed, cc = tflags @@ -231,7 +233,7 @@ def cfmakeraw(tflags): 'ISTRIP INLCR ICRNL IXON IGNPAR') iflag &= ~flags('IGNBRK BRKINT PARMRK') oflag &= ~flags('OPOST') - lflag &= ~flags('ECHO ECHOE ECHOK ECHONL ICANON ISIG' + lflag &= ~flags('ECHO ECHOE ECHOK ECHONL ICANON ISIG ' 'IEXTEN NOFLSH TOSTOP PENDIN') cflag &= ~flags('CSIZE PARENB') cflag |= flags('CS8 CREAD') @@ -251,12 +253,11 @@ def disable_echo(fd): def create_socketpair(size=None): """ - Create a :func:`socket.socketpair` to use for use as a child process's UNIX - stdio channels. As socket pairs are bidirectional, they are economical on - file descriptor usage as the same descriptor can be used for ``stdin`` and + Create a :func:`socket.socketpair` for use as a child's UNIX stdio + channels. As socketpairs are bidirectional, they are economical on file + descriptor usage as one descriptor can be used for ``stdin`` and ``stdout``. As they are sockets their buffers are tunable, allowing large - buffers to be configured in order to improve throughput for file transfers - and reduce :class:`mitogen.core.Broker` IO loop iterations. + buffers to improve file transfer throughput and reduce IO loop iterations. """ parentfp, childfp = socket.socketpair() parentfp.setsockopt(socket.SOL_SOCKET, @@ -599,8 +600,8 @@ class TimerList(object): """ Efficiently manage a list of cancellable future events relative to wall clock time. 
An instance of this class is installed as - :attr:`mitogen.master.Broker.timers` by default, and installed as - :attr:`mitogen.core.Broker.timers` in a child context after a call to + :attr:`mitogen.master.Broker.timers` by default, and as + :attr:`mitogen.core.Broker.timers` in children after a call to :func:`mitogen.parent.upgrade_router`. You can use :class:`TimerList` to cause the broker to wake at arbitrary From fb23ecae0121ed588ed681f2cd97ebf2c110e7ad Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 10 Mar 2019 23:01:52 +0000 Subject: [PATCH 102/383] tests: jail_test fixes. --- tests/data/stubs/stub-jexec.py | 4 +--- tests/jail_test.py | 10 ++++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/data/stubs/stub-jexec.py b/tests/data/stubs/stub-jexec.py index 22028cf7..3f3e3bdc 100755 --- a/tests/data/stubs/stub-jexec.py +++ b/tests/data/stubs/stub-jexec.py @@ -11,7 +11,5 @@ os.environ['THIS_IS_STUB_JEXEC'] = '1' # This must be a child process and not exec() since Mitogen replaces its stderr # descriptor, causing the last user of the slave PTY to close it, resulting in # the master side indicating EIO. -print sys.argv -woeifj -subprocess.check_call(sys.argv[sys.argv.index('somejail') + 1:]) +subprocess.call(sys.argv[sys.argv.index('somejail') + 1:]) os._exit(0) diff --git a/tests/jail_test.py b/tests/jail_test.py index 0a0a21f8..7239d32f 100644 --- a/tests/jail_test.py +++ b/tests/jail_test.py @@ -17,14 +17,16 @@ class ConstructorTest(testlib.RouterMixin, testlib.TestCase): jexec_path=self.jexec_path, container='somejail', ) + stream = self.router.stream_by_id(context.context_id) + argv = eval(context.call(os.getenv, 'ORIGINAL_ARGV')) self.assertEquals(argv[:4], [ self.jexec_path, - '-u', - 'someuser', - '--', + 'somejail', + stream.conn.options.python_path, + '-c', ]) - self.assertEquals('1', context.call(os.getenv, 'THIS_IS_STUB_jail')) + self.assertEquals('1', context.call(os.getenv, 'THIS_IS_STUB_JEXEC')) if __name__ == '__main__': From 0f7bbcece9c81dec8a0e562247bded61f4950c45 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 11 Mar 2019 05:11:43 +0000 Subject: [PATCH 103/383] parent: remove unused Timer parameter. --- mitogen/parent.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mitogen/parent.py b/mitogen/parent.py index 63794cfe..04e832a9 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -576,11 +576,11 @@ class Timer(object): """ Represents a future event. """ - def __init__(self, timer_list, when, func): - self.timer_list = timer_list + cancelled = False + + def __init__(self, when, func): self.when = when self.func = func - self.cancelled = False def __eq__(self, other): return self.when == other.when @@ -636,7 +636,7 @@ class TimerList(object): def schedule(self, when, func): """ - Schedule a new future event. + Schedule a future event. :param float when: UNIX time in seconds when event should occur. @@ -646,7 +646,7 @@ class TimerList(object): A :class:`Timer` instance, exposing :meth:`Timer.cancel`, which may be used to cancel the future invocation. """ - timer = Timer(self, when, func) + timer = Timer(when, func) heapq.heappush(self._lst, timer) return timer From 46ebd56c7aad5e05df7f893c0a045a8e395699ab Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 11 Mar 2019 18:44:27 +0000 Subject: [PATCH 104/383] core/master: docstring, repr, and debug log message cleanups Debug output is vastly more readable now. 
--- mitogen/core.py | 85 ++++++++++++++++++++++++++--------------------- mitogen/master.py | 6 ++-- 2 files changed, 51 insertions(+), 40 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index 398b80ed..b97360f1 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -1342,7 +1342,7 @@ class Importer(object): tup = msg.unpickle() fullname = tup[0] - _v and LOG.debug('Importer._on_load_module(%r)', fullname) + _v and LOG.debug('importer: received %s', fullname) self._lock.acquire() try: @@ -1382,7 +1382,7 @@ class Importer(object): def load_module(self, fullname): fullname = to_text(fullname) - _v and LOG.debug('Importer.load_module(%r)', fullname) + _v and LOG.debug('importer: requesting %s', fullname) self._refuse_imports(fullname) event = threading.Event() @@ -1507,18 +1507,17 @@ class Protocol(object): return stream def __repr__(self): - return '%s.%s(%s)' % ( - self.__class__.__module__, + return '%s(%s)' % ( self.__class__.__name__, self.stream and self.stream.name, ) def on_shutdown(self, broker): - _v and LOG.debug('%r.on_shutdown()', self) + _v and LOG.debug('%r: shutting down', self) self.stream.on_disconnect(broker) def on_disconnect(self, broker): - LOG.debug('%r.on_disconnect()', self) + LOG.debug('%r: disconnecting', self) if self.stream.receive_side: broker.stop_receive(self.stream) self.stream.receive_side.close() @@ -1624,21 +1623,29 @@ class BufferedWriter(object): class Side(object): """ - Represent a single side of a :class:`BasicStream`. This exists to allow - streams implemented using unidirectional (e.g. UNIX pipe) and bidirectional - (e.g. UNIX socket) file descriptors to operate identically. + Represent one side of a :class:`Stream`. This allows unidirectional (e.g. + pipe) and bidirectional (e.g. socket) streams to operate identically. + + Sides are also responsible for tracking the open/closed state of the + underlying FD, preventing erroneous duplicate calls to :func:`os.close` due + to duplicate :meth:`Stream.on_disconnect` calls, which would otherwise risk + silently succeeding by closing an unrelated descriptor. For this reason, it + is crucial only one :class:`Side` exists per unique descriptor. :param mitogen.core.Stream stream: The stream this side is associated with. - - :param int fd: - Underlying file descriptor. - + :param object fp: + The file or socket object managing the underlying file descriptor. Any + object may be used that supports `fileno()` and `close()` methods. + :param bool cloexec: + If :data:`True`, the descriptor has its :data:`fcntl.FD_CLOEXEC` flag + enabled using :func:`fcntl.fcntl`. :param bool keep_alive: - Value for :attr:`keep_alive` - - During construction, the file descriptor has its :data:`os.O_NONBLOCK` flag - enabled using :func:`fcntl.fcntl`. + If :data:`True`, the continued existence of this side will extend the + shutdown grace period until it has been unregistered from the broker. + :param bool blocking: + If :data:`False`, the descriptor has its :data:`os.O_NONBLOCK` flag + enabled using :func:`fcntl.fcntl`. """ _fork_refs = weakref.WeakValueDictionary() @@ -1674,8 +1681,8 @@ class Side(object): Call :func:`os.close` on :attr:`fd` if it is not :data:`None`, then set it to :data:`None`. 
""" + _vv and IOLOG.debug('%r.close()', self) if not self.closed: - _vv and IOLOG.debug('%r.close()', self) self.closed = True os.close(self.fd) @@ -1699,7 +1706,7 @@ class Side(object): return b('') s, disconnected = io_op(os.read, self.fd, n) if disconnected: - LOG.debug('%r.read(): disconnected: %s', self, disconnected) + LOG.debug('%r: disconnected during read: %s', self, disconnected) return b('') return s @@ -1720,7 +1727,7 @@ class Side(object): written, disconnected = io_op(os.write, self.fd, s) if disconnected: - LOG.debug('%r.write(): disconnected: %s', self, disconnected) + LOG.debug('%r: disconnected during write: %s', self, disconnected) return None return written @@ -2004,7 +2011,7 @@ class Context(object): return _unpickle_context, (self.context_id, name) def on_disconnect(self): - _v and LOG.debug('%r.on_disconnect()', self) + _v and LOG.debug('%r: disconnecting', self) fire(self, 'disconnect') def send_async(self, msg, persist=False): @@ -2416,7 +2423,7 @@ class Latch(object): :meth:`put` to write a byte to our socket pair. """ _vv and IOLOG.debug( - '%r._get_sleep(timeout=%r, block=%r, rfd=%d, wfd=%d)', + '%r._get_sleep(timeout=%r, block=%r, fd=%d/%d)', self, timeout, block, rsock.fileno(), wsock.fileno() ) @@ -2514,10 +2521,9 @@ class Waker(BasicStream): self.transmit_side = Side(self, wfd) def __repr__(self): - return 'Waker(%r rfd=%r, wfd=%r)' % ( - self._broker, - self.receive_side and self.receive_side.fd, - self.transmit_side and self.transmit_side.fd, + return 'Waker(fd=%r/%r)' % ( + self.stream.receive_side and self.stream.receive_side.fd, + self.stream.transmit_side and self.stream.transmit_side.fd, ) @property @@ -2626,7 +2632,7 @@ class IoLogger(BasicStream): def on_shutdown(self, broker): """Shut down the write end of the logging socket.""" - _v and LOG.debug('%r.on_shutdown()', self) + _v and LOG.debug('%r: shutting down', self) if not IS_WSL: # #333: WSL generates invalid readiness indication on shutdown() self._wsock.shutdown(socket.SHUT_WR) @@ -2713,12 +2719,13 @@ class Router(object): corresponding :attr:`_context_by_id` member. This is replaced by :class:`mitogen.parent.RouteMonitor` in an upgraded context. """ - LOG.error('%r._on_del_route() %r', self, msg) if msg.is_dead: return target_id_s, _, name = bytes_partition(msg.data, b(':')) target_id = int(target_id_s, 10) + LOG.error('%r: deleting route to %s (%d)', + self, to_text(name), target_id) context = self._context_by_id.get(target_id) if context: fire(context, 'disconnect') @@ -2790,7 +2797,8 @@ class Router(object): the stream's receive side to the I/O multiplexer. This method remains public while the design has not yet settled. """ - _v and LOG.debug('register(%r, %r)', context, stream) + _v and LOG.debug('%s: registering %r to stream %r', + self, context, stream) self._write_lock.acquire() try: self._stream_by_id[context.context_id] = stream @@ -2916,7 +2924,7 @@ class Router(object): def on_shutdown(self, broker): """Called during :meth:`Broker.shutdown`, informs callbacks registered with :meth:`add_handle_cb` the connection is dead.""" - _v and LOG.debug('%r.on_shutdown(%r)', self, broker) + _v and LOG.debug('%r: shutting down', self, broker) fire(self, 'shutdown') for handle, (persist, fn) in self._handle_map.iteritems(): _v and LOG.debug('%r.on_shutdown(): killing %r: %r', self, handle, fn) @@ -3199,7 +3207,7 @@ class Broker(object): to shut down gracefully, then discard the :class:`Poller`. 
""" for _, (side, _) in self.poller.readers + self.poller.writers: - LOG.debug('_broker_main() force disconnecting %r', side) + LOG.debug('%r: force disconnecting %r', self, side) side.stream.on_disconnect(self) self.poller.close() @@ -3253,7 +3261,7 @@ class Broker(object): Request broker gracefully disconnect streams and stop. Safe to call from any thread. """ - _v and LOG.debug('%r.shutdown()', self) + _v and LOG.debug('%r: shutting down', self) def _shutdown(): self._alive = False if self._alive and not self._exitted: @@ -3267,7 +3275,7 @@ class Broker(object): self._thread.join() def __repr__(self): - return 'Broker(%#x)' % (id(self),) + return 'Broker(%04x)' % (id(self) & 0xffff,) class Dispatcher(object): @@ -3281,6 +3289,9 @@ class Dispatcher(object): mode, any exception that occurs is recorded, and causes all subsequent calls with the same `chain_id` to fail with the same exception. """ + def __repr__(self): + return 'Dispatcher' + def __init__(self, econtext): self.econtext = econtext #: Chain ID -> CallError if prior call failed. @@ -3297,7 +3308,7 @@ class Dispatcher(object): def _parse_request(self, msg): data = msg.unpickle(throw=False) - _v and LOG.debug('_dispatch_one(%r)', data) + _v and LOG.debug('%r: dispatching %r', self, data) chain_id, modname, klass, func, args, kwargs = data obj = import_module(modname) @@ -3331,7 +3342,7 @@ class Dispatcher(object): def _dispatch_calls(self): for msg in self.recv: chain_id, ret = self._dispatch_one(msg) - _v and LOG.debug('_dispatch_calls: %r -> %r', msg, ret) + _v and LOG.debug('%r: %r -> %r', self, msg, ret) if msg.reply_to: msg.reply(ret) elif isinstance(ret, CallError) and chain_id is None: @@ -3411,8 +3422,8 @@ class ExternalContext(object): th.start() def _on_shutdown_msg(self, msg): - _v and LOG.debug('_on_shutdown_msg(%r)', msg) if not msg.is_dead: + _v and LOG.debug('shutdown request from context %d', msg.src_id) self.broker.shutdown() def _on_parent_disconnect(self): @@ -3421,7 +3432,7 @@ class ExternalContext(object): mitogen.parent_id = None LOG.info('Detachment complete') else: - _v and LOG.debug('%r: parent stream is gone, dying.', self) + _v and LOG.debug('parent stream is gone, dying.') self.broker.shutdown() def detach(self): diff --git a/mitogen/master.py b/mitogen/master.py index ff6f3922..7bd2e78e 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -817,7 +817,7 @@ class ModuleResponder(object): ) def __repr__(self): - return 'ModuleResponder(%r)' % (self._router,) + return 'ModuleResponder' def add_source_override(self, fullname, path, source, is_pkg): """ @@ -927,8 +927,8 @@ class ModuleResponder(object): dst_id=stream.remote_id, handle=mitogen.core.LOAD_MODULE, ) - LOG.debug('%s: sending module %s (%.2f KiB)', - stream.name, fullname, len(msg.data) / 1024.0) + LOG.debug('%s: sending %s (%.2f KiB) to %s', + self, fullname, len(msg.data) / 1024.0, stream.name) self._router._async_route(msg) stream.sent_modules.add(fullname) if tup[2] is not None: From bf77d4ab1dda7def346e411c8bcd72aa5504cea4 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 11 Mar 2019 22:04:32 +0000 Subject: [PATCH 105/383] testlib: have LogCapturer.raw() return unicode on 2.x. 
--- tests/testlib.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/testlib.py b/tests/testlib.py index a0b02a39..4cfd1b1c 100644 --- a/tests/testlib.py +++ b/tests/testlib.py @@ -283,7 +283,11 @@ class LogCapturer(object): self.logger.level = logging.DEBUG def raw(self): - return self.sio.getvalue() + s = self.sio.getvalue() + # Python 2.x logging package hard-wires UTF-8 output. + if isinstance(s, mitogen.core.BytesType): + s = s.decode('utf-8') + return s def msgs(self): return self.handler.msgs From 33ecc8a5d2240b03226548e72bd541faf600e342 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 12 Mar 2019 01:07:30 +0000 Subject: [PATCH 106/383] issue #507: log fatal errors to syslog. Next round should log entire exception text, but this is useful enough already. --- mitogen/core.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index b97360f1..0890735d 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -50,6 +50,7 @@ import signal import socket import struct import sys +import syslog import threading import time import traceback @@ -3245,8 +3246,10 @@ class Broker(object): fire(self, 'shutdown') self._broker_shutdown() - except Exception: - LOG.exception('_broker_main() crashed') + except Exception as e: + LOG.exception('broker crashed') + syslog.syslog(syslog.LOG_ERR, 'broker crashed: %s' % (e,)) + syslog.closelog() # prevent test 'fd leak'. self._alive = False # Ensure _alive is consistent on crash. self._exitted = True From 37beb3a5c5786afbac02e390b61e362f40123eba Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 12 Mar 2019 01:08:11 +0000 Subject: [PATCH 107/383] core: teach iter_split() to break on callback returning False. --- mitogen/core.py | 15 +++++++++++---- tests/iter_split_test.py | 27 +++++++++++++++++++++++---- 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index 0890735d..a715158b 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -634,16 +634,23 @@ def iter_split(buf, delim, func): """ Invoke `func(s)` for each `delim`-delimited chunk in the potentially large `buf`, avoiding intermediate lists and quadratic string operations. Return - the trailing undelimited portion of `buf`. + the trailing undelimited portion of `buf`, or any unprocessed portion of + `buf` after `func(s)` returned :data:`False`. + + :returns: + `(trailer, cont)`, where `cont` is :data:`False` if the last call to + `func(s)` returned :data:`False`. 
""" dlen = len(delim) start = 0 - while True: + cont = True + while cont: nl = buf.find(delim, start) if nl == -1: - return buf[start:] - func(buf[start:nl]) + break + cont = not func(buf[start:nl]) is False start = nl + dlen + return buf[start:], cont class Py24Pickler(py_pickle.Pickler): diff --git a/tests/iter_split_test.py b/tests/iter_split_test.py index a385b2f1..6178ef0c 100644 --- a/tests/iter_split_test.py +++ b/tests/iter_split_test.py @@ -12,30 +12,49 @@ class IterSplitTest(unittest2.TestCase): def test_empty_buffer(self): lst = [] - trailer = self.func(buf='', delim='\n', func=lst.append) + trailer, cont = self.func(buf='', delim='\n', func=lst.append) + self.assertTrue(cont) self.assertEquals('', trailer) self.assertEquals([], lst) def test_empty_line(self): lst = [] - trailer = self.func(buf='\n', delim='\n', func=lst.append) + trailer, cont = self.func(buf='\n', delim='\n', func=lst.append) + self.assertTrue(cont) self.assertEquals('', trailer) self.assertEquals([''], lst) def test_one_line(self): buf = 'xxxx\n' lst = [] - trailer = self.func(buf=buf, delim='\n', func=lst.append) + trailer, cont = self.func(buf=buf, delim='\n', func=lst.append) + self.assertTrue(cont) self.assertEquals('', trailer) self.assertEquals(lst, ['xxxx']) def test_one_incomplete(self): buf = 'xxxx\nyy' lst = [] - trailer = self.func(buf=buf, delim='\n', func=lst.append) + trailer, cont = self.func(buf=buf, delim='\n', func=lst.append) + self.assertTrue(cont) self.assertEquals('yy', trailer) self.assertEquals(lst, ['xxxx']) + def test_returns_false_immediately(self): + buf = 'xxxx\nyy' + func = lambda buf: False + trailer, cont = self.func(buf=buf, delim='\n', func=func) + self.assertFalse(cont) + self.assertEquals('yy', trailer) + + def test_returns_false_second_call(self): + buf = 'xxxx\nyy\nzz' + it = iter([True, False]) + func = lambda buf: next(it) + trailer, cont = self.func(buf=buf, delim='\n', func=func) + self.assertFalse(cont) + self.assertEquals('zz', trailer) + if __name__ == '__main__': unittest2.main() From 8d1b01d8efba5adcc996c93700826377c3d8e21f Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 12 Mar 2019 01:15:27 +0000 Subject: [PATCH 108/383] Refactor Stream, introduce quasi-asynchronous connect, much more Split Stream into many, many classes * mitogen.parent.Connection: Handles connection setup logic only. * Maintain references to stdout and stderr streams. * Manages TimerList timer to cancel connection attempt after deadline * Blocking setup code replaced by async equivalents running on the broker * mitogen.parent.Options: Tracks connection-specific options. This keeps the connection class small, but more importantly, it is generic to the future desire to build and execute command lines without starting a full connection. * mitogen.core.Protocol: Handles program behaviour relating to events on a stream. Protocol performs no IO of its own, instead deferring it to Stream and Side. This makes testing much easier, and means libssh can reimplement Stream and Side to reuse MitogenProtocol * mitogen.core.MitogenProtocol: Guts of the old Mitogen stream implementtion * mitogen.core.BufferedWriter: Guts of the old Mitogen buffered transmit implementation, made generic * mitogen.core.DelineatedProtocol: Guts of the old IoLogger, knows how to split up input and pass it on to a on_line_received()/on_partial_line_received() callback. * mitogen.parent.BootstrapProtocol: Asynchronous equivalent of the old blocking connect code. 
Waits for various prompts (MITO001 etc) and writes the bootstrap using a BufferedWriter. On success, switches the stream to MitogenProtocol. * mitogen.core.Message: move encoding parts of MitogenProtocol out to Message (where it belongs) and write a bunch of new tests for pickling. * The bizarre Stream.construct() is gone now, Option.__init__ is its own constructor. Should fix many LGTM errors. * Update all connection methods: Every connection method is updated to use async logic, defining protocols as required to handle interactive prompts like in SSH or su. Add new real integration tests for at least doas and su. * Eliminate manual fd management: File descriptors are trapped in file objects at their point of origin, and Side is updated to use file objects rather than raw descriptors. This eliminates a whole class of bugs where unrelated FDs could be closed by the wrong component. Now an FD's open/closed status is fused to it everywhere in the library. * Halve file descriptor usage: now FD open/close state is tracked by its file object, we don't need to duplicate FDs everywhere so that receive/transmit side can be closed independently. Instead both sides back on to the same file object. Closes #26, Closes #470. * Remove most uses of dup/dup2: Closes #256. File descriptors are trapped in a common file object and shared among classes. The remaining few uses for dup/dup2 are as close to minimal as possible. * Introduce mitogen.parent.Process: uniform interface for subprocesses created either via mitogen.fork or the subprocess module. Remove all the crap where we steal a pid from subprocess guts. Now we use subprocess to manage its processes as it should be. Closes #169 by using the new Timers facility to poll for a slow-to-exit subprocess. * Fix su password race: Closes #363. DelineatedProtocol naturally retries partially received lines, preventing the cause of the original race. * Delete old blocking IO utility functions iter_read()/write_all()/discard_until(). 
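
To orient reviewers, here is a minimal sketch of how the pieces described in
the bullets above compose. It is illustrative only: EchoProtocol is an
invented subclass, and the direct write/sleep at the end exists purely to
demonstrate the loopback; only Protocol.build_stream(), DelimitedProtocol,
Stream.accept(), mitogen.core.pipe() and Broker.start_receive()/shutdown()/
join() are interfaces from this change or the existing core:

    import time
    import mitogen.core

    class EchoProtocol(mitogen.core.DelimitedProtocol):
        # Invoked once per complete newline-delimited chunk read by the broker.
        def on_line_received(self, line):
            print('line: %r' % (line,))

    rfp, wfp = mitogen.core.pipe()           # paired read/write file objects
    stream = EchoProtocol.build_stream()     # Stream with the protocol attached
    stream.accept(rfp, wfp)                  # wrap the pipe ends in Sides
    broker = mitogen.core.Broker()
    broker.start_receive(stream)             # broker thread now feeds on_receive()

    wfp.write(b'hello\nworld\n')             # arrives as two on_line_received() calls
    time.sleep(0.2)                          # let the broker thread drain the pipe
    broker.shutdown()
    broker.join()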
Closes #26 Closes #147 Closes #169 Closes #256 Closes #363 Closes #419 Closes #470 --- mitogen/core.py | 441 +++++++------- mitogen/doas.py | 138 +++-- mitogen/docker.py | 54 +- mitogen/fakessh.py | 49 +- mitogen/fork.py | 96 +-- mitogen/jail.py | 39 +- mitogen/kubectl.py | 33 +- mitogen/lxc.py | 32 +- mitogen/lxd.py | 34 +- mitogen/master.py | 25 +- mitogen/parent.py | 962 +++++++++++++++--------------- mitogen/service.py | 5 +- mitogen/setns.py | 61 +- mitogen/ssh.py | 328 +++++----- mitogen/su.py | 158 +++-- mitogen/sudo.py | 126 ++-- mitogen/unix.py | 103 ++-- preamble_size.py | 12 +- tests/data/iter_read_generator.py | 13 - tests/data/write_all_consumer.py | 9 - tests/docker_test.py | 2 +- tests/first_stage_test.py | 9 +- tests/lxc_test.py | 2 +- tests/lxd_test.py | 2 +- tests/mitogen_protocol_test.py | 34 ++ tests/parent_test.py | 206 ++----- tests/poller_test.py | 20 +- tests/responder_test.py | 4 +- tests/router_test.py | 25 +- tests/service_test.py | 4 +- tests/ssh_test.py | 14 +- tests/stream_test.py | 33 - tests/su_test.py | 35 +- tests/sudo_test.py | 9 +- tests/unix_test.py | 28 +- 35 files changed, 1550 insertions(+), 1595 deletions(-) delete mode 100755 tests/data/iter_read_generator.py delete mode 100755 tests/data/write_all_consumer.py create mode 100644 tests/mitogen_protocol_test.py delete mode 100644 tests/stream_test.py diff --git a/mitogen/core.py b/mitogen/core.py index a715158b..6c6f1ec2 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -1374,10 +1374,12 @@ class Importer(object): if not present: funcs = self._callbacks.get(fullname) if funcs is not None: - _v and LOG.debug('_request_module(%r): in flight', fullname) + _v and LOG.debug('%s: existing request for %s in flight', + self, fullname) funcs.append(callback) else: - _v and LOG.debug('_request_module(%r): new request', fullname) + _v and LOG.debug('%s: requesting %s from parent', + self, fullname) self._callbacks[fullname] = [callback] self._context.send( Message(data=b(fullname), handle=GET_MODULE) @@ -1493,6 +1495,80 @@ class LogHandler(logging.Handler): self.local.in_emit = False +class Stream(object): + #: A :class:`Side` representing the stream's receive file descriptor. + receive_side = None + + #: A :class:`Side` representing the stream's transmit file descriptor. + transmit_side = None + + #: A :class:`Protocol` representing the protocol active on the stream. + protocol = None + + #: In parents, the :class:`mitogen.parent.Connection` instance. + conn = None + + name = u'default' + + def set_protocol(self, protocol): + self.protocol = protocol + self.protocol.stream = self + + def accept(self, rfp, wfp): + self.receive_side = Side(self, rfp) + self.transmit_side = Side(self, wfp) + + def __repr__(self): + return "" % (self.name,) + + def on_receive(self, broker): + """ + Called by :class:`Broker` when the stream's :attr:`receive_side` has + been marked readable using :meth:`Broker.start_receive` and the broker + has detected the associated file descriptor is ready for reading. + + Subclasses must implement this if :meth:`Broker.start_receive` is ever + called on them, and the method must call :meth:`on_disconect` if + reading produces an empty string. 
+ """ + buf = self.receive_side.read(self.protocol.read_size) + if not buf: + LOG.debug('%r: empty read, disconnecting', self) + return self.on_disconnect(broker) + + self.protocol.on_receive(broker, buf) + + def on_transmit(self, broker): + """ + Called by :class:`Broker` when the stream's :attr:`transmit_side` + has been marked writeable using :meth:`Broker._start_transmit` and + the broker has detected the associated file descriptor is ready for + writing. + + Subclasses must implement this if :meth:`Broker._start_transmit` is + ever called on them. + """ + self.protocol.on_transmit(broker) + + def on_shutdown(self, broker): + """ + Called by :meth:`Broker.shutdown` to allow the stream time to + gracefully shutdown. The base implementation simply called + :meth:`on_disconnect`. + """ + fire(self, 'shutdown') + self.protocol.on_shutdown(broker) + + def on_disconnect(self, broker): + """ + Called by :class:`Broker` to force disconnect the stream. The base + implementation simply closes :attr:`receive_side` and + :attr:`transmit_side` and unregisters the stream from the broker. + """ + fire(self, 'disconnect') + self.protocol.on_disconnect(broker) + + class Protocol(object): """ Implement the program behaviour associated with activity on a @@ -1506,11 +1582,13 @@ class Protocol(object): provided by :class:`Stream` and :class:`Side`, allowing the underlying IO implementation to be replaced without modifying behavioural logic. """ + stream_class = Stream stream = None + read_size = CHUNK_SIZE @classmethod def build_stream(cls, *args, **kwargs): - stream = Stream() + stream = cls.stream_class() stream.set_protocol(cls(*args, **kwargs)) return stream @@ -1547,24 +1625,30 @@ class DelimitedProtocol(Protocol): increasingly complete message. When a complete message is finally received, :meth:`on_line_received` will be called once for it before the buffer is discarded. + + If :func:`on_line_received` returns :data:`False`, remaining data is passed + unprocessed to the stream's current protocol's :meth:`on_receive`. This + allows switching from line-oriented to binary while the input buffer + contains both kinds of data. """ #: The delimiter. Defaults to newline. delimiter = b('\n') _trailer = b('') - def on_receive(self, broker): + def on_receive(self, broker, buf): IOLOG.debug('%r.on_receive()', self) - buf = self.stream.receive_side.read() - if not buf: - return self.stream.on_disconnect(broker) - - self._trailer = mitogen.core.iter_split( + self._trailer, cont = mitogen.core.iter_split( buf=self._trailer + buf, delim=self.delimiter, func=self.on_line_received, ) + if self._trailer: - self.on_partial_line_received(self._trailer) + if cont: + self.on_partial_line_received(self._trailer) + else: + assert self.stream.protocol is not self + self.stream.protocol.on_receive(broker, self._trailer) def on_line_received(self, line): pass @@ -1656,23 +1740,24 @@ class Side(object): enabled using :func:`fcntl.fcntl`. """ _fork_refs = weakref.WeakValueDictionary() + closed = False - def __init__(self, stream, fd, cloexec=True, keep_alive=True, blocking=False): + def __init__(self, stream, fp, cloexec=True, keep_alive=True, blocking=False): #: The :class:`Stream` for which this is a read or write side. self.stream = stream #: Integer file descriptor to perform IO on, or :data:`None` if #: :meth:`close` has been called. 
- self.fd = fd - self.closed = False + self.fp = fp + self.fd = fp.fileno() #: If :data:`True`, causes presence of this side in #: :class:`Broker`'s active reader set to defer shutdown until the #: side is disconnected. self.keep_alive = keep_alive self._fork_refs[id(self)] = self if cloexec: - set_cloexec(fd) + set_cloexec(self.fd) if not blocking: - set_nonblock(fd) + set_nonblock(self.fd) def __repr__(self): return '' % (self.stream, self.fd) @@ -1692,7 +1777,7 @@ class Side(object): _vv and IOLOG.debug('%r.close()', self) if not self.closed: self.closed = True - os.close(self.fd) + self.fp.close() def read(self, n=CHUNK_SIZE): """ @@ -1728,9 +1813,8 @@ class Side(object): Number of bytes written, or :data:`None` if disconnection was detected. """ - if self.closed or self.fd is None: - # Refuse to touch the handle after closed, it may have been reused - # by another thread. + if self.closed: + # Don't touch the handle after close, it may be reused elsewhere. return None written, disconnected = io_op(os.write, self.fd, s) @@ -1740,67 +1824,10 @@ class Side(object): return written -class BasicStream(object): - #: A :class:`Side` representing the stream's receive file descriptor. - receive_side = None - - #: A :class:`Side` representing the stream's transmit file descriptor. - transmit_side = None - - def on_receive(self, broker): - """ - Called by :class:`Broker` when the stream's :attr:`receive_side` has - been marked readable using :meth:`Broker.start_receive` and the broker - has detected the associated file descriptor is ready for reading. - - Subclasses must implement this if :meth:`Broker.start_receive` is ever - called on them, and the method must call :meth:`on_disconect` if - reading produces an empty string. - """ - pass - - def on_transmit(self, broker): - """ - Called by :class:`Broker` when the stream's :attr:`transmit_side` - has been marked writeable using :meth:`Broker._start_transmit` and - the broker has detected the associated file descriptor is ready for - writing. - - Subclasses must implement this if :meth:`Broker._start_transmit` is - ever called on them. - """ - pass - - def on_shutdown(self, broker): - """ - Called by :meth:`Broker.shutdown` to allow the stream time to - gracefully shutdown. The base implementation simply called - :meth:`on_disconnect`. - """ - _v and LOG.debug('%r.on_shutdown()', self) - fire(self, 'shutdown') - self.on_disconnect(broker) - - def on_disconnect(self, broker): - """ - Called by :class:`Broker` to force disconnect the stream. The base - implementation simply closes :attr:`receive_side` and - :attr:`transmit_side` and unregisters the stream from the broker. - """ - LOG.debug('%r.on_disconnect()', self) - if self.receive_side: - broker.stop_receive(self) - self.receive_side.close() - if self.transmit_side: - broker._stop_transmit(self) - self.transmit_side.close() - fire(self, 'disconnect') - - -class Stream(BasicStream): +class MitogenProtocol(Protocol): """ - :class:`BasicStream` subclass implementing mitogen's :ref:`stream - protocol `. + :class:`Protocol` implementing mitogen's :ref:`stream protocol + `. """ #: If not :data:`None`, :class:`Router` stamps this into #: :attr:`Message.auth_id` of every message received on this stream. @@ -1811,24 +1838,24 @@ class Stream(BasicStream): #: :data:`mitogen.parent_ids`. 
is_privileged = False - def __init__(self, router, remote_id, **kwargs): + def __init__(self, router, remote_id): self._router = router self.remote_id = remote_id - self.name = u'default' self.sent_modules = set(['mitogen', 'mitogen.core']) - self.construct(**kwargs) self._input_buf = collections.deque() - self._output_buf = collections.deque() self._input_buf_len = 0 - self._output_buf_len = 0 + self._writer = BufferedWriter(router.broker, self) + #: Routing records the dst_id of every message arriving from this #: stream. Any arriving DEL_ROUTE is rebroadcast for any such ID. self.egress_ids = set() - def construct(self): - pass - - def _internal_receive(self, broker, buf): + def on_receive(self, broker, buf): + """ + Handle the next complete message on the stream. Raise + :class:`StreamError` on failure. + """ + _vv and IOLOG.debug('%r.on_receive()', self) if self._input_buf and self._input_buf_len < 128: self._input_buf[0] += buf else: @@ -1838,60 +1865,45 @@ class Stream(BasicStream): while self._receive_one(broker): pass - def on_receive(self, broker): - """Handle the next complete message on the stream. Raise - :class:`StreamError` on failure.""" - _vv and IOLOG.debug('%r.on_receive()', self) - - buf = self.receive_side.read() - if not buf: - return self.on_disconnect(broker) - - self._internal_receive(broker, buf) - - HEADER_FMT = '>hLLLLLL' - HEADER_LEN = struct.calcsize(HEADER_FMT) - HEADER_MAGIC = 0x4d49 # 'MI' - corrupt_msg = ( - 'Corruption detected: frame signature incorrect. This likely means ' - 'some external process is interfering with the connection. Received:' + '%s: Corruption detected: frame signature incorrect. This likely means' + ' some external process is interfering with the connection. Received:' '\n\n' '%r' ) def _receive_one(self, broker): - if self._input_buf_len < self.HEADER_LEN: + if self._input_buf_len < Message.HEADER_LEN: return False msg = Message() msg.router = self._router (magic, msg.dst_id, msg.src_id, msg.auth_id, msg.handle, msg.reply_to, msg_len) = struct.unpack( - self.HEADER_FMT, - self._input_buf[0][:self.HEADER_LEN], + Message.HEADER_FMT, + self._input_buf[0][:Message.HEADER_LEN], ) - if magic != self.HEADER_MAGIC: - LOG.error(self.corrupt_msg, self._input_buf[0][:2048]) - self.on_disconnect(broker) + if magic != Message.HEADER_MAGIC: + LOG.error(self.corrupt_msg, self.stream.name, self._input_buf[0][:2048]) + self.stream.on_disconnect(broker) return False if msg_len > self._router.max_message_size: LOG.error('Maximum message size exceeded (got %d, max %d)', msg_len, self._router.max_message_size) - self.on_disconnect(broker) + self.stream.on_disconnect(broker) return False - total_len = msg_len + self.HEADER_LEN + total_len = msg_len + Message.HEADER_LEN if self._input_buf_len < total_len: _vv and IOLOG.debug( '%r: Input too short (want %d, got %d)', - self, msg_len, self._input_buf_len - self.HEADER_LEN + self, msg_len, self._input_buf_len - Message.HEADER_LEN ) return False - start = self.HEADER_LEN + start = Message.HEADER_LEN prev_start = start remain = total_len bits = [] @@ -1906,7 +1918,7 @@ class Stream(BasicStream): msg.data = b('').join(bits) self._input_buf.appendleft(buf[prev_start+len(bit):]) self._input_buf_len -= total_len - self._router._async_route(msg, self) + self._router._async_route(msg, self.stream) return True def pending_bytes(self): @@ -1918,50 +1930,16 @@ class Stream(BasicStream): For an accurate result, this method should be called from the Broker thread, for example by using :meth:`Broker.defer_sync`. 
""" - return self._output_buf_len + return self._writer._len def on_transmit(self, broker): """Transmit buffered messages.""" _vv and IOLOG.debug('%r.on_transmit()', self) - - if self._output_buf: - buf = self._output_buf.popleft() - written = self.transmit_side.write(buf) - if not written: - _v and LOG.debug('%r.on_transmit(): disconnection detected', self) - self.on_disconnect(broker) - return - elif written != len(buf): - self._output_buf.appendleft(BufferType(buf, written)) - - _vv and IOLOG.debug('%r.on_transmit() -> len %d', self, written) - self._output_buf_len -= written - - if not self._output_buf: - broker._stop_transmit(self) + self._writer.on_transmit(broker) def _send(self, msg): _vv and IOLOG.debug('%r._send(%r)', self, msg) - pkt = struct.pack(self.HEADER_FMT, self.HEADER_MAGIC, msg.dst_id, - msg.src_id, msg.auth_id, msg.handle, - msg.reply_to or 0, len(msg.data)) + msg.data - - if not self._output_buf_len: - # Modifying epoll/Kqueue state is expensive, as are needless broker - # loops. Rather than wait for writeability, just write immediately, - # and fall back to the broker loop on error or full buffer. - try: - n = self.transmit_side.write(pkt) - if n: - if n == len(pkt): - return - pkt = pkt[n:] - except OSError: - pass - - self._router.broker._start_transmit(self) - self._output_buf.append(pkt) - self._output_buf_len += len(pkt) + self._writer.write(msg.pack()) def send(self, msg): """Send `data` to `handle`, and tell the broker we have output. May @@ -1969,17 +1947,8 @@ class Stream(BasicStream): self._router.broker.defer(self._send, msg) def on_shutdown(self, broker): - """Override BasicStream behaviour of immediately disconnecting.""" - _v and LOG.debug('%r.on_shutdown(%r)', self, broker) - - def accept(self, rfd, wfd): - # TODO: what is this os.dup for? - self.receive_side = Side(self, os.dup(rfd)) - self.transmit_side = Side(self, os.dup(wfd)) - - def __repr__(self): - cls = type(self) - return "%s.%s('%s')" % (cls.__module__, cls.__name__, self.name) + """Disable :class:`Protocol` immediate disconnect behaviour.""" + _v and LOG.debug('%r: shutting down', self) class Context(object): @@ -2005,18 +1974,17 @@ class Context(object): :param str name: Context name. """ + name = None remote_name = None def __init__(self, router, context_id, name=None): self.router = router self.context_id = context_id - self.name = name + if name: + self.name = to_text(name) def __reduce__(self): - name = self.name - if name and not isinstance(name, UnicodeType): - name = UnicodeType(name, 'utf-8') - return _unpickle_context, (self.context_id, name) + return _unpickle_context, (self.context_id, self.name) def on_disconnect(self): _v and LOG.debug('%r: disconnecting', self) @@ -2161,7 +2129,7 @@ class Poller(object): self._wfds = {} def __repr__(self): - return '%s(%#x)' % (type(self).__name__, id(self)) + return '%s' % (type(self).__name__,) def _update(self, fd): """ @@ -2509,7 +2477,7 @@ class Latch(object): ) -class Waker(BasicStream): +class Waker(Protocol): """ :class:`BasicStream` subclass implementing the `UNIX self-pipe trick`_. Used to wake the multiplexer when another thread needs to modify its state @@ -2517,17 +2485,20 @@ class Waker(BasicStream): .. 
_UNIX self-pipe trick: https://cr.yp.to/docs/selfpipe.html """ + read_size = 1 broker_ident = None + @classmethod + def build_stream(cls, broker): + stream = super(Waker, cls).build_stream(broker) + stream.accept(*pipe()) + return stream + def __init__(self, broker): self._broker = broker self._lock = threading.Lock() self._deferred = [] - rfd, wfd = os.pipe() - self.receive_side = Side(self, rfd) - self.transmit_side = Side(self, wfd) - def __repr__(self): return 'Waker(fd=%r/%r)' % ( self.stream.receive_side and self.stream.receive_side.fd, @@ -2545,7 +2516,7 @@ class Waker(BasicStream): finally: self._lock.release() - def on_receive(self, broker): + def on_receive(self, broker, buf): """ Drain the pipe and fire callbacks. Since :attr:`_deferred` is synchronized, :meth:`defer` and :meth:`on_receive` can conspire to @@ -2554,7 +2525,6 @@ class Waker(BasicStream): _vv and IOLOG.debug('%r.on_receive()', self) self._lock.acquire() try: - self.receive_side.read(1) deferred = self._deferred self._deferred = [] finally: @@ -2566,7 +2536,7 @@ class Waker(BasicStream): except Exception: LOG.exception('defer() crashed: %r(*%r, **%r)', func, args, kwargs) - self._broker.shutdown() + broker.shutdown() def _wake(self): """ @@ -2574,7 +2544,7 @@ class Waker(BasicStream): teardown, the FD may already be closed, so ignore EBADF. """ try: - self.transmit_side.write(b(' ')) + self.stream.transmit_side.write(b(' ')) except OSError: e = sys.exc_info()[1] if e.args[0] != errno.EBADF: @@ -2601,7 +2571,8 @@ class Waker(BasicStream): if self._broker._exitted: raise Error(self.broker_shutdown_msg) - _vv and IOLOG.debug('%r.defer() [fd=%r]', self, self.transmit_side.fd) + _vv and IOLOG.debug('%r.defer() [fd=%r]', self, + self.stream.transmit_side.fd) self._lock.acquire() try: if not self._deferred: @@ -2611,53 +2582,45 @@ class Waker(BasicStream): self._lock.release() -class IoLogger(BasicStream): +class IoLoggerProtocol(DelimitedProtocol): """ - :class:`BasicStream` subclass that sets up redirection of a standard - UNIX file descriptor back into the Python :mod:`logging` package. + Handle redirection of standard IO into the :mod:`logging` package. """ - _trailer = u'' - - def __init__(self, broker, name, dest_fd): - self._broker = broker - self._name = name - self._rsock, self._wsock = socket.socketpair() - os.dup2(self._wsock.fileno(), dest_fd) - set_cloexec(self._wsock.fileno()) + @classmethod + def build_stream(cls, name, dest_fd): + """ + Even though the descriptor `dest_fd` will hold the opposite end of the + socket open, we must keep a separate dup() of it (i.e. wsock) in case + some code decides to overwrite `dest_fd` later, which would thus break + :meth:`on_shutdown`. + """ + rsock, wsock = socket.socketpair() + os.dup2(wsock.fileno(), dest_fd) + stream = super(IoLoggerProtocol, cls).build_stream(name) + stream.name = name + stream.accept(rsock, wsock) + return stream + def __init__(self, name): self._log = logging.getLogger(name) # #453: prevent accidental log initialization in a child creating a # feedback loop. 
self._log.propagate = False self._log.handlers = logging.getLogger().handlers[:] - self.receive_side = Side(self, self._rsock.fileno()) - self.transmit_side = Side(self, dest_fd, cloexec=False, blocking=True) - self._broker.start_receive(self) - - def __repr__(self): - return '' % (self._name,) - def on_shutdown(self, broker): """Shut down the write end of the logging socket.""" _v and LOG.debug('%r: shutting down', self) if not IS_WSL: - # #333: WSL generates invalid readiness indication on shutdown() - self._wsock.shutdown(socket.SHUT_WR) - self._wsock.close() - self.transmit_side.close() + # #333: WSL generates invalid readiness indication on shutdown(). + # This modifies the *kernel object* inherited by children, causing + # EPIPE on subsequent writes to any dupped FD in any process. The + # read side can then drain completely of prior buffered data. + self.stream.transmit_side.fp.shutdown(socket.SHUT_WR) + self.stream.transmit_side.close() - def on_receive(self, broker): - _vv and IOLOG.debug('%r.on_receive()', self) - buf = self.receive_side.read() - if not buf: - return self.on_disconnect(broker) - - self._trailer = iter_split( - buf=self._trailer + buf.decode('latin1'), - delim='\n', - func=lambda s: self._log.info('%s', s) - ) + def on_line_received(self, line): + self._log.info('%s', line.decode('utf-8', 'replace')) class Router(object): @@ -3008,11 +2971,11 @@ class Router(object): self, msg.src_id, in_stream, expect, msg) return - if in_stream.auth_id is not None: - msg.auth_id = in_stream.auth_id + if in_stream.protocol.auth_id is not None: + msg.auth_id = in_stream.protocol.auth_id # Maintain a set of IDs the source ever communicated with. - in_stream.egress_ids.add(msg.dst_id) + in_stream.protocol.egress_ids.add(msg.dst_id) if msg.dst_id == mitogen.context_id: return self._invoke(msg, in_stream) @@ -3027,12 +2990,13 @@ class Router(object): return if in_stream and self.unidirectional and not \ - (in_stream.is_privileged or out_stream.is_privileged): + (in_stream.protocol.is_privileged or + out_stream.protocol.is_privileged): self._maybe_send_dead(msg, self.unidirectional_msg, - in_stream.remote_id, out_stream.remote_id) + in_stream.protocol.remote_id, out_stream.protocol.remote_id) return - out_stream._send(msg) + out_stream.protocol._send(msg) def route(self, msg): """ @@ -3074,11 +3038,11 @@ class Broker(object): def __init__(self, poller_class=None, activate_compat=True): self._alive = True self._exitted = False - self._waker = Waker(self) + self._waker = Waker.build_stream(self) #: Arrange for `func(\*args, \**kwargs)` to be executed on the broker #: thread, or immediately if the current thread is the broker thread. #: Safe to call from any thread. - self.defer = self._waker.defer + self.defer = self._waker.protocol.defer self.poller = self.poller_class() self.poller.start_receive( self._waker.receive_side.fd, @@ -3112,7 +3076,7 @@ class Broker(object): """ _vv and IOLOG.debug('%r.start_receive(%r)', self, stream) side = stream.receive_side - assert side and side.fd is not None + assert side and not side.closed self.defer(self.poller.start_receive, side.fd, (side, stream.on_receive)) @@ -3133,7 +3097,7 @@ class Broker(object): """ _vv and IOLOG.debug('%r._start_transmit(%r)', self, stream) side = stream.transmit_side - assert side and side.fd is not None + assert side and not side.closed self.poller.start_transmit(side.fd, (side, stream.on_transmit)) def _stop_transmit(self, stream): @@ -3246,7 +3210,7 @@ class Broker(object): :meth:`shutdown` is called. 
""" # For Python 2.4, no way to retrieve ident except on thread. - self._waker.broker_ident = thread.get_ident() + self._waker.protocol.broker_ident = thread.get_ident() try: while self._alive: self._loop_once() @@ -3486,18 +3450,16 @@ class ExternalContext(object): else: self.parent = Context(self.router, parent_id, 'parent') - in_fd = self.config.get('in_fd', 100) - out_fd = self.config.get('out_fd', 1) - self.stream = Stream(self.router, parent_id) + in_fp = os.fdopen(os.dup(self.config.get('in_fd', 100)), 'rb', 0) + out_fp = os.fdopen(os.dup(self.config.get('out_fd', 1)), 'wb', 0) + self.stream = MitogenProtocol.build_stream(self.router, parent_id) + self.stream.accept(in_fp, out_fp) self.stream.name = 'parent' - self.stream.accept(in_fd, out_fd) self.stream.receive_side.keep_alive = False listen(self.stream, 'disconnect', self._on_parent_disconnect) listen(self.broker, 'exit', self._on_broker_exit) - os.close(in_fd) - def _reap_first_stage(self): try: os.wait() # Reap first stage. @@ -3584,7 +3546,7 @@ class ExternalContext(object): try: if os.isatty(2): self.reserve_tty_fp = os.fdopen(os.dup(2), 'r+b', 0) - set_cloexec(self.reserve_tty_fp) + set_cloexec(self.reserve_tty_fp.fileno()) except OSError: pass @@ -3600,8 +3562,12 @@ class ExternalContext(object): sys.stdout.close() self._nullify_stdio() - self.stdout_log = IoLogger(self.broker, 'stdout', 1) - self.stderr_log = IoLogger(self.broker, 'stderr', 2) + self.loggers = [] + for name, fd in (('stdout', 1), ('stderr', 2)): + log = IoLoggerProtocol.build_stream(name, fd) + self.broker.start_receive(log) + self.loggers.append(log) + # Reopen with line buffering. sys.stdout = os.fdopen(1, 'w', 1) @@ -3621,11 +3587,11 @@ class ExternalContext(object): self.dispatcher = Dispatcher(self) self.router.register(self.parent, self.stream) self.router._setup_logging() - self.log_handler.uncork() sys.executable = os.environ.pop('ARGV0', sys.executable) - _v and LOG.debug('Connected to context %s; my ID is %r', - self.parent, mitogen.context_id) + _v and LOG.debug('Parent is context %r (%s); my ID is %r', + self.parent.context_id, self.parent.name, + mitogen.context_id) _v and LOG.debug('pid:%r ppid:%r uid:%r/%r, gid:%r/%r host:%r', os.getpid(), os.getppid(), os.geteuid(), os.getuid(), os.getegid(), os.getgid(), @@ -3633,6 +3599,9 @@ class ExternalContext(object): _v and LOG.debug('Recovered sys.executable: %r', sys.executable) self.broker._py24_25_compat() + if self.config.get('send_ec2', True): + self.stream.transmit_side.write(b('MITO002\n')) + self.log_handler.uncork() self.dispatcher.run() _v and LOG.debug('ExternalContext.main() normal exit') except KeyboardInterrupt: diff --git a/mitogen/doas.py b/mitogen/doas.py index 1b687fb2..fc37325b 100644 --- a/mitogen/doas.py +++ b/mitogen/doas.py @@ -29,6 +29,7 @@ # !mitogen: minify_safe import logging +import re import mitogen.core import mitogen.parent @@ -37,77 +38,106 @@ from mitogen.core import b LOG = logging.getLogger(__name__) +password_incorrect_msg = 'doas password is incorrect' +password_required_msg = 'doas password is required' + class PasswordError(mitogen.core.StreamError): pass -class Stream(mitogen.parent.Stream): - create_child = staticmethod(mitogen.parent.hybrid_tty_create_child) - child_is_immediate_subprocess = False - - username = 'root' +class Options(mitogen.parent.Options): + username = u'root' password = None doas_path = 'doas' - password_prompt = b('Password:') + password_prompt = u'Password:' incorrect_prompts = ( - b('doas: authentication failed'), + u'doas: 
authentication failed', # slicer69/doas + u'doas: Authorization failed', # openbsd/src ) - def construct(self, username=None, password=None, doas_path=None, - password_prompt=None, incorrect_prompts=None, **kwargs): - super(Stream, self).construct(**kwargs) + def __init__(self, username=None, password=None, doas_path=None, + password_prompt=None, incorrect_prompts=None, **kwargs): + super(Options, self).__init__(**kwargs) if username is not None: - self.username = username + self.username = mitogen.core.to_text(username) if password is not None: - self.password = password + self.password = mitogen.core.to_text(password) if doas_path is not None: self.doas_path = doas_path if password_prompt is not None: - self.password_prompt = password_prompt.lower() + self.password_prompt = mitogen.core.to_text(password_prompt) if incorrect_prompts is not None: - self.incorrect_prompts = map(str.lower, incorrect_prompts) + self.incorrect_prompts = [ + mitogen.core.to_text(p) + for p in incorrect_prompts + ] + + +class BootstrapProtocol(mitogen.parent.RegexProtocol): + password_sent = False + + def setup_patterns(self, conn): + prompt_pattern = re.compile( + re.escape(conn.options.password_prompt).encode('utf-8'), + re.I + ) + incorrect_prompt_pattern = re.compile( + u'|'.join( + re.escape(s) + for s in conn.options.incorrect_prompts + ).encode('utf-8'), + re.I + ) + + self.PATTERNS = [ + (incorrect_prompt_pattern, type(self)._on_incorrect_password), + ] + self.PARTIAL_PATTERNS = [ + (prompt_pattern, type(self)._on_password_prompt), + ] + + def _on_incorrect_password(self, line, match): + if self.password_sent: + self.stream.conn._fail_connection( + PasswordError(password_incorrect_msg) + ) + + def _on_password_prompt(self, line, match): + if self.stream.conn.options.password is None: + self.stream.conn._fail_connection( + PasswordError(password_required_msg) + ) + return + + if self.password_sent: + self.stream.conn._fail_connection( + PasswordError(password_incorrect_msg) + ) + return + + LOG.debug('sending password') + self.stream.transmit_side.write( + (self.stream.conn.options.password + '\n').encode('utf-8') + ) + self.password_sent = True + + +class Connection(mitogen.parent.Connection): + options_class = Options + diag_protocol_class = BootstrapProtocol + + create_child = staticmethod(mitogen.parent.hybrid_tty_create_child) + child_is_immediate_subprocess = False def _get_name(self): - return u'doas.' + mitogen.core.to_text(self.username) + return u'doas.' 
+ self.options.username + + def diag_stream_factory(self): + stream = super(Connection, self).diag_stream_factory() + stream.protocol.setup_patterns(self) + return stream def get_boot_command(self): - bits = [self.doas_path, '-u', self.username, '--'] - bits = bits + super(Stream, self).get_boot_command() - LOG.debug('doas command line: %r', bits) - return bits - - password_incorrect_msg = 'doas password is incorrect' - password_required_msg = 'doas password is required' - - def _connect_input_loop(self, it): - password_sent = False - for buf in it: - LOG.debug('%r: received %r', self, buf) - if buf.endswith(self.EC0_MARKER): - self._ec0_received() - return - if any(s in buf.lower() for s in self.incorrect_prompts): - if password_sent: - raise PasswordError(self.password_incorrect_msg) - elif self.password_prompt in buf.lower(): - if self.password is None: - raise PasswordError(self.password_required_msg) - if password_sent: - raise PasswordError(self.password_incorrect_msg) - LOG.debug('sending password') - self.diag_stream.transmit_side.write( - mitogen.core.to_text(self.password + '\n').encode('utf-8') - ) - password_sent = True - raise mitogen.core.StreamError('bootstrap failed') - - def _connect_bootstrap(self): - it = mitogen.parent.iter_read( - fds=[self.receive_side.fd, self.diag_stream.receive_side.fd], - deadline=self.connect_deadline, - ) - try: - self._connect_input_loop(it) - finally: - it.close() + bits = [self.options.doas_path, '-u', self.options.username, '--'] + return bits + super(Connection, self).get_boot_command() diff --git a/mitogen/docker.py b/mitogen/docker.py index 0c0d40e7..48848c89 100644 --- a/mitogen/docker.py +++ b/mitogen/docker.py @@ -37,45 +37,47 @@ import mitogen.parent LOG = logging.getLogger(__name__) -class Stream(mitogen.parent.Stream): - child_is_immediate_subprocess = False - +class Options(mitogen.parent.Options): container = None image = None username = None - docker_path = 'docker' - - # TODO: better way of capturing errors such as "No such container." - create_child_args = { - 'merge_stdio': True - } + docker_path = u'docker' - def construct(self, container=None, image=None, - docker_path=None, username=None, - **kwargs): + def __init__(self, container=None, image=None, docker_path=None, + username=None, **kwargs): + super(Options, self).__init__(**kwargs) assert container or image - super(Stream, self).construct(**kwargs) if container: - self.container = container + self.container = mitogen.core.to_text(container) if image: - self.image = image + self.image = mitogen.core.to_text(image) if docker_path: - self.docker_path = docker_path + self.docker_path = mitogen.core.to_text(docker_path) if username: - self.username = username + self.username = mitogen.core.to_text(username) + + +class Connection(mitogen.parent.Connection): + options_class = Options + child_is_immediate_subprocess = False + + # TODO: better way of capturing errors such as "No such container." + create_child_args = { + 'merge_stdio': True + } def _get_name(self): - return u'docker.' + (self.container or self.image) + return u'docker.' 
+ (self.options.container or self.options.image) def get_boot_command(self): args = ['--interactive'] - if self.username: - args += ['--user=' + self.username] + if self.options.username: + args += ['--user=' + self.options.username] - bits = [self.docker_path] - if self.container: - bits += ['exec'] + args + [self.container] - elif self.image: - bits += ['run'] + args + ['--rm', self.image] + bits = [self.options.docker_path] + if self.options.container: + bits += ['exec'] + args + [self.options.container] + elif self.options.image: + bits += ['run'] + args + ['--rm', self.options.image] - return bits + super(Stream, self).get_boot_command() + return bits + super(Connection, self).get_boot_command() diff --git a/mitogen/fakessh.py b/mitogen/fakessh.py index d39a710d..f56e8838 100644 --- a/mitogen/fakessh.py +++ b/mitogen/fakessh.py @@ -117,14 +117,12 @@ SSH_GETOPTS = ( _mitogen = None -class IoPump(mitogen.core.BasicStream): +class IoPump(mitogen.core.Protocol): _output_buf = '' _closed = False - def __init__(self, broker, stdin_fd, stdout_fd): + def __init__(self, broker): self._broker = broker - self.receive_side = mitogen.core.Side(self, stdout_fd) - self.transmit_side = mitogen.core.Side(self, stdin_fd) def write(self, s): self._output_buf += s @@ -134,13 +132,13 @@ class IoPump(mitogen.core.BasicStream): self._closed = True # If local process hasn't exitted yet, ensure its write buffer is # drained before lazily triggering disconnect in on_transmit. - if self.transmit_side.fd is not None: + if self.transmit_side.fp.fileno() is not None: self._broker._start_transmit(self) - def on_shutdown(self, broker): + def on_shutdown(self, stream, broker): self.close() - def on_transmit(self, broker): + def on_transmit(self, stream, broker): written = self.transmit_side.write(self._output_buf) IOLOG.debug('%r.on_transmit() -> len %r', self, written) if written is None: @@ -153,8 +151,8 @@ class IoPump(mitogen.core.BasicStream): if self._closed: self.on_disconnect(broker) - def on_receive(self, broker): - s = self.receive_side.read() + def on_receive(self, stream, broker): + s = stream.receive_side.read() IOLOG.debug('%r.on_receive() -> len %r', self, len(s)) if s: mitogen.core.fire(self, 'receive', s) @@ -163,8 +161,8 @@ class IoPump(mitogen.core.BasicStream): def __repr__(self): return 'IoPump(%r, %r)' % ( - self.receive_side.fd, - self.transmit_side.fd, + self.receive_side.fp.fileno(), + self.transmit_side.fp.fileno(), ) @@ -173,14 +171,15 @@ class Process(object): Manages the lifetime and pipe connections of the SSH command running in the slave. 
""" - def __init__(self, router, stdin_fd, stdout_fd, proc=None): + def __init__(self, router, stdin_fp, stdout_fp, proc=None): self.router = router - self.stdin_fd = stdin_fd - self.stdout_fd = stdout_fd + self.stdin_fp = stdin_fp + self.stdout_fp = stdout_fp self.proc = proc self.control_handle = router.add_handler(self._on_control) self.stdin_handle = router.add_handler(self._on_stdin) - self.pump = IoPump(router.broker, stdin_fd, stdout_fd) + self.pump = IoPump.build_stream(router.broker) + self.pump.accept(stdin_fp, stdout_fp) self.stdin = None self.control = None self.wake_event = threading.Event() @@ -193,7 +192,7 @@ class Process(object): pmon.add(proc.pid, self._on_proc_exit) def __repr__(self): - return 'Process(%r, %r)' % (self.stdin_fd, self.stdout_fd) + return 'Process(%r, %r)' % (self.stdin_fp, self.stdout_fp) def _on_proc_exit(self, status): LOG.debug('%r._on_proc_exit(%r)', self, status) @@ -202,12 +201,12 @@ class Process(object): def _on_stdin(self, msg): if msg.is_dead: IOLOG.debug('%r._on_stdin() -> %r', self, data) - self.pump.close() + self.pump.protocol.close() return data = msg.unpickle() IOLOG.debug('%r._on_stdin() -> len %d', self, len(data)) - self.pump.write(data) + self.pump.protocol.write(data) def _on_control(self, msg): if not msg.is_dead: @@ -279,13 +278,7 @@ def _start_slave(src_id, cmdline, router): stdout=subprocess.PIPE, ) - process = Process( - router, - proc.stdin.fileno(), - proc.stdout.fileno(), - proc, - ) - + process = Process(router, proc.stdin, proc.stdout, proc) return process.control_handle, process.stdin_handle @@ -361,7 +354,9 @@ def _fakessh_main(dest_context_id, econtext): LOG.debug('_fakessh_main: received control_handle=%r, stdin_handle=%r', control_handle, stdin_handle) - process = Process(econtext.router, 1, 0) + process = Process(econtext.router, + stdin_fp=os.fdopen(1, 'w+b', 0), + stdout_fp=os.fdopen(0, 'r+b', 0)) process.start_master( stdin=mitogen.core.Sender(dest, stdin_handle), control=mitogen.core.Sender(dest, control_handle), @@ -427,7 +422,7 @@ def run(dest, router, args, deadline=None, econtext=None): stream = mitogen.core.Stream(router, context_id) stream.name = u'fakessh' - stream.accept(sock1.fileno(), sock1.fileno()) + stream.accept(sock1, sock1) router.register(fakessh, stream) # Held in socket buffer until process is booted. diff --git a/mitogen/fork.py b/mitogen/fork.py index d6685d70..a247fd5b 100644 --- a/mitogen/fork.py +++ b/mitogen/fork.py @@ -28,6 +28,7 @@ # !mitogen: minify_safe +import errno import logging import os import random @@ -119,32 +120,45 @@ def handle_child_crash(): os._exit(1) -class Stream(mitogen.parent.Stream): - child_is_immediate_subprocess = True - +class Process(mitogen.parent.Process): + def poll(self): + try: + pid, status = os.waitpid(self.pid, os.WNOHANG) + except OSError: + e = sys.exc_info()[1] + if e.args[0] == errno.ECHILD: + LOG.warn('%r: waitpid(%r) produced ECHILD', self, self.pid) + return + raise + + if not pid: + return + if os.WIFEXITED(status): + return os.WEXITSTATUS(status) + elif os.WIFSIGNALED(status): + return -os.WTERMSIG(status) + elif os.WIFSTOPPED(status): + return -os.WSTOPSIG(status) + + +class Options(mitogen.parent.Options): #: Reference to the importer, if any, recovered from the parent. importer = None #: User-supplied function for cleaning up child process state. 
on_fork = None - python_version_msg = ( - "The mitogen.fork method is not supported on Python versions " - "prior to 2.6, since those versions made no attempt to repair " - "critical interpreter state following a fork. Please use the " - "local() method instead." - ) - - def construct(self, old_router, max_message_size, on_fork=None, - debug=False, profiling=False, unidirectional=False, - on_start=None): + def __init__(self, old_router, max_message_size, on_fork=None, debug=False, + profiling=False, unidirectional=False, on_start=None, + name=None): if not FORK_SUPPORTED: raise Error(self.python_version_msg) # fork method only supports a tiny subset of options. - super(Stream, self).construct(max_message_size=max_message_size, - debug=debug, profiling=profiling, - unidirectional=False) + super(Options, self).__init__( + max_message_size=max_message_size, debug=debug, + profiling=profiling, unidirectional=unidirectional, name=name, + ) self.on_fork = on_fork self.on_start = on_start @@ -152,17 +166,26 @@ class Stream(mitogen.parent.Stream): if isinstance(responder, mitogen.parent.ModuleForwarder): self.importer = responder.importer + +class Connection(mitogen.parent.Connection): + options_class = Options + child_is_immediate_subprocess = True + + python_version_msg = ( + "The mitogen.fork method is not supported on Python versions " + "prior to 2.6, since those versions made no attempt to repair " + "critical interpreter state following a fork. Please use the " + "local() method instead." + ) + name_prefix = u'fork' def start_child(self): parentfp, childfp = mitogen.parent.create_socketpair() - self.pid = os.fork() - if self.pid: + pid = os.fork() + if pid: childfp.close() - # Decouple the socket from the lifetime of the Python socket object. - fd = os.dup(parentfp.fileno()) - parentfp.close() - return self.pid, fd, None + return Process(pid, parentfp) else: parentfp.close() self._wrap_child_main(childfp) @@ -173,12 +196,24 @@ class Stream(mitogen.parent.Stream): except BaseException: handle_child_crash() + def get_econtext_config(self): + config = super(Connection, self).get_econtext_config() + config['core_src_fd'] = None + config['importer'] = self.options.importer + config['send_ec2'] = False + config['setup_package'] = False + if self.options.on_start: + config['on_start'] = self.options.on_start + return config + def _child_main(self, childfp): on_fork() - if self.on_fork: - self.on_fork() + if self.options.on_fork: + self.options.on_fork() mitogen.core.set_block(childfp.fileno()) + childfp.send('MITO002\n') + # Expected by the ExternalContext.main(). os.dup2(childfp.fileno(), 1) os.dup2(childfp.fileno(), 100) @@ -201,23 +236,12 @@ class Stream(mitogen.parent.Stream): if childfp.fileno() not in (0, 1, 100): childfp.close() - config = self.get_econtext_config() - config['core_src_fd'] = None - config['importer'] = self.importer - config['setup_package'] = False - if self.on_start: - config['on_start'] = self.on_start - try: try: - mitogen.core.ExternalContext(config).main() + mitogen.core.ExternalContext(self.get_econtext_config()).main() except Exception: # TODO: report exception somehow. os._exit(72) finally: # Don't trigger atexit handlers, they were copied from the parent. os._exit(0) - - def _connect_bootstrap(self): - # None required. 
- pass diff --git a/mitogen/jail.py b/mitogen/jail.py index 6e0ac68b..c7c1f0f9 100644 --- a/mitogen/jail.py +++ b/mitogen/jail.py @@ -37,29 +37,34 @@ import mitogen.parent LOG = logging.getLogger(__name__) -class Stream(mitogen.parent.Stream): - child_is_immediate_subprocess = False - create_child_args = { - 'merge_stdio': True - } - +class Options(mitogen.parent.Options): container = None username = None - jexec_path = '/usr/sbin/jexec' + jexec_path = u'/usr/sbin/jexec' - def construct(self, container, jexec_path=None, username=None, **kwargs): - super(Stream, self).construct(**kwargs) - self.container = container - self.username = username + def __init__(self, container, jexec_path=None, username=None, **kwargs): + super(Options, self).__init__(**kwargs) + self.container = mitogen.core.to_text(container) + if username: + self.username = mitogen.core.to_text(username) if jexec_path: self.jexec_path = jexec_path + +class Connection(mitogen.parent.Connection): + options_class = Options + + child_is_immediate_subprocess = False + create_child_args = { + 'merge_stdio': True + } + def _get_name(self): - return u'jail.' + self.container + return u'jail.' + self.options.container def get_boot_command(self): - bits = [self.jexec_path] - if self.username: - bits += ['-U', self.username] - bits += [self.container] - return bits + super(Stream, self).get_boot_command() + bits = [self.options.jexec_path] + if self.options.username: + bits += ['-U', self.options.username] + bits += [self.options.container] + return bits + super(Connection, self).get_boot_command() diff --git a/mitogen/kubectl.py b/mitogen/kubectl.py index ef626e1b..acc011b9 100644 --- a/mitogen/kubectl.py +++ b/mitogen/kubectl.py @@ -37,29 +37,36 @@ import mitogen.parent LOG = logging.getLogger(__name__) -class Stream(mitogen.parent.Stream): - child_is_immediate_subprocess = True - +class Options(mitogen.parent.Options): pod = None kubectl_path = 'kubectl' kubectl_args = None - # TODO: better way of capturing errors such as "No such container." - create_child_args = { - 'merge_stdio': True - } - - def construct(self, pod, kubectl_path=None, kubectl_args=None, **kwargs): - super(Stream, self).construct(**kwargs) + def __init__(self, pod, kubectl_path=None, kubectl_args=None, **kwargs): + super(Options, self).__init__(**kwargs) assert pod self.pod = pod if kubectl_path: self.kubectl_path = kubectl_path self.kubectl_args = kubectl_args or [] + +class Connection(mitogen.parent.Connection): + options_class = Options + child_is_immediate_subprocess = True + + # TODO: better way of capturing errors such as "No such container." 
+ create_child_args = { + 'merge_stdio': True + } + def _get_name(self): - return u'kubectl.%s%s' % (self.pod, self.kubectl_args) + return u'kubectl.%s%s' % (self.options.pod, self.options.kubectl_args) def get_boot_command(self): - bits = [self.kubectl_path] + self.kubectl_args + ['exec', '-it', self.pod] - return bits + ["--"] + super(Stream, self).get_boot_command() + bits = [ + self.options.kubectl_path + ] + self.options.kubectl_args + [ + 'exec', '-it', self.options.pod + ] + return bits + ["--"] + super(Connection, self).get_boot_command() diff --git a/mitogen/lxc.py b/mitogen/lxc.py index 879d19a1..759475c1 100644 --- a/mitogen/lxc.py +++ b/mitogen/lxc.py @@ -37,7 +37,20 @@ import mitogen.parent LOG = logging.getLogger(__name__) -class Stream(mitogen.parent.Stream): +class Options(mitogen.parent.Options): + container = None + lxc_attach_path = 'lxc-attach' + + def __init__(self, container, lxc_attach_path=None, **kwargs): + super(Options, self).__init__(**kwargs) + self.container = container + if lxc_attach_path: + self.lxc_attach_path = lxc_attach_path + + +class Connection(mitogen.parent.Connection): + options_class = Options + child_is_immediate_subprocess = False create_child_args = { # If lxc-attach finds any of stdin, stdout, stderr connected to a TTY, @@ -47,29 +60,20 @@ class Stream(mitogen.parent.Stream): 'merge_stdio': True } - container = None - lxc_attach_path = 'lxc-attach' - eof_error_hint = ( 'Note: many versions of LXC do not report program execution failure ' 'meaningfully. Please check the host logs (/var/log) for more ' 'information.' ) - def construct(self, container, lxc_attach_path=None, **kwargs): - super(Stream, self).construct(**kwargs) - self.container = container - if lxc_attach_path: - self.lxc_attach_path = lxc_attach_path - def _get_name(self): - return u'lxc.' + self.container + return u'lxc.' + self.options.container def get_boot_command(self): bits = [ - self.lxc_attach_path, + self.options.lxc_attach_path, '--clear-env', - '--name', self.container, + '--name', self.options.container, '--', ] - return bits + super(Stream, self).get_boot_command() + return bits + super(Connection, self).get_boot_command() diff --git a/mitogen/lxd.py b/mitogen/lxd.py index faea2561..6fbe0694 100644 --- a/mitogen/lxd.py +++ b/mitogen/lxd.py @@ -37,7 +37,21 @@ import mitogen.parent LOG = logging.getLogger(__name__) -class Stream(mitogen.parent.Stream): +class Options(mitogen.parent.Options): + container = None + lxc_path = 'lxc' + python_path = 'python' + + def __init__(self, container, lxc_path=None, **kwargs): + super(Options, self).__init__(**kwargs) + self.container = container + if lxc_path: + self.lxc_path = lxc_path + + +class Connection(mitogen.parent.Connection): + options_class = Options + child_is_immediate_subprocess = False create_child_args = { # If lxc finds any of stdin, stdout, stderr connected to a TTY, to @@ -47,31 +61,21 @@ class Stream(mitogen.parent.Stream): 'merge_stdio': True } - container = None - lxc_path = 'lxc' - python_path = 'python' - eof_error_hint = ( 'Note: many versions of LXC do not report program execution failure ' 'meaningfully. Please check the host logs (/var/log) for more ' 'information.' ) - def construct(self, container, lxc_path=None, **kwargs): - super(Stream, self).construct(**kwargs) - self.container = container - if lxc_path: - self.lxc_path = lxc_path - def _get_name(self): - return u'lxd.' + self.container + return u'lxd.' 
+ self.options.container def get_boot_command(self): bits = [ - self.lxc_path, + self.options.lxc_path, 'exec', '--mode=noninteractive', - self.container, + self.options.container, '--', ] - return bits + super(Stream, self).get_boot_command() + return bits + super(Connection, self).get_boot_command() diff --git a/mitogen/master.py b/mitogen/master.py index 7bd2e78e..b5365693 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -531,14 +531,15 @@ class SysModulesMethod(FinderMethod): return if not isinstance(module, types.ModuleType): - LOG.debug('sys.modules[%r] absent or not a regular module', - fullname) + LOG.debug('%r: sys.modules[%r] absent or not a regular module', + self, fullname) return path = _py_filename(getattr(module, '__file__', '')) if not path: return + LOG.debug('%r: sys.modules[%r]: found %s', self, fullname, path) is_pkg = hasattr(module, '__path__') try: source = inspect.getsource(module) @@ -920,17 +921,17 @@ class ModuleResponder(object): return tup def _send_load_module(self, stream, fullname): - if fullname not in stream.sent_modules: + if fullname not in stream.protocol.sent_modules: tup = self._build_tuple(fullname) msg = mitogen.core.Message.pickled( tup, - dst_id=stream.remote_id, + dst_id=stream.protocol.remote_id, handle=mitogen.core.LOAD_MODULE, ) LOG.debug('%s: sending %s (%.2f KiB) to %s', self, fullname, len(msg.data) / 1024.0, stream.name) self._router._async_route(msg) - stream.sent_modules.add(fullname) + stream.protocol.sent_modules.add(fullname) if tup[2] is not None: self.good_load_module_count += 1 self.good_load_module_size += len(msg.data) @@ -939,23 +940,23 @@ class ModuleResponder(object): def _send_module_load_failed(self, stream, fullname): self.bad_load_module_count += 1 - stream.send( + stream.protocol.send( mitogen.core.Message.pickled( self._make_negative_response(fullname), - dst_id=stream.remote_id, + dst_id=stream.protocol.remote_id, handle=mitogen.core.LOAD_MODULE, ) ) def _send_module_and_related(self, stream, fullname): - if fullname in stream.sent_modules: + if fullname in stream.protocol.sent_modules: return try: tup = self._build_tuple(fullname) for name in tup[4]: # related parent, _, _ = str_partition(name, '.') - if parent != fullname and parent not in stream.sent_modules: + if parent != fullname and parent not in stream.protocol.sent_modules: # Parent hasn't been sent, so don't load submodule yet. 
continue @@ -976,7 +977,7 @@ class ModuleResponder(object): fullname = msg.data.decode() LOG.debug('%s requested module %s', stream.name, fullname) self.get_module_count += 1 - if fullname in stream.sent_modules: + if fullname in stream.protocol.sent_modules: LOG.warning('_on_get_module(): dup request for %r from %r', fullname, stream) @@ -987,12 +988,12 @@ class ModuleResponder(object): self.get_module_secs += time.time() - t0 def _send_forward_module(self, stream, context, fullname): - if stream.remote_id != context.context_id: + if stream.protocol.remote_id != context.context_id: stream.send( mitogen.core.Message( data=b('%s\x00%s' % (context.context_id, fullname)), handle=mitogen.core.FORWARD_MODULE, - dst_id=stream.remote_id, + dst_id=stream.protocol.remote_id, ) ) diff --git a/mitogen/parent.py b/mitogen/parent.py index 04e832a9..91426881 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -42,6 +42,7 @@ import heapq import inspect import logging import os +import re import signal import socket import struct @@ -208,7 +209,7 @@ def is_immediate_child(msg, stream): Handler policy that requires messages to arrive only from immediately connected children. """ - return msg.src_id == stream.remote_id + return msg.src_id == stream.protocol.remote_id def flags(names): @@ -269,36 +270,18 @@ def create_socketpair(size=None): return parentfp, childfp -def detach_popen(**kwargs): +def popen(**kwargs): """ - Use :class:`subprocess.Popen` to construct a child process, then hack the - Popen so that it forgets the child it created, allowing it to survive a - call to Popen.__del__. - - If the child process is not detached, there is a race between it exitting - and __del__ being called. If it exits before __del__ runs, then __del__'s - call to :func:`os.waitpid` will capture the one and only exit event - delivered to this process, causing later 'legitimate' calls to fail with - ECHILD. - - :param list close_on_error: - Array of integer file descriptors to close on exception. - :returns: - Process ID of the new child. + Wrap :class:`subprocess.Popen` to ensure any global :data:`_preexec_hook` + is invoked in the child. """ - # This allows Popen() to be used for e.g. graceful post-fork error - # handling, without tying the surrounding code into managing a Popen - # object, which isn't possible for at least :mod:`mitogen.fork`. This - # should be replaced by a swappable helper class in a future version. real_preexec_fn = kwargs.pop('preexec_fn', None) def preexec_fn(): if _preexec_hook: _preexec_hook() if real_preexec_fn: real_preexec_fn() - proc = subprocess.Popen(preexec_fn=preexec_fn, **kwargs) - proc._child_created = False - return proc.pid + return subprocess.Popen(preexec_fn=preexec_fn, **kwargs) def create_child(args, merge_stdio=False, stderr_pipe=False, preexec_fn=None): @@ -318,7 +301,7 @@ def create_child(args, merge_stdio=False, stderr_pipe=False, preexec_fn=None): logs generated by e.g. SSH to be outpu as the session progresses, without interfering with `stdout`. :returns: - `(pid, socket_obj, :data:`None` or pipe_fd)` + :class:`Process` instance. """ parentfp, childfp = create_socketpair() # When running under a monkey patches-enabled gevent, the socket module @@ -327,43 +310,38 @@ def create_child(args, merge_stdio=False, stderr_pipe=False, preexec_fn=None): # future stdin fd. 
mitogen.core.set_block(childfp.fileno()) + stderr = None stderr_r = None - extra = {} if merge_stdio: - extra = {'stderr': childfp} + stderr = childfp elif stderr_pipe: - stderr_r, stderr_w = os.pipe() - mitogen.core.set_cloexec(stderr_r) - mitogen.core.set_cloexec(stderr_w) - extra = {'stderr': stderr_w} + stderr_r, stderr = mitogen.core.pipe() + mitogen.core.set_cloexec(stderr_r.fileno()) try: - pid = detach_popen( + proc = popen( args=args, stdin=childfp, stdout=childfp, + stderr=stderr, close_fds=True, preexec_fn=preexec_fn, - **extra ) - except Exception: + except: childfp.close() parentfp.close() if stderr_pipe: - os.close(stderr_r) - os.close(stderr_w) + stderr.close() + stderr_r.close() raise - if stderr_pipe: - os.close(stderr_w) childfp.close() - # Decouple the socket from the lifetime of the Python socket object. - fd = os.dup(parentfp.fileno()) - parentfp.close() + if stderr_pipe: + stderr.close() LOG.debug('create_child() child %d fd %d, parent %d, cmd: %s', - pid, fd, os.getpid(), Argv(args)) - return pid, fd, stderr_r + proc.pid, parentfp.fileno(), os.getpid(), Argv(args)) + return PopenProcess(proc, stdio_fp=parentfp, stderr_fp=stderr_r) def _acquire_controlling_tty(): @@ -428,15 +406,22 @@ def openpty(): :raises mitogen.core.StreamError: Creating a PTY failed. :returns: - See :func`os.openpty`. + `(master_fp, slave_fp)` file-like objects. """ try: - return os.openpty() + master_fd, slave_fd = os.openpty() except OSError: e = sys.exc_info()[1] - if IS_LINUX and e.args[0] == errno.EPERM: - return _linux_broken_devpts_openpty() - raise mitogen.core.StreamError(OPENPTY_MSG, e) + if not (IS_LINUX and e.args[0] == errno.EPERM): + raise mitogen.core.StreamError(OPENPTY_MSG, e) + master_fd, slave_fd = _linux_broken_devpts_openpty() + + master_fp = os.fdopen(master_fd, 'r+b', 0) + slave_fp = os.fdopen(slave_fd, 'r+b', 0) + disable_echo(master_fd) + disable_echo(slave_fd) + mitogen.core.set_block(slave_fd) + return master_fp, slave_fp def tty_create_child(args): @@ -450,31 +435,27 @@ def tty_create_child(args): :param list args: Program argument vector. :returns: - `(pid, tty_fd, None)` + :class:`Process` instance. """ - master_fd, slave_fd = openpty() + master_fp, slave_fp = openpty() try: - mitogen.core.set_block(slave_fd) - disable_echo(master_fd) - disable_echo(slave_fd) - - pid = detach_popen( + proc = popen( args=args, - stdin=slave_fd, - stdout=slave_fd, - stderr=slave_fd, + stdin=slave_fp, + stdout=slave_fp, + stderr=slave_fp, preexec_fn=_acquire_controlling_tty, close_fds=True, ) - except Exception: - os.close(master_fd) - os.close(slave_fd) + except: + master_fp.close() + slave_fp.close() raise - os.close(slave_fd) + slave_fp.close() LOG.debug('tty_create_child() child %d fd %d, parent %d, cmd: %s', - pid, master_fd, os.getpid(), Argv(args)) - return pid, master_fd, None + proc.pid, master_fp.fileno(), os.getpid(), Argv(args)) + return PopenProcess(proc, stdio_fp=master_fp) def hybrid_tty_create_child(args): @@ -486,90 +467,35 @@ def hybrid_tty_create_child(args): :param list args: Program argument vector. :returns: - `(pid, socketpair_fd, tty_fd)` + :class:`Process` instance. 
""" - master_fd, slave_fd = openpty() - + master_fp, slave_fp = openpty() try: - disable_echo(master_fd) - disable_echo(slave_fd) - mitogen.core.set_block(slave_fd) - parentfp, childfp = create_socketpair() try: mitogen.core.set_block(childfp) - pid = detach_popen( + proc = popen( args=args, stdin=childfp, stdout=childfp, - stderr=slave_fd, + stderr=slave_fp, preexec_fn=_acquire_controlling_tty, close_fds=True, ) - except Exception: + except: parentfp.close() childfp.close() raise - except Exception: - os.close(master_fd) - os.close(slave_fd) + except: + master_fp.close() + slave_fp.close() raise - os.close(slave_fd) + slave_fp.close() childfp.close() - # Decouple the socket from the lifetime of the Python socket object. - stdio_fd = os.dup(parentfp.fileno()) - parentfp.close() - LOG.debug('hybrid_tty_create_child() pid=%d stdio=%d, tty=%d, cmd: %s', - pid, stdio_fd, master_fd, Argv(args)) - return pid, stdio_fd, master_fd - - -def write_all(fd, s, deadline=None): - """Arrange for all of bytestring `s` to be written to the file descriptor - `fd`. - - :param int fd: - File descriptor to write to. - :param bytes s: - Bytestring to write to file descriptor. - :param float deadline: - If not :data:`None`, absolute UNIX timestamp after which timeout should - occur. - - :raises mitogen.core.TimeoutError: - Bytestring could not be written entirely before deadline was exceeded. - :raises mitogen.parent.EofError: - Stream indicated EOF, suggesting the child process has exitted. - :raises mitogen.core.StreamError: - File descriptor was disconnected before write could complete. - """ - timeout = None - written = 0 - poller = PREFERRED_POLLER() - poller.start_transmit(fd) - - try: - while written < len(s): - if deadline is not None: - timeout = max(0, deadline - time.time()) - if timeout == 0: - raise mitogen.core.TimeoutError('write timed out') - - if mitogen.core.PY3: - window = memoryview(s)[written:] - else: - window = buffer(s, written) - - for fd in poller.poll(timeout): - n, disconnected = mitogen.core.io_op(os.write, fd, window) - if disconnected: - raise EofError('EOF on stream during write') - - written += n - finally: - poller.close() + proc.pid, parentfp.fileno(), master_fp.fileno(), Argv(args)) + return PopenProcess(proc, stdio_fp=parentfp, stderr_fp=master_fp) class Timer(object): @@ -698,103 +624,6 @@ class PartialZlib(object): return out + compressor.flush() -class IteratingRead(object): - def __init__(self, fds, deadline=None): - self.deadline = deadline - self.timeout = None - self.poller = PREFERRED_POLLER() - for fd in fds: - self.poller.start_receive(fd) - - self.bits = [] - self.timeout = None - - def close(self): - self.poller.close() - - def __iter__(self): - return self - - def next(self): - while self.poller.readers: - if self.deadline is not None: - self.timeout = max(0, self.deadline - time.time()) - if self.timeout == 0: - break - - for fd in self.poller.poll(self.timeout): - s, disconnected = mitogen.core.io_op(os.read, fd, 4096) - if disconnected or not s: - LOG.debug('iter_read(%r) -> disconnected: %s', - fd, disconnected) - self.poller.stop_receive(fd) - else: - IOLOG.debug('iter_read(%r) -> %r', fd, s) - self.bits.append(s) - return s - - if not self.poller.readers: - raise EofError(u'EOF on stream; last 300 bytes received: %r' % - (b('').join(self.bits)[-300:].decode('latin1'),)) - - raise mitogen.core.TimeoutError('read timed out') - - __next__ = next - - -def iter_read(fds, deadline=None): - """Return a generator that arranges for up to 4096-byte chunks to be read 
- at a time from the file descriptor `fd` until the generator is destroyed. - - :param int fd: - File descriptor to read from. - :param float deadline: - If not :data:`None`, an absolute UNIX timestamp after which timeout - should occur. - - :raises mitogen.core.TimeoutError: - Attempt to read beyond deadline. - :raises mitogen.parent.EofError: - All streams indicated EOF, suggesting the child process has exitted. - :raises mitogen.core.StreamError: - Attempt to read past end of file. - """ - return IteratingRead(fds=fds, deadline=deadline) - - -def discard_until(fd, s, deadline): - """Read chunks from `fd` until one is encountered that ends with `s`. This - is used to skip output produced by ``/etc/profile``, ``/etc/motd`` and - mandatory SSH banners while waiting for :attr:`Stream.EC0_MARKER` to - appear, indicating the first stage is ready to receive the compressed - :mod:`mitogen.core` source. - - :param int fd: - File descriptor to read from. - :param bytes s: - Marker string to discard until encountered. - :param float deadline: - Absolute UNIX timestamp after which timeout should occur. - - :raises mitogen.core.TimeoutError: - Attempt to read beyond deadline. - :raises mitogen.parent.EofError: - All streams indicated EOF, suggesting the child process has exitted. - :raises mitogen.core.StreamError: - Attempt to read past end of file. - """ - it = iter_read([fd], deadline) - try: - for buf in it: - if IOLOG.level == logging.DEBUG: - for line in buf.splitlines(): - IOLOG.debug('discard_until: discarding %r', line) - if buf.endswith(s): - return - finally: - it.close() # ensure Poller.close() is called. - - def _upgrade_broker(broker): """ Extract the poller state from Broker and replace it with the industrial @@ -841,7 +670,7 @@ def upgrade_router(econtext): ) -def stream_by_method_name(name): +def get_connection_class(name): """ Given the name of a Mitogen connection method, import its implementation module and return its Stream subclass. @@ -849,7 +678,7 @@ def stream_by_method_name(name): if name == u'local': name = u'parent' module = mitogen.core.import_module(u'mitogen.' + name) - return module.Stream + return module.Connection @mitogen.core.takes_econtext @@ -870,7 +699,7 @@ def _proxy_connect(name, method_name, kwargs, econtext): try: context = econtext.router._connect( - klass=stream_by_method_name(method_name), + klass=get_connection_class(method_name), name=name, **kwargs ) @@ -891,19 +720,13 @@ def _proxy_connect(name, method_name, kwargs, econtext): } -def wstatus_to_str(status): +def returncode_to_str(n): """ Parse and format a :func:`os.waitpid` exit status. 
""" - if os.WIFEXITED(status): - return 'exited with return code %d' % (os.WEXITSTATUS(status),) - if os.WIFSIGNALED(status): - n = os.WTERMSIG(status) - return 'exited due to signal %d (%s)' % (n, SIGNAL_BY_NUM.get(n)) - if os.WIFSTOPPED(status): - n = os.WSTOPSIG(status) - return 'stopped due to signal %d (%s)' % (n, SIGNAL_BY_NUM.get(n)) - return 'unknown wait status (%d)' % (status,) + if n < 0: + return 'exited due to signal %d (%s)' % (-n, SIGNAL_BY_NUM.get(-n)) + return 'exited with return code %d' % (n,) class EofError(mitogen.core.StreamError): @@ -1192,78 +1015,202 @@ else: mitogen.core.Latch.poller_class = PREFERRED_POLLER -class DiagLogStream(mitogen.core.BasicStream): +class LineLoggingProtocolMixin(object): + def __init__(self, **kwargs): + super(LineLoggingProtocolMixin, self).__init__(**kwargs) + self.logged_lines = [] + self.logged_partial = None + + def get_history(self): + s = b('\n').join(self.logged_lines) + (self.logged_partial or b('')) + return mitogen.core.to_text(s) + + def on_line_received(self, line): + self.logged_lines.append(line) + self.logged_lines[:] = self.logged_lines[-100:] + return super(LineLoggingProtocolMixin, self).on_line_received(line) + + def on_partial_line_received(self, line): + self.logged_partial = line + return super(LineLoggingProtocolMixin, self).on_partial_line_received(line) + + +class RegexProtocol(mitogen.core.DelimitedProtocol): + """ + Implement a delimited protocol where messages matching a set of regular + expressions are dispatched to individual handler methods. Input is + dispatches using :attr:`PATTERNS` and :attr:`PARTIAL_PATTERNS`, before + falling back to :meth:`on_unrecognized_line_received` and + :meth:`on_unrecognized_partial_line_received`. """ - For "hybrid TTY/socketpair" mode, after connection setup a spare TTY master - FD exists that cannot be closed, and to which SSH or sudo may continue - writing log messages. - The descriptor cannot be closed since the UNIX TTY layer sends SIGHUP to - processes whose controlling TTY is the slave TTY whose master side has been - closed. LogProtocol takes over this FD and creates log messages for - anything written to it. + #: A sequence of 2-tuples of the form `(compiled pattern, method)` for + #: patterns that should be matched against complete (delimited) messages, + #: i.e. full lines. + PATTERNS = [] + + #: Like :attr:`PATTERNS`, but patterns that are matched against incomplete + #: lines. + PARTIAL_PATTERNS = [] + + def on_line_received(self, line): + for pattern, func in self.PATTERNS: + match = pattern.search(line) + if match is not None: + return func(self, line, match) + + return self.on_unrecognized_line_received(line) + + def on_unrecognized_line_received(self, line): + LOG.debug('%s: (unrecognized): %s', + self.stream.name, line.decode('utf-8', 'replace')) + + def on_partial_line_received(self, line): + LOG.debug('%s: (partial): %s', + self.stream.name, line.decode('utf-8', 'replace')) + for pattern, func in self.PARTIAL_PATTERNS: + match = pattern.search(line) + if match is not None: + return func(self, line, match) + + return self.on_unrecognized_partial_line_received(line) + + def on_unrecognized_partial_line_received(self, line): + LOG.debug('%s: (unrecognized partial): %s', + self.stream.name, line.decode('utf-8', 'replace')) + + +class BootstrapProtocol(LineLoggingProtocolMixin, RegexProtocol): """ + Respond to stdout of a child during bootstrap. 
Wait for EC0_MARKER to be + written by the first stage to indicate it can receive the bootstrap, then + await EC1_MARKER to indicate success, and + :class:`mitogen.core.MitogenProtocol` can be enabled. + """ + #: Sentinel value emitted by the first stage to indicate it is ready to + #: receive the compressed bootstrap. For :mod:`mitogen.ssh` this must have + #: length of at least `max(len('password'), len('debug1:'))` + EC0_MARKER = b('MITO000') + EC1_MARKER = b('MITO001') + EC2_MARKER = b('MITO002') - def __init__(self, fd, stream): - self.receive_side = mitogen.core.Side(self, fd) - self.transmit_side = self.receive_side - self.stream = stream - self.buf = '' + def __init__(self, broker): + super(BootstrapProtocol, self).__init__() + self._writer = mitogen.core.BufferedWriter(broker, self) - def __repr__(self): - return "mitogen.parent.DiagLogStream(fd=%r, '%s')" % ( - self.receive_side.fd, - self.stream.name, - ) + def on_transmit(self, broker): + self._writer.on_transmit(broker) - def on_receive(self, broker): - """ - This handler is only called after the stream is registered with the IO - loop, the descriptor is manually read/written by _connect_bootstrap() - prior to that. - """ - buf = self.receive_side.read() - if not buf: - return self.on_disconnect(broker) + def _on_ec0_received(self, line, match): + LOG.debug('%r: first stage started succcessfully', self) + self._writer.write(self.stream.conn.get_preamble()) + + def _on_ec1_received(self, line, match): + LOG.debug('%r: first stage received bootstrap', self) + + def _on_ec2_received(self, line, match): + LOG.debug('%r: new child booted successfully', self) + self.stream.conn._complete_connection() + return False + + def on_unrecognized_line_received(self, line): + LOG.debug('%s: stdout: %s', self.stream.name, line) - self.buf += buf.decode('utf-8', 'replace') - while u'\n' in self.buf: - lines = self.buf.split('\n') - self.buf = lines[-1] - for line in lines[:-1]: - LOG.debug('%s: %s', self.stream.name, line.rstrip()) + PATTERNS = [ + (re.compile(EC0_MARKER), _on_ec0_received), + (re.compile(EC1_MARKER), _on_ec1_received), + (re.compile(EC2_MARKER), _on_ec2_received), + ] -class Stream(mitogen.core.Stream): +class LogProtocol(mitogen.core.DelimitedProtocol): """ - Base for streams capable of starting new slaves. + For "hybrid TTY/socketpair" mode, after connection setup a spare TTY master + FD exists that cannot be closed, and to which SSH or sudo may continue + writing log messages. + + The descriptor cannot be closed since the UNIX TTY layer sends SIGHUP to + processes whose controlling TTY is the slave whose master side was closed. + LogProtocol takes over this FD and creates log messages for anything + written to it. """ + def on_line_received(self, line): + LOG.info(u'%s: %s', self.stream.name, line.decode('utf-8', 'replace')) + + +class Options(object): + name = None + #: The path to the remote Python interpreter. python_path = get_sys_executable() #: Maximum time to wait for a connection attempt. connect_timeout = 30.0 - #: Derived from :py:attr:`connect_timeout`; absolute floating point - #: UNIX timestamp after which the connection attempt should be abandoned. - connect_deadline = None - #: True to cause context to write verbose /tmp/mitogen..log. debug = False #: True to cause context to write /tmp/mitogen.stats...log. profiling = False - #: Set to the child's PID by connect(). - pid = None + #: True if unidirectional routing is enabled in the new child. 
+ unidirectional = False #: Passed via Router wrapper methods, must eventually be passed to #: ExternalContext.main(). max_message_size = None - #: If :attr:`create_child` supplied a diag_fd, references the corresponding - #: :class:`DiagLogStream`, allowing it to be disconnected when this stream - #: is disconnected. Set to :data:`None` if no `diag_fd` was present. + #: Remote name. + remote_name = None + + #: Derived from :py:attr:`connect_timeout`; absolute floating point + #: UNIX timestamp after which the connection attempt should be abandoned. + connect_deadline = None + + def __init__(self, max_message_size, name=None, remote_name=None, + python_path=None, debug=False, connect_timeout=None, + profiling=False, unidirectional=False, old_router=None): + self.name = name + self.max_message_size = max_message_size + if python_path: + self.python_path = python_path + if connect_timeout: + self.connect_timeout = connect_timeout + if remote_name is None: + remote_name = get_default_remote_name() + if '/' in remote_name or '\\' in remote_name: + raise ValueError('remote_name= cannot contain slashes') + if remote_name: + self.remote_name = mitogen.core.to_text(remote_name) + self.debug = debug + self.profiling = profiling + self.unidirectional = unidirectional + self.max_message_size = max_message_size + self.connect_deadline = time.time() + self.connect_timeout + + +class Connection(object): + """ + Base for streams capable of starting children. + """ + options_class = Options + + #: The protocol attached to stdio of the child. + stream_protocol_class = BootstrapProtocol + + #: The protocol attached to stderr of the child. + diag_protocol_class = LogProtocol + + #: :class:`Process` + proc = None + + #: :class:`mitogen.core.Stream` + stream = None + + #: If :attr:`create_child` provides a stderr_fp, referencing either a plain + #: pipe or the controlling TTY, this references the corresponding + #: :class:`LogProtocol`'s stream, allowing it to be disconnected when this + #: stream is disconnected. diag_stream = None #: Function with the semantics of :func:`create_child` used to create the @@ -1286,93 +1233,15 @@ class Stream(mitogen.core.Stream): #: Prefix given to default names generated by :meth:`connect`. 
name_prefix = u'local' - _reaped = False + timer = None - def __init__(self, *args, **kwargs): - super(Stream, self).__init__(*args, **kwargs) - self.sent_modules = set(['mitogen', 'mitogen.core']) - - def construct(self, max_message_size, remote_name=None, python_path=None, - debug=False, connect_timeout=None, profiling=False, - unidirectional=False, old_router=None, **kwargs): - """Get the named context running on the local machine, creating it if - it does not exist.""" - super(Stream, self).construct(**kwargs) - self.max_message_size = max_message_size - if python_path: - self.python_path = python_path - if connect_timeout: - self.connect_timeout = connect_timeout - if remote_name is None: - remote_name = get_default_remote_name() - if '/' in remote_name or '\\' in remote_name: - raise ValueError('remote_name= cannot contain slashes') - self.remote_name = remote_name - self.debug = debug - self.profiling = profiling - self.unidirectional = unidirectional - self.max_message_size = max_message_size - self.connect_deadline = time.time() + self.connect_timeout - - def on_shutdown(self, broker): - """Request the slave gracefully shut itself down.""" - LOG.debug('%r closing CALL_FUNCTION channel', self) - self._send( - mitogen.core.Message( - src_id=mitogen.context_id, - dst_id=self.remote_id, - handle=mitogen.core.SHUTDOWN, - ) - ) - - def _reap_child(self): - """ - Reap the child process during disconnection. - """ - if self.detached and self.child_is_immediate_subprocess: - LOG.debug('%r: immediate child is detached, won\'t reap it', self) - return - - if self.profiling: - LOG.info('%r: wont kill child because profiling=True', self) - return - - if self._reaped: - # on_disconnect() may be invoked more than once, for example, if - # there is still a pending message to be sent after the first - # on_disconnect() call. - return - - try: - pid, status = os.waitpid(self.pid, os.WNOHANG) - except OSError: - e = sys.exc_info()[1] - if e.args[0] == errno.ECHILD: - LOG.warn('%r: waitpid(%r) produced ECHILD', self, self.pid) - return - raise - - self._reaped = True - if pid: - LOG.debug('%r: PID %d %s', self, pid, wstatus_to_str(status)) - return + def __init__(self, options, router): + #: :class:`Options` + self.options = options + self._router = router - if not self._router.profiling: - # For processes like sudo we cannot actually send sudo a signal, - # because it is setuid, so this is best-effort only. - LOG.debug('%r: child process still alive, sending SIGTERM', self) - try: - os.kill(self.pid, signal.SIGTERM) - except OSError: - e = sys.exc_info()[1] - if e.args[0] != errno.EPERM: - raise - - def on_disconnect(self, broker): - super(Stream, self).on_disconnect(broker) - if self.diag_stream is not None: - self.diag_stream.on_disconnect(broker) - self._reap_child() + def __repr__(self): + return 'Connection(%r)' % (self.stream,) # Minimised, gzipped, base64'd and passed to 'python -c'. It forks, dups # file descriptor 0 as 100, creates a pipe, then execs a new interpreter @@ -1431,15 +1300,15 @@ class Stream(mitogen.core.Stream): This allows emulation of existing tools where the Python invocation may be set to e.g. `['/usr/bin/env', 'python']`. 
""" - if isinstance(self.python_path, list): - return self.python_path - return [self.python_path] + if isinstance(self.options.python_path, list): + return self.options.python_path + return [self.options.python_path] def get_boot_command(self): source = inspect.getsource(self._first_stage) source = textwrap.dedent('\n'.join(source.strip().split('\n')[2:])) source = source.replace(' ', '\t') - source = source.replace('CONTEXT_NAME', self.remote_name) + source = source.replace('CONTEXT_NAME', self.options.remote_name) preamble_compressed = self.get_preamble() source = source.replace('PREAMBLE_COMPRESSED_LEN', str(len(preamble_compressed))) @@ -1457,19 +1326,19 @@ class Stream(mitogen.core.Stream): ] def get_econtext_config(self): - assert self.max_message_size is not None + assert self.options.max_message_size is not None parent_ids = mitogen.parent_ids[:] parent_ids.insert(0, mitogen.context_id) return { 'parent_ids': parent_ids, - 'context_id': self.remote_id, - 'debug': self.debug, - 'profiling': self.profiling, - 'unidirectional': self.unidirectional, + 'context_id': self.context.context_id, + 'debug': self.options.debug, + 'profiling': self.options.profiling, + 'unidirectional': self.options.unidirectional, 'log_level': get_log_level(), 'whitelist': self._router.get_module_whitelist(), 'blacklist': self._router.get_module_blacklist(), - 'max_message_size': self.max_message_size, + 'max_message_size': self.options.max_message_size, 'version': mitogen.__version__, } @@ -1481,10 +1350,18 @@ class Stream(mitogen.core.Stream): partial = get_core_source_partial() return partial.append(suffix.encode('utf-8')) + def _get_name(self): + """ + Called by :meth:`connect` after :attr:`pid` is known. Subclasses can + override it to specify a default stream name, or set + :attr:`name_prefix` to generate a default format. + """ + return u'%s.%s' % (self.name_prefix, self.proc.pid) + def start_child(self): args = self.get_boot_command() try: - return self.create_child(args, **self.create_child_args) + return self.create_child(args=args, **self.create_child_args) except OSError: e = sys.exc_info()[1] msg = 'Child start failed: %s. Command was: %s' % (e, Argv(args)) @@ -1494,65 +1371,126 @@ class Stream(mitogen.core.Stream): def _adorn_eof_error(self, e): """ - Used by subclasses to provide additional information in the case of a - failed connection. + Subclasses may provide additional information in the case of a failed + connection. """ if self.eof_error_hint: e.args = ('%s\n\n%s' % (e.args[0], self.eof_error_hint),) - def _get_name(self): + exception = None + + def _complete_connection(self): + self.timer.cancel() + if not self.exception: + self._router.register(self.context, self.stream) + self.stream.set_protocol( + mitogen.core.MitogenProtocol( + router=self._router, + remote_id=self.context.context_id, + ) + ) + self.latch.put() + + def _fail_connection(self, exc): """ - Called by :meth:`connect` after :attr:`pid` is known. Subclasses can - override it to specify a default stream name, or set - :attr:`name_prefix` to generate a default format. + Fail the connection attempt. 
""" - return u'%s.%s' % (self.name_prefix, self.pid) + LOG.debug('%s: failing connection due to %r', + self.stream.name, exc) + if self.exception is None: + self._adorn_eof_error(exc) + self.exception = exc + for stream in self.stream, self.diag_stream: + if stream and not stream.receive_side.closed: + stream.on_disconnect(self._router.broker) + self._complete_connection() + + def on_stream_shutdown(self): + """Request the slave gracefully shut itself down.""" + LOG.debug('%r: requesting child shutdown', self) + self.stream.protocol._send( + mitogen.core.Message( + src_id=mitogen.context_id, + dst_id=self.stream.protocol.remote_id, + handle=mitogen.core.SHUTDOWN, + ) + ) - def connect(self): - LOG.debug('%r.connect()', self) - self.pid, fd, diag_fd = self.start_child() - self.name = self._get_name() - self.receive_side = mitogen.core.Side(self, fd) - self.transmit_side = mitogen.core.Side(self, os.dup(fd)) - if diag_fd is not None: - self.diag_stream = DiagLogStream(diag_fd, self) - else: - self.diag_stream = None + eof_error_msg = 'EOF on stream; last 100 lines received:\n' - LOG.debug('%r.connect(): pid:%r stdin:%r, stdout:%r, diag:%r', - self, self.pid, self.receive_side.fd, self.transmit_side.fd, - self.diag_stream and self.diag_stream.receive_side.fd) + def on_stream_disconnect(self): + if self.diag_stream is not None: + self.diag_stream.on_disconnect(self._router.broker) + if not self.timer.cancelled: + self.timer.cancel() + self._fail_connection(EofError( + self.eof_error_msg + self.stream.protocol.get_history() + )) + self.proc._async_reap(self, self._router) + + def _start_timer(self): + self.timer = self._router.broker.timers.schedule( + when=self.options.connect_deadline, + func=self._on_timer_expired, + ) - try: - self._connect_bootstrap() - except EofError: - self.on_disconnect(self._router.broker) - e = sys.exc_info()[1] - self._adorn_eof_error(e) - raise - except Exception: - self.on_disconnect(self._router.broker) - self._reap_child() - raise + def _on_timer_expired(self): + self._fail_connection( + mitogen.core.TimeoutError( + 'Failed to setup connection after %.2f seconds', + self.options.connect_timeout, + ) + ) - #: Sentinel value emitted by the first stage to indicate it is ready to - #: receive the compressed bootstrap. 
For :mod:`mitogen.ssh` this must have - #: length of at least `max(len('password'), len('debug1:'))` - EC0_MARKER = mitogen.core.b('MITO000\n') - EC1_MARKER = mitogen.core.b('MITO001\n') + def stream_factory(self): + return self.stream_protocol_class.build_stream( + broker=self._router.broker, + ) - def _ec0_received(self): - LOG.debug('%r._ec0_received()', self) - write_all(self.transmit_side.fd, self.get_preamble()) - discard_until(self.receive_side.fd, self.EC1_MARKER, - self.connect_deadline) - if self.diag_stream: - self._router.broker.start_receive(self.diag_stream) + def diag_stream_factory(self): + return self.diag_protocol_class.build_stream() + + def _setup_stream(self): + self.stream = self.stream_factory() + self.stream.conn = self + self.stream.name = self.options.name or self._get_name() + self.stream.accept(self.proc.stdio_fp, self.proc.stdio_fp) + + mitogen.core.listen(self.stream, 'shutdown', + self.on_stream_shutdown) + mitogen.core.listen(self.stream, 'disconnect', + self.on_stream_disconnect) + self._router.broker.start_receive(self.stream) + + def _setup_diag_stream(self): + self.diag_stream = self.diag_stream_factory() + self.diag_stream.conn = self + self.diag_stream.name = self.options.name or self._get_name() + self.diag_stream.accept(self.proc.stderr_fp, self.proc.stderr_fp) + self._router.broker.start_receive(self.diag_stream) + + def _async_connect(self): + self._start_timer() + self._setup_stream() + if self.context.name is None: + self.context.name = self.stream.name + self.proc.name = self.stream.name + if self.proc.stderr_fp: + self._setup_diag_stream() + + def connect(self, context): + LOG.debug('%r.connect()', self) + self.context = context + self.proc = self.start_child() + LOG.debug('%r.connect(): pid:%r stdio:%r diag:%r', + self, self.proc.pid, self.proc.stdio_fp.fileno(), + self.proc.stderr_fp and self.proc.stderr_fp.fileno()) - def _connect_bootstrap(self): - discard_until(self.receive_side.fd, self.EC0_MARKER, - self.connect_deadline) - self._ec0_received() + self.latch = mitogen.core.Latch() + self._router.broker.defer(self._async_connect) + self.latch.get() + if self.exception: + raise self.exception class ChildIdAllocator(object): @@ -1956,11 +1894,11 @@ class RouteMonitor(object): data = str(target_id) if name: data = '%s:%s' % (target_id, name) - stream.send( + stream.protocol.send( mitogen.core.Message( handle=handle, data=data.encode('utf-8'), - dst_id=stream.remote_id, + dst_id=stream.protocol.remote_id, ) ) @@ -1994,9 +1932,9 @@ class RouteMonitor(object): ID of the connecting or disconnecting context. """ for stream in self.router.get_streams(): - if target_id in stream.egress_ids and ( + if target_id in stream.protocol.egress_ids and ( (self.parent is None) or - (self.parent.context_id != stream.remote_id) + (self.parent.context_id != stream.protocol.remote_id) ): self._send_one(stream, mitogen.core.DEL_ROUTE, target_id, None) @@ -2006,8 +1944,8 @@ class RouteMonitor(object): stream, we're also responsible for broadcasting DEL_ROUTE upstream if/when that child disconnects. 
""" - self._routes_by_stream[stream] = set([stream.remote_id]) - self._propagate_up(mitogen.core.ADD_ROUTE, stream.remote_id, + self._routes_by_stream[stream] = set([stream.protocol.remote_id]) + self._propagate_up(mitogen.core.ADD_ROUTE, stream.protocol.remote_id, stream.name) mitogen.core.listen( obj=stream, @@ -2061,7 +1999,7 @@ class RouteMonitor(object): self.router.context_by_id(target_id).name = target_name stream = self.router.stream_by_id(msg.auth_id) current = self.router.stream_by_id(target_id) - if current and current.remote_id != mitogen.parent_id: + if current and current.protocol.remote_id != mitogen.parent_id: LOG.error('Cannot add duplicate route to %r via %r, ' 'already have existing route via %r', target_id, stream, current) @@ -2104,7 +2042,7 @@ class RouteMonitor(object): routes.discard(target_id) self.router.del_route(target_id) - if stream.remote_id != mitogen.parent_id: + if stream.protocol.remote_id != mitogen.parent_id: self._propagate_up(mitogen.core.DEL_ROUTE, target_id) self._propagate_down(mitogen.core.DEL_ROUTE, target_id) @@ -2138,11 +2076,11 @@ class Router(mitogen.core.Router): if msg.is_dead: return stream = self.stream_by_id(msg.src_id) - if stream.remote_id != msg.src_id or stream.detached: + if stream.protocol.remote_id != msg.src_id or stream.conn.detached: LOG.warning('bad DETACHING received on %r: %r', stream, msg) return LOG.debug('%r: marking as detached', stream) - stream.detached = True + stream.conn.detached = True msg.reply(None) def get_streams(self): @@ -2165,7 +2103,7 @@ class Router(mitogen.core.Router): """ LOG.debug('%r.add_route(%r, %r)', self, target_id, stream) assert isinstance(target_id, int) - assert isinstance(stream, Stream) + assert isinstance(stream, mitogen.core.Stream) self._write_lock.acquire() try: @@ -2174,7 +2112,7 @@ class Router(mitogen.core.Router): self._write_lock.release() def del_route(self, target_id): - LOG.debug('%r.del_route(%r)', self, target_id) + LOG.debug('%r: deleting route to %r', self, target_id) # DEL_ROUTE may be sent by a parent if it knows this context sent # messages to a peer that has now disconnected, to let us raise # 'disconnect' event on the appropriate Context instance. In that case, @@ -2201,25 +2139,27 @@ class Router(mitogen.core.Router): connection_timeout_msg = u"Connection timed out." 
- def _connect(self, klass, name=None, **kwargs): + def _connect(self, klass, **kwargs): context_id = self.allocate_id() context = self.context_class(self, context_id) + context.name = kwargs.get('name') + kwargs['old_router'] = self kwargs['max_message_size'] = self.max_message_size - stream = klass(self, context_id, **kwargs) - if name is not None: - stream.name = name + conn = klass(klass.options_class(**kwargs), self) try: - stream.connect() + conn.connect(context=context) except mitogen.core.TimeoutError: raise mitogen.core.StreamError(self.connection_timeout_msg) - context.name = stream.name - self.route_monitor.notice_stream(stream) - self.register(context, stream) + + self.route_monitor.notice_stream(conn.stream) return context def connect(self, method_name, name=None, **kwargs): - klass = stream_by_method_name(method_name) + if name: + name = mitogen.core.to_text(name) + + klass = get_connection_class(method_name) kwargs.setdefault(u'debug', self.debug) kwargs.setdefault(u'profiling', self.profiling) kwargs.setdefault(u'unidirectional', self.unidirectional) @@ -2290,43 +2230,89 @@ class Router(mitogen.core.Router): return self.connect(u'ssh', **kwargs) -class ProcessMonitor(object): +class Process(object): """ Install a :data:`signal.SIGCHLD` handler that generates callbacks when a specific child process has exitted. This class is obsolete, do not use. """ - def __init__(self): - # pid -> callback() - self.callback_by_pid = {} - signal.signal(signal.SIGCHLD, self._on_sigchld) - - def _on_sigchld(self, _signum, _frame): - for pid, callback in self.callback_by_pid.items(): - pid, status = os.waitpid(pid, os.WNOHANG) - if pid: - callback(status) - del self.callback_by_pid[pid] - - def add(self, pid, callback): - """ - Add a callback function to be notified of the exit status of a process. + _delays = [0.05, 0.15, 0.3, 1.0, 5.0, 10.0] + name = None - :param int pid: - Process ID to be notified of. + def __init__(self, pid, stdio_fp, stderr_fp=None): + self.pid = pid + self.stdio_fp = stdio_fp + self.stderr_fp = stderr_fp + self._returncode = None + self._reap_count = 0 - :param callback: - Function invoked as `callback(status)`, where `status` is the raw - exit status of the child process. + def __repr__(self): + return '%s %s pid %d' % ( + type(self).__name__, + self.name, + self.pid, + ) + + def poll(self): + raise NotImplementedError() + + def _signal_child(self, signum): + # For processes like sudo we cannot actually send sudo a signal, + # because it is setuid, so this is best-effort only. + LOG.debug('%r: child process still alive, sending %s', + self, SIGNAL_BY_NUM[signum]) + try: + os.kill(self.pid, signum) + except OSError: + e = sys.exc_info()[1] + if e.args[0] != errno.EPERM: + raise + + def _async_reap(self, conn, router): + """ + Reap the child process during disconnection. """ - self.callback_by_pid[pid] = callback + if self._returncode is not None: + # on_disconnect() may be invoked more than once, for example, if + # there is still a pending message to be sent after the first + # on_disconnect() call. 
+ return - _instance = None + if conn.detached and conn.child_is_immediate_subprocess: + LOG.debug('%r: immediate child is detached, won\'t reap it', self) + return - @classmethod - def instance(cls): - if cls._instance is None: - cls._instance = cls() - return cls._instance + if router.profiling: + LOG.info('%r: wont kill child because profiling=True', self) + return + + self._reap_count += 1 + status = self.poll() + if status is not None: + LOG.debug('%r: %s', self, returncode_to_str(status)) + return + + i = self._reap_count - 1 + if i >= len(self._delays): + LOG.warning('%r: child will not die, abandoning it', self) + return + elif i == 0: + self._signal_child(signal.SIGTERM) + elif i == 1: + self._signal_child(signal.SIGKILL) + + router.broker.timers.schedule( + when=time.time() + self._delays[i], + func=lambda: self._async_reap(conn, router), + ) + + +class PopenProcess(Process): + def __init__(self, proc, stdio_fp, stderr_fp=None): + super(PopenProcess, self).__init__(proc.pid, stdio_fp, stderr_fp) + self.proc = proc + + def poll(self): + return self.proc.poll() class ModuleForwarder(object): @@ -2352,7 +2338,7 @@ class ModuleForwarder(object): ) def __repr__(self): - return 'ModuleForwarder(%r)' % (self.router,) + return 'ModuleForwarder' def _on_forward_module(self, msg): if msg.is_dead: @@ -2362,38 +2348,38 @@ class ModuleForwarder(object): fullname = mitogen.core.to_text(fullname) context_id = int(context_id_s) stream = self.router.stream_by_id(context_id) - if stream.remote_id == mitogen.parent_id: + if stream.protocol.remote_id == mitogen.parent_id: LOG.error('%r: dropping FORWARD_MODULE(%d, %r): no route to child', self, context_id, fullname) return - if fullname in stream.sent_modules: + if fullname in stream.protocol.sent_modules: return LOG.debug('%r._on_forward_module() sending %r to %r via %r', - self, fullname, context_id, stream.remote_id) + self, fullname, context_id, stream.protocol.remote_id) self._send_module_and_related(stream, fullname) - if stream.remote_id != context_id: + if stream.protocol.remote_id != context_id: stream._send( mitogen.core.Message( data=msg.data, handle=mitogen.core.FORWARD_MODULE, - dst_id=stream.remote_id, + dst_id=stream.protocol.remote_id, ) ) def _on_get_module(self, msg): - LOG.debug('%r._on_get_module(%r)', self, msg) if msg.is_dead: return fullname = msg.data.decode('utf-8') + LOG.debug('%r: %s requested by %d', self, fullname, msg.src_id) callback = lambda: self._on_cache_callback(msg, fullname) self.importer._request_module(fullname, callback) def _on_cache_callback(self, msg, fullname): - LOG.debug('%r._on_get_module(): sending %r', self, fullname) stream = self.router.stream_by_id(msg.src_id) + LOG.debug('%r: sending %s to %r', self, fullname, stream) self._send_module_and_related(stream, fullname) def _send_module_and_related(self, stream, fullname): @@ -2403,18 +2389,18 @@ class ModuleForwarder(object): if rtup: self._send_one_module(stream, rtup) else: - LOG.debug('%r._send_module_and_related(%r): absent: %r', - self, fullname, related) + LOG.debug('%r: %s not in cache (for %s)', + self, related, fullname) self._send_one_module(stream, tup) def _send_one_module(self, stream, tup): - if tup[0] not in stream.sent_modules: - stream.sent_modules.add(tup[0]) + if tup[0] not in stream.protocol.sent_modules: + stream.protocol.sent_modules.add(tup[0]) self.router._async_route( mitogen.core.Message.pickled( tup, - dst_id=stream.remote_id, + dst_id=stream.protocol.remote_id, handle=mitogen.core.LOAD_MODULE, ) ) diff --git 
a/mitogen/service.py b/mitogen/service.py index 942ed4f7..886012e8 100644 --- a/mitogen/service.py +++ b/mitogen/service.py @@ -485,7 +485,6 @@ class Pool(object): ) thread.start() self._threads.append(thread) - LOG.debug('%r: initialized', self) def _py_24_25_compat(self): @@ -658,7 +657,7 @@ class PushFileService(Service): def _forward(self, context, path): stream = self.router.stream_by_id(context.context_id) - child = mitogen.core.Context(self.router, stream.remote_id) + child = mitogen.core.Context(self.router, stream.protocol.remote_id) sent = self._sent_by_stream.setdefault(stream, set()) if path in sent: if child.context_id != context.context_id: @@ -891,7 +890,7 @@ class FileService(Service): # The IO loop pumps 128KiB chunks. An ideal message is a multiple of this, # odd-sized messages waste one tiny write() per message on the trailer. # Therefore subtract 10 bytes pickle overhead + 24 bytes header. - IO_SIZE = mitogen.core.CHUNK_SIZE - (mitogen.core.Stream.HEADER_LEN + ( + IO_SIZE = mitogen.core.CHUNK_SIZE - (mitogen.core.Message.HEADER_LEN + ( len( mitogen.core.Message.pickled( mitogen.core.Blob(b(' ') * mitogen.core.CHUNK_SIZE) diff --git a/mitogen/setns.py b/mitogen/setns.py index b1d69783..46a50301 100644 --- a/mitogen/setns.py +++ b/mitogen/setns.py @@ -116,9 +116,15 @@ def get_machinectl_pid(path, name): raise Error("could not find PID from machinectl output.\n%s", output) -class Stream(mitogen.parent.Stream): - child_is_immediate_subprocess = False +GET_LEADER_BY_KIND = { + 'docker': ('docker_path', get_docker_pid), + 'lxc': ('lxc_info_path', get_lxc_pid), + 'lxd': ('lxc_path', get_lxd_pid), + 'machinectl': ('machinectl_path', get_machinectl_pid), +} + +class Options(mitogen.parent.Options): container = None username = 'root' kind = None @@ -128,24 +134,17 @@ class Stream(mitogen.parent.Stream): lxc_info_path = 'lxc-info' machinectl_path = 'machinectl' - GET_LEADER_BY_KIND = { - 'docker': ('docker_path', get_docker_pid), - 'lxc': ('lxc_info_path', get_lxc_pid), - 'lxd': ('lxc_path', get_lxd_pid), - 'machinectl': ('machinectl_path', get_machinectl_pid), - } - - def construct(self, container, kind, username=None, docker_path=None, - lxc_path=None, lxc_info_path=None, machinectl_path=None, - **kwargs): - super(Stream, self).construct(**kwargs) - if kind not in self.GET_LEADER_BY_KIND: + def __init__(self, container, kind, username=None, docker_path=None, + lxc_path=None, lxc_info_path=None, machinectl_path=None, + **kwargs): + super(Options, self).__init__(**kwargs) + if kind not in GET_LEADER_BY_KIND: raise Error('unsupported container kind: %r', kind) - self.container = container + self.container = mitogen.core.to_text(container) self.kind = kind if username: - self.username = username + self.username = mitogen.core.to_text(username) if docker_path: self.docker_path = docker_path if lxc_path: @@ -155,6 +154,11 @@ class Stream(mitogen.parent.Stream): if machinectl_path: self.machinectl_path = machinectl_path + +class Connection(mitogen.parent.Connection): + options_class = Options + child_is_immediate_subprocess = False + # Order matters. 
https://github.com/karelzak/util-linux/commit/854d0fe/ NS_ORDER = ('ipc', 'uts', 'net', 'pid', 'mnt', 'user') @@ -189,15 +193,15 @@ class Stream(mitogen.parent.Stream): try: os.setgroups([grent.gr_gid for grent in grp.getgrall() - if self.username in grent.gr_mem]) - pwent = pwd.getpwnam(self.username) + if self.options.username in grent.gr_mem]) + pwent = pwd.getpwnam(self.options.username) os.setreuid(pwent.pw_uid, pwent.pw_uid) # shadow-4.4/libmisc/setupenv.c. Not done: MAIL, PATH os.environ.update({ 'HOME': pwent.pw_dir, 'SHELL': pwent.pw_shell or '/bin/sh', - 'LOGNAME': self.username, - 'USER': self.username, + 'LOGNAME': self.options.username, + 'USER': self.options.username, }) if ((os.path.exists(pwent.pw_dir) and os.access(pwent.pw_dir, os.X_OK))): @@ -217,7 +221,7 @@ class Stream(mitogen.parent.Stream): # namespaces, meaning starting new threads in the exec'd program will # fail. The solution is forking, so inject a /bin/sh call to achieve # this. - argv = super(Stream, self).get_boot_command() + argv = super(Connection, self).get_boot_command() # bash will exec() if a single command was specified and the shell has # nothing left to do, so "; exit $?" gives bash a reason to live. return ['/bin/sh', '-c', '%s; exit $?' % (mitogen.parent.Argv(argv),)] @@ -226,13 +230,12 @@ class Stream(mitogen.parent.Stream): return mitogen.parent.create_child(args, preexec_fn=self.preexec_fn) def _get_name(self): - return u'setns.' + self.container + return u'setns.' + self.options.container - def connect(self): - self.name = self._get_name() - attr, func = self.GET_LEADER_BY_KIND[self.kind] - tool_path = getattr(self, attr) - self.leader_pid = func(tool_path, self.container) + def connect(self, **kwargs): + attr, func = GET_LEADER_BY_KIND[self.options.kind] + tool_path = getattr(self.options, attr) + self.leader_pid = func(tool_path, self.options.container) LOG.debug('Leader PID for %s container %r: %d', - self.kind, self.container, self.leader_pid) - super(Stream, self).connect() + self.options.kind, self.options.container, self.leader_pid) + return super(Connection, self).connect(**kwargs) diff --git a/mitogen/ssh.py b/mitogen/ssh.py index 11b74c1b..059dec7a 100644 --- a/mitogen/ssh.py +++ b/mitogen/ssh.py @@ -29,7 +29,7 @@ # !mitogen: minify_safe """ -Functionality to allow establishing new slave contexts over an SSH connection. +Construct new children via the OpenSSH client. """ import logging @@ -52,82 +52,122 @@ except NameError: LOG = logging.getLogger('mitogen') +auth_incorrect_msg = 'SSH authentication is incorrect' +password_incorrect_msg = 'SSH password is incorrect' +password_required_msg = 'SSH password was requested, but none specified' +hostkey_config_msg = ( + 'SSH requested permission to accept unknown host key, but ' + 'check_host_keys=ignore. This is likely due to ssh_args= ' + 'conflicting with check_host_keys=. Please correct your ' + 'configuration.' +) +hostkey_failed_msg = ( + 'Host key checking is enabled, and SSH reported an unrecognized or ' + 'mismatching host key.' +) + # sshpass uses 'assword' because it doesn't lowercase the input. 
-PASSWORD_PROMPT = b('password') -HOSTKEY_REQ_PROMPT = b('are you sure you want to continue connecting (yes/no)?') -HOSTKEY_FAIL = b('host key verification failed.') +PASSWORD_PROMPT_PATTERN = re.compile( + b('password'), + re.I +) + +HOSTKEY_REQ_PATTERN = re.compile( + b(r'are you sure you want to continue connecting \(yes/no\)\?'), + re.I +) + +HOSTKEY_FAIL_PATTERN = re.compile( + b(r'host key verification failed\.'), + re.I +) # [user@host: ] permission denied -PERMDENIED_RE = re.compile( - ('(?:[^@]+@[^:]+: )?' # Absent in OpenSSH <7.5 - 'Permission denied').encode(), +PERMDENIED_PATTERN = re.compile( + b('(?:[^@]+@[^:]+: )?' # Absent in OpenSSH <7.5 + 'Permission denied'), re.I ) +DEBUG_PATTERN = re.compile(b'^debug[123]:') + -DEBUG_PREFIXES = (b('debug1:'), b('debug2:'), b('debug3:')) +class PasswordError(mitogen.core.StreamError): + pass -def filter_debug(stream, it): - """ - Read line chunks from it, either yielding them directly, or building up and - logging individual lines if they look like SSH debug output. +class HostKeyError(mitogen.core.StreamError): + pass - This contains the mess of dealing with both line-oriented input, and partial - lines such as the password prompt. - Yields `(line, partial)` tuples, where `line` is the line, `partial` is - :data:`True` if no terminating newline character was present and no more - data exists in the read buffer. Consuming code can use this to unreliably - detect the presence of an interactive prompt. +class SetupProtocol(mitogen.parent.RegexProtocol): + """ + This protocol is attached to stderr of the SSH client. It responds to + various interactive prompts as required. """ - # The `partial` test is unreliable, but is only problematic when verbosity - # is enabled: it's possible for a combination of SSH banner, password - # prompt, verbose output, timing and OS buffering specifics to create a - # situation where an otherwise newline-terminated line appears to not be - # terminated, due to a partial read(). If something is broken when - # ssh_debug_level>0, this is the first place to look. - state = 'start_of_line' - buf = b('') - for chunk in it: - buf += chunk - while buf: - if state == 'start_of_line': - if len(buf) < 8: - # short read near buffer limit, block awaiting at least 8 - # bytes so we can discern a debug line, or the minimum - # interesting token from above or the bootstrap - # ('password', 'MITO000\n'). - break - elif any(buf.startswith(p) for p in DEBUG_PREFIXES): - state = 'in_debug' - else: - state = 'in_plain' - elif state == 'in_debug': - if b('\n') not in buf: - break - line, _, buf = bytes_partition(buf, b('\n')) - LOG.debug('%s: %s', stream.name, - mitogen.core.to_text(line.rstrip())) - state = 'start_of_line' - elif state == 'in_plain': - line, nl, buf = bytes_partition(buf, b('\n')) - yield line + nl, not (nl or buf) - if nl: - state = 'start_of_line' + password_sent = False + def _on_host_key_request(self, line, match): + if self.stream.conn.options.check_host_keys == 'accept': + LOG.debug('%s: accepting host key', self.stream.name) + self.stream.transmit_side.write(b('yes\n')) + return -class PasswordError(mitogen.core.StreamError): - pass + # _host_key_prompt() should never be reached with ignore or enforce + # mode, SSH should have handled that. User's ssh_args= is conflicting + # with ours. 
+ self.stream.conn._fail_connection(HostKeyError(hostkey_config_msg)) + + def _on_host_key_failed(self, line, match): + self.stream.conn._fail_connection(HostKeyError(hostkey_failed_msg)) + + def _on_permission_denied(self, line, match): + # issue #271: work around conflict with user shell reporting + # 'permission denied' e.g. during chdir($HOME) by only matching it at + # the start of the line. + if self.stream.conn.options.password is not None and \ + self.password_sent: + self.stream.conn._fail_connection( + PasswordError(password_incorrect_msg) + ) + elif PASSWORD_PROMPT_PATTERN.search(line) and \ + self.stream.conn.options.password is None: + # Permission denied (password,pubkey) + self.stream.conn._fail_connection( + PasswordError(password_required_msg) + ) + else: + self.stream.conn._fail_connection( + PasswordError(auth_incorrect_msg) + ) + def _on_password_prompt(self, line, match): + LOG.debug('%s: (password prompt): %s', self.stream.name, line) + if self.stream.conn.options.password is None: + self.stream.conn._fail(PasswordError(password_required_msg)) -class HostKeyError(mitogen.core.StreamError): - pass + self.stream.transmit_side.write( + (self.stream.conn.options.password + '\n').encode('utf-8') + ) + self.password_sent = True + def _on_debug_line(self, line, match): + text = mitogen.core.to_text(line.rstrip()) + LOG.debug('%s: %s', self.stream.name, text) + + PATTERNS = [ + (DEBUG_PATTERN, _on_debug_line), + (HOSTKEY_FAIL_PATTERN, _on_host_key_failed), + (PERMDENIED_PATTERN, _on_permission_denied), + ] + + PARTIAL_PATTERNS = [ + (PASSWORD_PROMPT_PATTERN, _on_password_prompt), + (HOSTKEY_REQ_PATTERN, _on_host_key_request), + ] -class Stream(mitogen.parent.Stream): - child_is_immediate_subprocess = False +class Options(mitogen.parent.Options): #: Default to whatever is available as 'python' on the remote machine, #: overriding sys.executable use. python_path = 'python' @@ -141,19 +181,19 @@ class Stream(mitogen.parent.Stream): hostname = None username = None port = None - identity_file = None password = None ssh_args = None check_host_keys_msg = 'check_host_keys= must be set to accept, enforce or ignore' - def construct(self, hostname, username=None, ssh_path=None, port=None, - check_host_keys='enforce', password=None, identity_file=None, - compression=True, ssh_args=None, keepalive_enabled=True, - keepalive_count=3, keepalive_interval=15, - identities_only=True, ssh_debug_level=None, **kwargs): - super(Stream, self).construct(**kwargs) + def __init__(self, hostname, username=None, ssh_path=None, port=None, + check_host_keys='enforce', password=None, identity_file=None, + compression=True, ssh_args=None, keepalive_enabled=True, + keepalive_count=3, keepalive_interval=15, + identities_only=True, ssh_debug_level=None, **kwargs): + super(Options, self).__init__(**kwargs) + if check_host_keys not in ('accept', 'enforce', 'ignore'): raise ValueError(self.check_host_keys_msg) @@ -175,143 +215,81 @@ class Stream(mitogen.parent.Stream): if ssh_debug_level: self.ssh_debug_level = ssh_debug_level - self._init_create_child() + +class Connection(mitogen.parent.Connection): + options_class = Options + diag_protocol_class = SetupProtocol + + child_is_immediate_subprocess = False + + def _get_name(self): + s = u'ssh.' + mitogen.core.to_text(self.options.hostname) + if self.options.port and self.options.port != 22: + s += u':%s' % (self.options.port,) + return s def _requires_pty(self): """ - Return :data:`True` if the configuration requires a PTY to be - allocated. 
This is only true if we must interactively accept host keys, - or type a password. + Return :data:`True` if a PTY to is required for this configuration, + because it must interactively accept host keys or type a password. """ - return (self.check_host_keys == 'accept' or - self.password is not None) + return ( + self.options.check_host_keys == 'accept' or + self.options.password is not None + ) - def _init_create_child(self): + def create_child(self, **kwargs): """ - Initialize the base class :attr:`create_child` and - :attr:`create_child_args` according to whether we need a PTY or not. + Avoid PTY use when possible to avoid a scaling limitation. """ if self._requires_pty(): - self.create_child = mitogen.parent.hybrid_tty_create_child + return mitogen.parent.hybrid_tty_create_child(**kwargs) else: - self.create_child = mitogen.parent.create_child - self.create_child_args = { - 'stderr_pipe': True, - } + return mitogen.parent.create_child(stderr_pipe=True, **kwargs) def get_boot_command(self): - bits = [self.ssh_path] - if self.ssh_debug_level: - bits += ['-' + ('v' * min(3, self.ssh_debug_level))] + bits = [self.options.ssh_path] + if self.options.ssh_debug_level: + bits += ['-' + ('v' * min(3, self.options.ssh_debug_level))] else: # issue #307: suppress any login banner, as it may contain the # password prompt, and there is no robust way to tell the # difference. bits += ['-o', 'LogLevel ERROR'] - if self.username: - bits += ['-l', self.username] - if self.port is not None: - bits += ['-p', str(self.port)] - if self.identities_only and (self.identity_file or self.password): + if self.options.username: + bits += ['-l', self.options.username] + if self.options.port is not None: + bits += ['-p', str(self.options.port)] + if self.options.identities_only and (self.options.identity_file or + self.options.password): bits += ['-o', 'IdentitiesOnly yes'] - if self.identity_file: - bits += ['-i', self.identity_file] - if self.compression: + if self.options.identity_file: + bits += ['-i', self.options.identity_file] + if self.options.compression: bits += ['-o', 'Compression yes'] - if self.keepalive_enabled: + if self.options.keepalive_enabled: bits += [ - '-o', 'ServerAliveInterval %s' % (self.keepalive_interval,), - '-o', 'ServerAliveCountMax %s' % (self.keepalive_count,), + '-o', 'ServerAliveInterval %s' % ( + self.options.keepalive_interval, + ), + '-o', 'ServerAliveCountMax %s' % ( + self.options.keepalive_count, + ), ] if not self._requires_pty(): bits += ['-o', 'BatchMode yes'] - if self.check_host_keys == 'enforce': + if self.options.check_host_keys == 'enforce': bits += ['-o', 'StrictHostKeyChecking yes'] - if self.check_host_keys == 'accept': + if self.options.check_host_keys == 'accept': bits += ['-o', 'StrictHostKeyChecking ask'] - elif self.check_host_keys == 'ignore': + elif self.options.check_host_keys == 'ignore': bits += [ '-o', 'StrictHostKeyChecking no', '-o', 'UserKnownHostsFile /dev/null', '-o', 'GlobalKnownHostsFile /dev/null', ] - if self.ssh_args: - bits += self.ssh_args - bits.append(self.hostname) - base = super(Stream, self).get_boot_command() + if self.options.ssh_args: + bits += self.options.ssh_args + bits.append(self.options.hostname) + base = super(Connection, self).get_boot_command() return bits + [shlex_quote(s).strip() for s in base] - - def _get_name(self): - s = u'ssh.' 
+ mitogen.core.to_text(self.hostname) - if self.port: - s += u':%s' % (self.port,) - return s - - auth_incorrect_msg = 'SSH authentication is incorrect' - password_incorrect_msg = 'SSH password is incorrect' - password_required_msg = 'SSH password was requested, but none specified' - hostkey_config_msg = ( - 'SSH requested permission to accept unknown host key, but ' - 'check_host_keys=ignore. This is likely due to ssh_args= ' - 'conflicting with check_host_keys=. Please correct your ' - 'configuration.' - ) - hostkey_failed_msg = ( - 'Host key checking is enabled, and SSH reported an unrecognized or ' - 'mismatching host key.' - ) - - def _host_key_prompt(self): - if self.check_host_keys == 'accept': - LOG.debug('%s: accepting host key', self.name) - self.diag_stream.transmit_side.write(b('yes\n')) - return - - # _host_key_prompt() should never be reached with ignore or enforce - # mode, SSH should have handled that. User's ssh_args= is conflicting - # with ours. - raise HostKeyError(self.hostkey_config_msg) - - def _connect_input_loop(self, it): - password_sent = False - for buf, partial in filter_debug(self, it): - LOG.debug('%s: stdout: %s', self.name, buf.rstrip()) - if buf.endswith(self.EC0_MARKER): - self._ec0_received() - return - elif HOSTKEY_REQ_PROMPT in buf.lower(): - self._host_key_prompt() - elif HOSTKEY_FAIL in buf.lower(): - raise HostKeyError(self.hostkey_failed_msg) - elif PERMDENIED_RE.match(buf): - # issue #271: work around conflict with user shell reporting - # 'permission denied' e.g. during chdir($HOME) by only matching - # it at the start of the line. - if self.password is not None and password_sent: - raise PasswordError(self.password_incorrect_msg) - elif PASSWORD_PROMPT in buf and self.password is None: - # Permission denied (password,pubkey) - raise PasswordError(self.password_required_msg) - else: - raise PasswordError(self.auth_incorrect_msg) - elif partial and PASSWORD_PROMPT in buf.lower(): - if self.password is None: - raise PasswordError(self.password_required_msg) - LOG.debug('%s: sending password', self.name) - self.diag_stream.transmit_side.write( - (self.password + '\n').encode() - ) - password_sent = True - - raise mitogen.core.StreamError('bootstrap failed') - - def _connect_bootstrap(self): - fds = [self.receive_side.fd] - if self.diag_stream is not None: - fds.append(self.diag_stream.receive_side.fd) - - it = mitogen.parent.iter_read(fds=fds, deadline=self.connect_deadline) - try: - self._connect_input_loop(it) - finally: - it.close() diff --git a/mitogen/su.py b/mitogen/su.py index 5ff9e177..59574f3f 100644 --- a/mitogen/su.py +++ b/mitogen/su.py @@ -29,6 +29,7 @@ # !mitogen: minify_safe import logging +import re import mitogen.core import mitogen.parent @@ -42,87 +43,120 @@ except NameError: LOG = logging.getLogger(__name__) +password_incorrect_msg = 'su password is incorrect' +password_required_msg = 'su password is required' + class PasswordError(mitogen.core.StreamError): pass -class Stream(mitogen.parent.Stream): - # TODO: BSD su cannot handle stdin being a socketpair, but it does let the - # child inherit fds from the parent. So we can still pass a socketpair in - # for hybrid_tty_create_child(), there just needs to be either a shell - # snippet or bootstrap support for fixing things up afterwards. 
- create_child = staticmethod(mitogen.parent.tty_create_child) - child_is_immediate_subprocess = False +class SetupBootstrapProtocol(mitogen.parent.BootstrapProtocol): + password_sent = False + + def setup_patterns(self, conn): + """ + su options cause the regexes used to vary. This is a mess, requires + reworking. + """ + incorrect_pattern = re.compile( + mitogen.core.b('|').join( + re.escape(s.encode('utf-8')) + for s in conn.options.incorrect_prompts + ), + re.I + ) + prompt_pattern = re.compile( + re.escape( + conn.options.password_prompt.encode('utf-8') + ), + re.I + ) + + self.PATTERNS = mitogen.parent.BootstrapProtocol.PATTERNS + [ + (incorrect_pattern, type(self)._on_password_incorrect), + ] + self.PARTIAL_PATTERNS = mitogen.parent.BootstrapProtocol.PARTIAL_PATTERNS + [ + (prompt_pattern, type(self)._on_password_prompt), + ] + + def _on_password_prompt(self, line, match): + LOG.debug('%r: (password prompt): %r', + self.stream.name, line.decode('utf-8', 'replace')) + + if self.stream.conn.options.password is None: + self.stream.conn._fail_connection( + PasswordError(password_required_msg) + ) + return + + if self.password_sent: + self.stream.conn._fail_connection( + PasswordError(password_incorrect_msg) + ) + return + + self.stream.transmit_side.write( + (self.stream.conn.options.password + '\n').encode('utf-8') + ) + self.password_sent = True + + def _on_password_incorrect(self, line, match): + if self.password_sent: + self.stream.conn._fail_connection( + PasswordError(password_incorrect_msg) + ) - #: Once connected, points to the corresponding DiagLogStream, allowing it to - #: be disconnected at the same time this stream is being torn down. - username = 'root' +class Options(mitogen.parent.Options): + username = u'root' password = None su_path = 'su' - password_prompt = b('password:') + password_prompt = u'password:' incorrect_prompts = ( - b('su: sorry'), # BSD - b('su: authentication failure'), # Linux - b('su: incorrect password'), # CentOS 6 - b('authentication is denied'), # AIX + u'su: sorry', # BSD + u'su: authentication failure', # Linux + u'su: incorrect password', # CentOS 6 + u'authentication is denied', # AIX ) - def construct(self, username=None, password=None, su_path=None, - password_prompt=None, incorrect_prompts=None, **kwargs): - super(Stream, self).construct(**kwargs) + def __init__(self, username=None, password=None, su_path=None, + password_prompt=None, incorrect_prompts=None, **kwargs): + super(Options, self).__init__(**kwargs) if username is not None: - self.username = username + self.username = mitogen.core.to_text(username) if password is not None: - self.password = password + self.password = mitogen.core.to_text(password) if su_path is not None: self.su_path = su_path if password_prompt is not None: - self.password_prompt = password_prompt.lower() + self.password_prompt = password_prompt if incorrect_prompts is not None: - self.incorrect_prompts = map(str.lower, incorrect_prompts) + self.incorrect_prompts = [ + mitogen.core.to_text(p) + for p in incorrect_prompts + ] + + +class Connection(mitogen.parent.Connection): + options_class = Options + stream_protocol_class = SetupBootstrapProtocol + + # TODO: BSD su cannot handle stdin being a socketpair, but it does let the + # child inherit fds from the parent. So we can still pass a socketpair in + # for hybrid_tty_create_child(), there just needs to be either a shell + # snippet or bootstrap support for fixing things up afterwards. 
+ create_child = staticmethod(mitogen.parent.tty_create_child) + child_is_immediate_subprocess = False def _get_name(self): - return u'su.' + mitogen.core.to_text(self.username) + return u'su.' + self.options.username + + def stream_factory(self): + stream = super(Connection, self).stream_factory() + stream.protocol.setup_patterns(self) + return stream def get_boot_command(self): - argv = mitogen.parent.Argv(super(Stream, self).get_boot_command()) - return [self.su_path, self.username, '-c', str(argv)] - - password_incorrect_msg = 'su password is incorrect' - password_required_msg = 'su password is required' - - def _connect_input_loop(self, it): - password_sent = False - - for buf in it: - LOG.debug('%r: received %r', self, buf) - if buf.endswith(self.EC0_MARKER): - self._ec0_received() - return - if any(s in buf.lower() for s in self.incorrect_prompts): - if password_sent: - raise PasswordError(self.password_incorrect_msg) - elif self.password_prompt in buf.lower(): - if self.password is None: - raise PasswordError(self.password_required_msg) - if password_sent: - raise PasswordError(self.password_incorrect_msg) - LOG.debug('sending password') - self.transmit_side.write( - mitogen.core.to_text(self.password + '\n').encode('utf-8') - ) - password_sent = True - - raise mitogen.core.StreamError('bootstrap failed') - - def _connect_bootstrap(self): - it = mitogen.parent.iter_read( - fds=[self.receive_side.fd], - deadline=self.connect_deadline, - ) - try: - self._connect_input_loop(it) - finally: - it.close() + argv = mitogen.parent.Argv(super(Connection, self).get_boot_command()) + return [self.options.su_path, self.options.username, '-c', str(argv)] diff --git a/mitogen/sudo.py b/mitogen/sudo.py index 868d4d76..fac02c57 100644 --- a/mitogen/sudo.py +++ b/mitogen/sudo.py @@ -40,6 +40,9 @@ from mitogen.core import b LOG = logging.getLogger(__name__) +password_incorrect_msg = 'sudo password is incorrect' +password_required_msg = 'sudo password is required' + # These are base64-encoded UTF-8 as our existing minifier/module server # struggles with Unicode Python source in some (forgotten) circumstances. 
PASSWORD_PROMPTS = [ @@ -99,14 +102,13 @@ PASSWORD_PROMPTS = [ PASSWORD_PROMPT_RE = re.compile( - u'|'.join( - base64.b64decode(s).decode('utf-8') + mitogen.core.b('|').join( + base64.b64decode(s) for s in PASSWORD_PROMPTS - ) + ), + re.I ) - -PASSWORD_PROMPT = b('password') SUDO_OPTIONS = [ #(False, 'bool', '--askpass', '-A') #(False, 'str', '--auth-type', '-a') @@ -181,10 +183,7 @@ def option(default, *args): return default -class Stream(mitogen.parent.Stream): - create_child = staticmethod(mitogen.parent.hybrid_tty_create_child) - child_is_immediate_subprocess = False - +class Options(mitogen.parent.Options): sudo_path = 'sudo' username = 'root' password = None @@ -195,15 +194,16 @@ class Stream(mitogen.parent.Stream): selinux_role = None selinux_type = None - def construct(self, username=None, sudo_path=None, password=None, - preserve_env=None, set_home=None, sudo_args=None, - login=None, selinux_role=None, selinux_type=None, **kwargs): - super(Stream, self).construct(**kwargs) + def __init__(self, username=None, sudo_path=None, password=None, + preserve_env=None, set_home=None, sudo_args=None, + login=None, selinux_role=None, selinux_type=None, **kwargs): + super(Options, self).__init__(**kwargs) opts = parse_sudo_flags(sudo_args or []) self.username = option(self.username, username, opts.user) self.sudo_path = option(self.sudo_path, sudo_path) - self.password = password or None + if password: + self.password = mitogen.core.to_text(password) self.preserve_env = option(self.preserve_env, preserve_env, opts.preserve_env) self.set_home = option(self.set_home, set_home, opts.set_home) @@ -211,67 +211,61 @@ class Stream(mitogen.parent.Stream): self.selinux_role = option(self.selinux_role, selinux_role, opts.role) self.selinux_type = option(self.selinux_type, selinux_type, opts.type) + +class SetupProtocol(mitogen.parent.RegexProtocol): + password_sent = False + + def _on_password_prompt(self, line, match): + LOG.debug('%s: (password prompt): %s', + self.stream.name, line.decode('utf-8', 'replace')) + + if self.stream.conn.options.password is None: + self.stream.conn._fail_connection( + PasswordError(password_required_msg) + ) + return + + if self.password_sent: + self.stream.conn._fail_connection( + PasswordError(password_incorrect_msg) + ) + return + + self.stream.transmit_side.write( + (self.stream.conn.options.password + '\n').encode('utf-8') + ) + self.password_sent = True + + PARTIAL_PATTERNS = [ + (PASSWORD_PROMPT_RE, _on_password_prompt), + ] + + +class Connection(mitogen.parent.Connection): + diag_protocol_class = SetupProtocol + options_class = Options + create_child = staticmethod(mitogen.parent.hybrid_tty_create_child) + child_is_immediate_subprocess = False + def _get_name(self): - return u'sudo.' + mitogen.core.to_text(self.username) + return u'sudo.' + mitogen.core.to_text(self.options.username) def get_boot_command(self): # Note: sudo did not introduce long-format option processing until July # 2013, so even though we parse long-format options, supply short-form # to the sudo command. 
- bits = [self.sudo_path, '-u', self.username] - if self.preserve_env: + bits = [self.options.sudo_path, '-u', self.options.username] + if self.options.preserve_env: bits += ['-E'] - if self.set_home: + if self.options.set_home: bits += ['-H'] - if self.login: + if self.options.login: bits += ['-i'] - if self.selinux_role: - bits += ['-r', self.selinux_role] - if self.selinux_type: - bits += ['-t', self.selinux_type] + if self.options.selinux_role: + bits += ['-r', self.options.selinux_role] + if self.options.selinux_type: + bits += ['-t', self.options.selinux_type] - bits = bits + ['--'] + super(Stream, self).get_boot_command() + bits = bits + ['--'] + super(Connection, self).get_boot_command() LOG.debug('sudo command line: %r', bits) return bits - - password_incorrect_msg = 'sudo password is incorrect' - password_required_msg = 'sudo password is required' - - def _connect_input_loop(self, it): - password_sent = False - - for buf in it: - LOG.debug('%s: received %r', self.name, buf) - if buf.endswith(self.EC0_MARKER): - self._ec0_received() - return - - match = PASSWORD_PROMPT_RE.search(buf.decode('utf-8').lower()) - if match is not None: - LOG.debug('%s: matched password prompt %r', - self.name, match.group(0)) - if self.password is None: - raise PasswordError(self.password_required_msg) - if password_sent: - raise PasswordError(self.password_incorrect_msg) - self.diag_stream.transmit_side.write( - (mitogen.core.to_text(self.password) + '\n').encode('utf-8') - ) - password_sent = True - - raise mitogen.core.StreamError('bootstrap failed') - - def _connect_bootstrap(self): - fds = [self.receive_side.fd] - if self.diag_stream is not None: - fds.append(self.diag_stream.receive_side.fd) - - it = mitogen.parent.iter_read( - fds=fds, - deadline=self.connect_deadline, - ) - - try: - self._connect_input_loop(it) - finally: - it.close() diff --git a/mitogen/unix.py b/mitogen/unix.py index 66141eec..f7e56435 100644 --- a/mitogen/unix.py +++ b/mitogen/unix.py @@ -65,9 +65,38 @@ def make_socket_path(): return tempfile.mktemp(prefix='mitogen_unix_', suffix='.sock') -class Listener(mitogen.core.BasicStream): +class ListenerStream(mitogen.core.Stream): + def on_receive(self, broker): + sock, _ = self.receive_side.fp.accept() + try: + self.protocol.on_accept_client(sock) + except: + sock.close() + raise + + +class Listener(mitogen.core.Protocol): + stream_class = ListenerStream keep_alive = True + @classmethod + def build_stream(cls, router, path=None, backlog=100): + if not path: + path = make_socket_path() + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + if os.path.exists(path) and is_path_dead(path): + LOG.debug('%r: deleting stale %r', self, path) + os.unlink(path) + + sock.bind(path) + os.chmod(path, int('0600', 8)) + sock.listen(backlog) + + stream = super(Listener, cls).build_stream(router, path) + stream.accept(sock, sock) + router.broker.start_receive(stream) + return stream + def __repr__(self): return '%s.%s(%r)' % ( __name__, @@ -75,20 +104,9 @@ class Listener(mitogen.core.BasicStream): self.path, ) - def __init__(self, router, path=None, backlog=100): + def __init__(self, router, path): self._router = router - self.path = path or make_socket_path() - self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - - if os.path.exists(self.path) and is_path_dead(self.path): - LOG.debug('%r: deleting stale %r', self, self.path) - os.unlink(self.path) - - self._sock.bind(self.path) - os.chmod(self.path, int('0600', 8)) - self._sock.listen(backlog) - self.receive_side = 
mitogen.core.Side(self, self._sock.fileno()) - router.broker.start_receive(self) + self.path = path def _unlink_socket(self): try: @@ -102,10 +120,9 @@ class Listener(mitogen.core.BasicStream): def on_shutdown(self, broker): broker.stop_receive(self) self._unlink_socket() - self._sock.close() - self.receive_side.closed = True + self.receive_side.close() - def _accept_client(self, sock): + def on_accept_client(self, sock): sock.setblocking(True) try: pid, = struct.unpack('>L', sock.recv(4)) @@ -115,12 +132,6 @@ class Listener(mitogen.core.BasicStream): return context_id = self._router.id_allocator.allocate() - context = mitogen.parent.Context(self._router, context_id) - stream = mitogen.core.Stream(self._router, context_id) - stream.name = u'unix_client.%d' % (pid,) - stream.auth_id = mitogen.context_id - stream.is_privileged = True - try: sock.send(struct.pack('>LLL', context_id, mitogen.context_id, os.getpid())) @@ -129,21 +140,22 @@ class Listener(mitogen.core.BasicStream): self, pid, sys.exc_info()[1]) return + context = mitogen.parent.Context(self._router, context_id) + stream = mitogen.core.MitogenProtocol.build_stream( + router=self._router, + remote_id=context_id, + ) + stream.name = u'unix_client.%d' % (pid,) + stream.protocol.auth_id = mitogen.context_id + stream.protocol.is_privileged = True + side = mitogen.core.Side(stream, sock) + stream.receive_side = side + stream.transmit_side = side LOG.debug('%r: accepted %r', self, stream) - stream.accept(sock.fileno(), sock.fileno()) self._router.register(context, stream) - def on_receive(self, broker): - sock, _ = self._sock.accept() - try: - self._accept_client(sock) - finally: - sock.close() - -def connect(path, broker=None): - LOG.debug('unix.connect(path=%r)', path) - sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) +def _connect(path, broker, sock): sock.connect(path) sock.send(struct.pack('>L', os.getpid())) mitogen.context_id, remote_id, pid = struct.unpack('>LLL', sock.recv(12)) @@ -154,15 +166,24 @@ def connect(path, broker=None): mitogen.context_id, remote_id) router = mitogen.master.Router(broker=broker) - stream = mitogen.core.Stream(router, remote_id) - stream.accept(sock.fileno(), sock.fileno()) + stream = mitogen.core.MitogenProtocol.build_stream(router, remote_id) + side = mitogen.core.Side(stream, sock) + stream.transmit_side = side + stream.receive_side = side stream.name = u'unix_listener.%d' % (pid,) - context = mitogen.parent.Context(router, remote_id) - router.register(context, stream) - mitogen.core.listen(router.broker, 'shutdown', - lambda: router.disconnect_stream(stream)) + lambda: router.disconnect_stream(stream)) - sock.close() + context = mitogen.parent.Context(router, remote_id) + router.register(context, stream) return router, context + +def connect(path, broker=None): + LOG.debug('unix.connect(path=%r)', path) + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + try: + return _connect(path, broker, sock) + except: + sock.close() + raise diff --git a/preamble_size.py b/preamble_size.py index f5f1adc1..b2cbd60a 100644 --- a/preamble_size.py +++ b/preamble_size.py @@ -19,15 +19,17 @@ import mitogen.sudo router = mitogen.master.Router() context = mitogen.parent.Context(router, 0) -stream = mitogen.ssh.Stream(router, 0, max_message_size=0, hostname='foo') +options = mitogen.ssh.Options(max_message_size=0, hostname='foo') +conn = mitogen.ssh.Connection(options, router) +conn.context_id = 123 -print('SSH command size: %s' % (len(' '.join(stream.get_boot_command())),)) +print('SSH command size: 
%s' % (len(' '.join(conn.get_boot_command())),)) print('Preamble size: %s (%.2fKiB)' % ( - len(stream.get_preamble()), - len(stream.get_preamble()) / 1024.0, + len(conn.get_preamble()), + len(conn.get_preamble()) / 1024.0, )) if '--dump' in sys.argv: - print(zlib.decompress(stream.get_preamble())) + print(zlib.decompress(conn.get_preamble())) exit() diff --git a/tests/data/iter_read_generator.py b/tests/data/iter_read_generator.py deleted file mode 100755 index 3fd3c08c..00000000 --- a/tests/data/iter_read_generator.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env python -# I produce text every 100ms, for testing mitogen.core.iter_read() - -import sys -import time - - -i = 0 -while True: - i += 1 - sys.stdout.write(str(i)) - sys.stdout.flush() - time.sleep(0.1) diff --git a/tests/data/write_all_consumer.py b/tests/data/write_all_consumer.py deleted file mode 100755 index 4013ccdd..00000000 --- a/tests/data/write_all_consumer.py +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python -# I consume 65535 bytes every 10ms, for testing mitogen.core.write_all() - -import os -import time - -while True: - os.read(0, 65535) - time.sleep(0.01) diff --git a/tests/docker_test.py b/tests/docker_test.py index 49c742ee..b5d15707 100644 --- a/tests/docker_test.py +++ b/tests/docker_test.py @@ -21,7 +21,7 @@ class ConstructorTest(testlib.RouterMixin, testlib.TestCase): self.assertEquals(argv[1], 'exec') self.assertEquals(argv[2], '--interactive') self.assertEquals(argv[3], 'container_name') - self.assertEquals(argv[4], stream.python_path) + self.assertEquals(argv[4], stream.conn.options.python_path) if __name__ == '__main__': diff --git a/tests/first_stage_test.py b/tests/first_stage_test.py index 470afc7a..20d7fd1b 100644 --- a/tests/first_stage_test.py +++ b/tests/first_stage_test.py @@ -19,8 +19,10 @@ class CommandLineTest(testlib.RouterMixin, testlib.TestCase): # * 3.x starting 2.7 def test_valid_syntax(self): - stream = mitogen.parent.Stream(self.router, 0, max_message_size=123) - args = stream.get_boot_command() + options = mitogen.parent.Options(max_message_size=123) + conn = mitogen.parent.Connection(options, self.router) + conn.context = mitogen.core.Context(None, 123) + args = conn.get_boot_command() # Executing the boot command will print "EC0" and expect to read from # stdin, which will fail because it's pointing at /dev/null, causing @@ -38,7 +40,8 @@ class CommandLineTest(testlib.RouterMixin, testlib.TestCase): ) stdout, stderr = proc.communicate() self.assertEquals(0, proc.returncode) - self.assertEquals(mitogen.parent.Stream.EC0_MARKER, stdout) + self.assertEquals(stdout, + mitogen.parent.BootstrapProtocol.EC0_MARKER+'\n') self.assertIn(b("Error -5 while decompressing data"), stderr) finally: fp.close() diff --git a/tests/lxc_test.py b/tests/lxc_test.py index ae5990f6..f78846ff 100644 --- a/tests/lxc_test.py +++ b/tests/lxc_test.py @@ -38,7 +38,7 @@ class ConstructorTest(testlib.RouterMixin, testlib.TestCase): lxc_attach_path='true', ) ) - self.assertTrue(str(e).endswith(mitogen.lxc.Stream.eof_error_hint)) + self.assertTrue(str(e).endswith(mitogen.lxc.Connection.eof_error_hint)) if __name__ == '__main__': diff --git a/tests/lxd_test.py b/tests/lxd_test.py index e59da43c..c80f8251 100644 --- a/tests/lxd_test.py +++ b/tests/lxd_test.py @@ -30,7 +30,7 @@ class ConstructorTest(testlib.RouterMixin, testlib.TestCase): lxc_path='true', ) ) - self.assertTrue(str(e).endswith(mitogen.lxd.Stream.eof_error_hint)) + self.assertTrue(str(e).endswith(mitogen.lxd.Connection.eof_error_hint)) if __name__ == 
'__main__': diff --git a/tests/mitogen_protocol_test.py b/tests/mitogen_protocol_test.py new file mode 100644 index 00000000..834fb437 --- /dev/null +++ b/tests/mitogen_protocol_test.py @@ -0,0 +1,34 @@ + +import unittest2 +import mock + +import mitogen.core + +import testlib + + +class ReceiveOneTest(testlib.TestCase): + klass = mitogen.core.MitogenProtocol + + def test_corruption(self): + broker = mock.Mock() + router = mock.Mock() + stream = mock.Mock() + + protocol = self.klass(router, 1) + protocol.stream = stream + + junk = mitogen.core.b('x') * mitogen.core.Message.HEADER_LEN + + capture = testlib.LogCapturer() + capture.start() + protocol.on_receive(broker, junk) + capture.stop() + + self.assertEquals(1, stream.on_disconnect.call_count) + expect = self.klass.corrupt_msg % (stream.name, junk) + self.assertTrue(expect in capture.raw()) + + +if __name__ == '__main__': + unittest2.main() diff --git a/tests/parent_test.py b/tests/parent_test.py index 00bddb4d..191d9f88 100644 --- a/tests/parent_test.py +++ b/tests/parent_test.py @@ -49,7 +49,7 @@ def wait_for_empty_output_queue(sync_recv, context): while True: # Now wait for the RPC to exit the output queue. stream = router.stream_by_id(context.context_id) - if broker.defer_sync(lambda: stream.pending_bytes()) == 0: + if broker.defer_sync(lambda: stream.protocol.pending_bytes()) == 0: return time.sleep(0.1) @@ -69,35 +69,17 @@ class GetDefaultRemoteNameTest(testlib.TestCase): self.assertEquals("ECORP_Administrator@box:123", self.func()) -class WstatusToStrTest(testlib.TestCase): - func = staticmethod(mitogen.parent.wstatus_to_str) +class ReturncodeToStrTest(testlib.TestCase): + func = staticmethod(mitogen.parent.returncode_to_str) def test_return_zero(self): - pid = os.fork() - if not pid: - os._exit(0) - (pid, status), _ = mitogen.core.io_op(os.waitpid, pid, 0) - self.assertEquals(self.func(status), - 'exited with return code 0') + self.assertEquals(self.func(0), 'exited with return code 0') def test_return_one(self): - pid = os.fork() - if not pid: - os._exit(1) - (pid, status), _ = mitogen.core.io_op(os.waitpid, pid, 0) - self.assertEquals( - self.func(status), - 'exited with return code 1' - ) + self.assertEquals(self.func(1), 'exited with return code 1') def test_sigkill(self): - pid = os.fork() - if not pid: - time.sleep(600) - os.kill(pid, signal.SIGKILL) - (pid, status), _ = mitogen.core.io_op(os.waitpid, pid, 0) - self.assertEquals( - self.func(status), + self.assertEquals(self.func(-signal.SIGKILL), 'exited due to signal %s (SIGKILL)' % (int(signal.SIGKILL),) ) @@ -107,20 +89,20 @@ class WstatusToStrTest(testlib.TestCase): class ReapChildTest(testlib.RouterMixin, testlib.TestCase): def test_connect_timeout(self): # Ensure the child process is reaped if the connection times out. 
- stream = mitogen.parent.Stream( - router=self.router, - remote_id=1234, + options = mitogen.parent.Options( old_router=self.router, max_message_size=self.router.max_message_size, python_path=testlib.data_path('python_never_responds.py'), connect_timeout=0.5, ) + + conn = mitogen.parent.Connection(options, router=self.router) self.assertRaises(mitogen.core.TimeoutError, - lambda: stream.connect() + lambda: conn.connect(context=mitogen.core.Context(None, 1234)) ) - wait_for_child(stream.pid) + wait_for_child(conn.proc.pid) e = self.assertRaises(OSError, - lambda: os.kill(stream.pid, 0) + lambda: os.kill(conn.proc.pid, 0) ) self.assertEquals(e.args[0], errno.ESRCH) @@ -133,7 +115,7 @@ class StreamErrorTest(testlib.RouterMixin, testlib.TestCase): connect_timeout=3, ) ) - prefix = "EOF on stream; last 300 bytes received: " + prefix = mitogen.parent.Connection.eof_error_msg self.assertTrue(e.args[0].startswith(prefix)) def test_via_eof(self): @@ -142,12 +124,12 @@ class StreamErrorTest(testlib.RouterMixin, testlib.TestCase): e = self.assertRaises(mitogen.core.StreamError, lambda: self.router.local( via=local, - python_path='true', + python_path='echo', connect_timeout=3, ) ) - s = "EOF on stream; last 300 bytes received: " - self.assertTrue(s in e.args[0]) + expect = mitogen.parent.Connection.eof_error_msg + self.assertTrue(expect in e.args[0]) def test_direct_enoent(self): e = self.assertRaises(mitogen.core.StreamError, @@ -185,11 +167,15 @@ class OpenPtyTest(testlib.TestCase): func = staticmethod(mitogen.parent.openpty) def test_pty_returned(self): - master_fd, slave_fd = self.func() - self.assertTrue(isinstance(master_fd, int)) - self.assertTrue(isinstance(slave_fd, int)) - os.close(master_fd) - os.close(slave_fd) + master_fp, slave_fp = self.func() + try: + self.assertTrue(master_fp.isatty()) + self.assertTrue(isinstance(master_fp, file)) + self.assertTrue(slave_fp.isatty()) + self.assertTrue(isinstance(slave_fp, file)) + finally: + master_fp.close() + slave_fp.close() @mock.patch('os.openpty') def test_max_reached(self, openpty): @@ -204,20 +190,20 @@ class OpenPtyTest(testlib.TestCase): @mock.patch('os.openpty') def test_broken_linux_fallback(self, openpty): openpty.side_effect = OSError(errno.EPERM) - master_fd, slave_fd = self.func() + master_fp, slave_fp = self.func() try: - st = os.fstat(master_fd) + st = os.fstat(master_fp.fileno()) self.assertEquals(5, os.major(st.st_rdev)) - flags = fcntl.fcntl(master_fd, fcntl.F_GETFL) + flags = fcntl.fcntl(master_fp.fileno(), fcntl.F_GETFL) self.assertTrue(flags & os.O_RDWR) - st = os.fstat(slave_fd) + st = os.fstat(slave_fp.fileno()) self.assertEquals(136, os.major(st.st_rdev)) - flags = fcntl.fcntl(slave_fd, fcntl.F_GETFL) + flags = fcntl.fcntl(slave_fp.fileno(), fcntl.F_GETFL) self.assertTrue(flags & os.O_RDWR) finally: - os.close(master_fd) - os.close(slave_fd) + master_fp.close() + slave_fp.close() class TtyCreateChildTest(testlib.TestCase): @@ -235,125 +221,21 @@ class TtyCreateChildTest(testlib.TestCase): # read a password. 
tf = tempfile.NamedTemporaryFile() try: - pid, fd, _ = self.func([ + proc = self.func([ 'bash', '-c', 'exec 2>%s; echo hi > /dev/tty' % (tf.name,) ]) deadline = time.time() + 5.0 - for line in mitogen.parent.iter_read([fd], deadline): - self.assertEquals(mitogen.core.b('hi\n'), line) - break - waited_pid, status = os.waitpid(pid, 0) - self.assertEquals(pid, waited_pid) + mitogen.core.set_block(proc.stdio_fp.fileno()) + self.assertEquals(mitogen.core.b('hi\n'), proc.stdio_fp.read()) + waited_pid, status = os.waitpid(proc.pid, 0) + self.assertEquals(proc.pid, waited_pid) self.assertEquals(0, status) self.assertEquals(mitogen.core.b(''), tf.read()) - os.close(fd) + proc.stdio_fp.close() finally: tf.close() -class IterReadTest(testlib.TestCase): - func = staticmethod(mitogen.parent.iter_read) - - def make_proc(self): - # I produce text every 100ms. - args = [testlib.data_path('iter_read_generator.py')] - proc = subprocess.Popen(args, stdout=subprocess.PIPE) - mitogen.core.set_nonblock(proc.stdout.fileno()) - return proc - - def test_no_deadline(self): - proc = self.make_proc() - try: - reader = self.func([proc.stdout.fileno()]) - for i, chunk in enumerate(reader): - self.assertEqual(1+i, int(chunk)) - if i > 2: - break - finally: - Popen__terminate(proc) - proc.stdout.close() - - def test_deadline_exceeded_before_call(self): - proc = self.make_proc() - reader = self.func([proc.stdout.fileno()], 0) - try: - got = [] - try: - for chunk in reader: - got.append(chunk) - assert 0, 'TimeoutError not raised' - except mitogen.core.TimeoutError: - self.assertEqual(len(got), 0) - finally: - Popen__terminate(proc) - proc.stdout.close() - - def test_deadline_exceeded_during_call(self): - proc = self.make_proc() - deadline = time.time() + 0.4 - - reader = self.func([proc.stdout.fileno()], deadline) - try: - got = [] - try: - for chunk in reader: - if time.time() > (deadline + 1.0): - assert 0, 'TimeoutError not raised' - got.append(chunk) - except mitogen.core.TimeoutError: - # Give a little wiggle room in case of imperfect scheduling. - # Ideal number should be 9. 
- self.assertLess(deadline, time.time()) - self.assertLess(1, len(got)) - self.assertLess(len(got), 20) - finally: - Popen__terminate(proc) - proc.stdout.close() - - -class WriteAllTest(testlib.TestCase): - func = staticmethod(mitogen.parent.write_all) - - def make_proc(self): - args = [testlib.data_path('write_all_consumer.py')] - proc = subprocess.Popen(args, stdin=subprocess.PIPE) - mitogen.core.set_nonblock(proc.stdin.fileno()) - return proc - - ten_ms_chunk = (mitogen.core.b('x') * 65535) - - def test_no_deadline(self): - proc = self.make_proc() - try: - self.func(proc.stdin.fileno(), self.ten_ms_chunk) - finally: - Popen__terminate(proc) - proc.stdin.close() - - def test_deadline_exceeded_before_call(self): - proc = self.make_proc() - try: - self.assertRaises(mitogen.core.TimeoutError, ( - lambda: self.func(proc.stdin.fileno(), self.ten_ms_chunk, 0) - )) - finally: - Popen__terminate(proc) - proc.stdin.close() - - def test_deadline_exceeded_during_call(self): - proc = self.make_proc() - try: - deadline = time.time() + 0.1 # 100ms deadline - self.assertRaises(mitogen.core.TimeoutError, ( - lambda: self.func(proc.stdin.fileno(), - self.ten_ms_chunk * 100, # 1s of data - deadline) - )) - finally: - Popen__terminate(proc) - proc.stdin.close() - - class DisconnectTest(testlib.RouterMixin, testlib.TestCase): def test_child_disconnected(self): # Easy mode: process notices its own directly connected child is @@ -394,7 +276,7 @@ class DisconnectTest(testlib.RouterMixin, testlib.TestCase): c2 = self.router.local() # Let c1 call functions in c2. - self.router.stream_by_id(c1.context_id).auth_id = mitogen.context_id + self.router.stream_by_id(c1.context_id).protocol.auth_id = mitogen.context_id c1.call(mitogen.parent.upgrade_router) sync_recv = mitogen.core.Receiver(self.router) @@ -412,14 +294,14 @@ class DisconnectTest(testlib.RouterMixin, testlib.TestCase): def test_far_sibling_disconnected(self): # God mode: child of child notices child of child of parent has # disconnected. - c1 = self.router.local() - c11 = self.router.local(via=c1) + c1 = self.router.local(name='c1') + c11 = self.router.local(name='c11', via=c1) - c2 = self.router.local() - c22 = self.router.local(via=c2) + c2 = self.router.local(name='c2') + c22 = self.router.local(name='c22', via=c2) # Let c1 call functions in c2. - self.router.stream_by_id(c1.context_id).auth_id = mitogen.context_id + self.router.stream_by_id(c1.context_id).protocol.auth_id = mitogen.context_id c11.call(mitogen.parent.upgrade_router) sync_recv = mitogen.core.Receiver(self.router) diff --git a/tests/poller_test.py b/tests/poller_test.py index e2e3cdd7..b05a9b94 100644 --- a/tests/poller_test.py +++ b/tests/poller_test.py @@ -42,8 +42,8 @@ class SockMixin(object): self.l2_sock, self.r2_sock = socket.socketpair() self.l2 = self.l2_sock.fileno() self.r2 = self.r2_sock.fileno() - for fd in self.l1, self.r1, self.l2, self.r2: - mitogen.core.set_nonblock(fd) + for fp in self.l1, self.r1, self.l2, self.r2: + mitogen.core.set_nonblock(fp) def fill(self, fd): """Make `fd` unwriteable.""" @@ -354,17 +354,17 @@ class FileClosedMixin(PollerMixin, SockMixin): class TtyHangupMixin(PollerMixin): def test_tty_hangup_detected(self): # bug in initial select.poll() implementation failed to detect POLLHUP. 
- master_fd, slave_fd = mitogen.parent.openpty() + master_fp, slave_fp = mitogen.parent.openpty() try: - self.p.start_receive(master_fd) + self.p.start_receive(master_fp.fileno()) self.assertEquals([], list(self.p.poll(0))) - os.close(slave_fd) - slave_fd = None - self.assertEquals([master_fd], list(self.p.poll(0))) + slave_fp.close() + slave_fp = None + self.assertEquals([master_fp.fileno()], list(self.p.poll(0))) finally: - if slave_fd is not None: - os.close(slave_fd) - os.close(master_fd) + if slave_fp is not None: + slave_fp.close() + master_fp.close() class DistinctDataMixin(PollerMixin, SockMixin): diff --git a/tests/responder_test.py b/tests/responder_test.py index dbc68a3c..285acd6f 100644 --- a/tests/responder_test.py +++ b/tests/responder_test.py @@ -105,7 +105,7 @@ class BrokenModulesTest(testlib.TestCase): # unavailable. Should never happen in the real world. stream = mock.Mock() - stream.sent_modules = set() + stream.protocol.sent_modules = set() router = mock.Mock() router.stream_by_id = lambda n: stream @@ -143,7 +143,7 @@ class BrokenModulesTest(testlib.TestCase): import six_brokenpkg stream = mock.Mock() - stream.sent_modules = set() + stream.protocol.sent_modules = set() router = mock.Mock() router.stream_by_id = lambda n: stream diff --git a/tests/router_test.py b/tests/router_test.py index 80169e34..1bd6c26a 100644 --- a/tests/router_test.py +++ b/tests/router_test.py @@ -171,7 +171,7 @@ class CrashTest(testlib.BrokerMixin, testlib.TestCase): self.assertTrue(sem.get().is_dead) # Ensure it was logged. - expect = '_broker_main() crashed' + expect = 'broker crashed' self.assertTrue(expect in log.stop()) self.broker.join() @@ -364,8 +364,8 @@ class UnidirectionalTest(testlib.RouterMixin, testlib.TestCase): # treated like a parent. l1 = self.router.local() l1s = self.router.stream_by_id(l1.context_id) - l1s.auth_id = mitogen.context_id - l1s.is_privileged = True + l1s.protocol.auth_id = mitogen.context_id + l1s.protocol.is_privileged = True l2 = self.router.local() e = self.assertRaises(mitogen.core.CallError, @@ -378,12 +378,21 @@ class UnidirectionalTest(testlib.RouterMixin, testlib.TestCase): class EgressIdsTest(testlib.RouterMixin, testlib.TestCase): def test_egress_ids_populated(self): # Ensure Stream.egress_ids is populated on message reception. - c1 = self.router.local() - stream = self.router.stream_by_id(c1.context_id) - self.assertEquals(set(), stream.egress_ids) + c1 = self.router.local(name='c1') + c2 = self.router.local(name='c2') - c1.call(time.sleep, 0) - self.assertEquals(set([mitogen.context_id]), stream.egress_ids) + c1s = self.router.stream_by_id(c1.context_id) + try: + c1.call(ping_context, c2) + except mitogen.core.CallError: + # Fails because siblings cant call funcs in each other, but this + # causes messages to be sent. 
+ pass + + self.assertEquals(c1s.protocol.egress_ids, set([ + mitogen.context_id, + c2.context_id, + ])) if __name__ == '__main__': diff --git a/tests/service_test.py b/tests/service_test.py index 3869f713..438766f7 100644 --- a/tests/service_test.py +++ b/tests/service_test.py @@ -44,8 +44,8 @@ class ActivationTest(testlib.RouterMixin, testlib.TestCase): self.assertTrue(isinstance(id_, int)) def test_sibling_cannot_activate_framework(self): - l1 = self.router.local() - l2 = self.router.local() + l1 = self.router.local(name='l1') + l2 = self.router.local(name='l2') exc = self.assertRaises(mitogen.core.CallError, lambda: l2.call(call_service_in, l1, MyService2.name(), 'get_id')) self.assertTrue(mitogen.core.Router.refused_msg in exc.args[0]) diff --git a/tests/ssh_test.py b/tests/ssh_test.py index 496710b8..6ac7d8bf 100644 --- a/tests/ssh_test.py +++ b/tests/ssh_test.py @@ -42,8 +42,6 @@ class ConstructorTest(testlib.RouterMixin, testlib.TestCase): class SshTest(testlib.DockerMixin, testlib.TestCase): - stream_class = mitogen.ssh.Stream - def test_debug_decoding(self): # ensure filter_debug_logs() decodes the logged string. capture = testlib.LogCapturer() @@ -93,7 +91,7 @@ class SshTest(testlib.DockerMixin, testlib.TestCase): except mitogen.ssh.PasswordError: e = sys.exc_info()[1] - self.assertEqual(e.args[0], self.stream_class.password_required_msg) + self.assertEqual(e.args[0], mitogen.ssh.password_required_msg) def test_password_incorrect(self): try: @@ -105,7 +103,7 @@ class SshTest(testlib.DockerMixin, testlib.TestCase): except mitogen.ssh.PasswordError: e = sys.exc_info()[1] - self.assertEqual(e.args[0], self.stream_class.password_incorrect_msg) + self.assertEqual(e.args[0], mitogen.ssh.password_incorrect_msg) def test_password_specified(self): context = self.docker_ssh( @@ -127,7 +125,7 @@ class SshTest(testlib.DockerMixin, testlib.TestCase): except mitogen.ssh.PasswordError: e = sys.exc_info()[1] - self.assertEqual(e.args[0], self.stream_class.password_required_msg) + self.assertEqual(e.args[0], mitogen.ssh.password_required_msg) def test_pubkey_specified(self): context = self.docker_ssh( @@ -150,7 +148,7 @@ class SshTest(testlib.DockerMixin, testlib.TestCase): check_host_keys='enforce', ) ) - self.assertEquals(e.args[0], mitogen.ssh.Stream.hostkey_failed_msg) + self.assertEquals(e.args[0], mitogen.ssh.hostkey_failed_msg) finally: fp.close() @@ -184,8 +182,6 @@ class SshTest(testlib.DockerMixin, testlib.TestCase): class BannerTest(testlib.DockerMixin, testlib.TestCase): # Verify the ability to disambiguate random spam appearing in the SSHd's # login banner from a legitimate password prompt. 
- stream_class = mitogen.ssh.Stream - def test_verbose_enabled(self): context = self.docker_ssh( username='mitogen__has_sudo', @@ -210,8 +206,6 @@ class StubPermissionDeniedTest(StubSshMixin, testlib.TestCase): class StubCheckHostKeysTest(StubSshMixin, testlib.TestCase): - stream_class = mitogen.ssh.Stream - def test_check_host_keys_accept(self): # required=true, host_key_checking=accept context = self.stub_ssh(STUBSSH_MODE='ask', check_host_keys='accept') diff --git a/tests/stream_test.py b/tests/stream_test.py deleted file mode 100644 index d844e610..00000000 --- a/tests/stream_test.py +++ /dev/null @@ -1,33 +0,0 @@ - -import unittest2 -import mock - -import mitogen.core - -import testlib - - -class ReceiveOneTest(testlib.TestCase): - klass = mitogen.core.Stream - - def test_corruption(self): - broker = mock.Mock() - router = mock.Mock() - - stream = self.klass(router, 1) - junk = mitogen.core.b('x') * stream.HEADER_LEN - stream._input_buf = [junk] - stream._input_buf_len = len(junk) - - capture = testlib.LogCapturer() - capture.start() - ret = stream._receive_one(broker) - #self.assertEquals(1, broker.stop_receive.mock_calls) - capture.stop() - - self.assertFalse(ret) - self.assertTrue((self.klass.corrupt_msg % (junk,)) in capture.raw()) - - -if __name__ == '__main__': - unittest2.main() diff --git a/tests/su_test.py b/tests/su_test.py index 2af17c6e..760dd50d 100644 --- a/tests/su_test.py +++ b/tests/su_test.py @@ -1,9 +1,9 @@ +import getpass import os import mitogen -import mitogen.lxd -import mitogen.parent +import mitogen.su import unittest2 @@ -21,12 +21,41 @@ class ConstructorTest(testlib.RouterMixin, testlib.TestCase): argv = eval(context.call(os.getenv, 'ORIGINAL_ARGV')) return context, argv - def test_basic(self): context, argv = self.run_su() self.assertEquals(argv[1], 'root') self.assertEquals(argv[2], '-c') +class SuTest(testlib.DockerMixin, testlib.TestCase): + def test_password_required(self): + ssh = self.docker_ssh( + username='mitogen__has_sudo', + password='has_sudo_password', + ) + e = self.assertRaises(mitogen.core.StreamError, + lambda: self.router.su(via=ssh) + ) + self.assertTrue(mitogen.su.password_required_msg in str(e)) + + def test_password_incorrect(self): + ssh = self.docker_ssh( + username='mitogen__has_sudo', + password='has_sudo_password', + ) + e = self.assertRaises(mitogen.core.StreamError, + lambda: self.router.su(via=ssh, password='x') + ) + self.assertTrue(mitogen.su.password_incorrect_msg in str(e)) + + def test_password_okay(self): + ssh = self.docker_ssh( + username='mitogen__has_sudo', + password='has_sudo_password', + ) + context = self.router.su(via=ssh, password='rootpassword') + self.assertEquals('root', context.call(getpass.getuser)) + + if __name__ == '__main__': unittest2.main() diff --git a/tests/sudo_test.py b/tests/sudo_test.py index 1d10ba9a..9ecf103d 100644 --- a/tests/sudo_test.py +++ b/tests/sudo_test.py @@ -2,8 +2,7 @@ import os import mitogen -import mitogen.lxd -import mitogen.parent +import mitogen.sudo import unittest2 @@ -79,7 +78,7 @@ class NonEnglishPromptTest(testlib.DockerMixin, testlib.TestCase): e = self.assertRaises(mitogen.core.StreamError, lambda: self.router.sudo(via=ssh) ) - self.assertTrue(mitogen.sudo.Stream.password_required_msg in str(e)) + self.assertTrue(mitogen.sudo.password_required_msg in str(e)) def test_password_incorrect(self): ssh = self.docker_ssh( @@ -91,7 +90,7 @@ class NonEnglishPromptTest(testlib.DockerMixin, testlib.TestCase): e = self.assertRaises(mitogen.core.StreamError, lambda: 
self.router.sudo(via=ssh, password='x') ) - self.assertTrue(mitogen.sudo.Stream.password_incorrect_msg in str(e)) + self.assertTrue(mitogen.sudo.password_incorrect_msg in str(e)) def test_password_okay(self): ssh = self.docker_ssh( @@ -103,7 +102,7 @@ class NonEnglishPromptTest(testlib.DockerMixin, testlib.TestCase): e = self.assertRaises(mitogen.core.StreamError, lambda: self.router.sudo(via=ssh, password='rootpassword') ) - self.assertTrue(mitogen.sudo.Stream.password_incorrect_msg in str(e)) + self.assertTrue(mitogen.sudo.password_incorrect_msg in str(e)) if __name__ == '__main__': diff --git a/tests/unix_test.py b/tests/unix_test.py index 02dc11a4..4f4aab8a 100644 --- a/tests/unix_test.py +++ b/tests/unix_test.py @@ -67,12 +67,12 @@ class ListenerTest(testlib.RouterMixin, testlib.TestCase): klass = mitogen.unix.Listener def test_constructor_basic(self): - listener = self.klass(router=self.router) + listener = self.klass.build_stream(router=self.router) capture = testlib.LogCapturer() capture.start() try: - self.assertFalse(mitogen.unix.is_path_dead(listener.path)) - os.unlink(listener.path) + self.assertFalse(mitogen.unix.is_path_dead(listener.protocol.path)) + os.unlink(listener.protocol.path) # ensure we catch 0 byte read error log message self.broker.shutdown() self.broker.join() @@ -96,15 +96,17 @@ class ClientTest(testlib.TestCase): def _test_simple_client(self, path): router, context = self._try_connect(path) - self.assertEquals(0, context.context_id) - self.assertEquals(1, mitogen.context_id) - self.assertEquals(0, mitogen.parent_id) - resp = context.call_service(service_name=MyService, method_name='ping') - self.assertEquals(mitogen.context_id, resp['src_id']) - self.assertEquals(0, resp['auth_id']) - router.broker.shutdown() - router.broker.join() - os.unlink(path) + try: + self.assertEquals(0, context.context_id) + self.assertEquals(1, mitogen.context_id) + self.assertEquals(0, mitogen.parent_id) + resp = context.call_service(service_name=MyService, method_name='ping') + self.assertEquals(mitogen.context_id, resp['src_id']) + self.assertEquals(0, resp['auth_id']) + finally: + router.broker.shutdown() + router.broker.join() + os.unlink(path) @classmethod def _test_simple_server(cls, path): @@ -112,7 +114,7 @@ class ClientTest(testlib.TestCase): latch = mitogen.core.Latch() try: try: - listener = cls.klass(path=path, router=router) + listener = cls.klass.build_stream(path=path, router=router) pool = mitogen.service.Pool(router=router, services=[ MyService(latch=latch, router=router), ]) From acade4ce88b55ef9a8bccf2435c548ca9df56d70 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 12 Mar 2019 04:32:57 +0000 Subject: [PATCH 109/383] ssh: fix issue #271 regression due to refactor, add test. --- mitogen/ssh.py | 8 ++++---- tests/ssh_test.py | 8 ++++++++ 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/mitogen/ssh.py b/mitogen/ssh.py index 059dec7a..2ba83ed4 100644 --- a/mitogen/ssh.py +++ b/mitogen/ssh.py @@ -83,8 +83,11 @@ HOSTKEY_FAIL_PATTERN = re.compile( ) # [user@host: ] permission denied +# issue #271: work around conflict with user shell reporting 'permission +# denied' e.g. during chdir($HOME) by only matching it at the start of the +# line. PERMDENIED_PATTERN = re.compile( - b('(?:[^@]+@[^:]+: )?' # Absent in OpenSSH <7.5 + b('^(?:[^@]+@[^:]+: )?' 
# Absent in OpenSSH <7.5 'Permission denied'), re.I ) @@ -122,9 +125,6 @@ class SetupProtocol(mitogen.parent.RegexProtocol): self.stream.conn._fail_connection(HostKeyError(hostkey_failed_msg)) def _on_permission_denied(self, line, match): - # issue #271: work around conflict with user shell reporting - # 'permission denied' e.g. during chdir($HOME) by only matching it at - # the start of the line. if self.stream.conn.options.password is not None and \ self.password_sent: self.stream.conn._fail_connection( diff --git a/tests/ssh_test.py b/tests/ssh_test.py index 6ac7d8bf..1dbbe787 100644 --- a/tests/ssh_test.py +++ b/tests/ssh_test.py @@ -58,6 +58,14 @@ class SshTest(testlib.DockerMixin, testlib.TestCase): expect = "%s: debug1: Reading configuration data" % (context.name,) self.assertTrue(expect in s) + def test_bash_permission_denied(self): + # issue #271: only match Permission Denied at start of line. + context = self.docker_ssh( + username='mitogen__permdenied', + password='permdenied_password', + ssh_debug_level=3, + ) + def test_stream_name(self): context = self.docker_ssh( username='mitogen__has_sudo', From 4524f03a48cfdb551bfcc418788976f691a89963 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 12 Mar 2019 04:33:47 +0000 Subject: [PATCH 110/383] issue #271: add mitogen__permdenied user to Docker image. --- tests/data/docker/mitogen__permdenied.profile | 4 ++++ tests/image_prep/_user_accounts.yml | 9 +++++++++ 2 files changed, 13 insertions(+) create mode 100644 tests/data/docker/mitogen__permdenied.profile diff --git a/tests/data/docker/mitogen__permdenied.profile b/tests/data/docker/mitogen__permdenied.profile new file mode 100644 index 00000000..4a2be07e --- /dev/null +++ b/tests/data/docker/mitogen__permdenied.profile @@ -0,0 +1,4 @@ + +mkdir -p bad +chmod 0 bad +cd bad diff --git a/tests/image_prep/_user_accounts.yml b/tests/image_prep/_user_accounts.yml index a5b63c13..5f1bf0dc 100644 --- a/tests/image_prep/_user_accounts.yml +++ b/tests/image_prep/_user_accounts.yml @@ -20,6 +20,7 @@ - readonly_homedir - require_tty - require_tty_pw_required + - permdenied - slow_user - webapp - sudo1 @@ -98,6 +99,14 @@ - bashrc - profile + - name: "Login throws permission denied errors (issue #271)" + copy: + dest: ~mitogen__permdenied/.{{item}} + src: ../data/docker/mitogen__permdenied.profile + with_items: + - bashrc + - profile + - name: Install pubkey for mitogen__has_sudo_pubkey block: - file: From baafc746fe8cebc2c6a79975f10ecaecd499044c Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 12 Mar 2019 04:34:09 +0000 Subject: [PATCH 111/383] tests: clean up old-style SSH exception catch --- tests/ssh_test.py | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/tests/ssh_test.py b/tests/ssh_test.py index 1dbbe787..273412e8 100644 --- a/tests/ssh_test.py +++ b/tests/ssh_test.py @@ -91,26 +91,20 @@ class SshTest(testlib.DockerMixin, testlib.TestCase): self.assertEquals(name, sudo.name) def test_password_required(self): - try: - context = self.docker_ssh( + e = self.assertRaises(mitogen.ssh.PasswordError, + lambda: self.docker_ssh( username='mitogen__has_sudo', ) - assert 0, 'exception not thrown' - except mitogen.ssh.PasswordError: - e = sys.exc_info()[1] - + ) self.assertEqual(e.args[0], mitogen.ssh.password_required_msg) def test_password_incorrect(self): - try: - context = self.docker_ssh( + e = self.assertRaises(mitogen.ssh.PasswordError, + lambda: self.docker_ssh( username='mitogen__has_sudo', password='badpw', ) - assert 0, 'exception 
not thrown' - except mitogen.ssh.PasswordError: - e = sys.exc_info()[1] - + ) self.assertEqual(e.args[0], mitogen.ssh.password_incorrect_msg) def test_password_specified(self): @@ -125,14 +119,11 @@ class SshTest(testlib.DockerMixin, testlib.TestCase): ) def test_pubkey_required(self): - try: - context = self.docker_ssh( + e = self.assertRaises(mitogen.ssh.PasswordError, + lambda: self.docker_ssh( username='mitogen__has_sudo_pubkey', ) - assert 0, 'exception not thrown' - except mitogen.ssh.PasswordError: - e = sys.exc_info()[1] - + ) self.assertEqual(e.args[0], mitogen.ssh.password_required_msg) def test_pubkey_specified(self): From 3a1125a7bdc11a17540dc9eb9a9afd2d39de46ca Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 14 Mar 2019 00:18:39 +0000 Subject: [PATCH 112/383] issue #363: add test. --- tests/data/stubs/stub-su.py | 10 ++++++++++ tests/su_test.py | 17 +++++++++++++++-- 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/tests/data/stubs/stub-su.py b/tests/data/stubs/stub-su.py index c32c91de..6782490c 100755 --- a/tests/data/stubs/stub-su.py +++ b/tests/data/stubs/stub-su.py @@ -4,6 +4,16 @@ import json import os import subprocess import sys +import time + +# #363: old input loop would fail to spot auth failure because of scheduling +# vs. su calling write() twice. +if 'DO_SLOW_AUTH_FAILURE' in os.environ: + os.write(2, 'su: ') + time.sleep(0.5) + os.write(2, 'incorrect password\n') + os._exit(1) + os.environ['ORIGINAL_ARGV'] = json.dumps(sys.argv) os.environ['THIS_IS_STUB_SU'] = '1' diff --git a/tests/su_test.py b/tests/su_test.py index 760dd50d..447b00fc 100644 --- a/tests/su_test.py +++ b/tests/su_test.py @@ -11,11 +11,11 @@ import testlib class ConstructorTest(testlib.RouterMixin, testlib.TestCase): - su_path = testlib.data_path('stubs/stub-su.py') + stub_su_path = testlib.data_path('stubs/stub-su.py') def run_su(self, **kwargs): context = self.router.su( - su_path=self.su_path, + su_path=self.stub_su_path, **kwargs ) argv = eval(context.call(os.getenv, 'ORIGINAL_ARGV')) @@ -28,6 +28,19 @@ class ConstructorTest(testlib.RouterMixin, testlib.TestCase): class SuTest(testlib.DockerMixin, testlib.TestCase): + stub_su_path = testlib.data_path('stubs/stub-su.py') + + def test_slow_auth_failure(self): + # #363: old input loop would fail to spot auth failure because of + # scheduling vs. su calling write() twice. + os.environ['DO_SLOW_AUTH_FAILURE'] = '1' + try: + self.assertRaises(mitogen.su.PasswordError, + lambda: self.router.su(su_path=self.stub_su_path) + ) + finally: + del os.environ['DO_SLOW_AUTH_FAILURE'] + def test_password_required(self): ssh = self.docker_ssh( username='mitogen__has_sudo', From 9c38093aa777126e224a0f67724bf94c04fe2f7c Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 14 Mar 2019 19:31:39 +0000 Subject: [PATCH 113/383] issue #482: tests: check for zombie process after test. --- tests/testlib.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/testlib.py b/tests/testlib.py index 4cfd1b1c..7328ce09 100644 --- a/tests/testlib.py +++ b/tests/testlib.py @@ -341,7 +341,20 @@ class TestCase(unittest2.TestCase): self, self._fd_count_before, get_fd_count(), ) + def _teardown_check_zombies(self): + try: + pid, status, ru = os.wait3(os.WNOHANG) + except OSError: + return # ECHILD + + if pid: + assert 0, "%s failed to reap subprocess %d (status %d)." % ( + self, pid, status + ) + assert 0, "%s leaked still-running subprocesses." 
% (self,) + def tearDown(self): + self._teardown_check_zombies() self._teardown_check_threads() self._teardown_check_fds() super(TestCase, self).tearDown() From cfe337b3c0a1c5b00c25c074adfe8ec772d964fb Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 17 Jul 2019 17:24:14 +0100 Subject: [PATCH 114/383] [stream-refactor] import incomplete create_child_test --- tests/create_child_test.py | 278 +++++++++++++++++++++++++++++++++++++ 1 file changed, 278 insertions(+) create mode 100644 tests/create_child_test.py diff --git a/tests/create_child_test.py b/tests/create_child_test.py new file mode 100644 index 00000000..a4fbe09c --- /dev/null +++ b/tests/create_child_test.py @@ -0,0 +1,278 @@ + +import fcntl +import os +import stat +import sys +import time +import tempfile + +import mock +import unittest2 + +import mitogen.parent + +import testlib + + +def run_fd_check(func, fd, mode, on_start=None): + tf = tempfile.NamedTemporaryFile() + args = [ + sys.executable, + testlib.data_path('fd_check.py'), + tf.name, + str(fd), + mode, + ] + + proc = func(args=args) + os = None + if on_start: + os = on_start(proc) + proc.proc.wait() + try: + return proc, eval(tf.read()), os + finally: + tf.close() + + +def close_proc(proc): + proc.receive_side.close() + proc.transmit_side.close() + if proc.diag_receive_side: + proc.diag_receive_side.close() + if proc.diag_transmit_side: + proc.diag_transmit_side.close() + + +def wait_read(side, n): + poller = mitogen.core.Poller() + try: + poller.start_receive(side.fd) + for _ in poller.poll(): + return side.read(n) + assert False + finally: + poller.close() + + +class StdinSockMixin(object): + def test_stdin(self): + proc, info, _ = run_fd_check(self.func, 0, 'read', + lambda proc: proc.transmit_side.write('TEST')) + st = os.fstat(proc.transmit_side.fd) + self.assertTrue(stat.S_ISSOCK(st.st_mode)) + self.assertEquals(st.st_dev, info['st_dev']) + self.assertEquals(st.st_mode, info['st_mode']) + flags = fcntl.fcntl(proc.transmit_side.fd, fcntl.F_GETFL) + self.assertTrue(flags & os.O_RDWR) + self.assertTrue(info['buf'], 'TEST') + self.assertTrue(info['flags'] & os.O_RDWR) + + +class StdoutSockMixin(object): + def test_stdout(self): + proc, info, buf = run_fd_check(self.func, 1, 'write', + lambda proc: wait_read(proc.receive_side, 4)) + st = os.fstat(proc.transmit_side.fd) + self.assertTrue(stat.S_ISSOCK(st.st_mode)) + self.assertEquals(st.st_dev, info['st_dev']) + self.assertEquals(st.st_mode, info['st_mode']) + flags = fcntl.fcntl(proc.receive_side.fd, fcntl.F_GETFL) + self.assertTrue(flags & os.O_RDWR) + self.assertTrue(buf, 'TEST') + self.assertTrue(info['flags'] & os.O_RDWR) + + +class CreateChildTest(StdinSockMixin, StdoutSockMixin, testlib.TestCase): + func = staticmethod(mitogen.parent.create_child) + + def test_stderr(self): + proc, info, _ = run_fd_check(self.func, 2, 'write') + st = os.fstat(sys.stderr.fileno()) + self.assertEquals(st.st_dev, info['st_dev']) + self.assertEquals(st.st_mode, info['st_mode']) + self.assertEquals(st.st_ino, info['st_ino']) + + +class MergedCreateChildTest(StdinSockMixin, StdoutSockMixin, + testlib.TestCase): + func = staticmethod(mitogen.parent.merged_create_child) + + def test_stderr(self): + proc, info, buf = run_fd_check(self.func, 2, 'write', + lambda proc: wait_read(proc.receive_side, 4)) + st = os.fstat(proc.transmit_side.fd) + self.assertTrue(stat.S_ISSOCK(st.st_mode)) + self.assertEquals(st.st_dev, info['st_dev']) + self.assertEquals(st.st_mode, info['st_mode']) + flags = fcntl.fcntl(proc.receive_side.fd, fcntl.F_GETFL) 
+ self.assertTrue(flags & os.O_RDWR) + self.assertTrue(buf, 'TEST') + self.assertTrue(info['flags'] & os.O_RDWR) + + +class StderrCreateChildTest(StdinSockMixin, StdoutSockMixin, + testlib.TestCase): + func = staticmethod(mitogen.parent.stderr_create_child) + + def test_stderr(self): + proc, info, buf = run_fd_check(self.func, 2, 'write', + lambda proc: wait_read(proc.diag_receive_side, 4)) + st = os.fstat(proc.diag_receive_side.fd) + self.assertTrue(stat.S_ISFIFO(st.st_mode)) + self.assertEquals(st.st_dev, info['st_dev']) + self.assertEquals(st.st_mode, info['st_mode']) + flags = fcntl.fcntl(proc.diag_receive_side.fd, fcntl.F_GETFL) + self.assertFalse(flags & os.O_WRONLY) + self.assertFalse(flags & os.O_RDWR) + self.assertTrue(buf, 'TEST') + self.assertTrue(info['flags'] & os.O_WRONLY) + + +class TtyCreateChildTest(testlib.TestCase): + func = staticmethod(mitogen.parent.tty_create_child) + + def test_stdin(self): + proc, info, _ = run_fd_check(self.func, 0, 'read', + lambda proc: proc.transmit_side.write('TEST')) + st = os.fstat(proc.transmit_side.fd) + self.assertTrue(stat.S_ISCHR(st.st_mode)) + self.assertTrue(stat.S_ISCHR(info['st_mode'])) + + self.assertTrue(isinstance(info['ttyname'], + mitogen.core.UnicodeType)) + os.ttyname(proc.transmit_side.fd) # crashes if wrong + + flags = fcntl.fcntl(proc.transmit_side.fd, fcntl.F_GETFL) + self.assertTrue(flags & os.O_RDWR) + self.assertTrue(info['flags'] & os.O_RDWR) + + self.assertNotEquals(st.st_dev, info['st_dev']) + self.assertTrue(info['buf'], 'TEST') + + def test_stdout(self): + proc, info, buf = run_fd_check(self.func, 1, 'write', + lambda proc: wait_read(proc.receive_side, 4)) + + st = os.fstat(proc.receive_side.fd) + self.assertTrue(stat.S_ISCHR(st.st_mode)) + self.assertTrue(stat.S_ISCHR(info['st_mode'])) + + self.assertTrue(isinstance(info['ttyname'], + mitogen.core.UnicodeType)) + os.ttyname(proc.transmit_side.fd) # crashes if wrong + + flags = fcntl.fcntl(proc.receive_side.fd, fcntl.F_GETFL) + self.assertTrue(flags & os.O_RDWR) + self.assertTrue(info['flags'] & os.O_RDWR) + + self.assertNotEquals(st.st_dev, info['st_dev']) + self.assertTrue(flags & os.O_RDWR) + self.assertTrue(buf, 'TEST') + + def test_stderr(self): + proc, info, buf = run_fd_check(self.func, 2, 'write', + lambda proc: wait_read(proc.receive_side, 4)) + + st = os.fstat(proc.receive_side.fd) + self.assertTrue(stat.S_ISCHR(st.st_mode)) + self.assertTrue(stat.S_ISCHR(info['st_mode'])) + + self.assertTrue(isinstance(info['ttyname'], + mitogen.core.UnicodeType)) + os.ttyname(proc.transmit_side.fd) # crashes if wrong + + flags = fcntl.fcntl(proc.receive_side.fd, fcntl.F_GETFL) + self.assertTrue(flags & os.O_RDWR) + self.assertTrue(info['flags'] & os.O_RDWR) + + self.assertNotEquals(st.st_dev, info['st_dev']) + self.assertTrue(flags & os.O_RDWR) + self.assertTrue(buf, 'TEST') + + def test_dev_tty_open_succeeds(self): + # In the early days of UNIX, a process that lacked a controlling TTY + # would acquire one simply by opening an existing TTY. Linux and OS X + # continue to follow this behaviour, however at least FreeBSD moved to + # requiring an explicit ioctl(). Linux supports it, but we don't yet + # use it there and anyway the behaviour will never change, so no point + # in fixing things that aren't broken. Below we test that + # getpass-loving apps like sudo and ssh get our slave PTY when they + # attempt to open /dev/tty, which is what they both do on attempting to + # read a password. 
+ tf = tempfile.NamedTemporaryFile() + try: + proc = self.func([ + 'bash', '-c', 'exec 2>%s; echo hi > /dev/tty' % (tf.name,) + ]) + deadline = time.time() + 5.0 + mitogen.core.set_block(proc.receive_side.fd) + self.assertEquals(mitogen.core.b('hi\n'), proc.receive_side.read()) + waited_pid, status = os.waitpid(proc.pid, 0) + self.assertEquals(proc.pid, waited_pid) + self.assertEquals(0, status) + self.assertEquals(mitogen.core.b(''), tf.read()) + proc.receive_side.close() + finally: + tf.close() + + +class StderrDiagTtyMixin(object): + def test_stderr(self): + proc, info, buf = run_fd_check(self.func, 2, 'write', + lambda proc: wait_read(proc.diag_receive_side, 4)) + + st = os.fstat(proc.diag_receive_side.fd) + self.assertTrue(stat.S_ISCHR(st.st_mode)) + self.assertTrue(stat.S_ISCHR(info['st_mode'])) + + self.assertTrue(isinstance(info['ttyname'], + mitogen.core.UnicodeType)) + os.ttyname(proc.diag_transmit_side.fd) # crashes if wrong + + flags = fcntl.fcntl(proc.diag_receive_side.fd, fcntl.F_GETFL) + self.assertTrue(flags & os.O_RDWR) + self.assertTrue(info['flags'] & os.O_RDWR) + + self.assertNotEquals(st.st_dev, info['st_dev']) + self.assertTrue(flags & os.O_RDWR) + self.assertTrue(buf, 'TEST') + + +class HybridTtyCreateChildTest(StdinSockMixin, StdoutSockMixin, + StderrDiagTtyMixin, testlib.TestCase): + func = staticmethod(mitogen.parent.hybrid_tty_create_child) + + +class SelinuxHybridTtyCreateChildTest(StderrDiagTtyMixin, testlib.TestCase): + func = staticmethod(mitogen.parent.selinux_hybrid_tty_create_child) + + def test_stdin(self): + proc, info, buf = run_fd_check(self.func, 0, 'read', + lambda proc: proc.transmit_side.write('TEST')) + st = os.fstat(proc.transmit_side.fd) + self.assertTrue(stat.S_ISFIFO(st.st_mode)) + self.assertEquals(st.st_dev, info['st_dev']) + self.assertEquals(st.st_mode, info['st_mode']) + flags = fcntl.fcntl(proc.transmit_side.fd, fcntl.F_GETFL) + self.assertTrue(flags & os.O_WRONLY) + self.assertTrue(buf, 'TEST') + self.assertFalse(info['flags'] & os.O_WRONLY) + self.assertFalse(info['flags'] & os.O_RDWR) + + def test_stdout(self): + proc, info, buf = run_fd_check(self.func, 1, 'write', + lambda proc: wait_read(proc.receive_side, 4)) + st = os.fstat(proc.receive_side.fd) + self.assertTrue(stat.S_ISFIFO(st.st_mode)) + self.assertEquals(st.st_dev, info['st_dev']) + self.assertEquals(st.st_mode, info['st_mode']) + flags = fcntl.fcntl(proc.receive_side.fd, fcntl.F_GETFL) + self.assertFalse(flags & os.O_WRONLY) + self.assertFalse(flags & os.O_RDWR) + self.assertTrue(info['flags'] & os.O_WRONLY) + self.assertTrue(buf, 'TEST') + + +if __name__ == '__main__': + unittest2.main() From f039c81bb019a69e4809c7f27e3a816eeb5b6c71 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 21 Jul 2019 00:42:43 +0100 Subject: [PATCH 115/383] [stream-refactor] rename Process attrs, fix up more create_child_test --- mitogen/doas.py | 4 +- mitogen/fakessh.py | 14 +-- mitogen/parent.py | 61 ++++++------ tests/create_child_test.py | 199 +++++++++++++++++++------------------ tests/parent_test.py | 6 +- 5 files changed, 145 insertions(+), 139 deletions(-) diff --git a/mitogen/doas.py b/mitogen/doas.py index fc37325b..f3bf4c90 100644 --- a/mitogen/doas.py +++ b/mitogen/doas.py @@ -133,8 +133,8 @@ class Connection(mitogen.parent.Connection): def _get_name(self): return u'doas.' 
+ self.options.username - def diag_stream_factory(self): - stream = super(Connection, self).diag_stream_factory() + def stderr_stream_factory(self): + stream = super(Connection, self).stderr_stream_factory() stream.protocol.setup_patterns(self) return stream diff --git a/mitogen/fakessh.py b/mitogen/fakessh.py index f56e8838..e62cf84a 100644 --- a/mitogen/fakessh.py +++ b/mitogen/fakessh.py @@ -171,15 +171,15 @@ class Process(object): Manages the lifetime and pipe connections of the SSH command running in the slave. """ - def __init__(self, router, stdin_fp, stdout_fp, proc=None): + def __init__(self, router, stdin, stdout, proc=None): self.router = router - self.stdin_fp = stdin_fp - self.stdout_fp = stdout_fp + self.stdin = stdin + self.stdout = stdout self.proc = proc self.control_handle = router.add_handler(self._on_control) self.stdin_handle = router.add_handler(self._on_stdin) self.pump = IoPump.build_stream(router.broker) - self.pump.accept(stdin_fp, stdout_fp) + self.pump.accept(stdin, stdout) self.stdin = None self.control = None self.wake_event = threading.Event() @@ -192,7 +192,7 @@ class Process(object): pmon.add(proc.pid, self._on_proc_exit) def __repr__(self): - return 'Process(%r, %r)' % (self.stdin_fp, self.stdout_fp) + return 'Process(%r, %r)' % (self.stdin, self.stdout) def _on_proc_exit(self, status): LOG.debug('%r._on_proc_exit(%r)', self, status) @@ -355,8 +355,8 @@ def _fakessh_main(dest_context_id, econtext): control_handle, stdin_handle) process = Process(econtext.router, - stdin_fp=os.fdopen(1, 'w+b', 0), - stdout_fp=os.fdopen(0, 'r+b', 0)) + stdin=os.fdopen(1, 'w+b', 0), + stdout=os.fdopen(0, 'r+b', 0)) process.start_master( stdin=mitogen.core.Sender(dest, stdin_handle), control=mitogen.core.Sender(dest, control_handle), diff --git a/mitogen/parent.py b/mitogen/parent.py index 91426881..c39be011 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -341,7 +341,7 @@ def create_child(args, merge_stdio=False, stderr_pipe=False, preexec_fn=None): LOG.debug('create_child() child %d fd %d, parent %d, cmd: %s', proc.pid, parentfp.fileno(), os.getpid(), Argv(args)) - return PopenProcess(proc, stdio_fp=parentfp, stderr_fp=stderr_r) + return PopenProcess(proc, stdin=parentfp, stdout=parentfp, stderr=stderr_r) def _acquire_controlling_tty(): @@ -455,7 +455,7 @@ def tty_create_child(args): slave_fp.close() LOG.debug('tty_create_child() child %d fd %d, parent %d, cmd: %s', proc.pid, master_fp.fileno(), os.getpid(), Argv(args)) - return PopenProcess(proc, stdio_fp=master_fp) + return PopenProcess(proc, stdin=master_fp, stdout=master_fp) def hybrid_tty_create_child(args): @@ -495,7 +495,7 @@ def hybrid_tty_create_child(args): childfp.close() LOG.debug('hybrid_tty_create_child() pid=%d stdio=%d, tty=%d, cmd: %s', proc.pid, parentfp.fileno(), master_fp.fileno(), Argv(args)) - return PopenProcess(proc, stdio_fp=parentfp, stderr_fp=master_fp) + return PopenProcess(proc, stdin=parentfp, stdout=parentfp, stderr=master_fp) class Timer(object): @@ -1207,11 +1207,11 @@ class Connection(object): #: :class:`mitogen.core.Stream` stream = None - #: If :attr:`create_child` provides a stderr_fp, referencing either a plain - #: pipe or the controlling TTY, this references the corresponding + #: If `proc.stderr` is set, referencing either a plain pipe or the + #: controlling TTY, this references the corresponding #: :class:`LogProtocol`'s stream, allowing it to be disconnected when this #: stream is disconnected. 
- diag_stream = None + stderr_stream = None #: Function with the semantics of :func:`create_child` used to create the #: child process. @@ -1400,7 +1400,7 @@ class Connection(object): if self.exception is None: self._adorn_eof_error(exc) self.exception = exc - for stream in self.stream, self.diag_stream: + for stream in self.stream, self.stderr_stream: if stream and not stream.receive_side.closed: stream.on_disconnect(self._router.broker) self._complete_connection() @@ -1419,8 +1419,8 @@ class Connection(object): eof_error_msg = 'EOF on stream; last 100 lines received:\n' def on_stream_disconnect(self): - if self.diag_stream is not None: - self.diag_stream.on_disconnect(self._router.broker) + if self.stderr_stream is not None: + self.stderr_stream.on_disconnect(self._router.broker) if not self.timer.cancelled: self.timer.cancel() self._fail_connection(EofError( @@ -1447,14 +1447,14 @@ class Connection(object): broker=self._router.broker, ) - def diag_stream_factory(self): + def stderr_stream_factory(self): return self.diag_protocol_class.build_stream() def _setup_stream(self): self.stream = self.stream_factory() self.stream.conn = self self.stream.name = self.options.name or self._get_name() - self.stream.accept(self.proc.stdio_fp, self.proc.stdio_fp) + self.stream.accept(self.proc.stdout, self.proc.stdin) mitogen.core.listen(self.stream, 'shutdown', self.on_stream_shutdown) @@ -1462,12 +1462,12 @@ class Connection(object): self.on_stream_disconnect) self._router.broker.start_receive(self.stream) - def _setup_diag_stream(self): - self.diag_stream = self.diag_stream_factory() - self.diag_stream.conn = self - self.diag_stream.name = self.options.name or self._get_name() - self.diag_stream.accept(self.proc.stderr_fp, self.proc.stderr_fp) - self._router.broker.start_receive(self.diag_stream) + def _setup_stderr_stream(self): + self.stderr_stream = self.stderr_stream_factory() + self.stderr_stream.conn = self + self.stderr_stream.name = self.options.name or self._get_name() + self.stderr_stream.accept(self.proc.stderr, self.proc.stderr) + self._router.broker.start_receive(self.stderr_stream) def _async_connect(self): self._start_timer() @@ -1475,16 +1475,18 @@ class Connection(object): if self.context.name is None: self.context.name = self.stream.name self.proc.name = self.stream.name - if self.proc.stderr_fp: - self._setup_diag_stream() + if self.proc.stderr: + self._setup_stderr_stream() def connect(self, context): LOG.debug('%r.connect()', self) self.context = context self.proc = self.start_child() - LOG.debug('%r.connect(): pid:%r stdio:%r diag:%r', - self, self.proc.pid, self.proc.stdio_fp.fileno(), - self.proc.stderr_fp and self.proc.stderr_fp.fileno()) + LOG.debug('%r.connect(): pid:%r stdin:%r stdout:%r diag:%r', + self, self.proc.pid, + self.proc.stdin.fileno(), + self.proc.stdout.fileno(), + self.proc.stderr.fileno()) self.latch = mitogen.core.Latch() self._router.broker.defer(self._async_connect) @@ -2231,17 +2233,14 @@ class Router(mitogen.core.Router): class Process(object): - """ - Install a :data:`signal.SIGCHLD` handler that generates callbacks when a - specific child process has exitted. This class is obsolete, do not use. 
- """ _delays = [0.05, 0.15, 0.3, 1.0, 5.0, 10.0] name = None - def __init__(self, pid, stdio_fp, stderr_fp=None): + def __init__(self, pid, stdin, stdout, stderr=None): self.pid = pid - self.stdio_fp = stdio_fp - self.stderr_fp = stderr_fp + self.stdin = stdin + self.stdout = stdout + self.stderr = stderr self._returncode = None self._reap_count = 0 @@ -2307,8 +2306,8 @@ class Process(object): class PopenProcess(Process): - def __init__(self, proc, stdio_fp, stderr_fp=None): - super(PopenProcess, self).__init__(proc.pid, stdio_fp, stderr_fp) + def __init__(self, proc, stdin, stdout, stderr=None): + super(PopenProcess, self).__init__(proc.pid, stdin, stdout, stderr) self.proc = proc def poll(self): diff --git a/tests/create_child_test.py b/tests/create_child_test.py index a4fbe09c..17dd55e7 100644 --- a/tests/create_child_test.py +++ b/tests/create_child_test.py @@ -10,6 +10,7 @@ import mock import unittest2 import mitogen.parent +from mitogen.core import b import testlib @@ -36,20 +37,18 @@ def run_fd_check(func, fd, mode, on_start=None): def close_proc(proc): - proc.receive_side.close() - proc.transmit_side.close() - if proc.diag_receive_side: - proc.diag_receive_side.close() - if proc.diag_transmit_side: - proc.diag_transmit_side.close() + proc.stdin.close() + proc.stdout.close() + if proc.stderr: + prco.stderr.close() -def wait_read(side, n): +def wait_read(fp, n): poller = mitogen.core.Poller() try: - poller.start_receive(side.fd) + poller.start_receive(fp.fileno()) for _ in poller.poll(): - return side.read(n) + return os.read(fp.fileno(), n) assert False finally: poller.close() @@ -58,12 +57,12 @@ def wait_read(side, n): class StdinSockMixin(object): def test_stdin(self): proc, info, _ = run_fd_check(self.func, 0, 'read', - lambda proc: proc.transmit_side.write('TEST')) - st = os.fstat(proc.transmit_side.fd) + lambda proc: proc.stdin.send(b('TEST'))) + st = os.fstat(proc.stdin.fileno()) self.assertTrue(stat.S_ISSOCK(st.st_mode)) self.assertEquals(st.st_dev, info['st_dev']) self.assertEquals(st.st_mode, info['st_mode']) - flags = fcntl.fcntl(proc.transmit_side.fd, fcntl.F_GETFL) + flags = fcntl.fcntl(proc.stdin.fileno(), fcntl.F_GETFL) self.assertTrue(flags & os.O_RDWR) self.assertTrue(info['buf'], 'TEST') self.assertTrue(info['flags'] & os.O_RDWR) @@ -72,12 +71,12 @@ class StdinSockMixin(object): class StdoutSockMixin(object): def test_stdout(self): proc, info, buf = run_fd_check(self.func, 1, 'write', - lambda proc: wait_read(proc.receive_side, 4)) - st = os.fstat(proc.transmit_side.fd) + lambda proc: wait_read(proc.stdout, 4)) + st = os.fstat(proc.stdout.fileno()) self.assertTrue(stat.S_ISSOCK(st.st_mode)) self.assertEquals(st.st_dev, info['st_dev']) self.assertEquals(st.st_mode, info['st_mode']) - flags = fcntl.fcntl(proc.receive_side.fd, fcntl.F_GETFL) + flags = fcntl.fcntl(proc.stdout.fileno(), fcntl.F_GETFL) self.assertTrue(flags & os.O_RDWR) self.assertTrue(buf, 'TEST') self.assertTrue(info['flags'] & os.O_RDWR) @@ -94,35 +93,42 @@ class CreateChildTest(StdinSockMixin, StdoutSockMixin, testlib.TestCase): self.assertEquals(st.st_ino, info['st_ino']) -class MergedCreateChildTest(StdinSockMixin, StdoutSockMixin, +class CreateChildMergedTest(StdinSockMixin, StdoutSockMixin, testlib.TestCase): - func = staticmethod(mitogen.parent.merged_create_child) + def func(self, *args, **kwargs): + return mitogen.parent.create_child( + *args, merge_stdio=True, **kwargs + ) def test_stderr(self): proc, info, buf = run_fd_check(self.func, 2, 'write', - lambda proc: wait_read(proc.receive_side, 
4)) - st = os.fstat(proc.transmit_side.fd) + lambda proc: wait_read(proc.stdout, 4)) + self.assertEquals(None, proc.stderr) + st = os.fstat(proc.stdout.fileno()) self.assertTrue(stat.S_ISSOCK(st.st_mode)) self.assertEquals(st.st_dev, info['st_dev']) self.assertEquals(st.st_mode, info['st_mode']) - flags = fcntl.fcntl(proc.receive_side.fd, fcntl.F_GETFL) + flags = fcntl.fcntl(proc.stdout.fileno(), fcntl.F_GETFL) self.assertTrue(flags & os.O_RDWR) self.assertTrue(buf, 'TEST') self.assertTrue(info['flags'] & os.O_RDWR) -class StderrCreateChildTest(StdinSockMixin, StdoutSockMixin, - testlib.TestCase): - func = staticmethod(mitogen.parent.stderr_create_child) +class CreateChildStderrPipeTest(StdinSockMixin, StdoutSockMixin, + testlib.TestCase): + def func(self, *args, **kwargs): + return mitogen.parent.create_child( + *args, stderr_pipe=True, **kwargs + ) def test_stderr(self): proc, info, buf = run_fd_check(self.func, 2, 'write', - lambda proc: wait_read(proc.diag_receive_side, 4)) - st = os.fstat(proc.diag_receive_side.fd) + lambda proc: wait_read(proc.stderr, 4)) + st = os.fstat(proc.stderr.fileno()) self.assertTrue(stat.S_ISFIFO(st.st_mode)) self.assertEquals(st.st_dev, info['st_dev']) self.assertEquals(st.st_mode, info['st_mode']) - flags = fcntl.fcntl(proc.diag_receive_side.fd, fcntl.F_GETFL) + flags = fcntl.fcntl(proc.stderr.fileno(), fcntl.F_GETFL) self.assertFalse(flags & os.O_WRONLY) self.assertFalse(flags & os.O_RDWR) self.assertTrue(buf, 'TEST') @@ -134,16 +140,16 @@ class TtyCreateChildTest(testlib.TestCase): def test_stdin(self): proc, info, _ = run_fd_check(self.func, 0, 'read', - lambda proc: proc.transmit_side.write('TEST')) - st = os.fstat(proc.transmit_side.fd) + lambda proc: proc.stdin.write(b('TEST'))) + st = os.fstat(proc.stdin.fileno()) self.assertTrue(stat.S_ISCHR(st.st_mode)) self.assertTrue(stat.S_ISCHR(info['st_mode'])) self.assertTrue(isinstance(info['ttyname'], mitogen.core.UnicodeType)) - os.ttyname(proc.transmit_side.fd) # crashes if wrong + os.ttyname(proc.stdin.fileno()) # crashes if not TTY - flags = fcntl.fcntl(proc.transmit_side.fd, fcntl.F_GETFL) + flags = fcntl.fcntl(proc.stdin.fileno(), fcntl.F_GETFL) self.assertTrue(flags & os.O_RDWR) self.assertTrue(info['flags'] & os.O_RDWR) @@ -152,17 +158,17 @@ class TtyCreateChildTest(testlib.TestCase): def test_stdout(self): proc, info, buf = run_fd_check(self.func, 1, 'write', - lambda proc: wait_read(proc.receive_side, 4)) + lambda proc: wait_read(proc.stdout, 4)) - st = os.fstat(proc.receive_side.fd) + st = os.fstat(proc.stdout.fileno()) self.assertTrue(stat.S_ISCHR(st.st_mode)) self.assertTrue(stat.S_ISCHR(info['st_mode'])) self.assertTrue(isinstance(info['ttyname'], mitogen.core.UnicodeType)) - os.ttyname(proc.transmit_side.fd) # crashes if wrong + os.ttyname(proc.stdout.fileno()) # crashes if wrong - flags = fcntl.fcntl(proc.receive_side.fd, fcntl.F_GETFL) + flags = fcntl.fcntl(proc.stdout.fileno(), fcntl.F_GETFL) self.assertTrue(flags & os.O_RDWR) self.assertTrue(info['flags'] & os.O_RDWR) @@ -172,17 +178,17 @@ class TtyCreateChildTest(testlib.TestCase): def test_stderr(self): proc, info, buf = run_fd_check(self.func, 2, 'write', - lambda proc: wait_read(proc.receive_side, 4)) + lambda proc: wait_read(proc.stdout, 4)) - st = os.fstat(proc.receive_side.fd) + st = os.fstat(proc.stdout.fileno()) self.assertTrue(stat.S_ISCHR(st.st_mode)) self.assertTrue(stat.S_ISCHR(info['st_mode'])) self.assertTrue(isinstance(info['ttyname'], mitogen.core.UnicodeType)) - os.ttyname(proc.transmit_side.fd) # crashes if wrong + 
os.ttyname(proc.stdin.fileno()) # crashes if not TTY - flags = fcntl.fcntl(proc.receive_side.fd, fcntl.F_GETFL) + flags = fcntl.fcntl(proc.stdout.fileno(), fcntl.F_GETFL) self.assertTrue(flags & os.O_RDWR) self.assertTrue(info['flags'] & os.O_RDWR) @@ -206,72 +212,73 @@ class TtyCreateChildTest(testlib.TestCase): 'bash', '-c', 'exec 2>%s; echo hi > /dev/tty' % (tf.name,) ]) deadline = time.time() + 5.0 - mitogen.core.set_block(proc.receive_side.fd) - self.assertEquals(mitogen.core.b('hi\n'), proc.receive_side.read()) + self.assertEquals(mitogen.core.b('hi\n'), wait_read(proc.stdout, 3)) waited_pid, status = os.waitpid(proc.pid, 0) self.assertEquals(proc.pid, waited_pid) self.assertEquals(0, status) self.assertEquals(mitogen.core.b(''), tf.read()) - proc.receive_side.close() + proc.stdout.close() finally: tf.close() -class StderrDiagTtyMixin(object): - def test_stderr(self): - proc, info, buf = run_fd_check(self.func, 2, 'write', - lambda proc: wait_read(proc.diag_receive_side, 4)) - - st = os.fstat(proc.diag_receive_side.fd) - self.assertTrue(stat.S_ISCHR(st.st_mode)) - self.assertTrue(stat.S_ISCHR(info['st_mode'])) - - self.assertTrue(isinstance(info['ttyname'], - mitogen.core.UnicodeType)) - os.ttyname(proc.diag_transmit_side.fd) # crashes if wrong - - flags = fcntl.fcntl(proc.diag_receive_side.fd, fcntl.F_GETFL) - self.assertTrue(flags & os.O_RDWR) - self.assertTrue(info['flags'] & os.O_RDWR) - - self.assertNotEquals(st.st_dev, info['st_dev']) - self.assertTrue(flags & os.O_RDWR) - self.assertTrue(buf, 'TEST') - - -class HybridTtyCreateChildTest(StdinSockMixin, StdoutSockMixin, - StderrDiagTtyMixin, testlib.TestCase): - func = staticmethod(mitogen.parent.hybrid_tty_create_child) - - -class SelinuxHybridTtyCreateChildTest(StderrDiagTtyMixin, testlib.TestCase): - func = staticmethod(mitogen.parent.selinux_hybrid_tty_create_child) - - def test_stdin(self): - proc, info, buf = run_fd_check(self.func, 0, 'read', - lambda proc: proc.transmit_side.write('TEST')) - st = os.fstat(proc.transmit_side.fd) - self.assertTrue(stat.S_ISFIFO(st.st_mode)) - self.assertEquals(st.st_dev, info['st_dev']) - self.assertEquals(st.st_mode, info['st_mode']) - flags = fcntl.fcntl(proc.transmit_side.fd, fcntl.F_GETFL) - self.assertTrue(flags & os.O_WRONLY) - self.assertTrue(buf, 'TEST') - self.assertFalse(info['flags'] & os.O_WRONLY) - self.assertFalse(info['flags'] & os.O_RDWR) - - def test_stdout(self): - proc, info, buf = run_fd_check(self.func, 1, 'write', - lambda proc: wait_read(proc.receive_side, 4)) - st = os.fstat(proc.receive_side.fd) - self.assertTrue(stat.S_ISFIFO(st.st_mode)) - self.assertEquals(st.st_dev, info['st_dev']) - self.assertEquals(st.st_mode, info['st_mode']) - flags = fcntl.fcntl(proc.receive_side.fd, fcntl.F_GETFL) - self.assertFalse(flags & os.O_WRONLY) - self.assertFalse(flags & os.O_RDWR) - self.assertTrue(info['flags'] & os.O_WRONLY) - self.assertTrue(buf, 'TEST') +if 0: + + class StderrDiagTtyMixin(object): + def test_stderr(self): + proc, info, buf = run_fd_check(self.func, 2, 'write', + lambda proc: wait_read(proc.diag_receive_side, 4)) + + st = os.fstat(proc.diag_receive_side.fd) + self.assertTrue(stat.S_ISCHR(st.st_mode)) + self.assertTrue(stat.S_ISCHR(info['st_mode'])) + + self.assertTrue(isinstance(info['ttyname'], + mitogen.core.UnicodeType)) + os.ttyname(proc.diag_transmit_side.fd) # crashes if wrong + + flags = fcntl.fcntl(proc.diag_receive_side.fd, fcntl.F_GETFL) + self.assertTrue(flags & os.O_RDWR) + self.assertTrue(info['flags'] & os.O_RDWR) + + 
self.assertNotEquals(st.st_dev, info['st_dev']) + self.assertTrue(flags & os.O_RDWR) + self.assertTrue(buf, 'TEST') + + + class HybridTtyCreateChildTest(StdinSockMixin, StdoutSockMixin, + StderrDiagTtyMixin, testlib.TestCase): + func = staticmethod(mitogen.parent.hybrid_tty_create_child) + + + class SelinuxHybridTtyCreateChildTest(StderrDiagTtyMixin, testlib.TestCase): + func = staticmethod(mitogen.parent.selinux_hybrid_tty_create_child) + + def test_stdin(self): + proc, info, buf = run_fd_check(self.func, 0, 'read', + lambda proc: proc.transmit_side.write('TEST')) + st = os.fstat(proc.transmit_side.fd) + self.assertTrue(stat.S_ISFIFO(st.st_mode)) + self.assertEquals(st.st_dev, info['st_dev']) + self.assertEquals(st.st_mode, info['st_mode']) + flags = fcntl.fcntl(proc.transmit_side.fd, fcntl.F_GETFL) + self.assertTrue(flags & os.O_WRONLY) + self.assertTrue(buf, 'TEST') + self.assertFalse(info['flags'] & os.O_WRONLY) + self.assertFalse(info['flags'] & os.O_RDWR) + + def test_stdout(self): + proc, info, buf = run_fd_check(self.func, 1, 'write', + lambda proc: wait_read(proc.receive_side, 4)) + st = os.fstat(proc.receive_side.fd) + self.assertTrue(stat.S_ISFIFO(st.st_mode)) + self.assertEquals(st.st_dev, info['st_dev']) + self.assertEquals(st.st_mode, info['st_mode']) + flags = fcntl.fcntl(proc.receive_side.fd, fcntl.F_GETFL) + self.assertFalse(flags & os.O_WRONLY) + self.assertFalse(flags & os.O_RDWR) + self.assertTrue(info['flags'] & os.O_WRONLY) + self.assertTrue(buf, 'TEST') if __name__ == '__main__': diff --git a/tests/parent_test.py b/tests/parent_test.py index 191d9f88..1accf222 100644 --- a/tests/parent_test.py +++ b/tests/parent_test.py @@ -225,13 +225,13 @@ class TtyCreateChildTest(testlib.TestCase): 'bash', '-c', 'exec 2>%s; echo hi > /dev/tty' % (tf.name,) ]) deadline = time.time() + 5.0 - mitogen.core.set_block(proc.stdio_fp.fileno()) - self.assertEquals(mitogen.core.b('hi\n'), proc.stdio_fp.read()) + mitogen.core.set_block(proc.stdin.fileno()) + self.assertEquals(mitogen.core.b('hi\n'), proc.stdin.read()) waited_pid, status = os.waitpid(proc.pid, 0) self.assertEquals(proc.pid, waited_pid) self.assertEquals(0, status) self.assertEquals(mitogen.core.b(''), tf.read()) - proc.stdio_fp.close() + proc.stdout.close() finally: tf.close() From f43e24e97000963388c308762769e8682c842a79 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 21 Jul 2019 03:13:13 +0100 Subject: [PATCH 116/383] [stream-refactor] repaired rest of create_child_test. 
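As a rough standalone sketch of what the repaired assertions check (the
child command below is an arbitrary stand-in and is not part of the test
suite): a hybrid child keeps socketpair stdio while exposing the pty
master as a plain stderr file object, so the two kinds of descriptor
stat differently:

    import os
    import stat

    import mitogen.parent

    # 'true' exits immediately; any argv works for inspecting the fds.
    proc = mitogen.parent.hybrid_tty_create_child(args=['true'])
    try:
        # stdin/stdout share one end of a socketpair.
        assert stat.S_ISSOCK(os.fstat(proc.stdout.fileno()).st_mode)
        # stderr is the pty master, i.e. a character device.
        assert stat.S_ISCHR(os.fstat(proc.stderr.fileno()).st_mode)
    finally:
        proc.stdin.close()
        proc.stdout.close()
        proc.stderr.close()
        proc.proc.wait()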
--- tests/create_child_test.py | 41 +++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/tests/create_child_test.py b/tests/create_child_test.py index 17dd55e7..0e8855f7 100644 --- a/tests/create_child_test.py +++ b/tests/create_child_test.py @@ -222,35 +222,36 @@ class TtyCreateChildTest(testlib.TestCase): tf.close() -if 0: +class StderrDiagTtyMixin(object): + def test_stderr(self): + proc, info, buf = run_fd_check(self.func, 2, 'write', + lambda proc: wait_read(proc.stderr, 4)) - class StderrDiagTtyMixin(object): - def test_stderr(self): - proc, info, buf = run_fd_check(self.func, 2, 'write', - lambda proc: wait_read(proc.diag_receive_side, 4)) + st = os.fstat(proc.stderr.fileno()) + self.assertTrue(stat.S_ISCHR(st.st_mode)) + self.assertTrue(stat.S_ISCHR(info['st_mode'])) - st = os.fstat(proc.diag_receive_side.fd) - self.assertTrue(stat.S_ISCHR(st.st_mode)) - self.assertTrue(stat.S_ISCHR(info['st_mode'])) + self.assertTrue(isinstance(info['ttyname'], + mitogen.core.UnicodeType)) + os.ttyname(proc.stderr.fileno()) # crashes if wrong - self.assertTrue(isinstance(info['ttyname'], - mitogen.core.UnicodeType)) - os.ttyname(proc.diag_transmit_side.fd) # crashes if wrong + flags = fcntl.fcntl(proc.stderr.fileno(), fcntl.F_GETFL) + self.assertTrue(flags & os.O_RDWR) + self.assertTrue(info['flags'] & os.O_RDWR) - flags = fcntl.fcntl(proc.diag_receive_side.fd, fcntl.F_GETFL) - self.assertTrue(flags & os.O_RDWR) - self.assertTrue(info['flags'] & os.O_RDWR) + self.assertNotEquals(st.st_dev, info['st_dev']) + self.assertTrue(flags & os.O_RDWR) + self.assertTrue(buf, 'TEST') - self.assertNotEquals(st.st_dev, info['st_dev']) - self.assertTrue(flags & os.O_RDWR) - self.assertTrue(buf, 'TEST') +class HybridTtyCreateChildTest(StdinSockMixin, StdoutSockMixin, + StderrDiagTtyMixin, testlib.TestCase): + func = staticmethod(mitogen.parent.hybrid_tty_create_child) - class HybridTtyCreateChildTest(StdinSockMixin, StdoutSockMixin, - StderrDiagTtyMixin, testlib.TestCase): - func = staticmethod(mitogen.parent.hybrid_tty_create_child) +if 0: + # issue #410 class SelinuxHybridTtyCreateChildTest(StderrDiagTtyMixin, testlib.TestCase): func = staticmethod(mitogen.parent.selinux_hybrid_tty_create_child) From 29c63f56aeeb079033ccadbe4ac75bb5d37a9eb2 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 21 Jul 2019 20:38:32 +0100 Subject: [PATCH 117/383] import release-notes script. 
--- scripts/release-notes.py | 48 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 scripts/release-notes.py diff --git a/scripts/release-notes.py b/scripts/release-notes.py new file mode 100644 index 00000000..1444d7a3 --- /dev/null +++ b/scripts/release-notes.py @@ -0,0 +1,48 @@ +# coding=UTF-8 + +# Generate the fragment used to make email release announcements +# usage: release-notes.py 0.2.6 + +import os +import sys +import urllib +import lxml.html + +import subprocess + + +response = urllib.urlopen('https://mitogen.networkgenomics.com/changelog.html') +tree = lxml.html.parse(response) + +prefix = 'v' + sys.argv[1].replace('.', '-') + +for elem in tree.getroot().cssselect('div.section[id]'): + if elem.attrib['id'].startswith(prefix): + break +else: + print('cant find') + + + +for child in tree.getroot().cssselect('body > *'): + child.getparent().remove(child) + +body, = tree.getroot().cssselect('body') +body.append(elem) + +proc = subprocess.Popen( + args=['w3m', '-T', 'text/html', '-dump', '-cols', '72'], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, +) + +stdout, _ = proc.communicate(input=(lxml.html.tostring(tree))) +stdout = stdout.decode('UTF-8') +stdout = stdout.translate({ + ord(u'¶'): None, + ord(u'•'): ord(u'*'), + ord(u'’'): ord(u"'"), + ord(u'“'): ord(u'"'), + ord(u'”'): ord(u'"'), +}) +print(stdout) From 2e371c96b4813bbd5c92f9678b9476b045f5d40e Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 22 Jul 2019 18:01:52 +0100 Subject: [PATCH 118/383] Add extra/ to gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 55f37f29..df5e70f1 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ venvs/** MANIFEST build/ dist/ +extra/ docs/_build/ htmlcov/ *.egg-info From 8e9f47a2e95387258f0b24bb88ad1b8d08bc26f0 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 22 Jul 2019 18:03:03 +0100 Subject: [PATCH 119/383] Add tests/ansible/.*.pid to gitignore (for ansible_mitogen/process.py) --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index df5e70f1..aa75f691 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ MANIFEST build/ dist/ extra/ +tests/ansible/.*.pid docs/_build/ htmlcov/ *.egg-info From 4b0870aa6e4842e6839a2494de034348e0487202 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 22 Jul 2019 18:09:16 +0100 Subject: [PATCH 120/383] [stream-refactor] fix Process constructor invocation --- mitogen/fork.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mitogen/fork.py b/mitogen/fork.py index a247fd5b..2d82eb79 100644 --- a/mitogen/fork.py +++ b/mitogen/fork.py @@ -185,7 +185,7 @@ class Connection(mitogen.parent.Connection): pid = os.fork() if pid: childfp.close() - return Process(pid, parentfp) + return Process(pid, stdin=parentfp, stdout=parentfp) else: parentfp.close() self._wrap_child_main(childfp) From 1fb3852fa63f4fb374f74c3b8d13c7eef29f6882 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 22 Jul 2019 18:12:41 +0100 Subject: [PATCH 121/383] [stream-refactor] fix crash when no stderr present. 
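Minimal sketch of the case being guarded (the pipe and pid below are
stand-ins so the object can be built in isolation; only the attribute
shape matters): fork-style children carry no separate stderr, so the
connect() debug log must tolerate proc.stderr being None:

    import os

    import mitogen.parent

    r, w = os.pipe()
    stdin = os.fdopen(w, 'wb', 0)
    stdout = os.fdopen(r, 'rb', 0)

    # What mitogen.fork now constructs: stdin/stdout only, stderr=None.
    proc = mitogen.parent.Process(pid=0, stdin=stdin, stdout=stdout)
    assert proc.stderr is None

    # The guarded expression logs None instead of raising AttributeError.
    print(proc.stderr and proc.stderr.fileno())

    stdin.close()
    stdout.close()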
--- mitogen/parent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mitogen/parent.py b/mitogen/parent.py index c39be011..cd9810c6 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -1486,7 +1486,7 @@ class Connection(object): self, self.proc.pid, self.proc.stdin.fileno(), self.proc.stdout.fileno(), - self.proc.stderr.fileno()) + self.proc.stderr and self.proc.stderr.fileno()) self.latch = mitogen.core.Latch() self._router.broker.defer(self._async_connect) From 26b63337876aa150b577d25153fd6262bccbc840 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 22 Jul 2019 18:21:53 +0100 Subject: [PATCH 122/383] [stream-refactor] fix unix.Listener construction --- ansible_mitogen/process.py | 2 +- mitogen/unix.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index e4e61e8b..251acf30 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -308,7 +308,7 @@ class MuxProcess(object): self._setup_responder(self.router.responder) mitogen.core.listen(self.broker, 'shutdown', self.on_broker_shutdown) mitogen.core.listen(self.broker, 'exit', self.on_broker_exit) - self.listener = mitogen.unix.Listener( + self.listener = mitogen.unix.Listener.build_stream( router=self.router, path=self.unix_listener_path, backlog=C.DEFAULT_FORKS, diff --git a/mitogen/unix.py b/mitogen/unix.py index f7e56435..3434fab2 100644 --- a/mitogen/unix.py +++ b/mitogen/unix.py @@ -118,9 +118,9 @@ class Listener(mitogen.core.Protocol): raise def on_shutdown(self, broker): - broker.stop_receive(self) + broker.stop_receive(self.stream) self._unlink_socket() - self.receive_side.close() + self.stream.receive_side.close() def on_accept_client(self, sock): sock.setblocking(True) From 1069ca43d60d8f4e3a1dc8e0d4a270ee202989a9 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 22 Jul 2019 19:34:08 +0100 Subject: [PATCH 123/383] [stream-refactor] port mitogen.buildah, added to master since work began --- mitogen/buildah.py | 44 +++++++++++++++++++++---------------------- tests/buildah_test.py | 2 +- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/mitogen/buildah.py b/mitogen/buildah.py index eec415f3..f850234d 100644 --- a/mitogen/buildah.py +++ b/mitogen/buildah.py @@ -37,37 +37,37 @@ import mitogen.parent LOG = logging.getLogger(__name__) -class Stream(mitogen.parent.Stream): - child_is_immediate_subprocess = False - +class Options(mitogen.parent.Options): container = None username = None buildah_path = 'buildah' - # TODO: better way of capturing errors such as "No such container." - create_child_args = { - 'merge_stdio': True - } - - def construct(self, container=None, - buildah_path=None, username=None, - **kwargs): - assert container or image - super(Stream, self).construct(**kwargs) - if container: - self.container = container + def __init__(self, container=None, buildah_path=None, username=None, + **kwargs): + super(Options, self).__init__(**kwargs) + assert container is not None + self.container = container if buildah_path: self.buildah_path = buildah_path if username: self.username = username + +class Connection(mitogen.parent.Connection): + options_class = Options + child_is_immediate_subprocess = False + + # TODO: better way of capturing errors such as "No such container." + create_child_args = { + 'merge_stdio': True + } + def _get_name(self): - return u'buildah.' + self.container + return u'buildah.' 
+ self.options.container def get_boot_command(self): - args = [] - if self.username: - args += ['--user=' + self.username] - bits = [self.buildah_path, 'run'] + args + ['--', self.container] - - return bits + super(Stream, self).get_boot_command() + args = [self.options.buildah_path, 'run'] + if self.options.username: + args += ['--user=' + self.options.username] + args += ['--', self.options.container] + return args + super(Connection, self).get_boot_command() diff --git a/tests/buildah_test.py b/tests/buildah_test.py index dad2534f..874205cd 100644 --- a/tests/buildah_test.py +++ b/tests/buildah_test.py @@ -21,7 +21,7 @@ class ConstructorTest(testlib.RouterMixin, testlib.TestCase): self.assertEquals(argv[1], 'run') self.assertEquals(argv[2], '--') self.assertEquals(argv[3], 'container_name') - self.assertEquals(argv[4], stream.python_path) + self.assertEquals(argv[4], stream.conn.options.python_path) if __name__ == '__main__': From 869e04af10482ce998002ea5aa2b7f88a8e4ab71 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 22 Jul 2019 20:38:14 +0100 Subject: [PATCH 124/383] [stream-refactor] import fd_check.py used by create_child_test --- tests/data/fd_check.py | 56 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100755 tests/data/fd_check.py diff --git a/tests/data/fd_check.py b/tests/data/fd_check.py new file mode 100755 index 00000000..b933c9c7 --- /dev/null +++ b/tests/data/fd_check.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python + +import fcntl +import os +import sys + + +def ttyname(fd): + try: + t = os.ttyname(fd) + if hasattr(t, 'decode'): + t = t.decode() + return t + except OSError: + return None + + +def controlling_tty(): + try: + fp = open('/dev/tty') + try: + return ttyname(fp.fileno()) + finally: + fp.close() + except (IOError, OSError): + return None + + +fd = int(sys.argv[2]) +st = os.fstat(fd) + +if sys.argv[3] == 'write': + os.write(fd, u'TEST'.encode()) + buf = u'' +else: + buf = os.read(fd, 4).decode() + +open(sys.argv[1], 'w').write(repr({ + 'buf': buf, + 'flags': fcntl.fcntl(fd, fcntl.F_GETFL), + 'st_mode': st.st_mode, + 'st_dev': st.st_dev, + 'st_ino': st.st_ino, + 'ttyname': ttyname(fd), + 'controlling_tty': controlling_tty(), +})) + +open('/tmp/foo', 'w').write(repr({ + 'buf': buf, + 'flags': fcntl.fcntl(fd, fcntl.F_GETFL), + 'st_mode': st.st_mode, + 'st_dev': st.st_dev, + 'st_ino': st.st_ino, + 'ttyname': ttyname(fd), + 'controlling_tty': controlling_tty(), +})) From d411003b6407ad890da9581cc219b8388ef1dbdc Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 22 Jul 2019 21:06:21 +0100 Subject: [PATCH 125/383] [stream-refactor] dont doubly log last partial line --- mitogen/parent.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mitogen/parent.py b/mitogen/parent.py index cd9810c6..c5d534b2 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -1026,6 +1026,7 @@ class LineLoggingProtocolMixin(object): return mitogen.core.to_text(s) def on_line_received(self, line): + self.logged_partial = None self.logged_lines.append(line) self.logged_lines[:] = self.logged_lines[-100:] return super(LineLoggingProtocolMixin, self).on_line_received(line) From 8769c3ce240f482c00b36222309b909e9c2a1c99 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 22 Jul 2019 21:06:35 +0100 Subject: [PATCH 126/383] [stream-refactor] more readable log string format --- mitogen/parent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mitogen/parent.py b/mitogen/parent.py index c5d534b2..363d53d0 100644 --- 
a/mitogen/parent.py +++ b/mitogen/parent.py @@ -1483,7 +1483,7 @@ class Connection(object): LOG.debug('%r.connect()', self) self.context = context self.proc = self.start_child() - LOG.debug('%r.connect(): pid:%r stdin:%r stdout:%r diag:%r', + LOG.debug('%r.connect(): pid:%r stdin:%r stdout:%r stderr:%r', self, self.proc.pid, self.proc.stdin.fileno(), self.proc.stdout.fileno(), From 0ff5fb8fc4f60a42b9bf9e2be7e364d084016c5a Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 22 Jul 2019 21:06:53 +0100 Subject: [PATCH 127/383] [stream-refactor] fix su_test failure (issue #363) --- mitogen/su.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/mitogen/su.py b/mitogen/su.py index 59574f3f..5e9a237a 100644 --- a/mitogen/su.py +++ b/mitogen/su.py @@ -102,10 +102,9 @@ class SetupBootstrapProtocol(mitogen.parent.BootstrapProtocol): self.password_sent = True def _on_password_incorrect(self, line, match): - if self.password_sent: - self.stream.conn._fail_connection( - PasswordError(password_incorrect_msg) - ) + self.stream.conn._fail_connection( + PasswordError(password_incorrect_msg) + ) class Options(mitogen.parent.Options): From 2ce3383a01b0b974c3f9f6c7609a7f72ee4abf88 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 22 Jul 2019 21:32:22 +0100 Subject: [PATCH 128/383] [stream-refactor] make trusty our Travis dist. They updated to xenial recently, and it no longer supports Py2.6. --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index eae04cb0..f8bd4a14 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,5 @@ sudo: required +dist: trusty notifications: email: false From 7c4621a01094b90f2be1e0aa76e551504780ba6a Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 22 Jul 2019 21:35:44 +0100 Subject: [PATCH 129/383] [stream-refactor] make syntax 2.4 compatible --- mitogen/core.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mitogen/core.py b/mitogen/core.py index 6c6f1ec2..5fe01812 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -3217,7 +3217,8 @@ class Broker(object): fire(self, 'shutdown') self._broker_shutdown() - except Exception as e: + except Exception: + e = sys.exc_info()[1] LOG.exception('broker crashed') syslog.syslog(syslog.LOG_ERR, 'broker crashed: %s' % (e,)) syslog.closelog() # prevent test 'fd leak'. From 6e33de7cd269feb3bf5d587c3d2b9ef493f73133 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 23 Jul 2019 14:01:57 +0100 Subject: [PATCH 130/383] unix: ensure mitogen.context_id is reset when client disconnects To ensure a test process can successfully recreate an Ansible MuxProcess, reset fork-inherited globals during disconnection. There is basically no good place for this. Per the comments on #91, it would be far better if the context's identity was tied to its router, rather than some global variable. 
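Rough sketch of the sequence this makes safe, assuming a MuxProcess (or
any unix.Listener) is already bound at 'path'; the placeholder value
below is hypothetical:

    import mitogen
    import mitogen.unix

    path = '/tmp/mitogen_unix_12345.sock'   # hypothetical listener path

    router, context = mitogen.unix.connect(path)
    assert mitogen.context_id != 0          # identity assigned by the mux

    router.broker.shutdown()
    router.broker.join()                    # disconnect fires _cleanup()

    assert mitogen.context_id == 0          # back to fork-time defaults
    assert mitogen.parent_id is None        # safe to connect() again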
--- mitogen/unix.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/mitogen/unix.py b/mitogen/unix.py index 3434fab2..7be4a464 100644 --- a/mitogen/unix.py +++ b/mitogen/unix.py @@ -172,6 +172,7 @@ def _connect(path, broker, sock): stream.receive_side = side stream.name = u'unix_listener.%d' % (pid,) + mitogen.core.listen(stream, 'disconnect', _cleanup) mitogen.core.listen(router.broker, 'shutdown', lambda: router.disconnect_stream(stream)) @@ -179,6 +180,7 @@ def _connect(path, broker, sock): router.register(context, stream) return router, context + def connect(path, broker=None): LOG.debug('unix.connect(path=%r)', path) sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) @@ -187,3 +189,14 @@ def connect(path, broker=None): except: sock.close() raise + + +def _cleanup(): + """ + Reset mitogen.context_id and friends when our connection to the parent is + lost. Per comments on #91, these globals need to move to the Router so + fix-ups like this become unnecessary. + """ + mitogen.context_id = 0 + mitogen.parent_id = None + mitogen.parent_ids = [] From 300f8b2ff910a57e63b0d05f7332c29350623d15 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 23 Jul 2019 14:04:22 +0100 Subject: [PATCH 131/383] ansible: fixturize creation of MuxProcess This relies on the previous commit resetting global variables. Update clean_shutdown() to handle duplicate calls, due to tests repeatedly installing it. --- ansible_mitogen/process.py | 26 ++++++++--- tests/ansible/tests/connection_test.py | 61 ++++++++++++++------------ tests/testlib.py | 4 ++ 3 files changed, 58 insertions(+), 33 deletions(-) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index 251acf30..a8827cb1 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -79,8 +79,15 @@ def clean_shutdown(sock): MuxProcess, debug logs may appear on the user's terminal *after* the prompt has been printed. """ - sock.shutdown(socket.SHUT_WR) + try: + sock.shutdown(socket.SHUT_WR) + except socket.error: + # Already closed. This is possible when tests are running. + LOG.debug('clean_shutdown: ignoring duplicate call') + return + sock.recv(1) + sock.close() def getenv_int(key, default=0): @@ -154,8 +161,15 @@ class MuxProcess(object): #: forked WorkerProcesses to contact the MuxProcess unix_listener_path = None - #: Singleton. - _instance = None + @classmethod + def _reset(cls): + """ + Used to clean up in unit tests. 
+ """ + assert cls.worker_sock is not None + cls.worker_sock.close() + cls.worker_sock = None + os.waitpid(cls.worker_pid, 0) @classmethod def start(cls, _init_logging=True): @@ -178,7 +192,7 @@ class MuxProcess(object): mitogen.utils.setup_gil() cls.unix_listener_path = mitogen.unix.make_socket_path() cls.worker_sock, cls.child_sock = socket.socketpair() - atexit.register(lambda: clean_shutdown(cls.worker_sock)) + atexit.register(clean_shutdown, cls.worker_sock) mitogen.core.set_cloexec(cls.worker_sock.fileno()) mitogen.core.set_cloexec(cls.child_sock.fileno()) @@ -189,8 +203,8 @@ class MuxProcess(object): ansible_mitogen.logging.setup() cls.original_env = dict(os.environ) - cls.child_pid = os.fork() - if cls.child_pid: + cls.worker_pid = os.fork() + if cls.worker_pid: save_pid('controller') ansible_mitogen.logging.set_process_name('top') ansible_mitogen.affinity.policy.assign_controller() diff --git a/tests/ansible/tests/connection_test.py b/tests/ansible/tests/connection_test.py index 401cbe9e..d663ecc5 100644 --- a/tests/ansible/tests/connection_test.py +++ b/tests/ansible/tests/connection_test.py @@ -13,42 +13,29 @@ import ansible.errors import ansible.playbook.play_context import mitogen.core +import mitogen.utils + import ansible_mitogen.connection import ansible_mitogen.plugins.connection.mitogen_local import ansible_mitogen.process -import testlib - -LOGGER_NAME = ansible_mitogen.target.LOG.name - - -# TODO: fixtureize -import mitogen.utils -mitogen.utils.log_to_file() -ansible_mitogen.process.MuxProcess.start(_init_logging=False) - - -class OptionalIntTest(unittest2.TestCase): - func = staticmethod(ansible_mitogen.connection.optional_int) - - def test_already_int(self): - self.assertEquals(0, self.func(0)) - self.assertEquals(1, self.func(1)) - self.assertEquals(-1, self.func(-1)) +import testlib - def test_is_string(self): - self.assertEquals(0, self.func("0")) - self.assertEquals(1, self.func("1")) - self.assertEquals(-1, self.func("-1")) - def test_is_none(self): - self.assertEquals(None, self.func(None)) +class MuxProcessMixin(object): + @classmethod + def setUpClass(cls): + #mitogen.utils.log_to_file() + ansible_mitogen.process.MuxProcess.start(_init_logging=False) + super(MuxProcessMixin, cls).setUpClass() - def test_is_junk(self): - self.assertEquals(None, self.func({1:2})) + @classmethod + def tearDownClass(cls): + super(MuxProcessMixin, cls).tearDownClass() + ansible_mitogen.process.MuxProcess._reset() -class ConnectionMixin(object): +class ConnectionMixin(MuxProcessMixin): klass = ansible_mitogen.plugins.connection.mitogen_local.Connection def make_connection(self): @@ -70,6 +57,26 @@ class ConnectionMixin(object): super(ConnectionMixin, self).tearDown() +class OptionalIntTest(unittest2.TestCase): + func = staticmethod(ansible_mitogen.connection.optional_int) + + def test_already_int(self): + self.assertEquals(0, self.func(0)) + self.assertEquals(1, self.func(1)) + self.assertEquals(-1, self.func(-1)) + + def test_is_string(self): + self.assertEquals(0, self.func("0")) + self.assertEquals(1, self.func("1")) + self.assertEquals(-1, self.func("-1")) + + def test_is_none(self): + self.assertEquals(None, self.func(None)) + + def test_is_junk(self): + self.assertEquals(None, self.func({1:2})) + + class PutDataTest(ConnectionMixin, unittest2.TestCase): def test_out_path(self): path = tempfile.mktemp(prefix='mitotest') diff --git a/tests/testlib.py b/tests/testlib.py index 7328ce09..762d0c6a 100644 --- a/tests/testlib.py +++ b/tests/testlib.py @@ -351,6 +351,10 @@ class 
TestCase(unittest2.TestCase): assert 0, "%s failed to reap subprocess %d (status %d)." % ( self, pid, status ) + + print() + print('Children of unit test process:') + os.system('ps uww --ppid ' + str(os.getpid())) assert 0, "%s leaked still-running subprocesses." % (self,) def tearDown(self): From 1aceacf89eca333174edf64cb14343d26b43c84d Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 23 Jul 2019 14:07:00 +0100 Subject: [PATCH 132/383] [stream-refactor] replace old detach_popen() reference --- ansible_mitogen/affinity.py | 6 +++--- tests/ansible/tests/affinity_test.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ansible_mitogen/affinity.py b/ansible_mitogen/affinity.py index 09a6acee..94539e21 100644 --- a/ansible_mitogen/affinity.py +++ b/ansible_mitogen/affinity.py @@ -177,9 +177,9 @@ class FixedPolicy(Policy): cores, before reusing the second hyperthread of an existing core. A hook is installed that causes :meth:`reset` to run in the child of any - process created with :func:`mitogen.parent.detach_popen`, ensuring - CPU-intensive children like SSH are not forced to share the same core as - the (otherwise potentially very busy) parent. + process created with :func:`mitogen.parent.popen`, ensuring CPU-intensive + children like SSH are not forced to share the same core as the (otherwise + potentially very busy) parent. """ def __init__(self, cpu_count=None): #: For tests. diff --git a/tests/ansible/tests/affinity_test.py b/tests/ansible/tests/affinity_test.py index 8ee96085..641455bd 100644 --- a/tests/ansible/tests/affinity_test.py +++ b/tests/ansible/tests/affinity_test.py @@ -199,10 +199,10 @@ class LinuxPolicyTest(testlib.TestCase): self.policy._set_cpu(3) my_cpu = self._get_cpus() - pid = mitogen.parent.detach_popen( + proc = mitogen.parent.popen( args=['cp', '/proc/self/status', tf.name] ) - os.waitpid(pid, 0) + proc.wait() his_cpu = self._get_cpus(tf.name) self.assertNotEquals(my_cpu, his_cpu) From 93abbcaf7a833d50a4a7c3fe133747d3dd4f6152 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 23 Jul 2019 14:13:59 +0100 Subject: [PATCH 133/383] [stream-refactor] fix crash in runner/forking_active.yml --- mitogen/master.py | 2 +- mitogen/parent.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mitogen/master.py b/mitogen/master.py index b5365693..71c6085b 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -989,7 +989,7 @@ class ModuleResponder(object): def _send_forward_module(self, stream, context, fullname): if stream.protocol.remote_id != context.context_id: - stream.send( + stream.protocol._send( mitogen.core.Message( data=b('%s\x00%s' % (context.context_id, fullname)), handle=mitogen.core.FORWARD_MODULE, diff --git a/mitogen/parent.py b/mitogen/parent.py index 363d53d0..a5702ec6 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -2360,7 +2360,7 @@ class ModuleForwarder(object): self, fullname, context_id, stream.protocol.remote_id) self._send_module_and_related(stream, fullname) if stream.protocol.remote_id != context_id: - stream._send( + stream.protocol._send( mitogen.core.Message( data=msg.data, handle=mitogen.core.FORWARD_MODULE, From 1d2bfc28da20ad3abc423223634d742d570be1db Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 23 Jul 2019 14:17:03 +0100 Subject: [PATCH 134/383] [stream-refactor] fix crash in detach() / during async/multiple_items_loop.yml --- mitogen/core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mitogen/core.py b/mitogen/core.py index 5fe01812..6df6ac54 100644 
--- a/mitogen/core.py +++ b/mitogen/core.py @@ -3418,7 +3418,7 @@ class ExternalContext(object): self.parent.send_await(Message(handle=DETACHING)) LOG.info('Detaching from %r; parent is %s', stream, self.parent) for x in range(20): - pending = self.broker.defer_sync(lambda: stream.pending_bytes()) + pending = self.broker.defer_sync(stream.protocol.pending_bytes) if not pending: break time.sleep(0.05) From 4eecc080477160dddaae7bc9967a815c919fd66b Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 23 Jul 2019 15:29:15 +0100 Subject: [PATCH 135/383] [stream-refactor] merge stdout+stderr when reporting EofError Fixes sudo regression --- mitogen/parent.py | 36 +++++++++++++++++++++++++++--------- mitogen/sudo.py | 4 +--- 2 files changed, 28 insertions(+), 12 deletions(-) diff --git a/mitogen/parent.py b/mitogen/parent.py index a5702ec6..ce4d3c63 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -1021,13 +1021,9 @@ class LineLoggingProtocolMixin(object): self.logged_lines = [] self.logged_partial = None - def get_history(self): - s = b('\n').join(self.logged_lines) + (self.logged_partial or b('')) - return mitogen.core.to_text(s) - def on_line_received(self, line): self.logged_partial = None - self.logged_lines.append(line) + self.logged_lines.append((time.time(), line)) self.logged_lines[:] = self.logged_lines[-100:] return super(LineLoggingProtocolMixin, self).on_line_received(line) @@ -1035,8 +1031,25 @@ class LineLoggingProtocolMixin(object): self.logged_partial = line return super(LineLoggingProtocolMixin, self).on_partial_line_received(line) + def on_disconnect(self, broker): + if self.logged_partial: + self.logged_lines.append((time.time(), self.logged_partial)) + self.logged_partial = None + super(LineLoggingProtocolMixin, self).on_disconnect(broker) + + +def get_history(streams): + history = [] + for stream in streams: + if stream: + history.extend(getattr(stream.protocol, 'logged_lines', [])) + history.sort() -class RegexProtocol(mitogen.core.DelimitedProtocol): + s = b('\n').join(h[1] for h in history) + return mitogen.core.to_text(s) + + +class RegexProtocol(LineLoggingProtocolMixin, mitogen.core.DelimitedProtocol): """ Implement a delimited protocol where messages matching a set of regular expressions are dispatched to individual handler methods. Input is @@ -1055,6 +1068,7 @@ class RegexProtocol(mitogen.core.DelimitedProtocol): PARTIAL_PATTERNS = [] def on_line_received(self, line): + super(RegexProtocol, self).on_line_received(line) for pattern, func in self.PATTERNS: match = pattern.search(line) if match is not None: @@ -1067,6 +1081,7 @@ class RegexProtocol(mitogen.core.DelimitedProtocol): self.stream.name, line.decode('utf-8', 'replace')) def on_partial_line_received(self, line): + super(RegexProtocol, self).on_partial_line_received(line) LOG.debug('%s: (partial): %s', self.stream.name, line.decode('utf-8', 'replace')) for pattern, func in self.PARTIAL_PATTERNS: @@ -1081,7 +1096,7 @@ class RegexProtocol(mitogen.core.DelimitedProtocol): self.stream.name, line.decode('utf-8', 'replace')) -class BootstrapProtocol(LineLoggingProtocolMixin, RegexProtocol): +class BootstrapProtocol(RegexProtocol): """ Respond to stdout of a child during bootstrap. 
Wait for EC0_MARKER to be written by the first stage to indicate it can receive the bootstrap, then @@ -1124,7 +1139,7 @@ class BootstrapProtocol(LineLoggingProtocolMixin, RegexProtocol): ] -class LogProtocol(mitogen.core.DelimitedProtocol): +class LogProtocol(LineLoggingProtocolMixin, mitogen.core.DelimitedProtocol): """ For "hybrid TTY/socketpair" mode, after connection setup a spare TTY master FD exists that cannot be closed, and to which SSH or sudo may continue @@ -1136,6 +1151,7 @@ class LogProtocol(mitogen.core.DelimitedProtocol): written to it. """ def on_line_received(self, line): + super(LogProtocol, self).on_line_received(line) LOG.info(u'%s: %s', self.stream.name, line.decode('utf-8', 'replace')) @@ -1425,7 +1441,9 @@ class Connection(object): if not self.timer.cancelled: self.timer.cancel() self._fail_connection(EofError( - self.eof_error_msg + self.stream.protocol.get_history() + self.eof_error_msg + get_history( + [self.stream, self.stderr_stream] + ) )) self.proc._async_reap(self, self._router) diff --git a/mitogen/sudo.py b/mitogen/sudo.py index fac02c57..725e6aff 100644 --- a/mitogen/sudo.py +++ b/mitogen/sudo.py @@ -266,6 +266,4 @@ class Connection(mitogen.parent.Connection): if self.options.selinux_type: bits += ['-t', self.options.selinux_type] - bits = bits + ['--'] + super(Connection, self).get_boot_command() - LOG.debug('sudo command line: %r', bits) - return bits + return bits + ['--'] + super(Connection, self).get_boot_command() From 402dba4197fcddc6f0ca6a5299e7c9a63f555f78 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 23 Jul 2019 16:04:44 +0100 Subject: [PATCH 136/383] module_finder: pass raw file to compile() Newer Ansibles have e.g. UTF-8 present in apt.py. --- ansible_mitogen/module_finder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible_mitogen/module_finder.py b/ansible_mitogen/module_finder.py index 633e3cad..89aa2beb 100644 --- a/ansible_mitogen/module_finder.py +++ b/ansible_mitogen/module_finder.py @@ -57,7 +57,7 @@ def get_code(module): """ Compile and return a Module's code object. """ - fp = open(module.path) + fp = open(module.path, 'rb') try: return compile(fp.read(), str(module.name), 'exec') finally: From b1379e6f4501e5701bb9c83d8d15ceb66bae80d8 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 23 Jul 2019 17:24:59 +0100 Subject: [PATCH 137/383] [stream-refactor] send MITO002 earlier Prevents 2.4 bootstrap from attempting to fetch os_fork too early. 
Connection(None).connect(): pid:25098 stdin:81 stdout:81 stderr:79 ssh.localhost:2201: (partial): mitogen__has_sudo_nopw@localhost's password: ssh.localhost:2201: (password prompt): mitogen__has_sudo_nopw@localhost's password: ssh.localhost:2201: (unrecognized): mitogen__has_sudo_nopw@localhost's password: BootstrapProtocol(ssh.localhost:2201): first stage started succcessfully BootstrapProtocol(ssh.localhost:2201): first stage received bootstrap ssh.localhost:2201: (partial): MIdmitogen.os_fork ssh.localhost:2201: (unrecognized partial): MIdmitogen.os_fork ssh.localhost:2201: failing connection due to TimeoutError(u'Failed to setup connection after 10.00 seconds',) --- mitogen/core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mitogen/core.py b/mitogen/core.py index 6df6ac54..18b5283c 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -3599,9 +3599,9 @@ class ExternalContext(object): socket.gethostname()) _v and LOG.debug('Recovered sys.executable: %r', sys.executable) - self.broker._py24_25_compat() if self.config.get('send_ec2', True): self.stream.transmit_side.write(b('MITO002\n')) + self.broker._py24_25_compat() self.log_handler.uncork() self.dispatcher.run() _v and LOG.debug('ExternalContext.main() normal exit') From 321dac304658c8eb2a919e138466f7d020c6ce94 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 23 Jul 2019 17:25:53 +0100 Subject: [PATCH 138/383] Allow specifying -vvv to debops_tests. --- .ci/debops_common_tests.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.ci/debops_common_tests.py b/.ci/debops_common_tests.py index b0e2e4e8..8b35de1e 100755 --- a/.ci/debops_common_tests.py +++ b/.ci/debops_common_tests.py @@ -3,6 +3,7 @@ from __future__ import print_function import os import shutil +import sys import ci_lib @@ -68,8 +69,8 @@ with ci_lib.Fold('job_setup'): with ci_lib.Fold('first_run'): - ci_lib.run('debops common') + ci_lib.run('debops common %s', ' '.join(sys.argv[1:])) with ci_lib.Fold('second_run'): - ci_lib.run('debops common') + ci_lib.run('debops common %s', ' '.join(sys.argv[1:])) From bd80d4b0af5bd03b53ff0f6c90548dd9774a3248 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 23 Jul 2019 17:30:46 +0100 Subject: [PATCH 139/383] [stream-refactor] avoid os.wait3() for Py2.4. --- tests/testlib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/testlib.py b/tests/testlib.py index 762d0c6a..e0ab827d 100644 --- a/tests/testlib.py +++ b/tests/testlib.py @@ -343,7 +343,7 @@ class TestCase(unittest2.TestCase): def _teardown_check_zombies(self): try: - pid, status, ru = os.wait3(os.WNOHANG) + pid, status = os.waitpid(0, os.WNOHANG) except OSError: return # ECHILD From c09bbdc2f94d6fab3a239ad881478f7a41fa124f Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 23 Jul 2019 18:33:35 +0100 Subject: [PATCH 140/383] [stream-refactor] fix 2.4 syntax error. 
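
Usage sketch: running ".ci/debops_common_tests.py -vvv" should now result
in "debops common -vvv" being executed for both passes, assuming
ci_lib.run() %-formats the joined sys.argv[1:] into its command string.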
--- mitogen/ssh.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mitogen/ssh.py b/mitogen/ssh.py index 2ba83ed4..b4c247c1 100644 --- a/mitogen/ssh.py +++ b/mitogen/ssh.py @@ -92,7 +92,7 @@ PERMDENIED_PATTERN = re.compile( re.I ) -DEBUG_PATTERN = re.compile(b'^debug[123]:') +DEBUG_PATTERN = re.compile(b('^debug[123]:')) class PasswordError(mitogen.core.StreamError): From fdf3484a2a828bb18c3dd940345ad063a6c4d979 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 23 Jul 2019 18:50:32 +0100 Subject: [PATCH 141/383] [stream-refactor] 3.x socket.send() requires bytes --- mitogen/fork.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mitogen/fork.py b/mitogen/fork.py index 2d82eb79..e2075fc3 100644 --- a/mitogen/fork.py +++ b/mitogen/fork.py @@ -38,6 +38,7 @@ import traceback import mitogen.core import mitogen.parent +from mitogen.core import b LOG = logging.getLogger('mitogen') @@ -212,7 +213,7 @@ class Connection(mitogen.parent.Connection): self.options.on_fork() mitogen.core.set_block(childfp.fileno()) - childfp.send('MITO002\n') + childfp.send(b('MITO002\n')) # Expected by the ExternalContext.main(). os.dup2(childfp.fileno(), 1) From f0065d76d8019bb5e460ba975e255da8b1f69351 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 23 Jul 2019 19:45:48 +0100 Subject: [PATCH 142/383] [stream-refactor] add descriptive task names to _container_prep --- tests/image_prep/_container_setup.yml | 79 +++++++++++++++++---------- 1 file changed, 51 insertions(+), 28 deletions(-) diff --git a/tests/image_prep/_container_setup.yml b/tests/image_prep/_container_setup.yml index f2a5c5ff..9662d498 100644 --- a/tests/image_prep/_container_setup.yml +++ b/tests/image_prep/_container_setup.yml @@ -46,84 +46,101 @@ - when: ansible_virtualization_type != "docker" meta: end_play - - apt: + - name: Ensure requisite Debian packages are installed + apt: name: "{{packages.common + packages[distro][ver]}}" state: installed update_cache: true when: distro == "Debian" - - yum: + - name: Ensure requisite Red Hat packaed are installed + yum: name: "{{packages.common + packages[distro][ver]}}" state: installed update_cache: true when: distro == "CentOS" - - command: apt-get clean + - name: Clean up apt cache + command: apt-get clean when: distro == "Debian" - - command: yum clean all - when: distro == "CentOS" - - - shell: rm -rf {{item}}/* + - name: Clean up apt package lists + shell: rm -rf {{item}}/* with_items: - /var/cache/apt - /var/lib/apt/lists + when: distro == "Debian" - - copy: + - name: Clean up yum cache + command: yum clean all + when: distro == "CentOS" + + - name: Enable UTF-8 locale on Debian + copy: dest: /etc/locale.gen content: | en_US.UTF-8 UTF-8 fr_FR.UTF-8 UTF-8 when: distro == "Debian" - - shell: locale-gen + - name: Generate UTF-8 locale on Debian + shell: locale-gen when: distro == "Debian" - - unarchive: + - name: Install prebuilt 'doas' binary on Debian + unarchive: dest: / src: ../data/docker/doas-debian.tar.gz when: distro == "Debian" - - file: + - name: Make prebuilt 'doas' binary executable on Debian + file: path: /usr/local/bin/doas mode: 'u=rwxs,go=rx' owner: root group: root when: distro == "Debian" - - copy: + - name: Install doas.conf on Debian + copy: dest: /etc/doas.conf content: | permit :mitogen__group permit :root when: distro == "Debian" - # Vanilla Ansible needs simplejson on CentOS 5. - - shell: mkdir -p /usr/lib/python2.4/site-packages/simplejson/ + - name: Vanilla Ansible needs simplejson on CentOS 5. 
+ shell: mkdir -p /usr/lib/python2.4/site-packages/simplejson/ when: distro == "CentOS" and ver == "5" - - synchronize: + - name: Vanilla Ansible needs simplejson on CentOS 5. + synchronize: dest: /usr/lib/python2.4/site-packages/simplejson/ src: ../../ansible_mitogen/compat/simplejson/ when: distro == "CentOS" and ver == "5" - - user: + - name: Set root user password and shell + user: name: root password: "{{ 'rootpassword' | password_hash('sha256') }}" shell: /bin/bash - - file: + - name: Ensure /var/run/sshd exists + file: path: /var/run/sshd state: directory - - command: ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key + - name: Generate SSH host key + command: ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key args: creates: /etc/ssh/ssh_host_rsa_key - - group: + - name: Ensure correct sudo group exists + group: name: "{{sudo_group[distro]}}" - - copy: + - name: Ensure /etc/sentinel exists + copy: dest: /etc/sentinel content: | i-am-mitogen-test-docker-image @@ -138,7 +155,8 @@ path: /etc/sudoers.d mode: 'u=rwx,go=' - - blockinfile: + - name: Install test-related sudo rules + blockinfile: path: /etc/sudoers block: | # https://www.toofishes.net/blog/trouble-sudoers-or-last-entry-wins/ @@ -150,31 +168,36 @@ Defaults>mitogen__require_tty requiretty Defaults>mitogen__require_tty_pw_required requiretty,targetpw - # Prevent permission denied errors. - - file: + - name: Prevent permission denied errors. + file: path: /etc/sudoers.d/README state: absent - - lineinfile: + - name: Install CentOS wheel sudo rule + lineinfile: path: /etc/sudoers line: "%wheel ALL=(ALL) ALL" when: distro == "CentOS" - - lineinfile: + - name: Enable SSH banner + lineinfile: path: /etc/ssh/sshd_config line: Banner /etc/ssh/banner.txt - - lineinfile: + - name: Allow remote SSH root login + lineinfile: path: /etc/ssh/sshd_config line: PermitRootLogin yes regexp: '.*PermitRootLogin.*' - - lineinfile: + - name: Allow remote SSH root login + lineinfile: path: /etc/pam.d/sshd regexp: '.*session.*required.*pam_loginuid.so' line: session optional pam_loginuid.so - - copy: + - name: Install convenience script for running an straced Python + copy: mode: 'u+rwx,go=rx' dest: /usr/local/bin/pywrap content: | From 07f3179e580277ea483b4815319f25171cac8e62 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 24 Jul 2019 15:14:08 +0100 Subject: [PATCH 143/383] [stream-refactor] Py2.4 compat fix for iter_split_test. --- tests/iter_split_test.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/iter_split_test.py b/tests/iter_split_test.py index 6178ef0c..ee5e97d9 100644 --- a/tests/iter_split_test.py +++ b/tests/iter_split_test.py @@ -6,6 +6,12 @@ import mitogen.core import testlib +try: + next +except NameError: + def next(it): + return it.next() + class IterSplitTest(unittest2.TestCase): func = staticmethod(mitogen.core.iter_split) From 2ba3973bc5df401d770d4d813adb786716cf6d9d Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 24 Jul 2019 15:39:34 +0100 Subject: [PATCH 144/383] Pin idna==2.7 when running on Python<2.7. --- tests/requirements.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/requirements.txt b/tests/requirements.txt index 327f563a..bbcdc7cc 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -13,3 +13,5 @@ unittest2==1.1.0 # Fix InsecurePlatformWarning while creating py26 tox environment # https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings urllib3[secure]; python_version < '2.7.9' +# Last idna compatible with Python 2.6 was idna 2.7. 
+idna==2.7; python_version < '2.7' From 2f950b3bda40ae717ce86b93f69a3070e44bf8b8 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 24 Jul 2019 23:11:44 +0100 Subject: [PATCH 145/383] [stream-refactor] allow doas_test to succeed on CentOS Unlike on Debian, some environment variables that tickle getpass.getuser() are being inherited. So use getuid() instead. Also install the doas binary on CentOS. CI was changed (I believe) to shrink the configuration matrix, and now these tests run on CentOS too. --- tests/doas_test.py | 3 +-- tests/image_prep/README.md | 3 +++ tests/image_prep/_container_setup.yml | 9 +++------ 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/tests/doas_test.py b/tests/doas_test.py index 43284367..560ada99 100644 --- a/tests/doas_test.py +++ b/tests/doas_test.py @@ -1,5 +1,4 @@ -import getpass import os import mitogen @@ -59,7 +58,7 @@ class DoasTest(testlib.DockerMixin, testlib.TestCase): password='has_sudo_password', ) context = self.router.su(via=ssh, password='rootpassword') - self.assertEquals('root', context.call(getpass.getuser)) + self.assertEquals(0, context.call(os.getuid)) if __name__ == '__main__': diff --git a/tests/image_prep/README.md b/tests/image_prep/README.md index d275672f..a970b319 100644 --- a/tests/image_prep/README.md +++ b/tests/image_prep/README.md @@ -11,10 +11,13 @@ code, the OS X config just has the user accounts. See ../README.md for a (mostly) description of the accounts created. + ## Building the containers ``./build_docker_images.sh`` +Requires Ansible 2.3.x.x in order to target CentOS 5 + ## Preparing an OS X box diff --git a/tests/image_prep/_container_setup.yml b/tests/image_prep/_container_setup.yml index 9662d498..9d001f48 100644 --- a/tests/image_prep/_container_setup.yml +++ b/tests/image_prep/_container_setup.yml @@ -87,27 +87,24 @@ shell: locale-gen when: distro == "Debian" - - name: Install prebuilt 'doas' binary on Debian + - name: Install prebuilt 'doas' binary unarchive: dest: / src: ../data/docker/doas-debian.tar.gz - when: distro == "Debian" - - name: Make prebuilt 'doas' binary executable on Debian + - name: Make prebuilt 'doas' binary executable file: path: /usr/local/bin/doas mode: 'u=rwxs,go=rx' owner: root group: root - when: distro == "Debian" - - name: Install doas.conf on Debian + - name: Install doas.conf copy: dest: /etc/doas.conf content: | permit :mitogen__group permit :root - when: distro == "Debian" - name: Vanilla Ansible needs simplejson on CentOS 5. 
shell: mkdir -p /usr/lib/python2.4/site-packages/simplejson/ From bed59311946870dc2191eb291f7314e3a6ebb67e Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 24 Jul 2019 23:22:26 +0100 Subject: [PATCH 146/383] [stream-refactor] remove one more getuser() usage --- tests/su_test.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/su_test.py b/tests/su_test.py index 447b00fc..320f9cef 100644 --- a/tests/su_test.py +++ b/tests/su_test.py @@ -1,5 +1,4 @@ -import getpass import os import mitogen @@ -67,7 +66,7 @@ class SuTest(testlib.DockerMixin, testlib.TestCase): password='has_sudo_password', ) context = self.router.su(via=ssh, password='rootpassword') - self.assertEquals('root', context.call(getpass.getuser)) + self.assertEquals(0, context.call(os.getuid)) if __name__ == '__main__': From 11ae6f38731d877f832979a5ee1baffe5ac63c9e Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 24 Jul 2019 23:22:39 +0100 Subject: [PATCH 147/383] core: better Side attribute docstrings --- mitogen/core.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index 18b5283c..d6fa8922 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -1745,9 +1745,13 @@ class Side(object): def __init__(self, stream, fp, cloexec=True, keep_alive=True, blocking=False): #: The :class:`Stream` for which this is a read or write side. self.stream = stream - #: Integer file descriptor to perform IO on, or :data:`None` if - #: :meth:`close` has been called. + # File or socket object responsible for the lifetime of its underlying + # file descriptor. self.fp = fp + #: Integer file descriptor to perform IO on, or :data:`None` if + #: :meth:`close` has been called. This is saved separately from the + #: file object, since fileno() cannot be called on it after it has been + #: closed. 
self.fd = fp.fileno() #: If :data:`True`, causes presence of this side in #: :class:`Broker`'s active reader set to defer shutdown until the From 054643783c2e8428d7faac9879d6b7ed065a7d9b Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 25 Jul 2019 11:50:55 +0100 Subject: [PATCH 148/383] .travis.yml: Add reverse shell spawn for Travis too --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index f8bd4a14..24525bb1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,6 +21,7 @@ install: - .ci/${MODE}_install.py script: +- .ci/spawn_reverse_shell.py - .ci/${MODE}_tests.py From 856dfcebcdd583998b63727f0cb3f84d7d008a70 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 25 Jul 2019 12:02:00 +0100 Subject: [PATCH 149/383] [stream-refactor] another 2.4 fix for create_child_test --- tests/create_child_test.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/create_child_test.py b/tests/create_child_test.py index 0e8855f7..91ab4365 100644 --- a/tests/create_child_test.py +++ b/tests/create_child_test.py @@ -96,9 +96,8 @@ class CreateChildTest(StdinSockMixin, StdoutSockMixin, testlib.TestCase): class CreateChildMergedTest(StdinSockMixin, StdoutSockMixin, testlib.TestCase): def func(self, *args, **kwargs): - return mitogen.parent.create_child( - *args, merge_stdio=True, **kwargs - ) + kwargs['merge_stdio'] = True + return mitogen.parent.create_child(*args, **kwargs) def test_stderr(self): proc, info, buf = run_fd_check(self.func, 2, 'write', From db9066fbfb00d03850c31aefefa273d7412ec68b Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 25 Jul 2019 12:03:17 +0100 Subject: [PATCH 150/383] [stream-refactor] mark setns module as requiring Python >2.4 --- docs/api.rst | 3 +++ tests/setns_test.py | 8 ++++++++ 2 files changed, 11 insertions(+) diff --git a/docs/api.rst b/docs/api.rst index db39ad99..917fc627 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -383,6 +383,9 @@ Connection Methods the root PID of a running Docker, LXC, LXD, or systemd-nspawn container. + The setns method depends on the built-in :mod:`ctypes` module, and thus + does not support Python 2.4. + A program is required only to find the root PID, after which management of the child Python interpreter is handled directly. 
diff --git a/tests/setns_test.py b/tests/setns_test.py index b8a75788..d48179b1 100644 --- a/tests/setns_test.py +++ b/tests/setns_test.py @@ -1,6 +1,7 @@ import os import socket +import sys import mitogen import mitogen.parent @@ -34,5 +35,12 @@ class DockerTest(testlib.DockerMixin, testlib.TestCase): via_setns.call(socket.gethostname), ) + +DockerTest = unittest2.skipIf( + condition=sys.version_info < (2, 5), + reason="mitogen.setns unsupported on Python <2.4" +)(DockerTest) + + if __name__ == '__main__': unittest2.main() From 54987100b2e74ff836c94df98e1f57dc43005172 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 27 Jul 2019 10:34:18 +0000 Subject: [PATCH 151/383] [stream-refactor] allow up to 30 seconds to connect in unix_test It reliably fails when running on a (intentionally) heavily loaded machine --- tests/unix_test.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/unix_test.py b/tests/unix_test.py index 4f4aab8a..cb8c08f5 100644 --- a/tests/unix_test.py +++ b/tests/unix_test.py @@ -86,11 +86,12 @@ class ClientTest(testlib.TestCase): def _try_connect(self, path): # give server a chance to setup listener - for x in range(10): + timeout = time.time() + 30.0 + while True: try: return mitogen.unix.connect(path) except socket.error: - if x == 9: + if time.time() > timeout: raise time.sleep(0.1) From 65e31f63fe381f180331163a25cf7a0134fee144 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 25 Jul 2019 13:19:30 +0100 Subject: [PATCH 152/383] [stream-refactor] fix Py2.4 failure by implementing missing Timer method --- mitogen/parent.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mitogen/parent.py b/mitogen/parent.py index ce4d3c63..375c5622 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -508,12 +508,18 @@ class Timer(object): self.when = when self.func = func + def __repr__(self): + return 'Timer(%r, %r)' % (self.when, self.func) + def __eq__(self, other): return self.when == other.when def __lt__(self, other): return self.when < other.when + def __le__(self, other): + return self.when <= other.when + def cancel(self): """ Cancel this event. If it has not yet executed, it will not execute From cd0a557602495914ed59e43af5d762f605932a6a Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 25 Jul 2019 13:54:02 +0100 Subject: [PATCH 153/383] [stream-refactor] yet another 2.4 issue in create_child_test --- tests/create_child_test.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/create_child_test.py b/tests/create_child_test.py index 91ab4365..21591fe8 100644 --- a/tests/create_child_test.py +++ b/tests/create_child_test.py @@ -116,9 +116,8 @@ class CreateChildMergedTest(StdinSockMixin, StdoutSockMixin, class CreateChildStderrPipeTest(StdinSockMixin, StdoutSockMixin, testlib.TestCase): def func(self, *args, **kwargs): - return mitogen.parent.create_child( - *args, stderr_pipe=True, **kwargs - ) + kwargs['stderr_pipe'] = True + return mitogen.parent.create_child(*args, **kwargs) def test_stderr(self): proc, info, buf = run_fd_check(self.func, 2, 'write', From 462c4ff59fa0b4fbeafe288ed276f21e2b404a7e Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 25 Jul 2019 13:54:41 +0100 Subject: [PATCH 154/383] [stream-refactor] stop writing to /tmp/foo in fd_check.py. 
--- tests/data/fd_check.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/tests/data/fd_check.py b/tests/data/fd_check.py index b933c9c7..0a87a95e 100755 --- a/tests/data/fd_check.py +++ b/tests/data/fd_check.py @@ -44,13 +44,3 @@ open(sys.argv[1], 'w').write(repr({ 'ttyname': ttyname(fd), 'controlling_tty': controlling_tty(), })) - -open('/tmp/foo', 'w').write(repr({ - 'buf': buf, - 'flags': fcntl.fcntl(fd, fcntl.F_GETFL), - 'st_mode': st.st_mode, - 'st_dev': st.st_dev, - 'st_ino': st.st_ino, - 'ttyname': ttyname(fd), - 'controlling_tty': controlling_tty(), -})) From 5f7ab220cb066abb1a580aeeb13660624ec9ec46 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 25 Jul 2019 13:54:59 +0100 Subject: [PATCH 155/383] [stream-refactor] statically link doas binary using musl So it can run on CentOS 5 --- tests/data/docker/doas-debian.tar.gz | Bin 15371 -> 58712 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/tests/data/docker/doas-debian.tar.gz b/tests/data/docker/doas-debian.tar.gz index 9211fff25ca6e8a5edb6dac0eac5a4a1188b9bb6..2deb72ff8c0481d0851a7bb7ebc4e11e34ad8cec 100644 GIT binary patch literal 58712 zcmV(zK<2+6iwFQeojF|s1ME8scvRJycM?WMh|GY1k<}7jk_eQ9knjwS3j+sD)~Lkd zDk3ixDi3uAw1pL%Oy%zNw)w1Cqr22VWn~2|TSX(9I6P$H`e3ApX=`EXqTcMpG9b#x zYx@8HIro_ax7(Fnzt4OhbME=?|9+iw@41BTyLY7Y4gVFw-+XL`|3VP|yO++JbN9&q zPw_7*8ddyV{LgRv=Pq0_^8YgaB}JvhMJ1z4M`8R+$Bg+d{^vjb^Ow%K?>xu9c+}|9 zA~XKQV~QdEqe>tR@(RC;|2O{kV4!?zmdj<#v!zKA&bGMu+%Mpsr%fEcG*)uMZ(peo z;kwVk|HWFDZIq-u`o-A=@Gt)t9G?G+0k$z$FlCoWHo7c9ce|jwUEu3ur4XZ!9zlKm z$HDm*`~`mTiSY)Y>v9e~TZ9~a)UC`C*SejO1YA6R@-*IiMLpVM^YLVl&g3JWFWW6u zI-J`T{`Ji6CLQ>xOXte}W-nZ_FeuHQedppJ*)R0d?f0+e^O;Wg20o8JWryo>6Arj8 zS98F1f22BrOZ9fRF?Yc8CUX9H4*0Wv;eQAG*8*PXfWIu@V;%6<1>Em|?-p?8fWIl= zl@9p30zT6L|BZmJcEIbVaCnUazD~gF9q^|Ge5(WgjDSZS@XZ1qcfhv_c*5x)0Z%#o zBjD0{J3J=f`RhB$D+m107Cx?Zz?+`qV~qp8Nxxm@^v zLnm?_a7~x{OFKO05Ff`n;HzH{{YQHT%3D)QUDC{5Fwd0S@ZPEP_ zqb4)t7BSvOBMZ0X7DeTigLAZ3L%`r=_$i8NeMh+@1%b6$E{PR=#^g_#TJlWY0EvYU zr!>mgHElIuo6?oZUuB}Kj)hn5@JLMcyF$mAI%9{YD9U0}cCRH|KnSPvLVsdP7K=oK zePGsq#f41yk%!4EcdupPoxbFMvRE}ow@@URoMMWPMfL@AS@>lC6*)|~ABbMtkC-eR z^(7MwG>x)ICUhlUZDq=g!JC*eWpF*ff_+(LZ+L80=t4GZnUBdQl5XaTCqHI#nq^lH z_JIXV-ovtA9Gr`n<5I!?OsVv&`6!dgJO~0kKq#MN**@kPPtraVlu^l`Y?e6XU2kU}+wTMY%jGUeo}2uDsqP#ieV47-jSkEtAO;?$-sCA)-N#0QyOmO=T(lW{9VmK} zsbzl;_}cI80UEW;{Vxy!)r~_HD@Zf>WAIXs)iZ7kg8R$-0MIs{ z$z(v2yZ;G=T)r5Gf?d<)-E7CHY?gV8DegHaf84I+K8&U8z5%9JXL|2W9@dJ%MZkFP zg=s%4WNc1hYC;CRc`x6az*{vM#Zb^*paj|=K&gfo7T)ejYIK)x#M;QdxQhWUF2GA?(*5x${B(@v)7dH&$Jx9$*^WPEGgmWGb3NX8m-Oh$Gok|BWl%u6&fCX! ze3Wg-^G23IAUfAa=i_dm@>N2ZB%}2tDP7!gk}$cVWr`fwFS4YC#rT#YaGIFSh1T%? 
zGXo^0>a&Vsa?f*J}1dQ59wW~&M=VVYlQ`b?D^K@%oQ5WT@TdmQPAzN0(fJ;>_jG01CGXo>7pl$`-j$q5nYwZ zRV{Kw0^74#K+v}v>#xFr{2yeLx zEr-H9mZ=pfrRr_SsW|o$$f>I?AsMn;!LnoRB|)th)xSJB3yY9lWT6Rm?EE zm}kQzCf_b*+hXoNE2lT6Lc?*@=&>l|&dvmfDil`S)?A6DCnm4HkCT~v@;pr1mC(*R zjS^bZ=J!!vH6Nk8iXQ1GqY_8-jEajZunbX~Mv5A0jq&ZUO%w}-BWzLd^* zs|SLaVdCIw?Tfh)CaF@$r>(mSnA1Cs)-#+t*C(dUXr@r227sRs_=`>acQ`&F$dMKp zTA!eVrfbmS4_9(MdcH}VG|mYJDR?haR8m)|B{+Egh$Ro72GSu(uw<%mO`xH$n_&!cJRm+;WgYSbxwSLJfDB?nD8q6G4QR^;SCV4s^w9D6kQKGZ1uc6K z@1ff%>+rTZ*~PK7kN44jXx=XQG2$rXd8-v3z@@-KTQ`?-*28la2Yggj2tXwp8gJyP zKOf!r%r2tHBM={8A2*P88WxSF-$_|cj0LvJ2bsJUBY)9uS@+p=r4i#jMNv`za_$F3 z#p;6IMXugPPF5%zY1fMy?f;T*P55@=t`Xl*`@%1ElR$#)|-;o%UmsKR&)he1{B!%2np!UMp8PTvc1pQMLOip|_C>sX@_(?)nOO z$JKHgjs9HU&xIyKp(S{Lc+tMc7xM)}Y>W9HevG>hT{BCl3nr0*Bvl_UBosN@q|Uh7Xxh~>gL*( zWo4U232Cj2>GDyrZ6bT_K^<>07&P8*CLpNy;w2Q_T;f;D2bCAG$RbMeQiV3Y+Jm?8 zje8blB}YIZ!|jfEfNN>qA85b7Zw`MS3Dxd{=f*UXLn(Y*V@hQnZjtFl3(uTgjm8dH zyijZXD4Rbv(n}wFg+`PTn-I{vkx%#=o)o>Mf?YEUuS!e7@yHeGEu40RzK_>2f3A2v zqM4T|xO9n2ppCcQZ%Xn2`?xoECv~IFBLL)9im9V`uJK9NaE<&ts5%))6<`JHWwCj8!D;Q_;JInYcm zk$EWMtrxp39T4a*w$Xbd2OZTzrP;lGIl8`WRc7^TT&c{(+cFJti)HQHYRRT=O$xQ} zpftBm&o=AFQ7vzMO|?j-rNTsAj1g5$MD;vcs(ag7Ez&nvET34`7CI!haJH5T<%Kv4 z>@M1MhqWRQsZVyg>qvFH>#%h4|3M92ktu>ZYQT|{d zF!APn)|+x5niM;@K-9$IX%_m#%GEeS2H9EW`?wN~=W2K22)_SWS(i7GwJ@Mc{j{GdS;D611m)YFc!{RTD56ccRmAnvVt=05?41KX#)Q7od#{3?$s%@pL_chue3^3%S#gas+ zl-BFcC`e9S73aspr17DnnG8Kw+CTBY<7>+XBRN?Znh<0^K8l7O53f3I2gpV; z!WK(HpW2li?iA+Gva~ZTdpfKO9_Um_I=wdX#f;!kJxX{oZ7+!&Ym|6&_E~sZ67hBX z8eV(yX35+Xhk85TM!#CA_nDaX$rkZ4kM_8EJa1##UK=>ycv%9biX8UmqyO6Rp3Qli zJ;G;6o;+mzXcfoMU!gyw&*6GGT-O)}m6dcH+SqWa-3jIE_~Eto!c2Op)O?Cw8RF1^ zvF~2TPZtr1e^DZ4M#qSt0DAYDg>NwDxabt@6hH849Ysr>d;IbNl2VIM%cn4MB+d_{ zacC+Nuc-GvOKP=nwQzvm&d-lHa020btNDXqZHvi+73^ek91oaqvZfJ-4D5})5C4+m z5?oOUmmeC7-uwXuOsy3Mvm?=fZ4hg5=q8pO^s{G}i#AonP>ntA`yll;gMGMgg- ztl(@WgUcSFLpmZ;KEc*WFYUwRIRLvop${3mmJ;h z;cu?_xz&{;&za;0Ds0P9QzpZ0SbiEuD5^Wf%Z7YWTzN`G8&gNWqkq`O)QLTex0_FS z_4B<)R`Hjn?F23Wf#E=#i%w%bK>;JmuGt!?&$& zpZT%KV|r!|$->v?k5P1ugO}B(#K8-u;3gj2P1fuz+CQD_a&N591-fJ-+;L5 z)u2SkD{5`jOwwSQh#R;uI7q>(bWEkT!Nfd$oLDpnD`%S=DUbCDV|vjnIn`v%__+B*B7w`Ai32UHvT3ZlfcXjN5o(r}UY)Zg}Bp%M-ou+~rfGWQ6Xt;$=<&B%5&z--!&@D3r!-p=BAe#HE;mg8OCnX*p`iX^WL7?MoarJpu0 zz|Tgy2aB~Ii0dw)rO6=*S-|R;i)eHXZu+Wd0>PGFi))DGCRH^5imCw59TSER_`!eh zMcp%80m&P=7e88|^fH_^{@)FSn+imqS+bFT#*^gV0U)hz-*#GIkzJPfl!r7bk-PD; z<)50Qt%vcOR>Fn+mTO6qbk|_FaJp}BYU@TyfZCQMU-d|B`iS{K7TbgW-vZf{4MQ8E z9EVUL9~W^|M!N9(uPOL#5l<#HZt~jR&HN0-vw?q0#P1T)I6?~c)!v)Tqc9Mjn!=45 z#p$9yGxslmO;p+AIG$;eHZ3g^pg_?gL4&pe(kN&Htu&C(38sP;6a^m`iXczhqy^>C zI<0hwQFqmq-F01eU3b}CU%TsKK`luOeSjhqR30i76()p70c}fx&i|ZqCus`0pZ)#+ zzt8{sb(dyl?)%(x&pqef^I-LViqbc!a$(O016S%F_s2v|RlRuQPglT*ujuoUm2w(p8Hf+jNF{OD^P_i0DbrLlq-&w`zVe!w6q=-vFJ_ftP4yDy{FiOXtAeh<@(#AJ=_v! zG%_6;Hnx9G!m(x1*s^GBPs+cvMJ_v+p^9U)?B;Sn&MlLmf=Fr46{UMRvFWWyC%8Cv zD~c7pgxMrxG}rT(3wf5%A+!dHd|vrh3}mySkWGC&3fU}{aeB0s-*W#EvgH*FvTC`s zXgN327oC2ZY4xC`qI~vGsKT|fCwu74NcOSZ6@!7&+3ZF;4}QXP#gE1kEG)8+7a@%< zIVUed8soyZM=O3lTJa$wELbwCzm&&jn<^Irc|bGqOQ{PGAvTF|LBUQf<41{Z=EXn` z>1NJ5I8$S3rtmMU>zNT{zn`v50J2t=KywqG1evC@<>>6SMS3u zDsX#b9x~Pv7@`zlL`nEbYILKHsL?D2)LB3@sh+~3(q`12 z-J(w0?SI2j&7e`upivEwlX4?;NHuvF6-iU+e|nU(VSQT|GlP?2ed|hun>OJm56T)H zZA_(`iyWxEe0{9ocQxjy6;!UnHzY#es(KfW@SKN! 
zOUm1~_9``VNY4>OpI2f|KA0&`q|WfzUZsY`l=|$qmzUy(sDfei2u+S2^&|jyuJ*FnAY*n<{^2n3^E)O$jZPc%Yr*6a%gCJ79aXunP!4|G&1}d)M$akw8mlU>JRiv4BUV-H-`ymyO{>-Zl)&sNHSA_ zCy*s$H+RRv@(V%Q0pwGj{#(kn`|DV6M@CKct2%k?XUiuav;TySBSP@FzE`(!OJa?@ z6u7+0T2#A>_JPgW9zi-w+IXMJ&A*B5#>Sr+w^r>@Ym6G{f7c@wp{xW_ybENoMMW=L z51g);Xcgstayk}N<&1`@s+=am**MUFk;-Vt3zTS0 z6R0p>^~cMyB>nW*G;8~G{0<*NuTTTDRy_&gTll~=>c3dd)+wt>V!#p!*co`fK|mkq zUyedNEZBiQdO`|1u$n#M*DCgy29Hm%M^i`#0(>v%XLLsg(W{4i<)7kdqIw4NQEXPE zT10L~(j(@$4D;(EJ!J-b(o3W*i*#Oksgph8SEjrOy_8-uFqLhSsxQpi7Ug)cvmd`M zpu5ZjHQZjG6W+^joL=Yuh>II_rvk^oo7nfjtNstev{DIuks8xYKm5cT>E|jjt4X7=m zE6>K_%hO3!KQMCfZbadkY4L}`aghRXw0Hf`W71+!ZC^r-aX**xR$S}^X858L1WhLH z?%gQCKiaU5(!^H1*r~^H=iYcRGURCqXmhOVsX1aJJRz>fNB~$AER`NDH3UnIxU7^q z$hhbLxXT7`f{Uc+E=y^Nro>&AQp5EEN9^z-#u1?xAy2I#x*p!SO%pMcf&BD?=ZZ_1 zMNa93UQ@p9)+=jml=rtIIeL-ECH@ryafDj2`C>6_)Zm|Jl3`fy=4P_y+Q<&zMItDw z^>37nU=QN1$RpNqk&6p2 zjIt7mVU(5V%1cZQ8;D5^Wv}nNY!ir(UL#AtE|OlmTV)ja_ca^1-E3!KTf1h(UF>1V zS|N6WxglD2Y*`>**T&}hrXyyf#LuDof4rg_Z(fmU+>f}O>HofvSOm{#pZ>LlF}wZ? z3nRNe*WR=1C)2JEn?G)`Z1Y#QV6VxizX9tCaFYI-h2$B-+Zn;p9BpK;*78*^Xvi*l zA#owRPoF_g-nF6KT!a-7`~#0K7_K23jvS~(ZaN3EsP9^!GU0{)KjT3UJQ+Lyw;1EE zWg=`8yw|q>G>_%ESEN@?-St=GtK81o+hg&=trpMWyw}HmAKCsh?9Kd$@y`Te2Kma( zmvzX}Gaq<+h5IUk=?0i#`44Att-ZxiTzre^B@}q-;%7DkjXFAIH%!23n%$cw8nGd|nXBGwc%q6?|Om8cnxw%5^m$U1O)em+rYr31QX$jMvv-nd zXBNHrS=VWUTlF+f`E^Ca5<$jh$~f-Q4>31L_n-ll{&b;ASB}C18WRiTfuiRyY+Gim z^|$UP&J3C^M1=c~qfe+~z#ItdyAVA`P;YDZ$J|x2`=!Vwt=>A1T>$D9(Crr9ewuyy zhvun$yk{P%E1v$ zuSKeRxu?26;Z>m8JriHK6z$wQO>0L3+cp=dc3)}m3Wcf z=6D{2KWhhzo!9b}dGzTzzS4x{O6cuLzOp|(tmZ2d@$(!KT>GhA!u4f27aFdn5v~ zC6A~en+Fe}ZcVf!9-vk;9#Kh9B);=HPK5@pR(L__o@_jeEQ@HOJF)lY6s z-L9}xL^V%2)D?bUE6Sa6WeU@XWBKt%vl5SJ>4`w5_K6KIJgBGT!_Sjc5^Wh-MQ;^VF0QHy$K7A+cJ!|u6C*61|W-gN-P*sc3x=;TA<{*z! z1#Rkfd~yx#8I$wb-X9mU_s=_pYGDuKwHVrxr&#^~7{Hy53Pgs?=sPvfsUHBG5%|JxmG2=5>#7v zS6=Y}f_5Ug87OC7i+Ub|B4j4?Ry=|u?Yv44NBHPD#*3U^|N7M7nnoSYHqbXr1$hR4 z^47Ef{$`zzut=u!5wCPVsEdpw};u=M)tOVy}ircmQ%|$G_V!)%*KP>5O`me z3&+~}zC;hu(Y$C!2~v1zD$@CgpL(|?CvpLe2y)F+2wke8$q=c62GcuVdj-kMk$lgv zd;zkM2^cVkX*xnr=!&qn`hPH1Ho$ttoKCSfsbOnhI=z}kkbUh!uOvDJ`zuS*^iIO@ z$j`E*hm~GQDVS8klJYM0N+Q=fvshAV?<5|Ra#>ROrCv#?m~<^mGIjS#8i+{=EGf`C zX%KChclJivO!NlP$w+zU8kM5?@e9LOcJVv=Ikszc(q=>_ah;J1oNR^=Y<*T`z+33A z4Daw6h&X28#g}5%3i?ws@|&u7LXWl8^bmP(G65?m%5%leA!S0Ogbq~;)f~v_HOgPY zGGPTHc??(N*%i$b=?&xeGAPGOm;el)-_J+UQgxbvW1j0L9LXX)`UrnG?^SB`IY}Nq z@|eO`8I@=Y4TvB^@q%3|| z>1Rzr7S?>8X_v$<`p8<^y;k+}Am1=FqKuK`(+`_MuJ?BOsIa8xr{H!)&ehbreHG_- zyNHQFoR20D#=p-JG_eVU@2|21MCxb)p>!2Xz=aS^pk=U_CA7wtAWSc035e;@1j6(2 zECKO6nm|}SoFxQe69~r*t|*E0q%aTxbXHgQ+64-KVi)+Go!DFe)=lzIA373ZnV4I} zsffV%D$1#1XIxnl+XT2P_l!S@AJO*U(T6ID+S9h-G0M+fO)Jbts3FCvc%^s})dlo3 zN|xzM_XB+$fI03hOt2zLSsOQ&Qb>y znBb60`pe&6gE zFhS_L4 zr;Uz{%o^|&RoCZT_FAK&ru|#W8fbJh4Psx$voE_F9s93kUv6e!S{ohmwL~D&hbgEhIcy}@lQqfE8Y%uTUw{R#L1J~R2Jf^|s&FHGe+@9bno#032ChW4aQGpi zCgQ3c@@(`b%RdZOm3uu&kbfgAgYwmwolg3*gc`;HDr?p3+oRc2Sa#OC1Sp|p)&$GR z!|PC=;}uns=H#AR-Q@&Sqr7G?BM7}Sp`W8TarG-2k0AnMYUDBxyZZ)S&WfuHG?nrq33%b~75Q!Sezjc8x+%P7Wtp@Q{6>QL7Ou zwjo$&*8@O*F>$yiL$Kp*YKX3M48}k27qsh71+oybu z*6bY+s)0=$-( z%$8=GPuIuhOHY#Or0qdDDY!i)SjStAOJ7;)Ajx3U>Jo$7`*L;C{$N-eY)Y`mmi^pr zNe5&mr~`WrvmGM?!1JG^vo`766rU$`qb<0z zPjE+kzIN!DgLoH+N6^(zIl|G|ecLrQ4-^ zGhhqO-RuW!b8HXT9<)7FFEl0)fy_5NdNlE|#{A@}w;%nWG2d8xZfQ!y5C=Lq2JpU2nU`Hq$A!17Zl$r9uINJnR*?I+1%mi(K=7~5aQeqyBOaL;8B<7r%4{>7 z(mv{ubWXfrDji5}JzdgKs6|HCr()O?J}Mn#U$AHhLk++voYD`4IEV{^e^CaE!oPxR zuxm5QMJ2oRnk_gOOM7OQz@3)+dvMJk7Nk@1SHL=2lr7mYsXvq;UD66Ts>2c8AxT?3 z`B5QwJiEH^VSEoB&#Eq*%bqf-3m;)mrs_gBdrGS=oX4Jw)rCdu$xvN5UtWw22KY@g 
zw0&&VsXG4s8QLUU^$Z>7o1ry89(<0&XM8|iI~~Qy*o7ml$HnZFx^XpI0y7IY)8-(Z ztHsbiu(-*AynmH>lpv*=1)xi24G}t*)FB^#T%*ZFWWdiWt40LeVrM z4fGY(7wNRizZeL!bXcyvh=*fcE~y>g+TaanMjBz&VVZEe*(e|Am1!rvAy&wKvV%P= zujrNGZzyxaK#Z&!`8Sk$oE5naid3EST$|Uvo%1BASbZuSA1%J;q`VLcgb%81WU6hX z62?q`X)C!Zni;y6E{|uuPFDiqR+ryno&^aG3pCj-pSZ{he?gXpuo;N~qs`h_GBBop zHl$tM@Wmx{%l}kM{tj&dWwDjRrab+C&?dW3RrIiYANJB183BIz2jm69 zEfYd^oT+L-Zxz%|Hzxm*k~bp~Nirq#G1VpEO~Wd4y2jH#w$2BzXR-N?T!?^Q*)AtT zA?sqZfu|!#*eB8iUk>8Va&+77pJ6r^NUeZFP1ERbTB|d={Q8k!64K9uECo`~<)03_ zbCBNz``{W5R%B{6ziCxYcJPQXc+8O41z+RnLt>Xju}OP_$K!3{w-fW%r>EJg)2)Ki zD2R&A`#tSuVuRLY-Q)cy|Gviz`>mkV!LF(E43Ij4r{cw96K(5rta;U|j5mb8Yp`h< z-&3&-7ijHbO0Ha{6ivX<4a$sG`Rp!LS9h2`}M0+(mrqP!3=^}Z-}xllRK+LvQ zvHFFNqA!N(7i#IcpA0VJICPh7a`*_;Ik;>b6WPYv=zM>pVA)g;|4M_ zeb3w(<$|u8k6h53^u<=bT4VOMIF;>}aY0*=9U~?uS>=L~BV5pL_YoHq?^L;<of5Sw9s5Bozq_&r#w$81pdZ(LzORShvr5L+MZ$ONgC024G!WrC=s z!zwpIECez^2G3Q|%)|ttvOo_LG>`ZgVuFs!e^d+4{237;AmWAm<9@#{$_L4SMS&?1J_yoY--no>f8{gGvSJ2tKzBwsAXhSRKp_3^fUj}% zA#n#t`_dlbfW83^DA{1IPB!4Vh7L7&AOEzi zQkT>z?l6hxhwxMC>O;o?T>fdlr>{pIiv3!MBBXf2=$k0NyqwSh*Qjhvf#nGg=<$7a zy)U7@kbFrbYGfFf#?n$1SaCzG$)KvFNcHlk26C)%udjy@by)(NT`Z#Gbd^w=&i!5R z>t7yC3aWVJom0m`JN_v6bwC01qjFK9Aq^ouP!KPSz+jGOE;Hjx6NO3qR`6At$;z|} zuu=a>4rZ@3n<$=-{!S`wH&2J>BzX2_P^0p34DsRn5bvurvY8CEE6o(d$7(mP@J-;f)=xn-hAlZ`yl2BFu@D_CTPw{t&E7tO_@Nn&%%0^cY|J9s@{+k(&3@Eo>w`~G;7r;i}DK|)nv zCHI4%)B_l_-tQXqdKfERyj5(QPG@+9@`KMH)(#yr0=hh5XxsI}H6=-wfHDAg#PG@4 z^|)&Z?uMC#K&0vn$mB|Uozf)=4C%WWh3lE!iX#6D6(&y8Qvcefe1@JVu;MB}OaIXO zCk)qMFvo7ysNt|Y&`p-13+tEM*?IXmTBiE# zH*(&L@JAr!;s~bAr6m|oW8T3s>e42a=Vau1!{LVe*>b0STopuc^i_)2D5KdfJ3kKL zIam2a4RqNfJEaclleAZU>mk;geSnWy*xIii#=a_eZsCJRyMI)MQ);mJSH}r%Mc$dL zF28#3fbew8|3f#kk*S9PPZiuJi4TLBKVINIDSu30Sv8V#x<3bU-JlzpE7!ocNe%Xq zTqKgjVfp^`G{TSYxf>=yY*OxzA!0q?7X$Wsps}+KB5kmNkY$wiR3dd!>a3mKQ-Eqi z_d-?8o|JM7bG4T9b|}r^(}J%wBUBdAwq+CfE}~lK9YBnFM1oJQB@BDoJX}+nB#2=H zVi{nVB@lW!pwR&H2~eG$SAH}PmZ~|+Yn1j|nxQJ2CKvyBO#-Zkhhdw@!lM=94sf@} zuO@s-qe*e^by%7o1Ca?J*v;-|-H+hd1$XED_pKPrZ~1mWNk6)Mru9-I5ZRK68jIVe z&xW;fVpe-y9>yV+T;^==0BH}@R`CsXqT)PtWw-$R$isMAn~i-*qrP~sFCOfRC(@T4 zg1h4}3Y99=ZAJD?{$eh>64bNi7SszBil&Vb@6r^YmBldGKxxID3hn^)XfHT?II-wI z0NVM-EjfS{TtfQh*Z(mCX%*1X!?0D}igroBqzq*zfUp$h5%--Io?PaY@bOkM8TON$TltXmKu1;srtK5TwuL$w&_l z12YX{i+)_sK3-Ri2Fxdr>Qx31Tp2Xu1a241WS$G21kY`JCXiu1(`9~`&zxai&S$m<>Y@j^Iog7zs_zQn4pZ{w*%9if!1J z`u|({nC5+xvmV>Z}iSL~dpo z?J1||e7@xwA&i>@^dB#Yn}CZII@$$mXK|vyztbH`?}MulHkXTCio!1XErI*%RSIv! 
z7|laqVcI@Ka^bXoSu)5eUgXP+1>ASxwh!FWl<|Fs3 z(q}@)SHke^f^{dbDI*1b%6`G851BDG@oo%7?4aFWkmi^T^6Sq>S_LV=5~#P;>_FL-D0&64 z0Rg}cvGbr8BoDyX&v`xV+%EsPKVApqj<||TG{b2Hz9Qh5wsCybKJ}*H7$Un{_6czSN zAR+AmzQeqki}WnQjej|tapU373MLe3ytg{V@47)wml|A|$K=;A%9s|0k73WXZDO)n zjZLHOz=d+?E!z)vKU*`PwT%VPK&6 zL(0R5KN_O*MvZb^532 zuz-=^Y+E-Nfd4U`0ULXu4~!=rKNQ|a&`%HV^$qM8Gh9>E;_dJBr9uY_4dq8T&xBAa zrxsZ^u=sk+;5iID!lLVmx;zX&J(bd_3&(0ShIjb%w(CjT?mCzn{Wy4L)1$wMea5ir z!{AZ2!EkN91o(87C18^hD4WffPxB2ul}&VZbBt_eF#Ifm(3fl`cxP^N`VekNSI}}E zC|yODraVqlnu^En*4>0D{A2(>v4?;X%0(8q>v8hRI@u7Z>!-_aVdZB)dH4s!G-Rc6 z1MeROK;U1mEAC$%WH`O|NSly2aMCce?6)KNmlorUj*I{yYeWaoc<5Z!zfZ+N$VD} zd9OHz%%o6plzuY1<$v}CL?8%fTmj)hh|2)w7<7J@LdwQsI1p(h*GB+EliCDpTj^-B z5YgS{1gy_I;{@y##AePVX~@UClswplXX*YG#?Y?-*u7<@07F2$zidHTg`k1wir?>E@{fOno>ci*v=dA;-lFHq+h7`9>-%*HluQsdhU5jKAl3rG8NB< zf_Ty>H0BwNOo9Oxd_iX@3u_IYge~q9LA+!tO@|$A`$vj0Ohen6K?^e!JUOu9B<^cY z@e?JlI^^=_t{DomxA%LeZIPHBjtWLkPNxKnr+sl;Vfba zr2XskmZ3DqU*lcaP+BmOIz$T0 zxnHLbCq~F1_aRkYcnTGe6NS+qV3Jh^30quj&Hy=SfeE3(yhfldkN<%-V6pV5;so0- z%wyxEPEOTe^XV(8=7HYyo;CY;v>W=z(xd1)8y;nUCmm+-m6N*}nSHI3t*O@{;iIGj z zWya8ly;45|#B^oCLx=pkb|7xnZG6>lD94j&F5hJ1RKIS5OFEBBMvzv(I>u8l;HZ2n zjC9W=0d$tbzp*0)=?;U<7mu-*aplIVD|ZU6+-%g0&;iREzRBhrgh*0w3Gv4!4N|k+ zqrLQgl;4Uv>rwXucx!h$;NPso!$AHAxro27LYm2mXHlbr4BhHOc3gJS zMPhs9Gd~g%Rlh*{q}|+E0D@8*o;Hc!cf*ERg_`pPFfnZ|?oxqt$%U)e*4~I)va{k6 zh1WEp5}QF@Pp7STfPzL{xIy@*w}tAl;)?B<(aFe(%X*Tp{3SyZOMtM(m5F)}NLy}5 zVzkAZ4B5(-Sq_D;15^F!*#()GT+$+#?s{Vg*-hB?ZxSYGam$}TH2-+XFa+p}?KmQM z#k2Iu>Mn(A5cysb0(VEY{!ZBX1?$=36r8JPx^T|E4a2Hk(pWbcgo& zG{&Oc(n0h(jZTYuNVZ>J-4EEih|Uc_eP*E(a1#W3=@a?0vv_5=4LdoMyvu!!?LK`2 zo|3F^(1}w4em)Qho{67+eItv2Jpz}vbi9^RFPJLuyLG!aC2|%Z&l^oVUi6A_Z6@Qh zoKgp4I6qf8E&u304?xS_5qBzh zqr&^+dYhuy74jsNCew*xd!TK%$oHSEoTOTj@=a|!>>U1*Al1GZImvT6#_&FYPIo)lnBY+uPyPa6;QPd&d40AfR zoV-SF;{xFXn-ai7&I!QFc$@LBwAV8RAT|C2l=%3{8V2!{{=+pe2Yls58tf+ma)thJ zuQtGrHM_tam<)X!&sVL7RA5`;x$^AsD5wq`ikx91S&xNteZaO(DA24OOZAV(Gl!q1HU7f!cBUCd!jg)$SIoF@d1&xdB0lZjxp~mQ=G-o7xxw4K5)6z`x+R zH5w6Y%jmTRfcCiD#|kY9DHWWpxrt*}+GL;C#f}fkPmo7SlORq|ya__1mh1w=Ck=`h z2-;)=zFJO(ZwXWq7vS+;8~d8jam<*le8KBlt-wE88H@?r% zPaz2=0oB)B#4aJ{LpLgPjtIR4Vf4vuxY$OQ!jC-ZPD~%TXd5;Ysx>m1MXsKS7VJL` z0o-imJVRx17U@LgZNmY$cn1Jv+&HX;cbvl3mfP-`4hXk<2#UE-pBd7m0|ioNEqZX6 z4uY2@;DQ;*k@sdY-_KIL{G*KG*9E%-TL~br?L+eR09fkGyeOao#!-2(gL#WZ=?F*< zSMRj$+d^>xrX2K7R)h`Zt8~kgJV}DPUa4=e!?Q29zArE~?r$y4?DSOrSWmQc8iH3;4=)8k+qh7sRebp4DQf$+MzeuY>N#c_smUc%mN9rfNCPppo~;h_^Or+lmaq z?J}oJ2VwB@A%h)6RjEnfLYZ|i5f7%*7g+-Cr(M4}ZMeqdd6)`7_S+yk9gnL8>VfS$ zD3639;T&IXzrJM5dsV%P402eO0yvL;mf zxv5l5{a~nalM{z?$dgq&OuajKG@nwd1`nspVd-nwJmnbz5zFQ9aZ-cMqzNZ9=pTU> zjWl{X{N$B?Z?H?F_aqO;fbh8i_|?$rSV@dm?ksFdaz^|4_*B+U^`^;}@&c5QI>pum z{`FwVLmiD8DCfNwc24QXp6+L0_tV)*9K*(-41$W!V&{k!NvB`{<5`>EVbVa>W;(Qa z$w_TyAhiF_AMYkAF^ArCd(u*q9wlL)9mSL0dd4}}I)cc@c~WHi_jp1$h&c))M^wx@ zrJ;0?obi3+^biM$r@t`J_MlPPD}CY_i6cwH-7=L}Q{08+-=X0->LK8+naa2{Uyj-3 z{tUJ~!M(R8k@W|wlNx6~5Sna}AuYeD*~T@AA)`Gp zD1OuFy(8EfF9rRR$`hMbBtj!(pk#+qabjJ$*fp{2M#zx3o6@x8OB~X)YFb}PgA^Tk zdX&45;5^M>=gb5U#wN5$dmaNK-{G({(|uyy)*DI5bqc$~vwf$6kEMs${pks#Lu!Vy zw!5HYffU?23g^@vWYKlx&mC-`Lj@rXRKTv++h-QK+(!iWX;KSoLHvgrq23x6VFEAK z*_?g+Eu;P0c?ow6dD0PNOa^(AI_fnBgT~_xC(Ad{w+Fln!^csls0FVtk0~7;O0+_E z2NCbntj-lwr&GdeSB#|_(p16SdBr6ur1U9CbwUa%eAMcCQtr1i4hKk=;LF|FA6;N7 zc)$eo{?S2b6{r7af95a;xR6Hu8|q6v5iv1$1HPm3n~~YjsVAo1t5B={Ln3Uieu!R3 zn5)glumNAL8-5UV70)A#YSqMbhrS4;M1^ zz~xU#po}R7wXEUF&Ops|z`jb|7G*lovFWIwVUp9Hh@KE_%Z^l6(~qj#-0weCR|2V) z_Fk3kTp3##vR(z!F2s=nX*&`B$mZpg_Qh-fz^uqO*|8|mvV$nG9UaQ{<qL>CYpy5JdbE%NIP_Bcg@xddr7i1m%~_qQ_%@V5f0 
z`qz@|^aX+swgU+gA`(5YV{nG>9lGH`eu5FePat{1RibvHMGkw@wZ4`pCq&3shzQsO z9NTxssbulXICHY)Pi_h~P&6ISGY{ zPvU3G!z^f$9fPB5?q={zw$(Z*3jUY2!=7vGtcP>k=it&7Hv8Z#FmUw3Z!mte>C*OR zHpaOvaaNGu0DQ!mo=%+f_oiXbiTf|t+`qC6cHjL|h}p`uS{-M}GSO%_r-h#bc!ZPW zPwAMe1=_kwN@N?K@S##<+t1Od<9rta1>QLfB{!;ix3T&wiC|^tu5|=VR1%zgb{eWEgTIKM^jZo!tJ2}Ap=-=pXN|46 zHOlKP5Y67uXS6BIyEQIa4Bw2cx|RcMv^o@38r|jH&Br*2RHgly=Hs(9uJ3|hzbJM# zJk$r-S6n4x0%vwZBFJ6`#dsOAuYKaq%a0DbZ|iY#E^h&c zy^+CZ{pPyQPT?F_zry%8u8det`Tsq=gpurt^a=Vqzare2pSriQ#?d{nOO9V%YoU58 z?TW~qL#RgIqH!g?=NCMiR5Z^XrW#Z$iY* z4n~JBt;(B??^Ap7LNeV;T1Q)BzOU!i!~W4PtWT_LAHl5K@^&o4&9Q9|(fN5c%hivQXA;>0y!&zk~ik@yk%5*Ru}H<#*2Wl7nZ)?DyA< zlhG)D$_M{~tc_>tSIidQ3qET9HrfxBO+;auh4{%d|1CD3&)xq2w*3Fcu(z7>0YgUr K;14wn1@(W&x0d1n From 90c989ee5951f14f51ac85608fc449741179c30d Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 27 Jul 2019 23:38:21 +0100 Subject: [PATCH 156/383] [stream-refactor] BufferedWriter must disconenct Stream, not Protocol Fix a race where if Stream.on_receive() detects disconnect, it calls Stream.on_disconnect(), which fires Stream 'disconnect' event, whereas if BufferedWriter.on_transmit() detects disconnect, it called Protocol.on_disconnect(), which did not fire the Stream 'disconnect' event. Since mitogen.parent listens on Stream's 'disconnect' event to reap children, this was causing a very difficult to trigger test failure. Triggered after <1000 runs on a Xeon E5530 with hyperthreading using hackbench running at the same priority: $ hackbench -s 1048576 -l 100000000000 -g 4 --- mitogen/core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mitogen/core.py b/mitogen/core.py index d6fa8922..a04cf1a0 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -1701,7 +1701,7 @@ class BufferedWriter(object): written = self._protocol.stream.transmit_side.write(buf) if not written: _v and LOG.debug('%r.on_transmit(): disconnection detected', self) - self._protocol.on_disconnect(broker) + self._protocol.stream.on_disconnect(broker) return elif written != len(buf): self._buf.appendleft(BufferType(buf, written)) From 4e6aadc40aeeb0c0eebcd181033223c1ad78b285 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 28 Jul 2019 00:27:31 +0100 Subject: [PATCH 157/383] [stream-refactor] fix LogHandler.uncork() race During early initialization under hackbench, it is possible for Broker to be in LogHandler._send() while the main thread has already destroyed _buffer. So we must synchronize them, but only while the handler is corked. --- mitogen/core.py | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index a04cf1a0..6fc0df47 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -1463,6 +1463,9 @@ class LogHandler(logging.Handler): self.context = context self.local = threading.local() self._buffer = [] + # Private synchronization is needed while corked, to ensure no + # concurrent call to _send() exists during uncork(). + self._buffer_lock = threading.Lock() def uncork(self): """ @@ -1470,13 +1473,25 @@ class LogHandler(logging.Handler): possible to route messages, therefore messages are buffered until :meth:`uncork` is called by :class:`ExternalContext`. 
""" - self._send = self.context.send - for msg in self._buffer: - self._send(msg) - self._buffer = None + self._buffer_lock.acquire() + try: + self._send = self.context.send + for msg in self._buffer: + self._send(msg) + self._buffer = None + finally: + self._buffer_lock.release() def _send(self, msg): - self._buffer.append(msg) + self._buffer_lock.acquire() + try: + if self._buffer is None: + # uncork() may run concurrent to _send() + self._send(msg) + else: + self._buffer.append(msg) + finally: + self._buffer_lock.release() def emit(self, rec): if rec.name == 'mitogen.io' or \ From 93342ba60c4891f31698057164712bcfc9552b22 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 28 Jul 2019 01:07:45 +0100 Subject: [PATCH 158/383] Normalize docstring formatting --- mitogen/core.py | 38 +++++++++++++++++++++++++++----------- mitogen/master.py | 34 +++++++++++++++++++++------------- mitogen/minify.py | 12 ++++++++---- mitogen/parent.py | 4 +++- mitogen/profiler.py | 5 +++-- 5 files changed, 62 insertions(+), 31 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index 6fc0df47..16815976 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -337,19 +337,25 @@ class ChannelError(Error): class StreamError(Error): - """Raised when a stream cannot be established.""" + """ + Raised when a stream cannot be established. + """ pass class TimeoutError(Error): - """Raised when a timeout occurs on a stream.""" + """ + Raised when a timeout occurs on a stream. + """ pass def to_text(o): - """Coerce `o` to Unicode by decoding it from UTF-8 if it is an instance of + """ + Coerce `o` to Unicode by decoding it from UTF-8 if it is an instance of :class:`bytes`, otherwise pass it to the :class:`str` constructor. The - returned object is always a plain :class:`str`, any subclass is removed.""" + returned object is always a plain :class:`str`, any subclass is removed. + """ if isinstance(o, BytesType): return o.decode('utf-8') return UnicodeType(o) @@ -1952,7 +1958,9 @@ class MitogenProtocol(Protocol): return self._writer._len def on_transmit(self, broker): - """Transmit buffered messages.""" + """ + Transmit buffered messages. + """ _vv and IOLOG.debug('%r.on_transmit()', self) self._writer.on_transmit(broker) @@ -1961,12 +1969,16 @@ class MitogenProtocol(Protocol): self._writer.write(msg.pack()) def send(self, msg): - """Send `data` to `handle`, and tell the broker we have output. May - be called from any thread.""" + """ + Send `data` to `handle`, and tell the broker we have output. May be + called from any thread. + """ self._router.broker.defer(self._send, msg) def on_shutdown(self, broker): - """Disable :class:`Protocol` immediate disconnect behaviour.""" + """ + Disable :class:`Protocol` immediate disconnect behaviour. + """ _v and LOG.debug('%r: shutting down', self) @@ -2628,7 +2640,9 @@ class IoLoggerProtocol(DelimitedProtocol): self._log.handlers = logging.getLogger().handlers[:] def on_shutdown(self, broker): - """Shut down the write end of the logging socket.""" + """ + Shut down the write end of the logging socket. + """ _v and LOG.debug('%r: shutting down', self) if not IS_WSL: # #333: WSL generates invalid readiness indication on shutdown(). 
@@ -2912,8 +2926,10 @@ class Router(object): del self._handle_map[handle] def on_shutdown(self, broker): - """Called during :meth:`Broker.shutdown`, informs callbacks registered - with :meth:`add_handle_cb` the connection is dead.""" + """ + Called during :meth:`Broker.shutdown`, informs callbacks registered + with :meth:`add_handle_cb` the connection is dead. + """ _v and LOG.debug('%r: shutting down', self, broker) fire(self, 'shutdown') for handle, (persist, fn) in self._handle_map.iteritems(): diff --git a/mitogen/master.py b/mitogen/master.py index 71c6085b..909c3cef 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -91,7 +91,8 @@ RLOG = logging.getLogger('mitogen.ctx') def _stdlib_paths(): - """Return a set of paths from which Python imports the standard library. + """ + Return a set of paths from which Python imports the standard library. """ attr_candidates = [ 'prefix', @@ -111,8 +112,8 @@ def _stdlib_paths(): def is_stdlib_name(modname): - """Return :data:`True` if `modname` appears to come from the standard - library. + """ + Return :data:`True` if `modname` appears to come from the standard library. """ if imp.is_builtin(modname) != 0: return True @@ -139,7 +140,8 @@ def is_stdlib_path(path): def get_child_modules(path): - """Return the suffixes of submodules directly neated beneath of the package + """ + Return the suffixes of submodules directly neated beneath of the package directory at `path`. :param str path: @@ -301,8 +303,10 @@ class ThreadWatcher(object): @classmethod def _reset(cls): - """If we have forked since the watch dictionaries were initialized, all - that has is garbage, so clear it.""" + """ + If we have forked since the watch dictionaries were initialized, all + that has is garbage, so clear it. + """ if os.getpid() != cls._cls_pid: cls._cls_pid = os.getpid() cls._cls_instances_by_target.clear() @@ -668,7 +672,8 @@ class ModuleFinder(object): ] def get_module_source(self, fullname): - """Given the name of a loaded module `fullname`, attempt to find its + """ + Given the name of a loaded module `fullname`, attempt to find its source code. :returns: @@ -692,9 +697,10 @@ class ModuleFinder(object): return tup def resolve_relpath(self, fullname, level): - """Given an ImportFrom AST node, guess the prefix that should be tacked - on to an alias name to produce a canonical name. `fullname` is the name - of the module in which the ImportFrom appears. + """ + Given an ImportFrom AST node, guess the prefix that should be tacked on + to an alias name to produce a canonical name. `fullname` is the name of + the module in which the ImportFrom appears. """ mod = sys.modules.get(fullname, None) if hasattr(mod, '__path__'): @@ -845,9 +851,11 @@ class ModuleResponder(object): self.blacklist.append(fullname) def neutralize_main(self, path, src): - """Given the source for the __main__ module, try to find where it - begins conditional execution based on a "if __name__ == '__main__'" - guard, and remove any code after that point.""" + """ + Given the source for the __main__ module, try to find where it begins + conditional execution based on a "if __name__ == '__main__'" guard, and + remove any code after that point. 
+ """ match = self.MAIN_RE.search(src) if match: return src[:match.start()] diff --git a/mitogen/minify.py b/mitogen/minify.py index dc9f517c..09fdc4eb 100644 --- a/mitogen/minify.py +++ b/mitogen/minify.py @@ -44,7 +44,8 @@ else: def minimize_source(source): - """Remove comments and docstrings from Python `source`, preserving line + """ + Remove comments and docstrings from Python `source`, preserving line numbers and syntax of empty blocks. :param str source: @@ -62,7 +63,8 @@ def minimize_source(source): def strip_comments(tokens): - """Drop comment tokens from a `tokenize` stream. + """ + Drop comment tokens from a `tokenize` stream. Comments on lines 1-2 are kept, to preserve hashbang and encoding. Trailing whitespace is remove from all lines. @@ -84,7 +86,8 @@ def strip_comments(tokens): def strip_docstrings(tokens): - """Replace docstring tokens with NL tokens in a `tokenize` stream. + """ + Replace docstring tokens with NL tokens in a `tokenize` stream. Any STRING token not part of an expression is deemed a docstring. Indented docstrings are not yet recognised. @@ -119,7 +122,8 @@ def strip_docstrings(tokens): def reindent(tokens, indent=' '): - """Replace existing indentation in a token steam, with `indent`. + """ + Replace existing indentation in a token steam, with `indent`. """ old_levels = [] old_level = 0 diff --git a/mitogen/parent.py b/mitogen/parent.py index 375c5622..737b777f 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -1429,7 +1429,9 @@ class Connection(object): self._complete_connection() def on_stream_shutdown(self): - """Request the slave gracefully shut itself down.""" + """ + Request the slave gracefully shut itself down. + """ LOG.debug('%r: requesting child shutdown', self) self.stream.protocol._send( mitogen.core.Message( diff --git a/mitogen/profiler.py b/mitogen/profiler.py index 74bbdb23..e697d599 100644 --- a/mitogen/profiler.py +++ b/mitogen/profiler.py @@ -28,7 +28,8 @@ # !mitogen: minify_safe -"""mitogen.profiler +""" +mitogen.profiler Record and report cProfile statistics from a run. Creates one aggregated output file, one aggregate containing only workers, and one for the top-level process. 
@@ -152,7 +153,7 @@ def do_stat(tmpdir, sort, *args): def main(): if len(sys.argv) < 2 or sys.argv[1] not in ('record', 'report', 'stat'): - sys.stderr.write(__doc__) + sys.stderr.write(__doc__.lstrip()) sys.exit(1) func = globals()['do_' + sys.argv[1]] From c02358698b79b7cbeae34e5e3aafd86cb58c3d82 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 28 Jul 2019 02:51:56 +0000 Subject: [PATCH 159/383] [stream-refactor] don't abort Connection until all buffers are empty --- mitogen/core.py | 2 +- mitogen/parent.py | 96 +++++++++++++++++++++++++++++++---------------- 2 files changed, 64 insertions(+), 34 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index 16815976..6b182c85 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -1554,7 +1554,7 @@ class Stream(object): """ buf = self.receive_side.read(self.protocol.read_size) if not buf: - LOG.debug('%r: empty read, disconnecting', self) + LOG.debug('%r: empty read, disconnecting', self.receive_side) return self.on_disconnect(broker) self.protocol.on_receive(broker, buf) diff --git a/mitogen/parent.py b/mitogen/parent.py index 737b777f..6b3da70b 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -1227,8 +1227,8 @@ class Connection(object): #: :class:`Process` proc = None - #: :class:`mitogen.core.Stream` - stream = None + #: :class:`mitogen.core.Stream` with sides connected to stdin/stdout. + stdio_stream = None #: If `proc.stderr` is set, referencing either a plain pipe or the #: controlling TTY, this references the corresponding @@ -1264,7 +1264,7 @@ class Connection(object): self._router = router def __repr__(self): - return 'Connection(%r)' % (self.stream,) + return 'Connection(%r)' % (self.stdio_stream,) # Minimised, gzipped, base64'd and passed to 'python -c'. It forks, dups # file descriptor 0 as 100, creates a pipe, then execs a new interpreter @@ -1405,8 +1405,8 @@ class Connection(object): def _complete_connection(self): self.timer.cancel() if not self.exception: - self._router.register(self.context, self.stream) - self.stream.set_protocol( + self._router.register(self.context, self.stdio_stream) + self.stdio_stream.set_protocol( mitogen.core.MitogenProtocol( router=self._router, remote_id=self.context.context_id, @@ -1419,11 +1419,11 @@ class Connection(object): Fail the connection attempt. """ LOG.debug('%s: failing connection due to %r', - self.stream.name, exc) + self.stdio_stream.name, exc) if self.exception is None: self._adorn_eof_error(exc) self.exception = exc - for stream in self.stream, self.stderr_stream: + for stream in self.stdio_stream, self.stderr_stream: if stream and not stream.receive_side.closed: stream.on_disconnect(self._router.broker) self._complete_connection() @@ -1433,24 +1433,52 @@ class Connection(object): Request the slave gracefully shut itself down. """ LOG.debug('%r: requesting child shutdown', self) - self.stream.protocol._send( + self.stdio_stream.protocol._send( mitogen.core.Message( src_id=mitogen.context_id, - dst_id=self.stream.protocol.remote_id, + dst_id=self.stdio_stream.protocol.remote_id, handle=mitogen.core.SHUTDOWN, ) ) eof_error_msg = 'EOF on stream; last 100 lines received:\n' - def on_stream_disconnect(self): - if self.stderr_stream is not None: - self.stderr_stream.on_disconnect(self._router.broker) + def on_stdio_disconnect(self): + """ + Handle stdio stream disconnection by failing the Connection if the + stderr stream has already been closed. Otherwise, wait for it to close + (or timeout), to allow buffered diagnostic logs to be consumed. 
+ + It is normal that when a subprocess aborts, stdio has nothing buffered + when it is closed, thus signalling readability, causing an empty read + (interpreted as indicating disconnection) on the next loop iteration, + even if its stderr pipe has lots of diagnostic logs still buffered in + the kernel. Therefore we must wait for both pipes to indicate they are + empty before triggering connection failure. + """ + stderr = self.stderr_stream + if stderr is None or stderr.receive_side.closed: + self._on_streams_disconnected() + + def on_stderr_disconnect(self): + """ + Inverse of :func:`on_stdio_disconnect`. + """ + if self.stdio_stream.receive_side.closed: + self._on_streams_disconnected() + + def _on_streams_disconnected(self): + """ + When disconnection has been detected for both our streams, cancel the + connection timer, mark the connection failed, and reap the child + process. Do nothing if the timer has already been cancelled, indicating + some existing failure has already been noticed. + """ if not self.timer.cancelled: self.timer.cancel() self._fail_connection(EofError( self.eof_error_msg + get_history( - [self.stream, self.stderr_stream] + [self.stdio_stream, self.stderr_stream] ) )) self.proc._async_reap(self, self._router) @@ -1477,33 +1505,35 @@ class Connection(object): def stderr_stream_factory(self): return self.diag_protocol_class.build_stream() - def _setup_stream(self): - self.stream = self.stream_factory() - self.stream.conn = self - self.stream.name = self.options.name or self._get_name() - self.stream.accept(self.proc.stdout, self.proc.stdin) + def _setup_stdio_stream(self): + stream = self.stream_factory() + stream.conn = self + stream.name = self.options.name or self._get_name() + stream.accept(self.proc.stdout, self.proc.stdin) - mitogen.core.listen(self.stream, 'shutdown', - self.on_stream_shutdown) - mitogen.core.listen(self.stream, 'disconnect', - self.on_stream_disconnect) - self._router.broker.start_receive(self.stream) + mitogen.core.listen(stream, 'shutdown', self.on_stream_shutdown) + mitogen.core.listen(stream, 'disconnect', self.on_stdio_disconnect) + self._router.broker.start_receive(stream) + return stream def _setup_stderr_stream(self): - self.stderr_stream = self.stderr_stream_factory() - self.stderr_stream.conn = self - self.stderr_stream.name = self.options.name or self._get_name() - self.stderr_stream.accept(self.proc.stderr, self.proc.stderr) - self._router.broker.start_receive(self.stderr_stream) + stream = self.stderr_stream_factory() + stream.conn = self + stream.name = self.options.name or self._get_name() + stream.accept(self.proc.stderr, self.proc.stderr) + + mitogen.core.listen(stream, 'disconnect', self.on_stderr_disconnect) + self._router.broker.start_receive(stream) + return stream def _async_connect(self): self._start_timer() - self._setup_stream() + self.stdio_stream = self._setup_stdio_stream() if self.context.name is None: - self.context.name = self.stream.name - self.proc.name = self.stream.name + self.context.name = self.stdio_stream.name + self.proc.name = self.stdio_stream.name if self.proc.stderr: - self._setup_stderr_stream() + self.stderr_stream = self._setup_stderr_stream() def connect(self, context): LOG.debug('%r.connect()', self) @@ -2181,7 +2211,7 @@ class Router(mitogen.core.Router): except mitogen.core.TimeoutError: raise mitogen.core.StreamError(self.connection_timeout_msg) - self.route_monitor.notice_stream(conn.stream) + self.route_monitor.notice_stream(conn.stdio_stream) return context def connect(self, method_name, 
name=None, **kwargs): From fc57c1d1a09f765c643c4ae68c3af2fe22d4089b Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 28 Jul 2019 11:20:39 +0100 Subject: [PATCH 160/383] [stream-refactor] repair preamble_size.py again --- preamble_size.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/preamble_size.py b/preamble_size.py index b2cbd60a..692ad7b1 100644 --- a/preamble_size.py +++ b/preamble_size.py @@ -21,7 +21,7 @@ router = mitogen.master.Router() context = mitogen.parent.Context(router, 0) options = mitogen.ssh.Options(max_message_size=0, hostname='foo') conn = mitogen.ssh.Connection(options, router) -conn.context_id = 123 +conn.context = context print('SSH command size: %s' % (len(' '.join(conn.get_boot_command())),)) print('Preamble size: %s (%.2fKiB)' % ( From 6a106f03ffd3053eda0ab695376b034a55f1c955 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 28 Jul 2019 11:22:34 +0100 Subject: [PATCH 161/383] [stream-refactor] make mitogen-fuse work on Linux --- examples/mitogen-fuse.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/examples/mitogen-fuse.py b/examples/mitogen-fuse.py index d0cd9a3a..c1b17032 100644 --- a/examples/mitogen-fuse.py +++ b/examples/mitogen-fuse.py @@ -241,9 +241,13 @@ def main(router): print('usage: %s ' % sys.argv[0]) sys.exit(1) - blerp = fuse.FUSE( + kwargs = {} + if sys.platform == 'darwin': + kwargs['volname'] = '%s (Mitogen)' % (sys.argv[1],) + + f = fuse.FUSE( operations=Operations(sys.argv[1]), mountpoint=sys.argv[2], foreground=True, - volname='%s (Mitogen)' % (sys.argv[1],), + **kwargs ) From 0e6de532de03ab88a0fafb00cf4ffc51acbb4cb9 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 28 Jul 2019 11:35:05 +0100 Subject: [PATCH 162/383] [stream-refactor] fix testlib assertion format string --- tests/testlib.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/testlib.py b/tests/testlib.py index e0ab827d..3eeaa461 100644 --- a/tests/testlib.py +++ b/tests/testlib.py @@ -331,7 +331,9 @@ class TestCase(unittest2.TestCase): for name in counts: assert counts[name] == 1, \ - 'Found %d copies of thread %r running after tests.' % (name,) + 'Found %d copies of thread %r running after tests.' 
% ( + counts[name], name + ) def _teardown_check_fds(self): mitogen.core.Latch._on_fork() From 1843f183a3857d8feb996d1ece905e48aa6b6bca Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 28 Jul 2019 11:55:53 +0000 Subject: [PATCH 163/383] [stream-refactor] fix flake8 errors --- mitogen/__init__.py | 4 ++-- mitogen/debug.py | 2 +- mitogen/unix.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mitogen/__init__.py b/mitogen/__init__.py index 47fe4d38..5e2e29b6 100644 --- a/mitogen/__init__.py +++ b/mitogen/__init__.py @@ -111,10 +111,10 @@ def main(log_level='INFO', profiling=_default_profiling): if profiling: mitogen.core.enable_profiling() mitogen.master.Router.profiling = profiling - utils.log_to_file(level=log_level) + mitogen.utils.log_to_file(level=log_level) return mitogen.core._profile_hook( 'app.main', - utils.run_with_router, + mitogen.utils.run_with_router, func, ) return wrapper diff --git a/mitogen/debug.py b/mitogen/debug.py index 3d13347f..dbab550e 100644 --- a/mitogen/debug.py +++ b/mitogen/debug.py @@ -230,7 +230,7 @@ class ContextDebugger(object): def _handle_debug_msg(self, msg): try: method, args, kwargs = msg.unpickle() - msg.reply(getattr(cls, method)(*args, **kwargs)) + msg.reply(getattr(self, method)(*args, **kwargs)) except Exception: e = sys.exc_info()[1] msg.reply(mitogen.core.CallError(e)) diff --git a/mitogen/unix.py b/mitogen/unix.py index 7be4a464..1f48c6b2 100644 --- a/mitogen/unix.py +++ b/mitogen/unix.py @@ -85,7 +85,7 @@ class Listener(mitogen.core.Protocol): path = make_socket_path() sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) if os.path.exists(path) and is_path_dead(path): - LOG.debug('%r: deleting stale %r', self, path) + LOG.debug('%r: deleting stale %r', cls.__name__, path) os.unlink(path) sock.bind(path) From f45d8eae660bb4c3311ba3baa53d6a3f390dd72b Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 28 Jul 2019 13:23:34 +0100 Subject: [PATCH 164/383] [stream-refactor] replace cutpaste with Stream.accept() in mitogen.unix --- mitogen/unix.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/mitogen/unix.py b/mitogen/unix.py index 1f48c6b2..c0d2bb9c 100644 --- a/mitogen/unix.py +++ b/mitogen/unix.py @@ -148,9 +148,7 @@ class Listener(mitogen.core.Protocol): stream.name = u'unix_client.%d' % (pid,) stream.protocol.auth_id = mitogen.context_id stream.protocol.is_privileged = True - side = mitogen.core.Side(stream, sock) - stream.receive_side = side - stream.transmit_side = side + stream.accept(sock, sock) LOG.debug('%r: accepted %r', self, stream) self._router.register(context, stream) @@ -167,9 +165,7 @@ def _connect(path, broker, sock): router = mitogen.master.Router(broker=broker) stream = mitogen.core.MitogenProtocol.build_stream(router, remote_id) - side = mitogen.core.Side(stream, sock) - stream.transmit_side = side - stream.receive_side = side + stream.accept(sock, sock) stream.name = u'unix_listener.%d' % (pid,) mitogen.core.listen(stream, 'disconnect', _cleanup) From 87440ec6f7a27d6bc89cbbf6c8b581149197cdc2 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 28 Jul 2019 13:58:53 +0000 Subject: [PATCH 165/383] [stream-refactor] Debian Docker container image initctl --- tests/image_prep/build_docker_images.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/image_prep/build_docker_images.py b/tests/image_prep/build_docker_images.py index 9fc89c05..76564297 100755 --- a/tests/image_prep/build_docker_images.py +++ 
b/tests/image_prep/build_docker_images.py @@ -26,7 +26,13 @@ label_by_id = {} for base_image, label in [ ('astj/centos5-vault', 'centos5'), # Python 2.4.3 - ('debian:stretch', 'debian'), # Python 2.7.13, 3.5.3 + # Debian containers later than debuerreotype/debuerreotype#48 no longer + # ship a stub 'initctl', causing (apparently) the Ansible service + # module run at the end of DebOps to trigger a full stop/start of SSHd. + # When SSHd is killed, Docker responds by destroying the container. + # Proper solution is to include a full /bin/init; Docker --init doesn't + # help. In the meantime, just use a fixed older version. + ('debian:stretch-20181112', 'debian'), # Python 2.7.13, 3.5.3 ('centos:6', 'centos6'), # Python 2.6.6 ('centos:7', 'centos7') # Python 2.7.5 ]: From 14f8f00d4db9d0ea1b0d3af5409f3c699b1fb298 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 28 Jul 2019 14:24:23 +0000 Subject: [PATCH 166/383] [stream-refactor] mark py24 as allow-fail This needs a day or two's worth of soaking to fix all the remaining nits --- .travis.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.travis.yml b/.travis.yml index 24525bb1..b8ae0c62 100644 --- a/.travis.yml +++ b/.travis.yml @@ -29,6 +29,11 @@ script: # newest->oldest in various configuartions. matrix: + allow_failures: + # Python 2.4 tests are still unreliable + - language: c + env: MODE=mitogen_py24 DISTRO=centos5 + include: # Mitogen tests. # 2.4 -> 2.4 From 6da991fae961b7f04fa2687f59e0b2e2b556d1e2 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 28 Jul 2019 15:56:45 +0100 Subject: [PATCH 167/383] [stream-refactor] Py3.x test fixes --- tests/data/stubs/stub-su.py | 4 ++-- tests/first_stage_test.py | 2 +- tests/parent_test.py | 8 +++++++- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/tests/data/stubs/stub-su.py b/tests/data/stubs/stub-su.py index 6782490c..1f5e512d 100755 --- a/tests/data/stubs/stub-su.py +++ b/tests/data/stubs/stub-su.py @@ -9,9 +9,9 @@ import time # #363: old input loop would fail to spot auth failure because of scheduling # vs. su calling write() twice. 
if 'DO_SLOW_AUTH_FAILURE' in os.environ: - os.write(2, 'su: ') + os.write(2, u'su: '.encode()) time.sleep(0.5) - os.write(2, 'incorrect password\n') + os.write(2, u'incorrect password\n'.encode()) os._exit(1) diff --git a/tests/first_stage_test.py b/tests/first_stage_test.py index 20d7fd1b..53f98373 100644 --- a/tests/first_stage_test.py +++ b/tests/first_stage_test.py @@ -41,7 +41,7 @@ class CommandLineTest(testlib.RouterMixin, testlib.TestCase): stdout, stderr = proc.communicate() self.assertEquals(0, proc.returncode) self.assertEquals(stdout, - mitogen.parent.BootstrapProtocol.EC0_MARKER+'\n') + mitogen.parent.BootstrapProtocol.EC0_MARKER+b('\n')) self.assertIn(b("Error -5 while decompressing data"), stderr) finally: fp.close() diff --git a/tests/parent_test.py b/tests/parent_test.py index 1accf222..7ac482c5 100644 --- a/tests/parent_test.py +++ b/tests/parent_test.py @@ -14,6 +14,11 @@ from testlib import Popen__terminate import mitogen.parent +try: + file +except NameError: + from io import FileIO as file + def wait_for_child(pid, timeout=1.0): deadline = time.time() + timeout @@ -226,7 +231,8 @@ class TtyCreateChildTest(testlib.TestCase): ]) deadline = time.time() + 5.0 mitogen.core.set_block(proc.stdin.fileno()) - self.assertEquals(mitogen.core.b('hi\n'), proc.stdin.read()) + # read(3) below due to https://bugs.python.org/issue37696 + self.assertEquals(mitogen.core.b('hi\n'), proc.stdin.read(3)) waited_pid, status = os.waitpid(proc.pid, 0) self.assertEquals(proc.pid, waited_pid) self.assertEquals(0, status) From c0513425ca98f86128dbd2f70198c651990b5797 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 28 Jul 2019 16:46:40 +0100 Subject: [PATCH 168/383] core: more concise Side.repr. --- mitogen/core.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mitogen/core.py b/mitogen/core.py index 6b182c85..626d5297 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -1785,7 +1785,10 @@ class Side(object): set_nonblock(self.fd) def __repr__(self): - return '' % (self.stream, self.fd) + return '' % ( + self.stream.name or repr(self.stream), + self.fd + ) @classmethod def _on_fork(cls): From 2ccdeeeb87c41f30dad36ad3f242317b16a36e7d Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 28 Jul 2019 18:50:15 +0100 Subject: [PATCH 169/383] parent: tidy up create_socketpair() --- mitogen/parent.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/mitogen/parent.py b/mitogen/parent.py index 6b3da70b..c46b104a 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -260,13 +260,13 @@ def create_socketpair(size=None): ``stdout``. As they are sockets their buffers are tunable, allowing large buffers to improve file transfer throughput and reduce IO loop iterations. """ + if size is None: + size = mitogen.core.CHUNK_SIZE + parentfp, childfp = socket.socketpair() - parentfp.setsockopt(socket.SOL_SOCKET, - socket.SO_SNDBUF, - size or mitogen.core.CHUNK_SIZE) - childfp.setsockopt(socket.SOL_SOCKET, - socket.SO_RCVBUF, - size or mitogen.core.CHUNK_SIZE) + for fp in parentfp, childfp: + fp.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, size) + return parentfp, childfp From 6b8a7cbcc4762dd84d1adda78eb5312bfed2e607 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 28 Jul 2019 18:50:49 +0100 Subject: [PATCH 170/383] [stream-refactor] parent: fix crash on graceful shutdown Now it's possible for stream.protocol to not refer to MitogenProtocol, move the signal handler to a MitogenProtocol subclass instead. 
Fixes a crash where CTRL+C during child bootstrap would print AttributeError. --- mitogen/parent.py | 51 +++++++++++++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 22 deletions(-) diff --git a/mitogen/parent.py b/mitogen/parent.py index c46b104a..97ec4949 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -638,10 +638,10 @@ def _upgrade_broker(broker): # This function is deadly! The act of calling start_receive() generates log # messages which must be silenced as the upgrade progresses, otherwise the # poller state will change as it is copied, resulting in write fds that are - # lost. (Due to LogHandler->Router->Stream->Broker->Poller, where Stream - # only calls start_transmit() when transitioning from empty to non-empty - # buffer. If the start_transmit() is lost, writes from the child hang - # permanently). + # lost. (Due to LogHandler->Router->Stream->Protocol->Broker->Poller, where + # Stream only calls start_transmit() when transitioning from empty to + # non-empty buffer. If the start_transmit() is lost, writes from the child + # hang permanently). root = logging.getLogger() old_level = root.level root.setLevel(logging.CRITICAL) @@ -810,7 +810,8 @@ class CallSpec(object): class PollPoller(mitogen.core.Poller): """ Poller based on the POSIX poll(2) interface. Not available on some versions - of OS X, otherwise it is the preferred poller for small FD counts. + of OS X, otherwise it is the preferred poller for small FD counts, as there + is no setup/teardown/configuration system call overhead. """ SUPPORTED = hasattr(select, 'poll') _repr = 'PollPoller()' @@ -1106,8 +1107,8 @@ class BootstrapProtocol(RegexProtocol): """ Respond to stdout of a child during bootstrap. Wait for EC0_MARKER to be written by the first stage to indicate it can receive the bootstrap, then - await EC1_MARKER to indicate success, and - :class:`mitogen.core.MitogenProtocol` can be enabled. + await EC1_MARKER to indicate success, and :class:`MitogenProtocol` can be + enabled. """ #: Sentinel value emitted by the first stage to indicate it is ready to #: receive the compressed bootstrap. For :mod:`mitogen.ssh` this must have @@ -1161,6 +1162,26 @@ class LogProtocol(LineLoggingProtocolMixin, mitogen.core.DelimitedProtocol): LOG.info(u'%s: %s', self.stream.name, line.decode('utf-8', 'replace')) +class MitogenProtocol(mitogen.core.MitogenProtocol): + """ + Extend core.MitogenProtocol to cause SHUTDOWN to be sent to the child + during graceful shutdown. + """ + def on_shutdown(self, broker): + """ + Respond to the broker's request for the stream to shut down by sending + SHUTDOWN to the child. + """ + LOG.debug('%r: requesting child shutdown', self) + self._send( + mitogen.core.Message( + src_id=mitogen.context_id, + dst_id=self.remote_id, + handle=mitogen.core.SHUTDOWN, + ) + ) + + class Options(object): name = None @@ -1407,7 +1428,7 @@ class Connection(object): if not self.exception: self._router.register(self.context, self.stdio_stream) self.stdio_stream.set_protocol( - mitogen.core.MitogenProtocol( + MitogenProtocol( router=self._router, remote_id=self.context.context_id, ) @@ -1428,19 +1449,6 @@ class Connection(object): stream.on_disconnect(self._router.broker) self._complete_connection() - def on_stream_shutdown(self): - """ - Request the slave gracefully shut itself down. 
- """ - LOG.debug('%r: requesting child shutdown', self) - self.stdio_stream.protocol._send( - mitogen.core.Message( - src_id=mitogen.context_id, - dst_id=self.stdio_stream.protocol.remote_id, - handle=mitogen.core.SHUTDOWN, - ) - ) - eof_error_msg = 'EOF on stream; last 100 lines received:\n' def on_stdio_disconnect(self): @@ -1511,7 +1519,6 @@ class Connection(object): stream.name = self.options.name or self._get_name() stream.accept(self.proc.stdout, self.proc.stdin) - mitogen.core.listen(stream, 'shutdown', self.on_stream_shutdown) mitogen.core.listen(stream, 'disconnect', self.on_stdio_disconnect) self._router.broker.start_receive(stream) return stream From 9035884c77617ce0fd59519198adb2f786f80ecf Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 28 Jul 2018 18:57:30 -0700 Subject: [PATCH 171/383] ansible: abstract worker process model. Move all details of broker/router setup out of connection.py, instead deferring it to a WorkerModel class exported by process.py via get_worker_model(). The running strategy can override the configured worker model via _get_worker_model(). ClassicWorkerModel is installed by default, which implements the extension's existing process model. Add optional support for the third party setproctitle module, so children have pretty names in ps output. Add optional support for per-CPU multiplexers to classic runs. --- ansible_mitogen/connection.py | 102 ++- ansible_mitogen/planner.py | 13 +- .../plugins/connection/mitogen_local.py | 2 +- ansible_mitogen/process.py | 579 +++++++++++++----- ansible_mitogen/services.py | 6 +- ansible_mitogen/strategy.py | 139 +++-- mitogen/core.py | 6 +- mitogen/parent.py | 17 + mitogen/service.py | 35 +- tests/ansible/run_ansible_playbook.py | 6 +- 10 files changed, 623 insertions(+), 282 deletions(-) diff --git a/ansible_mitogen/connection.py b/ansible_mitogen/connection.py index 42fa2ef8..4d310d75 100644 --- a/ansible_mitogen/connection.py +++ b/ansible_mitogen/connection.py @@ -38,6 +38,7 @@ import sys import time import jinja2.runtime +from ansible.module_utils import six import ansible.constants as C import ansible.errors import ansible.plugins.connection @@ -459,15 +460,10 @@ class CallChain(mitogen.parent.CallChain): class Connection(ansible.plugins.connection.ConnectionBase): - #: mitogen.master.Broker for this worker. - broker = None - - #: mitogen.master.Router for this worker. - router = None - - #: mitogen.parent.Context representing the parent Context, which is - #: presently always the connection multiplexer process. - parent = None + #: The :class:`ansible_mitogen.process.Binding` representing the connection + #: multiplexer this connection's target is assigned to. :data:`None` when + #: disconnected. + binding = None #: mitogen.parent.Context for the target account on the target, possibly #: reached via become. @@ -518,13 +514,6 @@ class Connection(ansible.plugins.connection.ConnectionBase): #: matching vanilla Ansible behaviour. loader_basedir = None - def __init__(self, play_context, new_stdin, **kwargs): - assert ansible_mitogen.process.MuxProcess.unix_listener_path, ( - 'Mitogen connection types may only be instantiated ' - 'while the "mitogen" strategy is active.' - ) - super(Connection, self).__init__(play_context, new_stdin) - def __del__(self): """ Ansible cannot be trusted to always call close() e.g. 
the synchronize @@ -585,6 +574,15 @@ class Connection(ansible.plugins.connection.ConnectionBase): self._connect() return self.init_child_result['home_dir'] + def get_binding(self): + """ + Return the :class:`ansible_mitogen.process.Binding` representing the + process that hosts the physical connection and services (context + establishment, file transfer, ..) for our desired target. + """ + assert self.binding is not None + return self.binding + @property def connected(self): return self.context is not None @@ -672,18 +670,6 @@ class Connection(ansible.plugins.connection.ConnectionBase): return stack - def _connect_broker(self): - """ - Establish a reference to the Broker, Router and parent context used for - connections. - """ - if not self.broker: - self.broker = mitogen.master.Broker() - self.router, self.parent = mitogen.unix.connect( - path=ansible_mitogen.process.MuxProcess.unix_listener_path, - broker=self.broker, - ) - def _build_stack(self): """ Construct a list of dictionaries representing the connection @@ -691,14 +677,14 @@ class Connection(ansible.plugins.connection.ConnectionBase): additionally used by the integration tests "mitogen_get_stack" action to fetch the would-be connection configuration. """ - return self._stack_from_spec( - ansible_mitogen.transport_config.PlayContextSpec( - connection=self, - play_context=self._play_context, - transport=self.transport, - inventory_name=self.inventory_hostname, - ) + spec = ansible_mitogen.transport_config.PlayContextSpec( + connection=self, + play_context=self._play_context, + transport=self.transport, + inventory_name=self.inventory_hostname, ) + stack = self._stack_from_spec(spec) + return spec.inventory_name(), stack def _connect_stack(self, stack): """ @@ -711,7 +697,8 @@ class Connection(ansible.plugins.connection.ConnectionBase): description of the returned dictionary. """ try: - dct = self.parent.call_service( + dct = mitogen.service.call( + call_context=self.binding.get_service_context(), service_name='ansible_mitogen.services.ContextService', method_name='get', stack=mitogen.utils.cast(list(stack)), @@ -758,8 +745,9 @@ class Connection(ansible.plugins.connection.ConnectionBase): if self.connected: return - self._connect_broker() - stack = self._build_stack() + inventory_name, stack = self._build_stack() + worker_model = ansible_mitogen.process.get_worker_model() + self.binding = worker_model.get_binding(inventory_name) self._connect_stack(stack) def _mitogen_reset(self, mode): @@ -776,9 +764,10 @@ class Connection(ansible.plugins.connection.ConnectionBase): return self.chain.reset() - self.parent.call_service( + mitogen.service.call( + call_context=self.binding.get_service_context(), service_name='ansible_mitogen.services.ContextService', - method_name=mode, + method_name='put', context=self.context ) @@ -787,27 +776,6 @@ class Connection(ansible.plugins.connection.ConnectionBase): self.init_child_result = None self.chain = None - def _shutdown_broker(self): - """ - Shutdown the broker thread during :meth:`close` or :meth:`reset`. - """ - if self.broker: - self.broker.shutdown() - self.broker.join() - self.broker = None - self.router = None - - # #420: Ansible executes "meta" actions in the top-level process, - # meaning "reset_connection" will cause :class:`mitogen.core.Latch` - # FDs to be cached and erroneously shared by children on subsequent - # WorkerProcess forks. To handle that, call on_fork() to ensure any - # shared state is discarded. 
- # #490: only attempt to clean up when it's known that some - # resources exist to cleanup, otherwise later __del__ double-call - # to close() due to GC at random moment may obliterate an unrelated - # Connection's resources. - mitogen.fork.on_fork() - def close(self): """ Arrange for the mitogen.master.Router running in the worker to @@ -815,7 +783,9 @@ class Connection(ansible.plugins.connection.ConnectionBase): multiple times. """ self._mitogen_reset(mode='put') - self._shutdown_broker() + if self.binding: + self.binding.close() + self.binding = None def _reset_find_task_vars(self): """ @@ -853,7 +823,8 @@ class Connection(ansible.plugins.connection.ConnectionBase): self._connect() self._mitogen_reset(mode='reset') - self._shutdown_broker() + self.binding.close() + self.binding = None # Compatibility with Ansible 2.4 wait_for_connection plug-in. _reset = reset @@ -1024,7 +995,8 @@ class Connection(ansible.plugins.connection.ConnectionBase): utimes=(st.st_atime, st.st_mtime)) self._connect() - self.parent.call_service( + mitogen.service.call( + call_context=self.binding.get_service_context(), service_name='mitogen.service.FileService', method_name='register', path=mitogen.utils.cast(in_path) @@ -1036,7 +1008,7 @@ class Connection(ansible.plugins.connection.ConnectionBase): # file alive, but that requires more work. self.get_chain().call( ansible_mitogen.target.transfer_file, - context=self.parent, + context=self.binding.get_child_service_context(), in_path=in_path, out_path=out_path ) diff --git a/ansible_mitogen/planner.py b/ansible_mitogen/planner.py index 2eebd36d..96b06995 100644 --- a/ansible_mitogen/planner.py +++ b/ansible_mitogen/planner.py @@ -148,6 +148,8 @@ class Planner(object): # named by `runner_name`. } """ + binding = self._inv.connection.get_binding() + new = dict((mitogen.core.UnicodeType(k), kwargs[k]) for k in kwargs) new.setdefault('good_temp_dir', @@ -155,7 +157,7 @@ class Planner(object): new.setdefault('cwd', self._inv.connection.get_default_cwd()) new.setdefault('extra_env', self._inv.connection.get_default_env()) new.setdefault('emulate_tty', True) - new.setdefault('service_context', self._inv.connection.parent) + new.setdefault('service_context', binding.get_child_service_context()) return new def __repr__(self): @@ -328,7 +330,9 @@ class NewStylePlanner(ScriptPlanner): def get_module_map(self): if self._module_map is None: - self._module_map = self._inv.connection.parent.call_service( + binding = self._inv.connection.get_binding() + self._module_map = mitogen.service.call( + call_context=binding.get_service_context(), service_name='ansible_mitogen.services.ModuleDepService', method_name='scan', @@ -405,9 +409,12 @@ def get_module_data(name): def _propagate_deps(invocation, planner, context): - invocation.connection.parent.call_service( + binding = invocation.connection.get_binding() + mitogen.service.call( + call_context=binding.get_service_context(), service_name='mitogen.service.PushFileService', method_name='propagate_paths_and_modules', + context=context, paths=planner.get_push_files(), modules=planner.get_module_deps(), diff --git a/ansible_mitogen/plugins/connection/mitogen_local.py b/ansible_mitogen/plugins/connection/mitogen_local.py index 24b84a03..a98c834c 100644 --- a/ansible_mitogen/plugins/connection/mitogen_local.py +++ b/ansible_mitogen/plugins/connection/mitogen_local.py @@ -81,6 +81,6 @@ class Connection(ansible_mitogen.connection.Connection): from WorkerProcess, we must emulate that. 
""" return dict_diff( - old=ansible_mitogen.process.MuxProcess.original_env, + old=ansible_mitogen.process.MuxProcess.cls_original_env, new=os.environ, ) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index a8827cb1..0eaf25a7 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -30,6 +30,7 @@ from __future__ import absolute_import import atexit import errno import logging +import multiprocessing import os import signal import socket @@ -41,9 +42,15 @@ try: except ImportError: faulthandler = None +try: + import setproctitle +except ImportError: + setproctitle = None + import mitogen import mitogen.core import mitogen.debug +import mitogen.fork import mitogen.master import mitogen.parent import mitogen.service @@ -52,6 +59,7 @@ import mitogen.utils import ansible import ansible.constants as C +import ansible.errors import ansible_mitogen.logging import ansible_mitogen.services @@ -66,28 +74,55 @@ ANSIBLE_PKG_OVERRIDE = ( u"__author__ = %r\n" ) +worker_model_msg = ( + 'Mitogen connection types may only be instantiated when one of the ' + '"mitogen_*" or "operon_*" strategies are active.' +) + +#: The worker model as configured by the currently running strategy. This is +#: managed via :func:`get_worker_model` / :func:`set_worker_model` functions by +#: :class:`StrategyMixin`. +_worker_model = None + + +#: A copy of the sole :class:`ClassicWorkerModel` that ever exists during a +#: classic run, as return by :func:`get_classic_worker_model`. +_classic_worker_model = None + + +def set_worker_model(model): + """ + To remove process model-wiring from + :class:`ansible_mitogen.connection.Connection`, it is necessary to track + some idea of the configured execution environment outside the connection + plug-in. -def clean_shutdown(sock): + That is what :func:`set_worker_model` and :func:`get_worker_model` are for. """ - Shut the write end of `sock`, causing `recv` in the worker process to wake - up with a 0-byte read and initiate mux process exit, then wait for a 0-byte - read from the read end, which will occur after the the child closes the - descriptor on exit. - - This is done using :mod:`atexit` since Ansible lacks any more sensible hook - to run code during exit, and unless some synchronization exists with - MuxProcess, debug logs may appear on the user's terminal *after* the prompt - has been printed. + global _worker_model + assert model is None or _worker_model is None + _worker_model = model + + +def get_worker_model(): """ - try: - sock.shutdown(socket.SHUT_WR) - except socket.error: - # Already closed. This is possible when tests are running. - LOG.debug('clean_shutdown: ignoring duplicate call') - return + Return the :class:`WorkerModel` currently configured by the running + strategy. + """ + if _worker_model is None: + raise ansible.errors.AnsibleConnectionFailure(worker_model_msg) + return _worker_model - sock.recv(1) - sock.close() + +def get_classic_worker_model(): + """ + Return the single :class:`ClassicWorkerModel` instance, constructing it if + necessary. + """ + global _classic_worker_model + if _classic_worker_model is None: + _classic_worker_model = ClassicWorkerModel() + return _classic_worker_model def getenv_int(key, default=0): @@ -119,6 +154,330 @@ def save_pid(name): fp.write(str(os.getpid())) +def setup_pool(pool): + """ + Configure a connection multiplexer's :class:`mitogen.service.Pool` with + services accessed by clients and WorkerProcesses. 
+ """ + pool.add(mitogen.service.FileService(router=pool.router)) + pool.add(mitogen.service.PushFileService(router=pool.router)) + pool.add(ansible_mitogen.services.ContextService(router=pool.router)) + pool.add(ansible_mitogen.services.ModuleDepService(pool.router)) + LOG.debug('Service pool configured: size=%d', pool.size) + + +def _setup_simplejson(responder): + """ + We support serving simplejson for Python 2.4 targets on Ansible 2.3, at + least so the package's own CI Docker scripts can run without external + help, however newer versions of simplejson no longer support Python + 2.4. Therefore override any installed/loaded version with a + 2.4-compatible version we ship in the compat/ directory. + """ + responder.whitelist_prefix('simplejson') + + # issue #536: must be at end of sys.path, in case existing newer + # version is already loaded. + compat_path = os.path.join(os.path.dirname(__file__), 'compat') + sys.path.append(compat_path) + + for fullname, is_pkg, suffix in ( + (u'simplejson', True, '__init__.py'), + (u'simplejson.decoder', False, 'decoder.py'), + (u'simplejson.encoder', False, 'encoder.py'), + (u'simplejson.scanner', False, 'scanner.py'), + ): + path = os.path.join(compat_path, 'simplejson', suffix) + fp = open(path, 'rb') + try: + source = fp.read() + finally: + fp.close() + + responder.add_source_override( + fullname=fullname, + path=path, + source=source, + is_pkg=is_pkg, + ) + + +def _setup_responder(responder): + """ + Configure :class:`mitogen.master.ModuleResponder` to only permit + certain packages, and to generate custom responses for certain modules. + """ + responder.whitelist_prefix('ansible') + responder.whitelist_prefix('ansible_mitogen') + _setup_simplejson(responder) + + # Ansible 2.3 is compatible with Python 2.4 targets, however + # ansible/__init__.py is not. Instead, executor/module_common.py writes + # out a 2.4-compatible namespace package for unknown reasons. So we + # copy it here. + responder.add_source_override( + fullname='ansible', + path=ansible.__file__, + source=(ANSIBLE_PKG_OVERRIDE % ( + ansible.__version__, + ansible.__author__, + )).encode(), + is_pkg=True, + ) + + +def common_setup(_init_logging=True): + save_pid('controller') + ansible_mitogen.logging.set_process_name('top') + ansible_mitogen.affinity.policy.assign_controller() + + mitogen.utils.setup_gil() + if _init_logging: + ansible_mitogen.logging.setup() + + if faulthandler is not None: + faulthandler.enable() + + MuxProcess.profiling = getenv_int('MITOGEN_PROFILING') > 0 + if MuxProcess.profiling: + mitogen.core.enable_profiling() + + MuxProcess.cls_original_env = dict(os.environ) + + +def get_cpu_count(default=None): + """ + Get the multiplexer CPU count from the MITOGEN_CPU_COUNT environment + variable, returning `default` if one isn't set, or is out of range. + + :param int default: + Default CPU, or :data:`None` to use all available CPUs. + """ + max_cpus = multiprocessing.cpu_count() + if default is None: + default = max_cpus + + cpu_count = getenv_int('MITOGEN_CPU_COUNT', default=default) + if cpu_count < 1 or cpu_count > max_cpus: + cpu_count = default + + return cpu_count + + +class Binding(object): + def get_child_service_context(self): + """ + Return the :class:`mitogen.core.Context` to which children should + direct ContextService requests, or :data:`None` for the local process. 
+ """ + raise NotImplementedError() + + def get_service_context(self): + """ + Return the :class:`mitogen.core.Context` to which this process should + direct ContextService requests, or :data:`None` for the local process. + """ + raise NotImplementedError() + + def close(self): + """ + Finalize any associated resources. + """ + raise NotImplementedError() + + +class WorkerModel(object): + def on_strategy_start(self): + """ + Called prior to strategy start in the top-level process. Responsible + for preparing any worker/connection multiplexer state. + """ + raise NotImplementedError() + + def on_strategy_complete(self): + """ + Called after strategy completion in the top-level process. Must place + Ansible back in a "compatible" state where any other strategy plug-in + may execute. + """ + raise NotImplementedError() + + def get_binding(self, inventory_name): + raise NotImplementedError() + + +class ClassicBinding(Binding): + """ + Only one connection may be active at a time in a classic worker, so its + binding just provides forwarders back to :class:`ClassicWorkerModel`. + """ + def __init__(self, model): + self.model = model + + def get_service_context(self): + """ + See Binding.get_service_context(). + """ + return self.model.parent + + def get_child_service_context(self): + """ + See Binding.get_child_service_context(). + """ + return self.model.parent + + def close(self): + """ + See Binding.close(). + """ + self.model.on_binding_close() + + +class ClassicWorkerModel(WorkerModel): + #: mitogen.master.Router for this worker. + router = None + + #: mitogen.master.Broker for this worker. + broker = None + + #: Name of multiplexer process socket we are currently connected to. + listener_path = None + + #: mitogen.parent.Context representing the parent Context, which is the + #: connection multiplexer process when running in classic mode, or the + #: top-level process when running a new-style mode. + parent = None + + def __init__(self, _init_logging=True): + self._init_logging = _init_logging + self.initialized = False + + def _listener_for_name(self, name): + """ + Given a connection stack, return the UNIX listener that should be used + to communicate with it. This is a simple hash of the inventory name. + """ + if len(self._muxes) == 1: + return self._muxes[0].path + + idx = abs(hash(name)) % len(self._muxes) + LOG.debug('Picked worker %d: %s', idx, self._muxes[idx].path) + return self._muxes[idx].path + + def _reconnect(self, path): + if self.router is not None: + # Router can just be overwritten, but the previous parent + # connection must explicitly be removed from the broker first. + self.router.disconnect(self.parent) + self.parent = None + self.router = None + + self.router, self.parent = mitogen.unix.connect( + path=path, + broker=self.broker, + ) + self.listener_path = path + + def on_process_exit(self, sock): + """ + This is an :mod:`atexit` handler installed in the top-level process. + + Shut the write end of `sock`, causing the receive side of the socket in + every worker process to wake up with a 0-byte reads, and causing their + main threads to wake up and initiate shutdown. After shutting the + socket down, wait for a 0-byte read from the read end, which will occur + after the last child closes the descriptor on exit. + + This is done using :mod:`atexit` since Ansible lacks any better hook to + run code during exit, and unless some synchronization exists with + MuxProcess, debug logs may appear on the user's terminal *after* the + prompt has been printed. 
+ """ + try: + sock.shutdown(socket.SHUT_WR) + except socket.error: + # Already closed. This is possible when tests are running. + LOG.debug('on_process_exit: ignoring duplicate call') + return + + mitogen.core.io_op(sock.recv, 1) + sock.close() + + def _initialize(self): + """ + Arrange for classic process model connection multiplexer child + processes to be started, if they are not already running. + + The parent process picks a UNIX socket path the child will use prior to + fork, creates a socketpair used essentially as a semaphore, then blocks + waiting for the child to indicate the UNIX socket is ready for use. + + :param bool _init_logging: + For testing, if :data:`False`, don't initialize logging. + """ + common_setup(_init_logging=self._init_logging) + MuxProcess.cls_parent_sock, \ + MuxProcess.cls_child_sock = socket.socketpair() + + mitogen.core.set_cloexec(MuxProcess.cls_parent_sock.fileno()) + mitogen.core.set_cloexec(MuxProcess.cls_child_sock.fileno()) + + self._muxes = [ + MuxProcess(index) + for index in range(get_cpu_count(default=1)) + ] + for mux in self._muxes: + mux.start() + + atexit.register(self.on_process_exit, MuxProcess.cls_parent_sock) + MuxProcess.cls_child_sock.close() + MuxProcess.cls_child_sock = None + + def on_strategy_start(self): + """ + See WorkerModel.on_strategy_start(). + """ + if not self.initialized: + self._initialize() + self.initialized = True + + def on_strategy_complete(self): + """ + See WorkerModel.on_strategy_complete(). + """ + + def get_binding(self, inventory_name): + """ + See WorkerModel.get_binding(). + """ + if self.broker is None: + self.broker = mitogen.master.Broker() + + path = self._listener_for_name(inventory_name) + if path != self.listener_path: + self._reconnect(path) + + return ClassicBinding(self) + + def on_binding_close(self): + if self.broker: + self.broker.shutdown() + self.broker.join() + self.router = None + self.broker = None + + # #420: Ansible executes "meta" actions in the top-level process, + # meaning "reset_connection" will cause :class:`mitogen.core.Latch` + # FDs to be cached and erroneously shared by children on subsequent + # WorkerProcess forks. To handle that, call on_fork() to ensure any + # shared state is discarded. + # #490: only attempt to clean up when it's known that some + # resources exist to cleanup, otherwise later __del__ double-call + # to close() due to GC at random moment may obliterate an unrelated + # Connection's related resources. + mitogen.fork.on_fork() + + class MuxProcess(object): """ Implement a subprocess forked from the Ansible top-level, as a safe place @@ -136,30 +495,27 @@ class MuxProcess(object): See https://bugs.python.org/issue6721 for a thorough description of the class of problems this worker is intended to avoid. """ - #: In the top-level process, this references one end of a socketpair(), - #: which the MuxProcess blocks reading from in order to determine when - #: the master process dies. Once the read returns, the MuxProcess will - #: begin shutting itself down. - worker_sock = None - - #: In the worker process, this references the other end of - #: :py:attr:`worker_sock`. - child_sock = None + #: whose other end child MuxProcesses block reading from to determine when + #: the master process dies. When the top-level exits abnormally, or + #: normally but where :func:`on_process_exit` has been called, this socket + #: will be closed, causing all the children to wake. 
+ cls_parent_sock = None - #: In the top-level process, this is the PID of the single MuxProcess - #: that was spawned. - worker_pid = None + #: In the mux process, this is the other end of :attr:`cls_parent_sock`. + #: The main thread blocks on a read from it until :attr:`cls_parent_sock` + #: is closed. + cls_child_sock = None #: A copy of :data:`os.environ` at the time the multiplexer process was #: started. It's used by mitogen_local.py to find changes made to the #: top-level environment (e.g. vars plugins -- issue #297) that must be #: applied to locally executed commands and modules. - original_env = None + cls_original_env = None - #: In both processes, this is the temporary UNIX socket used for - #: forked WorkerProcesses to contact the MuxProcess - unix_listener_path = None + #: In both processes, this a list of the temporary UNIX sockets used for + #: forked WorkerProcesses to contact the forked mux processes. + cls_listener_paths = None @classmethod def _reset(cls): @@ -171,69 +527,54 @@ class MuxProcess(object): cls.worker_sock = None os.waitpid(cls.worker_pid, 0) - @classmethod - def start(cls, _init_logging=True): - """ - Arrange for the subprocess to be started, if it is not already running. + def __init__(self, index): + self.index = index + #: Individual path of this process. + self.path = mitogen.unix.make_socket_path() - The parent process picks a UNIX socket path the child will use prior to - fork, creates a socketpair used essentially as a semaphore, then blocks - waiting for the child to indicate the UNIX socket is ready for use. - - :param bool _init_logging: - For testing, if :data:`False`, don't initialize logging. - """ - if cls.worker_sock is not None: + def start(self): + pid = os.fork() + if pid: + # Wait for child to boot before continuing. 
+ mitogen.core.io_op(MuxProcess.cls_parent_sock.recv, 1) return - if faulthandler is not None: - faulthandler.enable() - - mitogen.utils.setup_gil() - cls.unix_listener_path = mitogen.unix.make_socket_path() - cls.worker_sock, cls.child_sock = socket.socketpair() - atexit.register(clean_shutdown, cls.worker_sock) - mitogen.core.set_cloexec(cls.worker_sock.fileno()) - mitogen.core.set_cloexec(cls.child_sock.fileno()) - - cls.profiling = os.environ.get('MITOGEN_PROFILING') is not None - if cls.profiling: - mitogen.core.enable_profiling() - if _init_logging: - ansible_mitogen.logging.setup() - - cls.original_env = dict(os.environ) - cls.worker_pid = os.fork() - if cls.worker_pid: - save_pid('controller') - ansible_mitogen.logging.set_process_name('top') - ansible_mitogen.affinity.policy.assign_controller() - cls.child_sock.close() - cls.child_sock = None - mitogen.core.io_op(cls.worker_sock.recv, 1) - else: - save_pid('mux') - ansible_mitogen.logging.set_process_name('mux') - ansible_mitogen.affinity.policy.assign_muxprocess() - cls.worker_sock.close() - cls.worker_sock = None - self = cls() - self.worker_main() + save_pid('mux') + ansible_mitogen.logging.set_process_name('mux:' + str(self.index)) + if setproctitle: + setproctitle.setproctitle('mitogen mux:%s (%s)' % ( + self.index, + os.path.basename(self.path), + )) + + MuxProcess.cls_parent_sock.close() + MuxProcess.cls_parent_sock = None + try: + try: + self.worker_main() + except Exception: + LOG.exception('worker_main() crashed') + finally: + sys.exit() def worker_main(self): """ - The main function of for the mux process: setup the Mitogen broker - thread and ansible_mitogen services, then sleep waiting for the socket + The main function of the mux process: setup the Mitogen broker thread + and ansible_mitogen services, then sleep waiting for the socket connected to the parent to be closed (indicating the parent has died). """ + save_pid('mux') + ansible_mitogen.logging.set_process_name('mux') + ansible_mitogen.affinity.policy.assign_muxprocess() + self._setup_master() self._setup_services() try: # Let the parent know our listening socket is ready. - mitogen.core.io_op(self.child_sock.send, b('1')) + mitogen.core.io_op(self.cls_child_sock.send, b('1')) # Block until the socket is closed, which happens on parent exit. - mitogen.core.io_op(self.child_sock.recv, 1) + mitogen.core.io_op(self.cls_child_sock.recv, 1) finally: self.broker.shutdown() self.broker.join() @@ -252,64 +593,6 @@ class MuxProcess(object): if secs: mitogen.debug.dump_to_logger(secs=secs) - def _setup_simplejson(self, responder): - """ - We support serving simplejson for Python 2.4 targets on Ansible 2.3, at - least so the package's own CI Docker scripts can run without external - help, however newer versions of simplejson no longer support Python - 2.4. Therefore override any installed/loaded version with a - 2.4-compatible version we ship in the compat/ directory. - """ - responder.whitelist_prefix('simplejson') - - # issue #536: must be at end of sys.path, in case existing newer - # version is already loaded. 
- compat_path = os.path.join(os.path.dirname(__file__), 'compat') - sys.path.append(compat_path) - - for fullname, is_pkg, suffix in ( - (u'simplejson', True, '__init__.py'), - (u'simplejson.decoder', False, 'decoder.py'), - (u'simplejson.encoder', False, 'encoder.py'), - (u'simplejson.scanner', False, 'scanner.py'), - ): - path = os.path.join(compat_path, 'simplejson', suffix) - fp = open(path, 'rb') - try: - source = fp.read() - finally: - fp.close() - - responder.add_source_override( - fullname=fullname, - path=path, - source=source, - is_pkg=is_pkg, - ) - - def _setup_responder(self, responder): - """ - Configure :class:`mitogen.master.ModuleResponder` to only permit - certain packages, and to generate custom responses for certain modules. - """ - responder.whitelist_prefix('ansible') - responder.whitelist_prefix('ansible_mitogen') - self._setup_simplejson(responder) - - # Ansible 2.3 is compatible with Python 2.4 targets, however - # ansible/__init__.py is not. Instead, executor/module_common.py writes - # out a 2.4-compatible namespace package for unknown reasons. So we - # copy it here. - responder.add_source_override( - fullname='ansible', - path=ansible.__file__, - source=(ANSIBLE_PKG_OVERRIDE % ( - ansible.__version__, - ansible.__author__, - )).encode(), - is_pkg=True, - ) - def _setup_master(self): """ Construct a Router, Broker, and mitogen.unix listener @@ -319,12 +602,12 @@ class MuxProcess(object): broker=self.broker, max_message_size=4096 * 1048576, ) - self._setup_responder(self.router.responder) + _setup_responder(self.router.responder) mitogen.core.listen(self.broker, 'shutdown', self.on_broker_shutdown) mitogen.core.listen(self.broker, 'exit', self.on_broker_exit) self.listener = mitogen.unix.Listener.build_stream( router=self.router, - path=self.unix_listener_path, + path=self.path, backlog=C.DEFAULT_FORKS, ) self._enable_router_debug() @@ -337,15 +620,9 @@ class MuxProcess(object): """ self.pool = mitogen.service.Pool( router=self.router, - services=[ - mitogen.service.FileService(router=self.router), - mitogen.service.PushFileService(router=self.router), - ansible_mitogen.services.ContextService(self.router), - ansible_mitogen.services.ModuleDepService(self.router), - ], size=getenv_int('MITOGEN_POOL_SIZE', default=32), ) - LOG.debug('Service pool configured: size=%d', self.pool.size) + setup_pool(self.pool) def on_broker_shutdown(self): """ @@ -364,7 +641,7 @@ class MuxProcess(object): ourself. In future this should gracefully join the pool, but TERM is fine for now. """ - if not self.profiling: + if not os.environ.get('MITOGEN_PROFILING'): # In normal operation we presently kill the process because there is # not yet any way to cancel connect(). 
When profiling, threads # including the broker must shut down gracefully, otherwise pstats diff --git a/ansible_mitogen/services.py b/ansible_mitogen/services.py index a7c0e46f..a8fde265 100644 --- a/ansible_mitogen/services.py +++ b/ansible_mitogen/services.py @@ -326,6 +326,7 @@ class ContextService(mitogen.service.Service): ) def _send_module_forwards(self, context): + return self.router.responder.forward_modules(context, self.ALWAYS_PRELOAD) _candidate_temp_dirs = None @@ -372,7 +373,7 @@ class ContextService(mitogen.service.Service): try: method = getattr(self.router, spec['method']) except AttributeError: - raise Error('unsupported method: %(transport)s' % spec) + raise Error('unsupported method: %(method)s' % spec) context = method(via=via, unidirectional=True, **spec['kwargs']) if via and spec.get('enable_lru'): @@ -382,6 +383,7 @@ class ContextService(mitogen.service.Service): mitogen.core.listen(context, 'disconnect', lambda: self._on_context_disconnect(context)) + #self._send_module_forwards(context) TODO self._send_module_forwards(context) init_child_result = context.call( ansible_mitogen.target.init_child, @@ -443,7 +445,7 @@ class ContextService(mitogen.service.Service): @mitogen.service.arg_spec({ 'stack': list }) - def get(self, msg, stack): + def get(self, stack): """ Return a Context referring to an established connection with the given configuration, establishing new connections as necessary. diff --git a/ansible_mitogen/strategy.py b/ansible_mitogen/strategy.py index 01dff285..a1315cd9 100644 --- a/ansible_mitogen/strategy.py +++ b/ansible_mitogen/strategy.py @@ -31,6 +31,11 @@ import os import signal import threading +try: + import setproctitle +except ImportError: + setproctitle = None + import mitogen.core import ansible_mitogen.affinity import ansible_mitogen.loaders @@ -145,11 +150,17 @@ def wrap_connection_loader__get(name, *args, **kwargs): return connection_loader__get(name, *args, **kwargs) -def wrap_worker__run(*args, **kwargs): +def wrap_worker__run(self): """ While the strategy is active, rewrite connection_loader.get() calls for some transports into requests for a compatible Mitogen transport. """ + if setproctitle: + setproctitle.setproctitle('worker:%s task:%s' % ( + self._host.name, + self._task.action, + )) + # Ignore parent's attempts to murder us when we still need to write # profiling output. if mitogen.core._profile_hook.__name__ != '_profile_hook': @@ -158,10 +169,60 @@ def wrap_worker__run(*args, **kwargs): ansible_mitogen.logging.set_process_name('task') ansible_mitogen.affinity.policy.assign_worker() return mitogen.core._profile_hook('WorkerProcess', - lambda: worker__run(*args, **kwargs) + lambda: worker__run(self) ) +class AnsibleWrappers(object): + """ + Manage add/removal of various Ansible runtime hooks. + """ + def _add_plugin_paths(self): + """ + Add the Mitogen plug-in directories to the ModuleLoader path, avoiding + the need for manual configuration. + """ + base_dir = os.path.join(os.path.dirname(__file__), 'plugins') + ansible_mitogen.loaders.connection_loader.add_directory( + os.path.join(base_dir, 'connection') + ) + ansible_mitogen.loaders.action_loader.add_directory( + os.path.join(base_dir, 'action') + ) + + def _install_wrappers(self): + """ + Install our PluginLoader monkey patches and update global variables + with references to the real functions. 
+ """ + global action_loader__get + action_loader__get = ansible_mitogen.loaders.action_loader.get + ansible_mitogen.loaders.action_loader.get = wrap_action_loader__get + + global connection_loader__get + connection_loader__get = ansible_mitogen.loaders.connection_loader.get + ansible_mitogen.loaders.connection_loader.get = wrap_connection_loader__get + + global worker__run + worker__run = ansible.executor.process.worker.WorkerProcess.run + ansible.executor.process.worker.WorkerProcess.run = wrap_worker__run + + def _remove_wrappers(self): + """ + Uninstall the PluginLoader monkey patches. + """ + ansible_mitogen.loaders.action_loader.get = action_loader__get + ansible_mitogen.loaders.connection_loader.get = connection_loader__get + ansible.executor.process.worker.WorkerProcess.run = worker__run + + def install(self): + self._add_plugin_paths() + self._install_wrappers() + + def remove(self): + self._remove_wrappers() + + class StrategyMixin(object): """ This mix-in enhances any built-in strategy by arranging for various Mitogen @@ -223,43 +284,6 @@ class StrategyMixin(object): remote process, all the heavy lifting of transferring the action module and its dependencies are automatically handled by Mitogen. """ - def _install_wrappers(self): - """ - Install our PluginLoader monkey patches and update global variables - with references to the real functions. - """ - global action_loader__get - action_loader__get = ansible_mitogen.loaders.action_loader.get - ansible_mitogen.loaders.action_loader.get = wrap_action_loader__get - - global connection_loader__get - connection_loader__get = ansible_mitogen.loaders.connection_loader.get - ansible_mitogen.loaders.connection_loader.get = wrap_connection_loader__get - - global worker__run - worker__run = ansible.executor.process.worker.WorkerProcess.run - ansible.executor.process.worker.WorkerProcess.run = wrap_worker__run - - def _remove_wrappers(self): - """ - Uninstall the PluginLoader monkey patches. - """ - ansible_mitogen.loaders.action_loader.get = action_loader__get - ansible_mitogen.loaders.connection_loader.get = connection_loader__get - ansible.executor.process.worker.WorkerProcess.run = worker__run - - def _add_plugin_paths(self): - """ - Add the Mitogen plug-in directories to the ModuleLoader path, avoiding - the need for manual configuration. - """ - base_dir = os.path.join(os.path.dirname(__file__), 'plugins') - ansible_mitogen.loaders.connection_loader.add_directory( - os.path.join(base_dir, 'connection') - ) - ansible_mitogen.loaders.action_loader.add_directory( - os.path.join(base_dir, 'action') - ) def _queue_task(self, host, task, task_vars, play_context): """ @@ -290,20 +314,35 @@ class StrategyMixin(object): play_context=play_context, ) + def _get_worker_model(self): + """ + In classic mode a single :class:`WorkerModel` exists, which manages + references and configuration of the associated connection multiplexer + process. + """ + return ansible_mitogen.process.get_classic_worker_model() + def run(self, iterator, play_context, result=0): """ - Arrange for a mitogen.master.Router to be available for the duration of - the strategy's real run() method. + Wrap :meth:`run` to ensure requisite infrastructure and modifications + are configured for the duration of the call. 
""" _assert_supported_release() - - ansible_mitogen.process.MuxProcess.start() - run = super(StrategyMixin, self).run - self._add_plugin_paths() - self._install_wrappers() + wrappers = AnsibleWrappers() + self._worker_model = self._get_worker_model() + ansible_mitogen.process.set_worker_model(self._worker_model) try: - return mitogen.core._profile_hook('Strategy', - lambda: run(iterator, play_context) - ) + self._worker_model.on_strategy_start() + try: + wrappers.install() + try: + run = super(StrategyMixin, self).run + return mitogen.core._profile_hook('Strategy', + lambda: run(iterator, play_context) + ) + finally: + wrappers.remove() + finally: + self._worker_model.on_strategy_complete() finally: - self._remove_wrappers() + ansible_mitogen.process.set_worker_model(None) diff --git a/mitogen/core.py b/mitogen/core.py index 626d5297..10cd4385 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -901,7 +901,11 @@ class Message(object): unpickler.find_global = self._find_global try: # Must occur off the broker thread. - obj = unpickler.load() + try: + obj = unpickler.load() + except: + LOG.error('raw pickle was: %r', self.data) + raise self._unpickled = obj except (TypeError, ValueError): e = sys.exc_info()[1] diff --git a/mitogen/parent.py b/mitogen/parent.py index 97ec4949..4b2ac388 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -2159,6 +2159,23 @@ class Router(mitogen.core.Router): finally: self._write_lock.release() + def disconnect(self, context): + """ + Disconnect a context and forget its stream, assuming the context is + directly connected. + """ + stream = self.stream_by_id(context) + if stream.remote_id != context.context_id: + return + + l = mitogen.core.Latch() + mitogen.core.listen(stream, 'disconnect', l.put) + def disconnect(): + LOG.debug('Starting disconnect of %r', stream) + stream.on_disconnect(self.broker) + self.broker.defer(disconnect) + l.get() + def add_route(self, target_id, stream): """ Arrange for messages whose `dst_id` is `target_id` to be forwarded on diff --git a/mitogen/service.py b/mitogen/service.py index 886012e8..9e17482c 100644 --- a/mitogen/service.py +++ b/mitogen/service.py @@ -92,6 +92,24 @@ def get_or_create_pool(size=None, router=None): _pool_lock.release() +def call(service_name, method_name, call_context=None, **kwargs): + """ + Call a service registered with this pool, using the calling thread as a + host. 
+ """ + if isinstance(service_name, mitogen.core.BytesType): + service_name = service_name.encode('utf-8') + elif not isinstance(service_name, mitogen.core.UnicodeType): + service_name = service_name.name() # Service.name() + + if call_context: + return call_context.call_service(service_name, method_name, **kwargs) + else: + pool = get_or_create_pool() + invoker = pool.get_invoker(service_name, msg=None) + return getattr(invoker.service, method_name)(**kwargs) + + def validate_arg_spec(spec, args): for name in spec: try: @@ -239,12 +257,13 @@ class Invoker(object): if not policies: raise mitogen.core.CallError('Method has no policies set.') - if not all(p.is_authorized(self.service, msg) for p in policies): - raise mitogen.core.CallError( - self.unauthorized_msg, - method_name, - self.service.name() - ) + if msg is not None: + if not all(p.is_authorized(self.service, msg) for p in policies): + raise mitogen.core.CallError( + self.unauthorized_msg, + method_name, + self.service.name() + ) required = getattr(method, 'mitogen_service__arg_spec', {}) validate_arg_spec(required, kwargs) @@ -264,7 +283,7 @@ class Invoker(object): except Exception: if no_reply: LOG.exception('While calling no-reply method %s.%s', - type(self.service).__name__, + self.service.name(), func_name(method)) else: raise @@ -690,7 +709,7 @@ class PushFileService(Service): """ for path in paths: self.propagate_to(context, mitogen.core.to_text(path)) - self.router.responder.forward_modules(context, modules) + #self.router.responder.forward_modules(context, modules) TODO @expose(policy=AllowParents()) @arg_spec({ diff --git a/tests/ansible/run_ansible_playbook.py b/tests/ansible/run_ansible_playbook.py index b5b459a1..467eaffc 100755 --- a/tests/ansible/run_ansible_playbook.py +++ b/tests/ansible/run_ansible_playbook.py @@ -51,7 +51,11 @@ else: os.path.join(GIT_BASEDIR, 'tests/ansible/hosts') ) -args = ['ansible-playbook'] +if 'ANSIBLE_ARGV' in os.environ: + args = eval(os.environ['ANSIBLE_ARGV']) +else: + args = ['ansible-playbook'] + args += ['-e', json.dumps(extra)] args += sys.argv[1:] os.execvp(args[0], args) From 0f63ca4c685526921fe087f87016732978536329 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 24 Feb 2019 20:08:33 +0000 Subject: [PATCH 172/383] Make setting affinity optional. --- ansible_mitogen/process.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index 0eaf25a7..6cf3a968 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -226,10 +226,11 @@ def _setup_responder(responder): ) -def common_setup(_init_logging=True): +def common_setup(enable_affinity=True, _init_logging=True): save_pid('controller') ansible_mitogen.logging.set_process_name('top') - ansible_mitogen.affinity.policy.assign_controller() + if enable_affinity: + ansible_mitogen.affinity.policy.assign_controller() mitogen.utils.setup_gil() if _init_logging: From 7e51a932319a45bd3921715127482458fd07284e Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 24 Feb 2019 20:08:52 +0000 Subject: [PATCH 173/383] core: remove old blocking call guard, it's in the wrong place It should have been in Receiver.get(). Placing it here prevents *_async() method calls from broker thread. 
--- mitogen/core.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index 10cd4385..56e9e9eb 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -2046,9 +2046,6 @@ class Context(object): :class:`Receiver` configured to receive any replies sent to the message's `reply_to` handle. """ - if self.router.broker._thread == threading.currentThread(): # TODO - raise SystemError('Cannot making blocking call on broker thread') - receiver = Receiver(self.router, persist=persist, respondent=self) msg.dst_id = self.context_id msg.reply_to = receiver.handle From 807cbef9caa374bbf96ac90a81221388345e832d Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 24 Feb 2019 20:09:48 +0000 Subject: [PATCH 174/383] core: wake Latch outside of lock. Given: - thread A asleep in Latch._get_sleep() - thread B calling Latch.put() Previously, - B takes lock, - B wakes socket by dropping GIL and writing to it - A wakes from poll(), acquires GIL only to find Latch._lock is held - A drops GIL, sleeps on futex() for _lock - B wakes, acquires GIL, releases _lock - A wakes from futex(), acquires lock Now, - B takes lock, updates state, releases lock - B wakes socket by droppping GIL and writing to it - A wakes from poll(), acquires GIL and _lock - Everyone lives happily ever after. --- mitogen/core.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mitogen/core.py b/mitogen/core.py index 56e9e9eb..ea55f7bb 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -2489,17 +2489,20 @@ class Latch(object): raise LatchError() self._queue.append(obj) + wsock = None if self._waking < len(self._sleeping): wsock, cookie = self._sleeping[self._waking] self._waking += 1 _vv and IOLOG.debug('%r.put() -> waking wfd=%r', self, wsock.fileno()) - self._wake(wsock, cookie) elif self.notify: self.notify(self) finally: self._lock.release() + if wsock: + self._wake(wsock, cookie) + def _wake(self, wsock, cookie): written, disconnected = io_op(os.write, wsock.fileno(), cookie) assert written == len(cookie) and not disconnected From d6faff06c1a60c6d95486ada12c65749f403cf2d Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 24 Feb 2019 20:09:48 +0000 Subject: [PATCH 175/383] core: wake Waker outside of lock. Given: - Broker asleep in poll() - thread B calling Latch.put() Previously, - B takes lock, - B wakes socket by dropping GIL and writing to it - Broker wakes from poll(), acquires GIL only to find Latch._lock is held - Broker drops GIL, sleeps on futex() for _lock - B wakes, acquires GIL, releases _lock - Broker wakes from futex(), acquires lock Now, - B takes lock, updates state, releases lock - B wakes socket by droppping GIL and writing to it - Broker wakes from poll(), acquires GIL and _lock - Everyone lives happily ever after. --- mitogen/core.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index ea55f7bb..f850def9 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -2613,12 +2613,14 @@ class Waker(Protocol): self.stream.transmit_side.fd) self._lock.acquire() try: - if not self._deferred: - self._wake() + should_wake = not self._deferred self._deferred.append((func, args, kwargs)) finally: self._lock.release() + if should_wake: + self._wake() + class IoLoggerProtocol(DelimitedProtocol): """ From 3b585b841e6602aa2366564040507261d4a5d68c Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 24 Feb 2019 20:12:51 +0000 Subject: [PATCH 176/383] core: ensure 'exit' signal fires even on Broker crash. 
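
The same idea, reduced to a minimal illustrative sketch (hypothetical names,
not code from this series): record the pending work and decide whether a wake
is needed while holding the lock, but only write to the wake descriptor once
the lock has been released:

    import os
    import threading

    class DeferQueue(object):
        def __init__(self):
            self._lock = threading.Lock()
            self._deferred = []
            self._rfd, self._wfd = os.pipe()

        def defer(self, func):
            self._lock.acquire()
            try:
                should_wake = not self._deferred
                self._deferred.append(func)
            finally:
                self._lock.release()
            if should_wake:
                # The write happens with the lock already dropped, so the
                # woken thread acquires it without contending with us.
                os.write(self._wfd, b'\x00')
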
--- mitogen/core.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index f850def9..843f5a04 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -3272,8 +3272,11 @@ class Broker(object): self._broker_exit() def _broker_main(self): - _profile_hook('mitogen.broker', self._do_broker_main) - fire(self, 'exit') + try: + _profile_hook('mitogen.broker', self._do_broker_main) + finally: + # 'finally' to ensure _on_broker_exit() can always SIGTERM. + fire(self, 'exit') def shutdown(self): """ From f4709b1dc2c28910eb6497e472d56e383bd045b9 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 24 Feb 2019 20:13:07 +0000 Subject: [PATCH 177/383] profiler: marginal improvements --- mitogen/profiler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mitogen/profiler.py b/mitogen/profiler.py index e697d599..51da9b18 100644 --- a/mitogen/profiler.py +++ b/mitogen/profiler.py @@ -72,13 +72,13 @@ def try_merge(stats, path): stats.add(path) return True except Exception as e: - print('Failed. Race? Will retry. %s' % (e,)) + print('%s failed. Will retry. %s' % (path, e)) return False def merge_stats(outpath, inpaths): first, rest = inpaths[0], inpaths[1:] - for x in range(5): + for x in range(1): try: stats = pstats.Stats(first) except EOFError: From 50bfe4c74699c61d20076feaea16a8a9cc7b45e2 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 24 Feb 2019 20:13:18 +0000 Subject: [PATCH 178/383] service: don't acquire lock when pool already initialized --- mitogen/service.py | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/mitogen/service.py b/mitogen/service.py index 9e17482c..eb7b856a 100644 --- a/mitogen/service.py +++ b/mitogen/service.py @@ -77,19 +77,24 @@ else: def get_or_create_pool(size=None, router=None): global _pool global _pool_pid - _pool_lock.acquire() - try: - if _pool_pid != os.getpid(): - _pool = Pool(router, [], size=size or DEFAULT_POOL_SIZE, - overwrite=True) - # In case of Broker shutdown crash, Pool can cause 'zombie' - # processes. - mitogen.core.listen(router.broker, 'shutdown', - lambda: _pool.stop(join=False)) - _pool_pid = os.getpid() - return _pool - finally: - _pool_lock.release() + + my_pid = os.getpid() + if _pool is None or my_pid != _pool_pid: + # Avoid acquiring heavily contended lock if possible. + _pool_lock.acquire() + try: + if _pool_pid != my_pid: + _pool = Pool(router, [], size=size or DEFAULT_POOL_SIZE, + overwrite=True) + # In case of Broker shutdown crash, Pool can cause 'zombie' + # processes. 
+ mitogen.core.listen(router.broker, 'shutdown', + lambda: _pool.stop(join=True)) + _pool_pid = os.getpid() + finally: + _pool_lock.release() + + return _pool def call(service_name, method_name, call_context=None, **kwargs): From e93762b3db65411efb3e6dcb21b4d4525e91344d Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 25 Feb 2019 17:45:27 +0000 Subject: [PATCH 179/383] service: avoid taking another lock in the usual case --- mitogen/service.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/mitogen/service.py b/mitogen/service.py index eb7b856a..49bd2281 100644 --- a/mitogen/service.py +++ b/mitogen/service.py @@ -547,15 +547,18 @@ class Pool(object): invoker.service.on_shutdown() def get_invoker(self, name, msg): - self._lock.acquire() - try: - invoker = self._invoker_by_name.get(name) - if not invoker: - service = self._activator.activate(self, name, msg) - invoker = service.invoker_class(service=service) - self._invoker_by_name[name] = invoker - finally: - self._lock.release() + invoker = self._invoker_by_name.get(name) + if invoker is None: + # Avoid acquiring lock if possible. + self._lock.acquire() + try: + invoker = self._invoker_by_name.get(name) + if not invoker: + service = self._activator.activate(self, name, msg) + invoker = service.invoker_class(service=service) + self._invoker_by_name[name] = invoker + finally: + self._lock.release() return invoker From 1fca0b7a94010f80aba4fe2e07e63c26598ccae9 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 29 Jul 2019 16:10:36 +0100 Subject: [PATCH 180/383] [linear2] fix MuxProcess test fixture and some merge fallout --- ansible_mitogen/process.py | 83 +++++++++++++++----------- mitogen/fork.py | 20 +++++-- tests/ansible/tests/connection_test.py | 8 ++- 3 files changed, 69 insertions(+), 42 deletions(-) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index 6cf3a968..cb130abc 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -114,14 +114,17 @@ def get_worker_model(): return _worker_model -def get_classic_worker_model(): +def get_classic_worker_model(**kwargs): """ Return the single :class:`ClassicWorkerModel` instance, constructing it if necessary. """ global _classic_worker_model + assert _classic_worker_model is None or (not kwargs), \ + "ClassicWorkerModel kwargs supplied but model already constructed" + if _classic_worker_model is None: - _classic_worker_model = ClassicWorkerModel() + _classic_worker_model = ClassicWorkerModel(**kwargs) return _classic_worker_model @@ -417,9 +420,9 @@ class ClassicWorkerModel(WorkerModel): For testing, if :data:`False`, don't initialize logging. """ common_setup(_init_logging=self._init_logging) + MuxProcess.cls_parent_sock, \ MuxProcess.cls_child_sock = socket.socketpair() - mitogen.core.set_cloexec(MuxProcess.cls_parent_sock.fileno()) mitogen.core.set_cloexec(MuxProcess.cls_child_sock.fileno()) @@ -434,6 +437,28 @@ class ClassicWorkerModel(WorkerModel): MuxProcess.cls_child_sock.close() MuxProcess.cls_child_sock = None + def _test_reset(self): + """ + Used to clean up in unit tests. + """ + # TODO: split this up a bit. 
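(PATCH 178/383 and 179/383 above both apply the same check-before-locking idiom. Below is a minimal standalone sketch of that idiom with illustrative names rather than the real service.py symbols; the unlocked read is only safe because the reference changes once, from None to a fully constructed object, while the lock is held.)::

    import threading

    _pool = None
    _pool_lock = threading.Lock()

    def get_or_create_pool(make_pool):
        global _pool
        if _pool is None:                 # fast path: skip the contended lock
            _pool_lock.acquire()
            try:
                if _pool is None:         # re-check now that the lock is held
                    _pool = make_pool()
            finally:
                _pool_lock.release()
        return _pool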
+ global _classic_worker_model + assert MuxProcess.cls_parent_sock is not None + MuxProcess.cls_parent_sock.close() + MuxProcess.cls_parent_sock = None + self.listener_path = None + self.router = None + self.parent = None + + for mux in self._muxes: + pid, status = os.waitpid(mux.pid, 0) + status = mitogen.fork._convert_exit_status(status) + LOG.debug('mux PID %d %s', pid, + mitogen.parent.returncode_to_str(status)) + + _classic_worker_model = None + set_worker_model(None) + def on_strategy_start(self): """ See WorkerModel.on_strategy_start(). @@ -461,22 +486,26 @@ class ClassicWorkerModel(WorkerModel): return ClassicBinding(self) def on_binding_close(self): - if self.broker: - self.broker.shutdown() - self.broker.join() - self.router = None - self.broker = None + if not self.broker: + return + + self.broker.shutdown() + self.broker.join() + self.router = None + self.broker = None + self.listener_path = None + self.initialized = False - # #420: Ansible executes "meta" actions in the top-level process, - # meaning "reset_connection" will cause :class:`mitogen.core.Latch` - # FDs to be cached and erroneously shared by children on subsequent - # WorkerProcess forks. To handle that, call on_fork() to ensure any - # shared state is discarded. - # #490: only attempt to clean up when it's known that some - # resources exist to cleanup, otherwise later __del__ double-call - # to close() due to GC at random moment may obliterate an unrelated - # Connection's related resources. - mitogen.fork.on_fork() + # #420: Ansible executes "meta" actions in the top-level process, + # meaning "reset_connection" will cause :class:`mitogen.core.Latch` FDs + # to be cached and erroneously shared by children on subsequent + # WorkerProcess forks. To handle that, call on_fork() to ensure any + # shared state is discarded. + # #490: only attempt to clean up when it's known that some resources + # exist to cleanup, otherwise later __del__ double-call to close() due + # to GC at random moment may obliterate an unrelated Connection's + # related resources. + mitogen.fork.on_fork() class MuxProcess(object): @@ -514,28 +543,14 @@ class MuxProcess(object): #: applied to locally executed commands and modules. cls_original_env = None - #: In both processes, this a list of the temporary UNIX sockets used for - #: forked WorkerProcesses to contact the forked mux processes. - cls_listener_paths = None - - @classmethod - def _reset(cls): - """ - Used to clean up in unit tests. - """ - assert cls.worker_sock is not None - cls.worker_sock.close() - cls.worker_sock = None - os.waitpid(cls.worker_pid, 0) - def __init__(self, index): self.index = index #: Individual path of this process. self.path = mitogen.unix.make_socket_path() def start(self): - pid = os.fork() - if pid: + self.pid = os.fork() + if self.pid: # Wait for child to boot before continuing. mitogen.core.io_op(MuxProcess.cls_parent_sock.recv, 1) return diff --git a/mitogen/fork.py b/mitogen/fork.py index e2075fc3..ee990262 100644 --- a/mitogen/fork.py +++ b/mitogen/fork.py @@ -121,6 +121,19 @@ def handle_child_crash(): os._exit(1) +def _convert_exit_status(status): + """ + Convert a :func:`os.waitpid`-style exit status to a :mod:`subprocess` style + exit status. 
+ """ + if os.WIFEXITED(status): + return os.WEXITSTATUS(status) + elif os.WIFSIGNALED(status): + return -os.WTERMSIG(status) + elif os.WIFSTOPPED(status): + return -os.WSTOPSIG(status) + + class Process(mitogen.parent.Process): def poll(self): try: @@ -134,12 +147,7 @@ class Process(mitogen.parent.Process): if not pid: return - if os.WIFEXITED(status): - return os.WEXITSTATUS(status) - elif os.WIFSIGNALED(status): - return -os.WTERMSIG(status) - elif os.WIFSTOPPED(status): - return -os.WSTOPSIG(status) + return _convert_exit_status(status) class Options(mitogen.parent.Options): diff --git a/tests/ansible/tests/connection_test.py b/tests/ansible/tests/connection_test.py index d663ecc5..73958185 100644 --- a/tests/ansible/tests/connection_test.py +++ b/tests/ansible/tests/connection_test.py @@ -26,13 +26,17 @@ class MuxProcessMixin(object): @classmethod def setUpClass(cls): #mitogen.utils.log_to_file() - ansible_mitogen.process.MuxProcess.start(_init_logging=False) + cls.model = ansible_mitogen.process.get_classic_worker_model( + _init_logging=False + ) + ansible_mitogen.process.set_worker_model(cls.model) + cls.model.on_strategy_start() super(MuxProcessMixin, cls).setUpClass() @classmethod def tearDownClass(cls): + cls.model._test_reset() super(MuxProcessMixin, cls).tearDownClass() - ansible_mitogen.process.MuxProcess._reset() class ConnectionMixin(MuxProcessMixin): From d35bca3f15c878573800b2113e11540e5e3570d8 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 8 Jun 2019 05:48:04 +0100 Subject: [PATCH 181/383] docs: Add lineinfile bug to changelog. --- docs/changelog.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index 43d30456..951243de 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -53,6 +53,13 @@ Fixes ``mitogen_ssh_keepalive_count`` variables, and the default timeout for an SSH server has been increased from `15*3` seconds to `30*10` seconds. +* `7ae926b3 `_: the + ``lineinfile`` module began leaking writable temporary file descriptors since + Ansible 2.7.0. When ``lineinfile`` was used to create or modify a script, and + that script was later executed, the execution could fail with "*text file + busy*" due to the open descriptor. Temporary descriptors are now tracked and + cleaned up on module exit. + Thanks! ~~~~~~~ From 4cd32cbcba0071aaa25305167da12a52d44043e9 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 28 Jul 2019 16:27:44 +0100 Subject: [PATCH 182/383] docs: update Changelog for stream-refactor. --- docs/changelog.rst | 46 ++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 38 insertions(+), 8 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index 951243de..a7b7ec47 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -61,6 +61,44 @@ Fixes cleaned up on module exit. +Core Library +~~~~~~~~~~~~ + +* Logs are more readable, and many :func:`repr` strings are more descriptive. + The old pseudo-function-call format is slowly being migrated to + human-readable output where appropriate. For example, + *"Stream(ssh:123).connect()"* could become *"connecting to ssh:123"*. + +* :func:`bytearray` was removed from the list of supported serialization types. + It has never been portable, and does not appear to have been used. + +* `#170 `_: to better support child + process management and a future asynchronous connect implementation, a + :class:`mitogen.parent.TimerList` API is available. 
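(The "text file busy" failure mode mentioned in the ``lineinfile`` changelog entry above, PATCH 181/383, can be reproduced with a few lines of standalone Python. This is a generic illustration, not Mitogen or Ansible code; the behaviour shown is Linux's ETXTBSY rule for executing a file that some process still holds open for writing.)::

    import os
    import subprocess
    import tempfile

    fd, path = tempfile.mkstemp(suffix='.sh')
    os.write(fd, b'#!/bin/sh\necho hi\n')
    os.chmod(path, 0o755)

    try:
        subprocess.check_call([path])   # fd still open for writing: ETXTBSY
    except OSError as e:
        print('exec failed: %s' % (e,))

    os.close(fd)                        # once the descriptor is closed...
    subprocess.check_call([path])       # ...the same script can be executed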
+ +* `#419 `_: the internal + :class:`mitogen.core.Stream` has been refactored into 7 new classes, + separating out protocol behaviour logic, output buffering, line-oriented + input parsing, options handling, and connection management. The new + connection management implementation is internally asynchronous, laying + almost all the groundwork needed for fully asynchronous connect. + +* `#419 `_: zombie process reaping + has vastly improved, by using the timer API to efficiently poll for a slow + child to finish exiting. Polling avoids the need to install a process-global + `SIGCHLD` handler, or rely on the process-global 'signal file descriptor' + functionality only available in newer Python releases. + +* `#419 `_: almost all uses of + :func:`os.dup` have been removed, along with almost all cases of manual file + descriptor management. Descriptors are trapped in :func:`os.fdopen` objects + as soon as they are opened, ensuring a leaked object will close itself, and + ensuring every descriptor is fused to a `closed` flag, preventing historical + bugs where a double close could destroy descriptors belonging to an unrelated + stream. + + + Thanks! ~~~~~~~ @@ -109,14 +147,6 @@ Fixes potential influx of 2.8-related bug reports. -Core Library -~~~~~~~~~~~~ - -* `#170 `_: to better support child - process management and a future asynchronous connect implementation, a - :class:`mitogen.parent.TimerList` API is available. - - Thanks! ~~~~~~~ From ded00d25c91eec4ca3753876fbe5cc39e9b30bdb Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 28 Jul 2019 16:29:05 +0100 Subject: [PATCH 183/383] docs: more stream-refactor work --- docs/changelog.rst | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index a7b7ec47..9773ff04 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -25,8 +25,8 @@ Enhancements ^^^^^^^^^^^^ * `#587 `_: partial support for - Ansible 2.8 is now available. This implementation does not yet support the - new `become plugins + Ansible 2.8 is available. This implementation does not yet support the new + `become plugins `_ functionality, which will be addressed in a future release. @@ -70,7 +70,7 @@ Core Library *"Stream(ssh:123).connect()"* could become *"connecting to ssh:123"*. * :func:`bytearray` was removed from the list of supported serialization types. - It has never been portable, and does not appear to have been used. + It has never been portable, and does not appear to have been used. * `#170 `_: to better support child process management and a future asynchronous connect implementation, a @@ -78,10 +78,11 @@ Core Library * `#419 `_: the internal :class:`mitogen.core.Stream` has been refactored into 7 new classes, - separating out protocol behaviour logic, output buffering, line-oriented - input parsing, options handling, and connection management. The new - connection management implementation is internally asynchronous, laying - almost all the groundwork needed for fully asynchronous connect. + modularizing protocol behaviour, output buffering, line-oriented input + parsing, options handling and connection management. Connection setup is + internally asynchronous, laying almost all the groundwork needed for fully + asynchronous connect, proxied Ansible become plug-ins, and integrating + `libssh `_. 
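(A short generic illustration of the descriptor-trapping point made in the ``os.dup`` changelog bullet above: wrapping a raw descriptor in a file object ties it to a single ``closed`` flag, so an accidental second close becomes a no-op instead of closing whatever unrelated descriptor later reused the same number. Plain Python, not Mitogen code.)::

    import os

    fd = os.open(os.devnull, os.O_RDONLY)
    fp = os.fdopen(fd, 'rb')    # fp now owns fd; a leaked fp closes it on GC

    fp.close()                  # closes the underlying descriptor exactly once
    fp.close()                  # second close is a no-op: 'closed' already set

    # By contrast, calling os.close(fd) twice can destroy a descriptor that an
    # unrelated stream was handed in the meantime.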
* `#419 `_: zombie process reaping has vastly improved, by using the timer API to efficiently poll for a slow @@ -89,7 +90,7 @@ Core Library `SIGCHLD` handler, or rely on the process-global 'signal file descriptor' functionality only available in newer Python releases. -* `#419 `_: almost all uses of +* `#419 `_: most uses of :func:`os.dup` have been removed, along with almost all cases of manual file descriptor management. Descriptors are trapped in :func:`os.fdopen` objects as soon as they are opened, ensuring a leaked object will close itself, and @@ -97,6 +98,11 @@ Core Library bugs where a double close could destroy descriptors belonging to an unrelated stream. +* `a5536c35 `_: avoid quadratic + buffer management when logging lines received from a child's redirected + standard IO. + + Thanks! From de65790a7466362339a2050f74e71ca614664176 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 28 Jul 2019 16:53:35 +0100 Subject: [PATCH 184/383] docs: note fd usage has halved --- docs/changelog.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index 9773ff04..78dd2c18 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -98,6 +98,13 @@ Core Library bugs where a double close could destroy descriptors belonging to an unrelated stream. +* `#419 `_, + `#549 `_, file descriptor usage + during large runs has approximately halved, as it is no longer necessary to + manage every read and write side distinctly, in order to avoid double-close + bugs. Avoiding double-close is instead handled by Python's built-in file + object. + * `a5536c35 `_: avoid quadratic buffer management when logging lines received from a child's redirected standard IO. From 8f25114d1a04c7318e45a904ea7e9fdb2c782eb3 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 28 Jul 2019 19:03:26 +0100 Subject: [PATCH 185/383] docs: update Changelog --- docs/changelog.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index 78dd2c18..27227c33 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -122,8 +122,10 @@ bug reports, testing, features and fixes in this release contributed by `Orion Poplawski `_, `Szabó Dániel Ernő `_, `Ulrich Schreiner `_, -`Yuki Nishida `_, and -`@ghp-rr `_. +`Yuki Nishida `_, +`@ghp-rr `_, +`Pieter Voet `_, and +`@rizzly `_. v0.2.7 (2019-05-19) From c0357d394f503b1027cd99291f3d22d1e3429729 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 29 Jul 2019 00:43:38 +0100 Subject: [PATCH 186/383] add 363 to changelog --- docs/changelog.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index 27227c33..8165db00 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -34,6 +34,10 @@ Enhancements Fixes ^^^^^ +* `#363 `_: fix a rare race matching + *Permission denied* errors from some versions of ``su`` running on heavily + loaded machines. + * `#578 `_: the extension could crash while rendering an error message, due to an incorrect format string. 
From 9e1faa79275247c5d6944a5dc141488e915a5d84 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 29 Jul 2019 06:37:43 +0100 Subject: [PATCH 187/383] docs: changelog concision / additions --- docs/changelog.rst | 96 ++++++++++++++++++++++++++++------------------ 1 file changed, 58 insertions(+), 38 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index 8165db00..dbc33d02 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -24,19 +24,44 @@ To avail of fixes in an unreleased version, please download a ZIP file Enhancements ^^^^^^^^^^^^ -* `#587 `_: partial support for - Ansible 2.8 is available. This implementation does not yet support the new - `become plugins - `_ - functionality, which will be addressed in a future release. +* `#587 `_: Ansible 2.8 is partially + supported. `Become plugins + `_ and + `interpreter discovery + `_ + are not yet handled. + +* The ``MITOGEN_CPU_COUNT`` environment variable shards the connection + multiplexer into per-CPU worker processes. This improves throughput for large + runs especially involving file transfer, and is a prerequisite to future + in-process SSH support. To match the behaviour of older releases, only one + multiplexer is started by default. + +* `#419 `_, + `#470 `_, file descriptor usage + during large runs is halved, as it is no longer necessary to manage read and + write sides distinctly in order to work around a design limitation. + +* `#419 `_: almost all connection + setup happens on one thread, reducing GIL contention and context switching + early in a run. + +* `#419 `_: 2 network round-trips + were removed from early connection setup. + +* `? `_, + `? `_, + `? `_, + `? `_: locking is avoided in + some hot paths, and locks that must be taken are held for less time. Fixes ^^^^^ -* `#363 `_: fix a rare race matching - *Permission denied* errors from some versions of ``su`` running on heavily - loaded machines. +* `#363 `_: fix an obscure race + matching *Permission denied* errors from some versions of ``su`` running on + heavily loaded machines. * `#578 `_: the extension could crash while rendering an error message, due to an incorrect format string. @@ -61,61 +86,56 @@ Fixes ``lineinfile`` module began leaking writable temporary file descriptors since Ansible 2.7.0. When ``lineinfile`` was used to create or modify a script, and that script was later executed, the execution could fail with "*text file - busy*" due to the open descriptor. Temporary descriptors are now tracked and - cleaned up on module exit. + busy*" due to the leaked descriptor. Temporary descriptors are now tracked + and cleaned up on exit for all modules. Core Library ~~~~~~~~~~~~ * Logs are more readable, and many :func:`repr` strings are more descriptive. - The old pseudo-function-call format is slowly being migrated to - human-readable output where appropriate. For example, - *"Stream(ssh:123).connect()"* could become *"connecting to ssh:123"*. + The old pseudo-function-call format is slowly migrating to human-readable + output where possible. For example, *"Stream(ssh:123).connect()"* might + be written *"connecting to ssh:123"*. * :func:`bytearray` was removed from the list of supported serialization types. - It has never been portable, and does not appear to have been used. + It was never portable between Python versions, unused, and never made much + sense to support as a wire type. 
-* `#170 `_: to better support child - process management and a future asynchronous connect implementation, a - :class:`mitogen.parent.TimerList` API is available. +* `#170 `_: to improve subprocess + management and asynchronous connect, a :class:`mitogen.parent.TimerList` + interface is available, accessible as :attr:`Broker.timers` in an + asynchronous context. * `#419 `_: the internal :class:`mitogen.core.Stream` has been refactored into 7 new classes, modularizing protocol behaviour, output buffering, line-oriented input - parsing, options handling and connection management. Connection setup is + parsing, option handling and connection management. Connection setup is internally asynchronous, laying almost all the groundwork needed for fully asynchronous connect, proxied Ansible become plug-ins, and integrating `libssh `_. -* `#419 `_: zombie process reaping - has vastly improved, by using the timer API to efficiently poll for a slow - child to finish exiting. Polling avoids the need to install a process-global - `SIGCHLD` handler, or rely on the process-global 'signal file descriptor' - functionality only available in newer Python releases. +* `#169 `_, + `#419 `_: zombie child reaping has + vastly improved, by using timers to efficiently poll for a slow child to + finish exiting. Polling avoids relying on process-global configuration such + as a `SIGCHLD` handler, or :func:`signal.set_wakeup_fd` available in modern + Python. -* `#419 `_: most uses of - :func:`os.dup` have been removed, along with almost all cases of manual file - descriptor management. Descriptors are trapped in :func:`os.fdopen` objects - as soon as they are opened, ensuring a leaked object will close itself, and - ensuring every descriptor is fused to a `closed` flag, preventing historical - bugs where a double close could destroy descriptors belonging to an unrelated - stream. +* `#256 `_, -* `#419 `_, - `#549 `_, file descriptor usage - during large runs has approximately halved, as it is no longer necessary to - manage every read and write side distinctly, in order to avoid double-close - bugs. Avoiding double-close is instead handled by Python's built-in file - object. +`#419 `_: most :func:`os.dup` was + eliminated, along with almost all manual file descriptor management. + Descriptors are trapped in :func:`os.fdopen` objects when they are created, + ensuring a leaked object will close itself, and ensuring every descriptor is + fused to a `closed` flag, preventing historical bugs where a double close + could destroy descriptors belonging to unrelated streams. * `a5536c35 `_: avoid quadratic buffer management when logging lines received from a child's redirected standard IO. - - Thanks! 
~~~~~~~ From a39169ff16797d3eac929cf39fdb2da8cf06dbdb Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 29 Jul 2019 16:18:23 +0100 Subject: [PATCH 188/383] [linear2] fix mitogen_shutdown_all service context access --- tests/ansible/lib/action/mitogen_shutdown_all.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/ansible/lib/action/mitogen_shutdown_all.py b/tests/ansible/lib/action/mitogen_shutdown_all.py index c28d9d4b..59191450 100644 --- a/tests/ansible/lib/action/mitogen_shutdown_all.py +++ b/tests/ansible/lib/action/mitogen_shutdown_all.py @@ -23,9 +23,10 @@ class ActionModule(ActionBase): } self._connection._connect() + binding = self._connection.get_binding() return { 'changed': True, - 'result': self._connection.parent.call_service( + 'result': binding.get_service_context().call_service( service_name='ansible_mitogen.services.ContextService', method_name='shutdown_all', ) From a9755d4ad01f88cf7de0d0e4748a87ec9217c19d Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 29 Jul 2019 16:30:01 +0100 Subject: [PATCH 189/383] [linear2] update mitogen_get_stack for new _build_stack() return value --- ansible_mitogen/plugins/action/mitogen_get_stack.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ansible_mitogen/plugins/action/mitogen_get_stack.py b/ansible_mitogen/plugins/action/mitogen_get_stack.py index 12afbfba..171f84ea 100644 --- a/ansible_mitogen/plugins/action/mitogen_get_stack.py +++ b/ansible_mitogen/plugins/action/mitogen_get_stack.py @@ -47,8 +47,9 @@ class ActionModule(ActionBase): 'skipped': True, } + _, stack = self._connection._build_stack() return { 'changed': True, - 'result': self._connection._build_stack(), + 'result': stack, '_ansible_verbose_always': True, } From 136dee1fb42d9aac5c4016224da3a5a10d77450d Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 29 Jul 2019 17:52:44 +0100 Subject: [PATCH 190/383] [linear2] more merge fallout, fix Connection._mitogen_reset(mode=) --- ansible_mitogen/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible_mitogen/connection.py b/ansible_mitogen/connection.py index 4d310d75..9fd279b1 100644 --- a/ansible_mitogen/connection.py +++ b/ansible_mitogen/connection.py @@ -767,7 +767,7 @@ class Connection(ansible.plugins.connection.ConnectionBase): mitogen.service.call( call_context=self.binding.get_service_context(), service_name='ansible_mitogen.services.ContextService', - method_name='put', + method_name=mode, context=self.context ) From 115145555eb80c6bcd73f8a6e5e91d610aa64b4f Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 29 Jul 2019 18:07:01 +0100 Subject: [PATCH 191/383] [linear2] fix another test relying on Connection.parent --- .../integration/context_service/disconnect_cleanup.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/ansible/integration/context_service/disconnect_cleanup.yml b/tests/ansible/integration/context_service/disconnect_cleanup.yml index 575358f6..3275b596 100644 --- a/tests/ansible/integration/context_service/disconnect_cleanup.yml +++ b/tests/ansible/integration/context_service/disconnect_cleanup.yml @@ -24,7 +24,7 @@ - mitogen_action_script: script: | self._connection._connect() - result['dump'] = self._connection.parent.call_service( + result['dump'] = self._connection.get_binding().get_service_context().call_service( service_name='ansible_mitogen.services.ContextService', method_name='dump' ) @@ -39,7 +39,7 @@ - mitogen_action_script: script: | self._connection._connect() - 
result['dump'] = self._connection.parent.call_service( + result['dump'] = self._connection.get_binding().get_service_context().call_service( service_name='ansible_mitogen.services.ContextService', method_name='dump' ) From 7bf5ce4498ca3fabee5775dfe63704587005ec90 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 29 Jul 2019 18:49:29 +0100 Subject: [PATCH 192/383] ci: roughly bin-pack Travis jobs longest->shortest --- .travis.yml | 62 ++++++++++++++++++++++++++--------------------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/.travis.yml b/.travis.yml index b8ae0c62..a3f4967b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -35,6 +35,37 @@ matrix: env: MODE=mitogen_py24 DISTRO=centos5 include: + # Debops tests. + # 2.8.0; 3.6 -> 2.7 + - python: "3.6" + env: MODE=debops_common VER=2.8.0 + # 2.4.6.0; 2.7 -> 2.7 + - python: "2.7" + env: MODE=debops_common VER=2.4.6.0 + + # Sanity check against vanilla Ansible. One job suffices. + - python: "2.7" + env: MODE=ansible VER=2.8.0 DISTROS=debian STRATEGY=linear + + # ansible_mitogen tests. + + # 3.6 -> {debian, centos6, centos7} + - python: "3.6" + env: MODE=ansible VER=2.8.0 + - python: "3.6" + env: MODE=ansible VER=2.4.6.0 + + # 2.7 -> {debian, centos6, centos7} + - python: "2.7" + env: MODE=ansible VER=2.8.0 + # 2.6 -> {debian, centos6, centos7} + - python: "2.6" + env: MODE=ansible VER=2.4.6.0 + + # 2.3 -> {centos5} + - python: "2.6" + env: MODE=ansible VER=2.3.3.0 DISTROS=centos5 + # Mitogen tests. # 2.4 -> 2.4 - language: c @@ -50,34 +81,3 @@ matrix: - python: "2.6" env: MODE=mitogen DISTRO=debian-py3 # 3.6 -> 2.6 -- moved to Azure - - # Debops tests. - # 2.4.6.0; 2.7 -> 2.7 - - python: "2.7" - env: MODE=debops_common VER=2.4.6.0 - # 2.8.0; 3.6 -> 2.7 - - python: "3.6" - env: MODE=debops_common VER=2.8.0 - - # ansible_mitogen tests. - - # 2.3 -> {centos5} - - python: "2.6" - env: MODE=ansible VER=2.3.3.0 DISTROS=centos5 - - # 2.6 -> {debian, centos6, centos7} - - python: "2.6" - env: MODE=ansible VER=2.4.6.0 - # 2.7 -> {debian, centos6, centos7} - - python: "2.7" - env: MODE=ansible VER=2.8.0 - - # 3.6 -> {debian, centos6, centos7} - - python: "3.6" - env: MODE=ansible VER=2.4.6.0 - - python: "3.6" - env: MODE=ansible VER=2.8.0 - - # Sanity check against vanilla Ansible. One job suffices. - - python: "2.7" - env: MODE=ansible VER=2.8.0 DISTROS=debian STRATEGY=linear From 95e7bca51809bf1e36184c913efe1f455f52121d Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 29 Jul 2019 19:08:21 +0100 Subject: [PATCH 193/383] ci: try moving slower 2.8.0 jobs together --- .travis.yml | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/.travis.yml b/.travis.yml index a3f4967b..af637db1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -49,16 +49,17 @@ matrix: # ansible_mitogen tests. - # 3.6 -> {debian, centos6, centos7} + # 2.8.0 -> {debian, centos6, centos7} - python: "3.6" env: MODE=ansible VER=2.8.0 - - python: "3.6" - env: MODE=ansible VER=2.4.6.0 - - # 2.7 -> {debian, centos6, centos7} + # 2.8.0 -> {debian, centos6, centos7} - python: "2.7" env: MODE=ansible VER=2.8.0 - # 2.6 -> {debian, centos6, centos7} + + # 2.4.6.0 -> {debian, centos6, centos7} + - python: "3.6" + env: MODE=ansible VER=2.4.6.0 + # 2.4.6.0 -> {debian, centos6, centos7} - python: "2.6" env: MODE=ansible VER=2.4.6.0 From 4ced885619ee9c2893dca57eac25c9e9907e7196 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 30 Jul 2019 22:11:04 +0100 Subject: [PATCH 194/383] issue #543: localhost_ansible scripts. 
--- .ci/localhost_ansible_install.py | 16 ++++++++++++ .ci/localhost_ansible_tests.py | 43 ++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) create mode 100755 .ci/localhost_ansible_install.py create mode 100755 .ci/localhost_ansible_tests.py diff --git a/.ci/localhost_ansible_install.py b/.ci/localhost_ansible_install.py new file mode 100755 index 00000000..0cb47374 --- /dev/null +++ b/.ci/localhost_ansible_install.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python + +import ci_lib + +batches = [ + [ + # Must be installed separately, as PyNACL indirect requirement causes + # newer version to be installed if done in a single pip run. + 'pip install "pycparser<2.19" "idna<2.7"', + 'pip install ' + '-r tests/requirements.txt ' + '-r tests/ansible/requirements.txt', + ] +] + +ci_lib.run_batches(batches) diff --git a/.ci/localhost_ansible_tests.py b/.ci/localhost_ansible_tests.py new file mode 100755 index 00000000..71542d21 --- /dev/null +++ b/.ci/localhost_ansible_tests.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# Run tests/ansible/all.yml under Ansible and Ansible-Mitogen + +import glob +import os +import sys + +import ci_lib +from ci_lib import run + + +TESTS_DIR = os.path.join(ci_lib.GIT_ROOT, 'tests/ansible') +IMAGE_PREP_DIR = os.path.join(ci_lib.GIT_ROOT, 'tests/image_prep') +HOSTS_DIR = os.path.join(TESTS_DIR, 'hosts') + + +with ci_lib.Fold('unit_tests'): + os.environ['SKIP_MITOGEN'] = '1' + ci_lib.run('./run_tests -v') + + +with ci_lib.Fold('job_setup'): + # Don't set -U as that will upgrade Paramiko to a non-2.6 compatible version. + run("pip install -q ansible==%s", ci_lib.ANSIBLE_VERSION) + + os.chdir(TESTS_DIR) + os.chmod('../data/docker/mitogen__has_sudo_pubkey.key', int('0600', 7)) + + if not ci_lib.exists_in_path('sshpass'): + run("brew install sshpass") + + +with ci_lib.Fold('machine_prep'): + if os.path.expanduser('~mitogen__user1') == '~mitogen_user1': + os.chdir(IMAGE_PREP_DIR) + run("ansible-playbook -i localhost, _user_accounts.yml") + + +with ci_lib.Fold('ansible'): + os.chdir(TESTS_DIR) + playbook = os.environ.get('PLAYBOOK', 'all.yml') + run('./run_ansible_playbook.py %s -l target %s', + playbook, HOSTS_DIR, ' '.join(sys.argv[1:])) From 57203aef531f24c0065853d3c7327c425d2177ed Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 30 Jul 2019 22:12:06 +0100 Subject: [PATCH 195/383] issue #543: add Ansible job to Azure matrix --- .ci/azure-pipelines.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.ci/azure-pipelines.yml b/.ci/azure-pipelines.yml index 95f239ff..920e82a1 100644 --- a/.ci/azure-pipelines.yml +++ b/.ci/azure-pipelines.yml @@ -15,6 +15,9 @@ jobs: Mito27_27: python.version: '2.7' MODE: mitogen + Ans280_27: + python.version: '2.7' + MODE: localhost_ansible - job: Linux From 501cfca82b50af3576390ca69ad436bbba74a483 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 30 Jul 2019 22:52:26 +0100 Subject: [PATCH 196/383] issue #543: make localhost_ansible_tests run locally --- .ci/localhost_ansible_tests.py | 15 ++++++++++++--- tests/ansible/tests/affinity_test.py | 6 ++++++ tests/image_prep/_container_setup.yml | 4 ++++ tests/image_prep/_user_accounts.yml | 2 ++ tests/image_prep/setup.yml | 11 ----------- tests/image_prep/shared_vars.yml | 5 +++++ 6 files changed, 29 insertions(+), 14 deletions(-) create mode 100644 tests/image_prep/shared_vars.yml diff --git a/.ci/localhost_ansible_tests.py b/.ci/localhost_ansible_tests.py index 71542d21..b795da48 100755 --- a/.ci/localhost_ansible_tests.py +++ b/.ci/localhost_ansible_tests.py @@ -27,11 
+27,20 @@ with ci_lib.Fold('job_setup'): os.chmod('../data/docker/mitogen__has_sudo_pubkey.key', int('0600', 7)) if not ci_lib.exists_in_path('sshpass'): - run("brew install sshpass") + run("brew install http://git.io/sshpass.rb") with ci_lib.Fold('machine_prep'): - if os.path.expanduser('~mitogen__user1') == '~mitogen_user1': + key_path = os.path.expanduser('~/.ssh/id_rsa') + if not os.path.exists(key_path): + run("ssh-keygen -N '' -f %s", key_path) + + auth_path = os.path.expanduser('~/.ssh/authorized_keys') + with open(auth_path, 'a') as fp: + fp.write(open(key_path + '.pub').read()) + os.chmod(auth_path, int('0600', 8)) + + if os.path.expanduser('~mitogen__user1') == '~mitogen__user1': os.chdir(IMAGE_PREP_DIR) run("ansible-playbook -i localhost, _user_accounts.yml") @@ -40,4 +49,4 @@ with ci_lib.Fold('ansible'): os.chdir(TESTS_DIR) playbook = os.environ.get('PLAYBOOK', 'all.yml') run('./run_ansible_playbook.py %s -l target %s', - playbook, HOSTS_DIR, ' '.join(sys.argv[1:])) + playbook, ' '.join(sys.argv[1:])) diff --git a/tests/ansible/tests/affinity_test.py b/tests/ansible/tests/affinity_test.py index 641455bd..ccd72243 100644 --- a/tests/ansible/tests/affinity_test.py +++ b/tests/ansible/tests/affinity_test.py @@ -1,6 +1,7 @@ import multiprocessing import os +import sys import tempfile import mock @@ -221,6 +222,11 @@ class MockLinuxPolicyTest(testlib.TestCase): for x in range(1, 4096, 32): policy.assign_subprocess() +MockLinuxPolicyTest = unittest2.skipIf( + condition=(not sys.platform.startswith('linuxPolicy')), + reason='select.select() not supported' +)(MockLinuxPolicyTest) + if __name__ == '__main__': unittest2.main() diff --git a/tests/image_prep/_container_setup.yml b/tests/image_prep/_container_setup.yml index 9d001f48..2a30f49d 100644 --- a/tests/image_prep/_container_setup.yml +++ b/tests/image_prep/_container_setup.yml @@ -1,5 +1,7 @@ - hosts: all + vars_files: + - shared_vars.yml strategy: linear gather_facts: false tasks: @@ -13,6 +15,8 @@ fi - hosts: all + vars_files: + - shared_vars.yml strategy: mitogen_free # Can't gather facts before here. gather_facts: true diff --git a/tests/image_prep/_user_accounts.yml b/tests/image_prep/_user_accounts.yml index 5f1bf0dc..e6ace82f 100644 --- a/tests/image_prep/_user_accounts.yml +++ b/tests/image_prep/_user_accounts.yml @@ -5,6 +5,8 @@ # - hosts: all + vars_files: + - shared_vars.yml gather_facts: true strategy: mitogen_free become: true diff --git a/tests/image_prep/setup.yml b/tests/image_prep/setup.yml index 760da0f6..2c37c6bb 100644 --- a/tests/image_prep/setup.yml +++ b/tests/image_prep/setup.yml @@ -1,14 +1,3 @@ -- hosts: all - gather_facts: false - tasks: - - set_fact: - # Hacktacular.. but easiest place for it with current structure. 
- sudo_group: - MacOSX: admin - Debian: sudo - Ubuntu: sudo - CentOS: wheel - - include: _container_setup.yml - include: _user_accounts.yml diff --git a/tests/image_prep/shared_vars.yml b/tests/image_prep/shared_vars.yml new file mode 100644 index 00000000..4be7babe --- /dev/null +++ b/tests/image_prep/shared_vars.yml @@ -0,0 +1,5 @@ +sudo_group: + MacOSX: admin + Debian: sudo + Ubuntu: sudo + CentOS: wheel From 0e55bb3eb7d3dd972165910847ebebf3a0a5b659 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 31 Jul 2019 00:13:15 +0100 Subject: [PATCH 197/383] image_prep: ensure Mac users can SSH without manual intervention --- tests/image_prep/_user_accounts.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/image_prep/_user_accounts.yml b/tests/image_prep/_user_accounts.yml index e6ace82f..70f5d0eb 100644 --- a/tests/image_prep/_user_accounts.yml +++ b/tests/image_prep/_user_accounts.yml @@ -75,7 +75,11 @@ - user: name: "mitogen__{{item}}" shell: /bin/bash - groups: "{{user_groups[item]|default(['mitogen__group'])}}" + groups: | + {{ + ['com.apple.access_ssh'] + + (user_groups[item] | default(['mitogen__group'])) + }} password: "{{item}}_password" with_items: "{{all_users}}" when: ansible_system == 'Darwin' From 92de2abeea611cb1ed98f0d493c0fabe91db1c9f Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 31 Jul 2019 00:13:50 +0100 Subject: [PATCH 198/383] issue #543: use key from Git, newer ssh-keygen unsupported by Paramiko --- .ci/localhost_ansible_tests.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/.ci/localhost_ansible_tests.py b/.ci/localhost_ansible_tests.py index b795da48..662c7c73 100755 --- a/.ci/localhost_ansible_tests.py +++ b/.ci/localhost_ansible_tests.py @@ -3,6 +3,7 @@ import glob import os +import shutil import sys import ci_lib @@ -12,6 +13,7 @@ from ci_lib import run TESTS_DIR = os.path.join(ci_lib.GIT_ROOT, 'tests/ansible') IMAGE_PREP_DIR = os.path.join(ci_lib.GIT_ROOT, 'tests/image_prep') HOSTS_DIR = os.path.join(TESTS_DIR, 'hosts') +KEY_PATH = os.path.join(TESTS_DIR, '../data/docker/mitogen__has_sudo_pubkey.key') with ci_lib.Fold('unit_tests'): @@ -23,21 +25,17 @@ with ci_lib.Fold('job_setup'): # Don't set -U as that will upgrade Paramiko to a non-2.6 compatible version. 
run("pip install -q ansible==%s", ci_lib.ANSIBLE_VERSION) - os.chdir(TESTS_DIR) - os.chmod('../data/docker/mitogen__has_sudo_pubkey.key', int('0600', 7)) - + os.chmod(KEY_PATH, int('0600', 8)) if not ci_lib.exists_in_path('sshpass'): run("brew install http://git.io/sshpass.rb") with ci_lib.Fold('machine_prep'): key_path = os.path.expanduser('~/.ssh/id_rsa') - if not os.path.exists(key_path): - run("ssh-keygen -N '' -f %s", key_path) + shutil.copy(KEY_PATH, key_path) auth_path = os.path.expanduser('~/.ssh/authorized_keys') - with open(auth_path, 'a') as fp: - fp.write(open(key_path + '.pub').read()) + os.system('ssh-keygen -y -f %s >> %s' % (key_path, auth_path)) os.chmod(auth_path, int('0600', 8)) if os.path.expanduser('~mitogen__user1') == '~mitogen__user1': From 17d0e1b31534ccdfeb54973f24f441845abd04c6 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 31 Jul 2019 00:14:33 +0100 Subject: [PATCH 199/383] issue #543: skip test that's hard to do on Mac --- tests/ansible/integration/context_service/remote_name.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/ansible/integration/context_service/remote_name.yml b/tests/ansible/integration/context_service/remote_name.yml index 827abaee..d7116ec1 100644 --- a/tests/ansible/integration/context_service/remote_name.yml +++ b/tests/ansible/integration/context_service/remote_name.yml @@ -7,6 +7,10 @@ - meta: end_play when: not is_mitogen + # Too much hassle to make this work for OSX + - meta: end_play + when: ansible_system != 'Linux' + - shell: 'cat /proc/$PPID/cmdline | tr \\0 \\n' register: out - debug: var=out From 0741876392c4983aacc27e7688ca8dbe95aeb746 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 31 Jul 2019 00:04:49 +0100 Subject: [PATCH 200/383] issue #543: Hide Mitogen test users from gdm --- tests/image_prep/_user_accounts.yml | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/tests/image_prep/_user_accounts.yml b/tests/image_prep/_user_accounts.yml index 70f5d0eb..e5b5722d 100644 --- a/tests/image_prep/_user_accounts.yml +++ b/tests/image_prep/_user_accounts.yml @@ -84,9 +84,9 @@ with_items: "{{all_users}}" when: ansible_system == 'Darwin' - - name: Hide users from login window. - with_items: "{{all_users}}" + - name: Hide users from login window (Darwin). when: ansible_system == 'Darwin' + with_items: "{{all_users}}" osx_defaults: array_add: true domain: /Library/Preferences/com.apple.loginwindow @@ -94,6 +94,26 @@ key: HiddenUsersList value: ['mitogen_{{item}}'] + - name: Check if AccountsService is used + file: + path: /var/lib/AccountsService/users + register: out + + - name: Hide users from login window (Linux). + when: ansible_system == 'Linux' and out.stat.exists + with_items: "{{all_users}}" + copy: + dest: /var/lib/AccountsService/users/mitogen__{{item}} + content: | + [User] + SystemAccount=true + + - name: Restart AccountsService (Linux). 
+ when: ansible_system == 'Linux' and out.stat.exists + service: + name: accounts-daemon + restarted: true + - name: Readonly homedir for one account shell: "chown -R root: ~mitogen__readonly_homedir" From edb745f4348c2867d3e0b477796723409ca0aaa5 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 31 Jul 2019 00:22:59 +0100 Subject: [PATCH 201/383] issue #543: create ~/.ssh if it doesn't exist --- .ci/localhost_ansible_tests.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.ci/localhost_ansible_tests.py b/.ci/localhost_ansible_tests.py index 662c7c73..11b1faa1 100755 --- a/.ci/localhost_ansible_tests.py +++ b/.ci/localhost_ansible_tests.py @@ -31,6 +31,10 @@ with ci_lib.Fold('job_setup'): with ci_lib.Fold('machine_prep'): + ssh_dir = os.path.expanduser('~/.ssh') + if not os.path.exists(ssh_dir): + os.makedirs(ssh_dir, int('0700', 8)) + key_path = os.path.expanduser('~/.ssh/id_rsa') shutil.copy(KEY_PATH, key_path) From f3915b5f4021220b303759d07f61a02fde3e0b13 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 31 Jul 2019 00:50:22 +0100 Subject: [PATCH 202/383] issue #543: disable host key checking --- .ci/localhost_ansible_tests.py | 2 +- tests/image_prep/ansible.cfg | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.ci/localhost_ansible_tests.py b/.ci/localhost_ansible_tests.py index 11b1faa1..f9465899 100755 --- a/.ci/localhost_ansible_tests.py +++ b/.ci/localhost_ansible_tests.py @@ -44,7 +44,7 @@ with ci_lib.Fold('machine_prep'): if os.path.expanduser('~mitogen__user1') == '~mitogen__user1': os.chdir(IMAGE_PREP_DIR) - run("ansible-playbook -i localhost, _user_accounts.yml") + run("ansible-playbook -c local -i localhost, _user_accounts.yml") with ci_lib.Fold('ansible'): diff --git a/tests/image_prep/ansible.cfg b/tests/image_prep/ansible.cfg index 8a8c47fa..60f2975e 100644 --- a/tests/image_prep/ansible.cfg +++ b/tests/image_prep/ansible.cfg @@ -4,3 +4,4 @@ strategy_plugins = ../../ansible_mitogen/plugins/strategy retry_files_enabled = false display_args_to_stdout = True no_target_syslog = True +host_key_checking = False From ebb4a7ca6a23bdc91689c5982d540cf2cc067eec Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 31 Jul 2019 00:57:32 +0100 Subject: [PATCH 203/383] issue #543: dumb fix for file vs. stat :( --- tests/image_prep/_user_accounts.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/image_prep/_user_accounts.yml b/tests/image_prep/_user_accounts.yml index e5b5722d..fbefd9c3 100644 --- a/tests/image_prep/_user_accounts.yml +++ b/tests/image_prep/_user_accounts.yml @@ -95,7 +95,7 @@ value: ['mitogen_{{item}}'] - name: Check if AccountsService is used - file: + stat: path: /var/lib/AccountsService/users register: out From 57db3a36e1d468789abc731d95949e4a52251ed0 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 31 Jul 2019 01:03:23 +0100 Subject: [PATCH 204/383] issue #543: install virtualenv for Azure --- .ci/localhost_ansible_tests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/localhost_ansible_tests.py b/.ci/localhost_ansible_tests.py index f9465899..f7e1ecbd 100755 --- a/.ci/localhost_ansible_tests.py +++ b/.ci/localhost_ansible_tests.py @@ -23,7 +23,7 @@ with ci_lib.Fold('unit_tests'): with ci_lib.Fold('job_setup'): # Don't set -U as that will upgrade Paramiko to a non-2.6 compatible version. 
- run("pip install -q ansible==%s", ci_lib.ANSIBLE_VERSION) + run("pip install -q virtualenv ansible==%s", ci_lib.ANSIBLE_VERSION) os.chmod(KEY_PATH, int('0600', 8)) if not ci_lib.exists_in_path('sshpass'): From dc9f4e89e672b2ed228ecaa2655a6d5e75eb89ea Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 29 Jul 2019 22:49:30 +0100 Subject: [PATCH 205/383] ansible: reap mux processes on shut down Previously we exitted without calling waitpid(), which meant the top-level process struct rusage did not reflect the resource usage consumed by the multiplexer processes. Existing benchmarks are made using perf so this never created a problem, but it could be confusing to others using the "time" command, and also allows logging the final exit status of the process. --- ansible_mitogen/process.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index cb130abc..7cdd68ae 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -407,6 +407,12 @@ class ClassicWorkerModel(WorkerModel): mitogen.core.io_op(sock.recv, 1) sock.close() + for mux in self._muxes: + _, status = os.waitpid(mux.pid, 0) + status = mitogen.fork._convert_exit_status(status) + LOG.debug('mux %d PID %d %s', mux.index, mux.pid, + mitogen.parent.returncode_to_str(status)) + def _initialize(self): """ Arrange for classic process model connection multiplexer child @@ -555,7 +561,6 @@ class MuxProcess(object): mitogen.core.io_op(MuxProcess.cls_parent_sock.recv, 1) return - save_pid('mux') ansible_mitogen.logging.set_process_name('mux:' + str(self.index)) if setproctitle: setproctitle.setproctitle('mitogen mux:%s (%s)' % ( From bf1f3682aae50791c631aedf233a2f84a42a6d45 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 29 Jul 2019 22:53:53 +0100 Subject: [PATCH 206/383] ansible: pin per-CPU muxes to their corresponding CPU This slightly breaks the old scheme, in that CPU 1 may now end up with a mux and the top-level process pinned to it. --- ansible_mitogen/affinity.py | 8 ++++---- ansible_mitogen/process.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ansible_mitogen/affinity.py b/ansible_mitogen/affinity.py index 94539e21..eeeb4d38 100644 --- a/ansible_mitogen/affinity.py +++ b/ansible_mitogen/affinity.py @@ -142,7 +142,7 @@ class Policy(object): Assign the Ansible top-level policy to this process. """ - def assign_muxprocess(self): + def assign_muxprocess(self, index): """ Assign the MuxProcess policy to this process. 
""" @@ -224,7 +224,7 @@ class FixedPolicy(Policy): )) def _set_cpu(self, cpu): - self._set_affinity(1 << cpu) + self._set_affinity(1 << (cpu % self.cpu_count)) def _clear(self): all_cpus = (1 << self.cpu_count) - 1 @@ -236,8 +236,8 @@ class FixedPolicy(Policy): else: self._balance() - def assign_muxprocess(self): - self._set_cpu(0) + def assign_muxprocess(self, index): + self._set_cpu(index) def assign_worker(self): self._balance() diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index 7cdd68ae..a8052061 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -586,7 +586,7 @@ class MuxProcess(object): """ save_pid('mux') ansible_mitogen.logging.set_process_name('mux') - ansible_mitogen.affinity.policy.assign_muxprocess() + ansible_mitogen.affinity.policy.assign_muxprocess(self.index) self._setup_master() self._setup_services() From a9d3fdf6b70d1a723e9572784ce87b71f0444c19 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 30 Jul 2019 04:01:08 +0100 Subject: [PATCH 207/383] ansible: fix affinity tests for 5ae45f6612390bbc888b65964fb5c218feed1679 --- tests/ansible/tests/affinity_test.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/ansible/tests/affinity_test.py b/tests/ansible/tests/affinity_test.py index ccd72243..d9618bda 100644 --- a/tests/ansible/tests/affinity_test.py +++ b/tests/ansible/tests/affinity_test.py @@ -64,32 +64,32 @@ class FixedPolicyTest(testlib.TestCase): def test_assign_muxprocess_1core(self): # Uniprocessor . policy = self.klass(cpu_count=1) - policy.assign_muxprocess() + policy.assign_muxprocess(0) self.assertEquals(0x1, policy.mask) def test_assign_muxprocess_2core(self): # Small SMP gets dedicated core. policy = self.klass(cpu_count=2) - policy.assign_muxprocess() + policy.assign_muxprocess(0) self.assertEquals(0x1, policy.mask) - policy.assign_muxprocess() + policy.assign_muxprocess(0) self.assertEquals(0x1, policy.mask) - policy.assign_muxprocess() + policy.assign_muxprocess(0) def test_assign_muxprocess_3core(self): # Small SMP gets a dedicated core. policy = self.klass(cpu_count=3) - policy.assign_muxprocess() + policy.assign_muxprocess(0) self.assertEquals(0x1, policy.mask) - policy.assign_muxprocess() + policy.assign_muxprocess(0) self.assertEquals(0x1, policy.mask) def test_assign_muxprocess_4core(self): # Big SMP gets a dedicated core. 
policy = self.klass(cpu_count=4) - policy.assign_muxprocess() + policy.assign_muxprocess(0) self.assertEquals(0x1, policy.mask) - policy.assign_muxprocess() + policy.assign_muxprocess(0) self.assertEquals(0x1, policy.mask) def test_assign_worker_1core(self): From 108015aa2279133181e0c7e643e6ee8353d8cfa8 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 30 Jul 2019 04:01:49 +0100 Subject: [PATCH 208/383] ansible: gracefully handle failure to connect to MuxProcess It's possible to hit an ugly exception during early CTRL+C --- ansible_mitogen/connection.py | 1 - ansible_mitogen/process.py | 24 +++++++++++++++++++----- mitogen/unix.py | 25 ++++++++++++++++++++++++- tests/ansible/tests/connection_test.py | 24 +++++++++++++++++++++--- tests/testlib.py | 9 ++++++++- 5 files changed, 72 insertions(+), 11 deletions(-) diff --git a/ansible_mitogen/connection.py b/ansible_mitogen/connection.py index 9fd279b1..b6cb2237 100644 --- a/ansible_mitogen/connection.py +++ b/ansible_mitogen/connection.py @@ -46,7 +46,6 @@ import ansible.utils.shlex import mitogen.core import mitogen.fork -import mitogen.unix import mitogen.utils import ansible_mitogen.parsing diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index a8052061..71da10be 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -79,6 +79,14 @@ worker_model_msg = ( '"mitogen_*" or "operon_*" strategies are active.' ) +shutting_down_msg = ( + 'The task worker cannot connect. Ansible may be shutting down, or ' + 'the maximum open files limit may have been exceeded. If this occurs ' + 'midway through a run, please retry after increasing the open file ' + 'limit (ulimit -n). Original error: %s' +) + + #: The worker model as configured by the currently running strategy. This is #: managed via :func:`get_worker_model` / :func:`set_worker_model` functions by #: :class:`StrategyMixin`. @@ -376,10 +384,16 @@ class ClassicWorkerModel(WorkerModel): self.parent = None self.router = None - self.router, self.parent = mitogen.unix.connect( - path=path, - broker=self.broker, - ) + try: + self.router, self.parent = mitogen.unix.connect( + path=path, + broker=self.broker, + ) + except mitogen.unix.ConnectError as e: + # This is not AnsibleConnectionFailure since we want to break + # with_items loops. + raise ansible.errors.AnsibleError(shutting_down_msg % (e,)) + self.listener_path = path def on_process_exit(self, sock): @@ -459,7 +473,7 @@ class ClassicWorkerModel(WorkerModel): for mux in self._muxes: pid, status = os.waitpid(mux.pid, 0) status = mitogen.fork._convert_exit_status(status) - LOG.debug('mux PID %d %s', pid, + LOG.error('mux PID %d %s', pid, mitogen.parent.returncode_to_str(status)) _classic_worker_model = None diff --git a/mitogen/unix.py b/mitogen/unix.py index c0d2bb9c..15f22076 100644 --- a/mitogen/unix.py +++ b/mitogen/unix.py @@ -48,6 +48,22 @@ import mitogen.master from mitogen.core import LOG +class Error(mitogen.core.Error): + """ + Base for errors raised by :mod:`mitogen.unix`. + """ + pass + + +class ConnectError(Error): + """ + Raised when :func:`mitogen.unix.connect` fails to connect to the listening + socket. + """ + #: UNIX error number reported by underlying exception. 
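(PATCH 205/383 above notes that exiting without ``waitpid()`` left the top-level process's rusage blind to work done by the mux children. The snippet below is a tiny standalone demonstration of that relationship, unrelated to the surrounding diff: ``RUSAGE_CHILDREN`` only reflects children that have already been waited on.)::

    import os
    import resource

    pid = os.fork()
    if pid == 0:
        os._exit(0)                  # child exits immediately

    os.waitpid(pid, 0)               # reaping folds its usage into the parent
    usage = resource.getrusage(resource.RUSAGE_CHILDREN)
    print(usage.ru_utime, usage.ru_maxrss)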
+ errno = None + + def is_path_dead(path): s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: @@ -154,7 +170,14 @@ class Listener(mitogen.core.Protocol): def _connect(path, broker, sock): - sock.connect(path) + try: + sock.connect(path) + except socket.error: + e = sys.exc_info()[1] + ce = ConnectError('could not connect to %s: %s', path, e.args[1]) + ce.errno = e.args[0] + raise ce + sock.send(struct.pack('>L', os.getpid())) mitogen.context_id, remote_id, pid = struct.unpack('>LLL', sock.recv(12)) mitogen.parent_id = remote_id diff --git a/tests/ansible/tests/connection_test.py b/tests/ansible/tests/connection_test.py index 73958185..e7646716 100644 --- a/tests/ansible/tests/connection_test.py +++ b/tests/ansible/tests/connection_test.py @@ -23,6 +23,8 @@ import testlib class MuxProcessMixin(object): + no_zombie_check = True + @classmethod def setUpClass(cls): #mitogen.utils.log_to_file() @@ -61,7 +63,23 @@ class ConnectionMixin(MuxProcessMixin): super(ConnectionMixin, self).tearDown() -class OptionalIntTest(unittest2.TestCase): +class MuxShutdownTest(ConnectionMixin, testlib.TestCase): + def test_connection_failure_raised(self): + # ensure if a WorkerProcess tries to connect to a MuxProcess that has + # already shut down, it fails with a graceful error. + path = self.model._muxes[0].path + os.rename(path, path + '.tmp') + try: + #e = self.assertRaises(ansible.errors.AnsibleError, + #lambda: self.conn._connect() + #) + e = 1 + print(e) + finally: + os.rename(path + '.tmp', path) + + +class OptionalIntTest(testlib.TestCase): func = staticmethod(ansible_mitogen.connection.optional_int) def test_already_int(self): @@ -81,7 +99,7 @@ class OptionalIntTest(unittest2.TestCase): self.assertEquals(None, self.func({1:2})) -class PutDataTest(ConnectionMixin, unittest2.TestCase): +class PutDataTest(ConnectionMixin, testlib.TestCase): def test_out_path(self): path = tempfile.mktemp(prefix='mitotest') contents = mitogen.core.b('contents') @@ -102,7 +120,7 @@ class PutDataTest(ConnectionMixin, unittest2.TestCase): os.unlink(path) -class PutFileTest(ConnectionMixin, unittest2.TestCase): +class PutFileTest(ConnectionMixin, testlib.TestCase): @classmethod def setUpClass(cls): super(PutFileTest, cls).setUpClass() diff --git a/tests/testlib.py b/tests/testlib.py index 3eeaa461..673d5ca6 100644 --- a/tests/testlib.py +++ b/tests/testlib.py @@ -343,7 +343,14 @@ class TestCase(unittest2.TestCase): self, self._fd_count_before, get_fd_count(), ) + # Some class fixtures (like Ansible MuxProcess) start persistent children + # for the duration of the class. + no_zombie_check = False + def _teardown_check_zombies(self): + if self.no_zombie_check: + return + try: pid, status = os.waitpid(0, os.WNOHANG) except OSError: @@ -354,7 +361,7 @@ class TestCase(unittest2.TestCase): self, pid, status ) - print() + print('') print('Children of unit test process:') os.system('ps uww --ppid ' + str(os.getpid())) assert 0, "%s leaked still-running subprocesses." 
% (self,) From de2e1ec184ed21507b5bf546deefdf0c7a73c933 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 30 Jul 2019 02:06:00 +0100 Subject: [PATCH 209/383] tests/ansible/gcloud: terraform conf for load testing --- tests/ansible/gcloud/.gitignore | 2 + tests/ansible/gcloud/Makefile | 3 + tests/ansible/gcloud/controller.yml | 115 ++++++++++----- tests/ansible/gcloud/gce.yml | 11 -- tests/ansible/gcloud/mitogen-load-testing.tf | 143 +++++++++++++++++++ 5 files changed, 224 insertions(+), 50 deletions(-) create mode 100644 tests/ansible/gcloud/.gitignore create mode 100644 tests/ansible/gcloud/Makefile delete mode 100644 tests/ansible/gcloud/gce.yml create mode 100644 tests/ansible/gcloud/mitogen-load-testing.tf diff --git a/tests/ansible/gcloud/.gitignore b/tests/ansible/gcloud/.gitignore new file mode 100644 index 00000000..ea1f477c --- /dev/null +++ b/tests/ansible/gcloud/.gitignore @@ -0,0 +1,2 @@ +terraform.tfstate* +.terraform diff --git a/tests/ansible/gcloud/Makefile b/tests/ansible/gcloud/Makefile new file mode 100644 index 00000000..c5413cd6 --- /dev/null +++ b/tests/ansible/gcloud/Makefile @@ -0,0 +1,3 @@ + +default: + terraform fmt diff --git a/tests/ansible/gcloud/controller.yml b/tests/ansible/gcloud/controller.yml index 3c7f9ea0..1128a9b1 100644 --- a/tests/ansible/gcloud/controller.yml +++ b/tests/ansible/gcloud/controller.yml @@ -1,19 +1,89 @@ -- hosts: controller - vars: - git_username: '{{ lookup("pipe", "git config --global user.name") }}' - git_email: '{{ lookup("pipe", "git config --global user.email") }}' - +- hosts: all + become: true tasks: + - apt: name={{item}} state=installed + with_items: + - openvpn + - tcpdump + - python-pip + - python-virtualenv + - strace + - libldap2-dev + - linux-perf + - libsasl2-dev + - build-essential + - git + - rsync + + - file: + path: /etc/openvpn + state: directory + + - copy: + dest: /etc/openvpn/secret + mode: '0600' + content: | + -----BEGIN OpenVPN Static key V1----- + f94005e4206828e281eb397aefd69b37 + ebe6cd39057d5641c5d8dd539cd07651 + 557d94d0077852bd8f92b68bef927169 + c5f0e42ac962a2cbbed35e107ffa0e71 + 1a2607c6bcd919ec5846917b20eb6684 + c7505152815d6ed7b4420714777a3d4a + 8edb27ca81971cba7a1e88fe3936e13b + 85e9be6706a30cd1334836ed0f08e899 + 78942329a330392dff42e4570731ac24 + 9330358aaa6828c07ecb41fb9c498a89 + 1e0435c5a45bfed390cd2104073634ef + b00f9fae1d3c49ef5de51854103edac9 + 5ff39c9dfc66ae270510b2ffa74d87d2 + 9d4b3844b1e1473237bc6dc78fb03e2e + 643ce58e667a532efceec7177367fb37 + a16379a51e0a8c8e3ec00a59952b79d4 + -----END OpenVPN Static key V1----- + + - copy: + dest: /etc/openvpn/k3.conf + content: | + remote k3.botanicus.net + dev tun + ifconfig 10.18.0.1 10.18.0.2 + secret secret + + - shell: systemctl enable openvpn@k3.service + - shell: systemctl start openvpn@k3.service + - lineinfile: line: "{{item}}" path: /etc/sysctl.conf register: sysctl_conf - become: true with_items: - "net.ipv4.ip_forward=1" - "kernel.perf_event_paranoid=-1" + - shell: /sbin/sysctl -p + when: sysctl_conf.changed + + - copy: + dest: /etc/rc.local + mode: "0744" + content: | + #!/bin/bash + iptables -t nat -F; + iptables -t nat -X; + iptables -t nat -A POSTROUTING -j MASQUERADE; + + - shell: systemctl daemon-reload + - shell: systemctl enable rc-local + - shell: systemctl start rc-local + + +- hosts: all + vars: + git_username: '{{ lookup("pipe", "git config --global user.name") }}' + git_email: '{{ lookup("pipe", "git config --global user.email") }}' + tasks: - copy: src: ~/.ssh/id_gitlab dest: ~/.ssh/id_gitlab @@ -23,38 +93,6 @@ dest: 
~/.ssh/config src: ssh_config.j2 - - lineinfile: - line: "{{item}}" - path: /etc/sysctl.conf - become: true - with_items: - - net.ipv4.ip_forward=1 - - kernel.perf_event_paranoid=-1 - register: sysctl_conf - - - shell: /sbin/sysctl -p - when: sysctl_conf.changed - become: true - - - shell: | - iptables -t nat -F; - iptables -t nat -X; - iptables -t nat -A POSTROUTING -j MASQUERADE; - become: true - - - apt: name={{item}} state=installed - become: true - with_items: - - python-pip - - python-virtualenv - - strace - - libldap2-dev - - linux-perf - - libsasl2-dev - - build-essential - - git - - rsync - - shell: "rsync -a ~/.ssh {{inventory_hostname}}:" connection: local @@ -119,4 +157,3 @@ path: ~/prj/ansible/inventory/gcloud.py state: link src: ~/mitogen/tests/ansible/lib/inventory/gcloud.py - diff --git a/tests/ansible/gcloud/gce.yml b/tests/ansible/gcloud/gce.yml deleted file mode 100644 index e3f64c23..00000000 --- a/tests/ansible/gcloud/gce.yml +++ /dev/null @@ -1,11 +0,0 @@ - -- hosts: localhost - tasks: - - command: date +%Y%m%d-%H%M%S - register: out - - - set_fact: - instance_name: "controller-{{out.stdout}}" - - - command: > - gcloud compute instances create {{instance_name}} --can-ip-forward --machine-type=n1-standard-8 --preemptible --scopes=compute-ro --image-project=debian-cloud --image-family=debian-9 diff --git a/tests/ansible/gcloud/mitogen-load-testing.tf b/tests/ansible/gcloud/mitogen-load-testing.tf new file mode 100644 index 00000000..774b615d --- /dev/null +++ b/tests/ansible/gcloud/mitogen-load-testing.tf @@ -0,0 +1,143 @@ +variable "node-count" { + default = 0 +} + +provider "google" { + project = "mitogen-load-testing" + region = "europe-west1" + zone = "europe-west1-d" +} + +resource "google_compute_instance" "controller" { + name = "ansible-controller" + + # machine_type = "n1-highcpu-32" + #machine_type = "f1-micro" + #machine_type = "custom-4-8192" + machine_type = "custom-1-1024" + + allow_stopping_for_update = true + can_ip_forward = true + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + } + + scheduling { + preemptible = true + automatic_restart = false + } + + network_interface { + subnetwork = "${google_compute_subnetwork.loadtest-subnet.self_link}" + access_config = {} + } + + provisioner "local-exec" { + command = "ssh-keygen -R ${google_compute_instance.controller.network_interface.0.access_config.0.nat_ip}" + } + + provisioner "local-exec" { + command = "ansible-playbook -i ${google_compute_instance.controller.network_interface.0.access_config.0.nat_ip}, controller.yml" + } +} + +resource "google_compute_network" "loadtest" { + name = "loadtest" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "loadtest-subnet" { + name = "loadtest-subnet" + ip_cidr_range = "10.19.0.0/16" + network = "${google_compute_network.loadtest.id}" +} + +resource "google_compute_firewall" "allow-all-in" { + name = "allow-all-in" + network = "${google_compute_network.loadtest.name}" + direction = "INGRESS" + + allow { + protocol = "all" + } +} + +resource "google_compute_firewall" "allow-all-out" { + name = "allow-all-out" + network = "${google_compute_network.loadtest.name}" + direction = "EGRESS" + + allow { + protocol = "all" + } +} + +resource "google_compute_route" "route-nodes-via-controller" { + name = "route-nodes-via-controller" + dest_range = "0.0.0.0/0" + network = "${google_compute_network.loadtest.name}" + next_hop_instance = "${google_compute_instance.controller.self_link}" + next_hop_instance_zone = 
"${google_compute_instance.controller.zone}" + priority = 800 + tags = ["node"] +} + +resource "google_compute_instance_template" "node" { + name = "node" + tags = ["node"] + machine_type = "custom-1-1024" + + scheduling { + preemptible = true + automatic_restart = false + } + + disk { + source_image = "debian-cloud/debian-9" + auto_delete = true + boot = true + } + + network_interface { + subnetwork = "${google_compute_subnetwork.loadtest-subnet.self_link}" + } +} + +# +# Compute Engine tops out at 1000 VMs per group +# + +resource "google_compute_instance_group_manager" "nodes-a" { + name = "nodes-a" + + base_instance_name = "node" + instance_template = "${google_compute_instance_template.node.self_link}" + target_size = "${var.node-count / 4}" +} + +resource "google_compute_instance_group_manager" "nodes-b" { + name = "nodes-b" + + base_instance_name = "node" + instance_template = "${google_compute_instance_template.node.self_link}" + target_size = "${var.node-count / 4}" +} + +resource "google_compute_instance_group_manager" "nodes-c" { + name = "nodes-c" + + base_instance_name = "node" + instance_template = "${google_compute_instance_template.node.self_link}" + target_size = "${var.node-count / 4}" +} + +resource "google_compute_instance_group_manager" "nodes-d" { + name = "nodes-d" + + base_instance_name = "node" + instance_template = "${google_compute_instance_template.node.self_link}" + target_size = "${var.node-count / 4}" +} From 4dfbe82e76cd6ef7158069e1dfdc5755ef7cba9d Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 30 Jul 2019 04:04:32 +0100 Subject: [PATCH 210/383] tests: hide ugly error during Ansible tests --- ansible_mitogen/process.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index 71da10be..ba4d0687 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -473,7 +473,7 @@ class ClassicWorkerModel(WorkerModel): for mux in self._muxes: pid, status = os.waitpid(mux.pid, 0) status = mitogen.fork._convert_exit_status(status) - LOG.error('mux PID %d %s', pid, + LOG.debug('mux PID %d %s', pid, mitogen.parent.returncode_to_str(status)) _classic_worker_model = None From 5ed0b936691614f7a3d44caf535a68e117031a27 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 30 Jul 2019 03:27:08 +0000 Subject: [PATCH 211/383] tests: update gcloud.py to match terraform config --- tests/ansible/lib/inventory/gcloud.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/ansible/lib/inventory/gcloud.py b/tests/ansible/lib/inventory/gcloud.py index 73e083f4..9920a008 100755 --- a/tests/ansible/lib/inventory/gcloud.py +++ b/tests/ansible/lib/inventory/gcloud.py @@ -14,14 +14,14 @@ import googleapiclient.discovery def main(): project = 'mitogen-load-testing' zone = 'europe-west1-d' - group_name = 'micro-debian9' + prefix = 'node-' client = googleapiclient.discovery.build('compute', 'v1') resp = client.instances().list(project=project, zone=zone).execute() ips = [] for inst in resp['items']: - if inst['status'] == 'RUNNING' and inst['name'].startswith(group_name): + if inst['status'] == 'RUNNING' and inst['name'].startswith(prefix): ips.extend( #bytes(config['natIP']) bytes(interface['networkIP']) From 3b000c7d152d5aa64c22c76bc30f402ea5f090cf Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 30 Jul 2019 04:14:13 +0100 Subject: [PATCH 212/383] unix: include more IO in the try/except for connection failure --- mitogen/unix.py | 7 +++++-- 1 file changed, 5 
insertions(+), 2 deletions(-) diff --git a/mitogen/unix.py b/mitogen/unix.py index 15f22076..c34dc064 100644 --- a/mitogen/unix.py +++ b/mitogen/unix.py @@ -171,15 +171,18 @@ class Listener(mitogen.core.Protocol): def _connect(path, broker, sock): try: + # ENOENT, ECONNREFUSED sock.connect(path) + + # ECONNRESET + sock.send(struct.pack('>L', os.getpid())) + mitogen.context_id, remote_id, pid = struct.unpack('>LLL', sock.recv(12)) except socket.error: e = sys.exc_info()[1] ce = ConnectError('could not connect to %s: %s', path, e.args[1]) ce.errno = e.args[0] raise ce - sock.send(struct.pack('>L', os.getpid())) - mitogen.context_id, remote_id, pid = struct.unpack('>LLL', sock.recv(12)) mitogen.parent_id = remote_id mitogen.parent_ids = [remote_id] From 28b4d63e49f5bc51ede099dd81f187279ae270cb Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 30 Jul 2019 04:31:28 +0100 Subject: [PATCH 213/383] tests: terraform tweaks --- tests/ansible/gcloud/mitogen-load-testing.tf | 22 +++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/tests/ansible/gcloud/mitogen-load-testing.tf b/tests/ansible/gcloud/mitogen-load-testing.tf index 774b615d..076722ce 100644 --- a/tests/ansible/gcloud/mitogen-load-testing.tf +++ b/tests/ansible/gcloud/mitogen-load-testing.tf @@ -2,6 +2,10 @@ variable "node-count" { default = 0 } +variable "big" { + default = false +} + provider "google" { project = "mitogen-load-testing" region = "europe-west1" @@ -10,11 +14,7 @@ provider "google" { resource "google_compute_instance" "controller" { name = "ansible-controller" - - # machine_type = "n1-highcpu-32" - #machine_type = "f1-micro" - #machine_type = "custom-4-8192" - machine_type = "custom-1-1024" + machine_type = "${var.big ? "n1-highcpu-32" : "custom-1-1024"}" allow_stopping_for_update = true can_ip_forward = true @@ -36,11 +36,13 @@ resource "google_compute_instance" "controller" { } provisioner "local-exec" { - command = "ssh-keygen -R ${google_compute_instance.controller.network_interface.0.access_config.0.nat_ip}" - } - - provisioner "local-exec" { - command = "ansible-playbook -i ${google_compute_instance.controller.network_interface.0.access_config.0.nat_ip}, controller.yml" + command = <<-EOF + ip=${google_compute_instance.controller.network_interface.0.access_config.0.nat_ip}; + ssh-keygen -R $ip; + ssh-keyscan $ip >> ~/.ssh/known_hosts; + sed -ri -e "s/.*CONTROLLER_IP_HERE.*/ Hostname $ip/" ~/.ssh/config; + ansible-playbook -i $ip, controller.yml + EOF } } From 558ebfa914f9b27681d6cfd7249a2532a2fc470f Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 30 Jul 2019 19:12:40 +0100 Subject: [PATCH 214/383] ci: update to Ansible 2.8.3 --- .travis.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index af637db1..580ced0b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -36,25 +36,25 @@ matrix: include: # Debops tests. - # 2.8.0; 3.6 -> 2.7 + # 2.8.3; 3.6 -> 2.7 - python: "3.6" - env: MODE=debops_common VER=2.8.0 + env: MODE=debops_common VER=2.8.3 # 2.4.6.0; 2.7 -> 2.7 - python: "2.7" env: MODE=debops_common VER=2.4.6.0 # Sanity check against vanilla Ansible. One job suffices. - python: "2.7" - env: MODE=ansible VER=2.8.0 DISTROS=debian STRATEGY=linear + env: MODE=ansible VER=2.8.3 DISTROS=debian STRATEGY=linear # ansible_mitogen tests. 
- # 2.8.0 -> {debian, centos6, centos7} + # 2.8.3 -> {debian, centos6, centos7} - python: "3.6" - env: MODE=ansible VER=2.8.0 - # 2.8.0 -> {debian, centos6, centos7} + env: MODE=ansible VER=2.8.3 + # 2.8.3 -> {debian, centos6, centos7} - python: "2.7" - env: MODE=ansible VER=2.8.0 + env: MODE=ansible VER=2.8.3 # 2.4.6.0 -> {debian, centos6, centos7} - python: "3.6" From 6f7941d616c8cecf931599b2e6744586c9f584b2 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 31 Jul 2019 01:49:44 +0100 Subject: [PATCH 215/383] scripts: import affin.sh --- scripts/affin.sh | 4 ++++ 1 file changed, 4 insertions(+) create mode 100755 scripts/affin.sh diff --git a/scripts/affin.sh b/scripts/affin.sh new file mode 100755 index 00000000..34c03d8b --- /dev/null +++ b/scripts/affin.sh @@ -0,0 +1,4 @@ +# show process affinities for running ansible-playbook +who="$1" +[ ! "$who" ] && who=ansible-playbook +for i in $(pgrep -f "$who") ; do taskset -c -p $i ; done|cut -d: -f2|sort -n |uniq -c From 45a3014fd4b5926d00b60cde241fca92158ca867 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 31 Jul 2019 02:01:11 +0100 Subject: [PATCH 216/383] parent: decode logged stdout as UTF-8. --- mitogen/parent.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mitogen/parent.py b/mitogen/parent.py index 4b2ac388..90926aa4 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -1137,7 +1137,8 @@ class BootstrapProtocol(RegexProtocol): return False def on_unrecognized_line_received(self, line): - LOG.debug('%s: stdout: %s', self.stream.name, line) + LOG.debug('%s: stdout: %s', self.stream.name, + line.decode('utf-8', 'replace')) PATTERNS = [ (re.compile(EC0_MARKER), _on_ec0_received), From 7accc092dee0a73c9c4c9efbee2ed3c116b86dfa Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 29 Jul 2019 19:49:16 +0100 Subject: [PATCH 217/383] docs: changelog tweaks --- docs/changelog.rst | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index dbc33d02..7e670804 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -32,10 +32,10 @@ Enhancements are not yet handled. * The ``MITOGEN_CPU_COUNT`` environment variable shards the connection - multiplexer into per-CPU worker processes. This improves throughput for large - runs especially involving file transfer, and is a prerequisite to future - in-process SSH support. To match the behaviour of older releases, only one - multiplexer is started by default. + multiplexer into per-CPU workers. This improves throughput for large runs + especially involving file transfer, and is a prerequisite for future + in-process SSH support. One multiplexer starts by default, to match existing + behaviour. * `#419 `_, `#470 `_, file descriptor usage @@ -56,8 +56,8 @@ Enhancements some hot paths, and locks that must be taken are held for less time. -Fixes -^^^^^ +Mitogen for Ansible +^^^^^^^^^^^^^^^^^^^ * `#363 `_: fix an obscure race matching *Permission denied* errors from some versions of ``su`` running on @@ -93,14 +93,14 @@ Fixes Core Library ~~~~~~~~~~~~ -* Logs are more readable, and many :func:`repr` strings are more descriptive. - The old pseudo-function-call format is slowly migrating to human-readable - output where possible. For example, *"Stream(ssh:123).connect()"* might - be written *"connecting to ssh:123"*. +* Log readability is improving, and many :func:`repr` strings are more + descriptive. 
The old pseudo-function-call format is slowly migrating to + human-readable output where possible. For example, + *"Stream(ssh:123).connect()"* might be written *"connecting to ssh:123"*. * :func:`bytearray` was removed from the list of supported serialization types. It was never portable between Python versions, unused, and never made much - sense to support as a wire type. + sense to support. * `#170 `_: to improve subprocess management and asynchronous connect, a :class:`mitogen.parent.TimerList` @@ -123,13 +123,12 @@ Core Library Python. * `#256 `_, - -`#419 `_: most :func:`os.dup` was - eliminated, along with almost all manual file descriptor management. - Descriptors are trapped in :func:`os.fdopen` objects when they are created, - ensuring a leaked object will close itself, and ensuring every descriptor is - fused to a `closed` flag, preventing historical bugs where a double close - could destroy descriptors belonging to unrelated streams. + `#419 `_: most :func:`os.dup` use + was eliminated, along with almost all manual file descriptor management. + Descriptors are trapped in :func:`os.fdopen` objects at creation, ensuring a + leaked object will close itself, and ensuring every descriptor is fused to a + `closed` flag, preventing historical bugs where a double close could destroy + descriptors belonging to unrelated streams. * `a5536c35 `_: avoid quadratic buffer management when logging lines received from a child's redirected From c4bcfa4c491d9ee9a4e995bd3693cf2592694a74 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 30 Jul 2019 20:33:11 +0100 Subject: [PATCH 218/383] docs: migrate email list --- docs/ansible_detailed.rst | 27 +++++++++++++++++++++++---- docs/conf.py | 1 + 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/docs/ansible_detailed.rst b/docs/ansible_detailed.rst index e5bd0669..7fa2228d 100644 --- a/docs/ansible_detailed.rst +++ b/docs/ansible_detailed.rst @@ -79,7 +79,9 @@ Installation .. raw:: html -
    + + + Releases occur frequently and often include important fixes. Subscribe to the mitogen-announce @@ -87,13 +89,14 @@ Installation
    + + @@ -1375,3 +1378,19 @@ Despite the small margin for optimization, Mitogen still manages **6.2x less bandwidth and 1.8x less time**. .. image:: images/ansible/pcaps/costapp-uk-india.svg + + +.. raw:: html + + + diff --git a/docs/conf.py b/docs/conf.py index 11ef822a..2ee63aa8 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -9,6 +9,7 @@ author = u'Network Genomics' copyright = u'2019, Network Genomics' exclude_patterns = ['_build'] extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinxcontrib.programoutput'] +html_show_copyright = False html_show_sourcelink = False html_show_sphinx = False html_sidebars = {'**': ['globaltoc.html', 'github.html']} From edeaa3c6ee67e4e6dc1adac49f4245a5a650ee92 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 30 Jul 2019 20:36:05 +0100 Subject: [PATCH 219/383] docs: remove old list link. --- docs/ansible_detailed.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/ansible_detailed.rst b/docs/ansible_detailed.rst index 7fa2228d..4e8dcd27 100644 --- a/docs/ansible_detailed.rst +++ b/docs/ansible_detailed.rst @@ -83,9 +83,7 @@ Installation Releases occur frequently and often include important fixes. Subscribe - to the
    mitogen-announce - mailing list be notified of new releases. + to the mitogen-announce list to stay updated.
    @@ -97,6 +95,8 @@ Installation
    From acab26d796f90e64183e9742f50bf1e363446957 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 31 Jul 2019 04:09:48 +0100 Subject: [PATCH 220/383] ansible: improve process.py docs --- ansible_mitogen/process.py | 43 +++++++++++++++++++++++++------------- 1 file changed, 29 insertions(+), 14 deletions(-) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index ba4d0687..1adc464f 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -277,10 +277,25 @@ def get_cpu_count(default=None): class Binding(object): + """ + Represent a bound connection for a particular inventory hostname. When + operating in sharded mode, the actual MuxProcess implementing a connection + varies according to the target machine. Depending on the particular + implementation, this class represents a binding to the correct MuxProcess. + """ def get_child_service_context(self): """ Return the :class:`mitogen.core.Context` to which children should - direct ContextService requests, or :data:`None` for the local process. + direct requests for services such as FileService, or :data:`None` for + the local process. + + This can be different from :meth:`get_service_context` where MuxProcess + and WorkerProcess are combined, and it is discovered a task is + delegated after being assigned to its initial worker for the original + un-delegated hostname. In that case, connection management and + expensive services like file transfer must be implemented by the + MuxProcess connected to the target, rather than routed to the + MuxProcess responsible for executing the task. """ raise NotImplementedError() @@ -366,8 +381,8 @@ class ClassicWorkerModel(WorkerModel): def _listener_for_name(self, name): """ - Given a connection stack, return the UNIX listener that should be used - to communicate with it. This is a simple hash of the inventory name. + Given an inventory hostname, return the UNIX listener that should + communicate with it. This is a simple hash of the inventory name. """ if len(self._muxes) == 1: return self._muxes[0].path @@ -401,10 +416,9 @@ class ClassicWorkerModel(WorkerModel): This is an :mod:`atexit` handler installed in the top-level process. Shut the write end of `sock`, causing the receive side of the socket in - every worker process to wake up with a 0-byte reads, and causing their - main threads to wake up and initiate shutdown. After shutting the - socket down, wait for a 0-byte read from the read end, which will occur - after the last child closes the descriptor on exit. + every worker process to return 0-byte reads, and causing their main + threads to wake and initiate shutdown. After shutting the socket down, + wait on each child to finish exiting. This is done using :mod:`atexit` since Ansible lacks any better hook to run code during exit, and unless some synchronization exists with @@ -429,12 +443,13 @@ class ClassicWorkerModel(WorkerModel): def _initialize(self): """ - Arrange for classic process model connection multiplexer child - processes to be started, if they are not already running. + Arrange for classic model multiplexers to be started, if they are not + already running. - The parent process picks a UNIX socket path the child will use prior to - fork, creates a socketpair used essentially as a semaphore, then blocks - waiting for the child to indicate the UNIX socket is ready for use. 
+ The parent process picks a UNIX socket path each child will use prior + to fork, creates a socketpair used essentially as a semaphore, then + blocks waiting for the child to indicate the UNIX socket is ready for + use. :param bool _init_logging: For testing, if :data:`False`, don't initialize logging. @@ -533,8 +548,8 @@ class MuxProcess(object): Implement a subprocess forked from the Ansible top-level, as a safe place to contain the Mitogen IO multiplexer thread, keeping its use of the logging package (and the logging package's heavy use of locks) far away - from the clutches of os.fork(), which is used continuously by the - multiprocessing package in the top-level process. + from os.fork(), which is used continuously by the multiprocessing package + in the top-level process. The problem with running the multiplexer in that process is that should the multiplexer thread be in the process of emitting a log entry (and holding From eeb7150f2485a4ff465b124de02e512ea8410251 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 31 Jul 2019 04:20:04 +0100 Subject: [PATCH 221/383] issue #549: increase open file limit automatically if possible While catching every possible case where "open file limit exceeded" is not possible, we can at least increase the soft limit to the available hard limit without any user effort. Do this in Ansible top-level process, even though we probably only need it in the MuxProcess. It seems there is no reason this could hurt --- ansible_mitogen/process.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index 1adc464f..85ced27f 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -32,6 +32,7 @@ import errno import logging import multiprocessing import os +import resource import signal import socket import sys @@ -237,9 +238,27 @@ def _setup_responder(responder): ) +def increase_open_file_limit(): + """ + #549: in order to reduce the possibility of hitting an open files limit, + increase :data:`resource.RLIMIT_NOFILE` from its soft limit to its hard + limit, if they differ. + + It is common that a low soft limit is configured by default, where the hard + limit is much higher. + """ + soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) + if soft < hard: + LOG.debug('raising soft open file limit from %d to %d', soft, hard) + resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard)) + else: + LOG.debug('cannot increase open file limit; existing limit is %d', hard) + + def common_setup(enable_affinity=True, _init_logging=True): save_pid('controller') ansible_mitogen.logging.set_process_name('top') + if enable_affinity: ansible_mitogen.affinity.policy.assign_controller() @@ -255,6 +274,7 @@ def common_setup(enable_affinity=True, _init_logging=True): mitogen.core.enable_profiling() MuxProcess.cls_original_env = dict(os.environ) + increase_open_file_limit() def get_cpu_count(default=None): From d0aee1ef3c256cf54234cbd6ff7564a87ff2b006 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 31 Jul 2019 04:27:05 +0100 Subject: [PATCH 222/383] issue #549: docs: update Changelog --- docs/changelog.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index 7e670804..2f2a496f 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -63,6 +63,12 @@ Mitogen for Ansible matching *Permission denied* errors from some versions of ``su`` running on heavily loaded machines. 
+* `#549 `_: the open file descriptor + limit for the Ansible process is increased to the available hard limit. It is + common for distributions to ship with a much higher hard limit than their + default soft limit, allowing *"too many open files"* errors to be avoided + more often in large runs without user configuration. + * `#578 `_: the extension could crash while rendering an error message, due to an incorrect format string. @@ -140,6 +146,7 @@ Thanks! Mitogen would not be possible without the support of users. A huge thanks for bug reports, testing, features and fixes in this release contributed by +`Andreas Hubert `_. `Anton Markelov `_, `Nigel Metheringham `_, `Orion Poplawski `_, From c80fddd4879a07a351e22dacc8b559a7d458904a Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 31 Jul 2019 11:41:29 +0100 Subject: [PATCH 223/383] [linear2]: merge fallout flaggged by LGTM --- ansible_mitogen/mixins.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/ansible_mitogen/mixins.py b/ansible_mitogen/mixins.py index 890467fd..3a5d4c93 100644 --- a/ansible_mitogen/mixins.py +++ b/ansible_mitogen/mixins.py @@ -182,14 +182,6 @@ class ActionModuleMixin(ansible.plugins.action.ActionBase): ) ) - def _generate_tmp_path(self): - return os.path.join( - self._connection.get_good_temp_dir(), - 'ansible_mitogen_action_%016x' % ( - random.getrandbits(8*8), - ) - ) - def _make_tmp_path(self, remote_user=None): """ Create a temporary subdirectory as a child of the temporary directory From 75d179e4b91b0e3e2daef3a4efbbb43b206c93c6 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 31 Jul 2019 11:46:23 +0100 Subject: [PATCH 224/383] remove unused imports flagged by lgtm --- ansible_mitogen/connection.py | 2 -- ansible_mitogen/process.py | 2 -- mitogen/core.py | 2 +- mitogen/doas.py | 1 - mitogen/profiler.py | 3 --- mitogen/ssh.py | 1 - mitogen/su.py | 1 - mitogen/sudo.py | 1 - scripts/release-notes.py | 1 - 9 files changed, 1 insertion(+), 13 deletions(-) diff --git a/ansible_mitogen/connection.py b/ansible_mitogen/connection.py index b6cb2237..ef21f606 100644 --- a/ansible_mitogen/connection.py +++ b/ansible_mitogen/connection.py @@ -37,8 +37,6 @@ import stat import sys import time -import jinja2.runtime -from ansible.module_utils import six import ansible.constants as C import ansible.errors import ansible.plugins.connection diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index 85ced27f..1f7741f6 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -28,7 +28,6 @@ from __future__ import absolute_import import atexit -import errno import logging import multiprocessing import os @@ -36,7 +35,6 @@ import resource import signal import socket import sys -import time try: import faulthandler diff --git a/mitogen/core.py b/mitogen/core.py index 843f5a04..be5d7e9c 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -3109,7 +3109,7 @@ class Broker(object): if sys.version_info < (2, 6): # import_module() is used to avoid dep scanner. 
os_fork = import_module('mitogen.os_fork') - mitogen.os_fork._notice_broker_or_pool(self) + os_fork._notice_broker_or_pool(self) def start_receive(self, stream): """ diff --git a/mitogen/doas.py b/mitogen/doas.py index f3bf4c90..5b212b9b 100644 --- a/mitogen/doas.py +++ b/mitogen/doas.py @@ -33,7 +33,6 @@ import re import mitogen.core import mitogen.parent -from mitogen.core import b LOG = logging.getLogger(__name__) diff --git a/mitogen/profiler.py b/mitogen/profiler.py index 51da9b18..bbf6086a 100644 --- a/mitogen/profiler.py +++ b/mitogen/profiler.py @@ -57,15 +57,12 @@ Example: from __future__ import print_function import os import pstats -import cProfile import shutil import subprocess import sys import tempfile import time -import mitogen.core - def try_merge(stats, path): try: diff --git a/mitogen/ssh.py b/mitogen/ssh.py index b4c247c1..b3fb7bac 100644 --- a/mitogen/ssh.py +++ b/mitogen/ssh.py @@ -42,7 +42,6 @@ except ImportError: import mitogen.parent from mitogen.core import b -from mitogen.core import bytes_partition try: any diff --git a/mitogen/su.py b/mitogen/su.py index 5e9a237a..9813632a 100644 --- a/mitogen/su.py +++ b/mitogen/su.py @@ -33,7 +33,6 @@ import re import mitogen.core import mitogen.parent -from mitogen.core import b try: any diff --git a/mitogen/sudo.py b/mitogen/sudo.py index 725e6aff..bcb2e7be 100644 --- a/mitogen/sudo.py +++ b/mitogen/sudo.py @@ -35,7 +35,6 @@ import re import mitogen.core import mitogen.parent -from mitogen.core import b LOG = logging.getLogger(__name__) diff --git a/scripts/release-notes.py b/scripts/release-notes.py index 1444d7a3..08b60c0c 100644 --- a/scripts/release-notes.py +++ b/scripts/release-notes.py @@ -3,7 +3,6 @@ # Generate the fragment used to make email release announcements # usage: release-notes.py 0.2.6 -import os import sys import urllib import lxml.html From 2fede49078092743badedd3d2a6c7f0b68ad69f6 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 31 Jul 2019 11:49:53 +0100 Subject: [PATCH 225/383] service: clean up log messages, especially at shutdown --- mitogen/service.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/mitogen/service.py b/mitogen/service.py index 49bd2281..b9332780 100644 --- a/mitogen/service.py +++ b/mitogen/service.py @@ -29,6 +29,7 @@ # !mitogen: minify_safe import grp +import logging import os import os.path import pprint @@ -41,7 +42,6 @@ import time import mitogen.core import mitogen.select from mitogen.core import b -from mitogen.core import LOG from mitogen.core import str_rpartition try: @@ -54,6 +54,8 @@ except NameError: return True +LOG = logging.getLogger(__name__) + DEFAULT_POOL_SIZE = 16 _pool = None _pool_pid = None @@ -501,7 +503,7 @@ class Pool(object): self._py_24_25_compat() self._threads = [] for x in range(size): - name = 'mitogen.service.Pool.%x.worker-%d' % (id(self), x,) + name = 'mitogen.Pool.%04x.%d' % (id(self) & 0xffff, x,) thread = threading.Thread( name=name, target=mitogen.core._profile_hook, @@ -608,9 +610,11 @@ class Pool(object): while not self.closed: try: event = self._select.get_event() - except (mitogen.core.ChannelError, mitogen.core.LatchError): - e = sys.exc_info()[1] - LOG.debug('%r: channel or latch closed, exitting: %s', self, e) + except mitogen.core.LatchError: + LOG.debug('%r: graceful exit', self) + return + except mitogen.core.ChannelError: + LOG.debug('%r: exitting: %s', self, sys.exc_info()[1]) return func = self._func_by_source[event.source] @@ -629,8 +633,8 @@ class Pool(object): def __repr__(self): th = 
threading.currentThread() - return 'mitogen.service.Pool(%#x, size=%d, th=%r)' % ( - id(self), + return 'Pool(%04x, size=%d, th=%r)' % ( + id(self) & 0xffff, len(self._threads), th.getName(), ) From b75198396d4950fe558c0d6d5a91b64432ca801b Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 31 Jul 2019 13:18:18 +0100 Subject: [PATCH 226/383] Fix unit_Test.ClientTest following 108015aa2279133181e0c7e643e6ee8353d8cfa8 Closes #604 --- tests/unix_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unix_test.py b/tests/unix_test.py index cb8c08f5..ba1ba152 100644 --- a/tests/unix_test.py +++ b/tests/unix_test.py @@ -90,7 +90,7 @@ class ClientTest(testlib.TestCase): while True: try: return mitogen.unix.connect(path) - except socket.error: + except mitogen.unix.ConnectError: if time.time() > timeout: raise time.sleep(0.1) From cd2689af0a2a58f748c6f1ac4606fac292063428 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 31 Jul 2019 13:44:34 +0100 Subject: [PATCH 227/383] issue #603: Revert "ci: update to Ansible 2.8.3" This reverts commit 558ebfa914f9b27681d6cfd7249a2532a2fc470f. --- .travis.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index 580ced0b..af637db1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -36,25 +36,25 @@ matrix: include: # Debops tests. - # 2.8.3; 3.6 -> 2.7 + # 2.8.0; 3.6 -> 2.7 - python: "3.6" - env: MODE=debops_common VER=2.8.3 + env: MODE=debops_common VER=2.8.0 # 2.4.6.0; 2.7 -> 2.7 - python: "2.7" env: MODE=debops_common VER=2.4.6.0 # Sanity check against vanilla Ansible. One job suffices. - python: "2.7" - env: MODE=ansible VER=2.8.3 DISTROS=debian STRATEGY=linear + env: MODE=ansible VER=2.8.0 DISTROS=debian STRATEGY=linear # ansible_mitogen tests. - # 2.8.3 -> {debian, centos6, centos7} + # 2.8.0 -> {debian, centos6, centos7} - python: "3.6" - env: MODE=ansible VER=2.8.3 - # 2.8.3 -> {debian, centos6, centos7} + env: MODE=ansible VER=2.8.0 + # 2.8.0 -> {debian, centos6, centos7} - python: "2.7" - env: MODE=ansible VER=2.8.3 + env: MODE=ansible VER=2.8.0 # 2.4.6.0 -> {debian, centos6, centos7} - python: "3.6" From 9bb3dac4504e347ec84715c5684b2e7c74f33c00 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 31 Jul 2019 09:33:43 +0100 Subject: [PATCH 228/383] docs: break out install_app.py and fix API use. --- docs/changelog.rst | 8 ++++++-- docs/index.rst | 31 +------------------------------ examples/install_app.py | 28 ++++++++++++++++++++++++++++ 3 files changed, 35 insertions(+), 32 deletions(-) create mode 100644 examples/install_app.py diff --git a/docs/changelog.rst b/docs/changelog.rst index 2f2a496f..23489766 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -136,6 +136,9 @@ Core Library `closed` flag, preventing historical bugs where a double close could destroy descriptors belonging to unrelated streams. +* `#606 `_: fix example code on the + documentation front page. + * `a5536c35 `_: avoid quadratic buffer management when logging lines received from a child's redirected standard IO. @@ -150,11 +153,12 @@ bug reports, testing, features and fixes in this release contributed by `Anton Markelov `_, `Nigel Metheringham `_, `Orion Poplawski `_, +`Pieter Voet `_, +`Stefane Fermigier `_, `Szabó Dániel Ernő `_, `Ulrich Schreiner `_, `Yuki Nishida `_, -`@ghp-rr `_, -`Pieter Voet `_, and +`@ghp-rr `_, and `@rizzly `_. 
diff --git a/docs/index.rst b/docs/index.rst index 6b5deb71..c11a1d27 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -329,36 +329,7 @@ External contexts are configured such that any attempt to execute a function from the main Python script will correctly cause that script to be imported as usual into the slave process. -.. code-block:: python - - #!/usr/bin/env python - """ - Install our application on a remote machine. - - Usage: - install_app.py - - Where: - Hostname to install to. - """ - import os - import sys - - import mitogen - - - def install_app(): - os.system('tar zxvf my_app.tar.gz') - - - @mitogen.main() - def main(broker): - if len(sys.argv) != 2: - print(__doc__) - sys.exit(1) - - context = mitogen.ssh.connect(broker, sys.argv[1]) - context.call(install_app) +.. literalinclude:: ../examples/install_app.py Event-driven IO diff --git a/examples/install_app.py b/examples/install_app.py new file mode 100644 index 00000000..566353a8 --- /dev/null +++ b/examples/install_app.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python +""" +Install our application on a remote machine. + +Usage: + install_app.py + +Where: + Hostname to install to. +""" +import os +import sys + +import mitogen + + +def install_app(): + os.system('tar zxvf my_app.tar.gz') + + +@mitogen.main() +def main(router): + if len(sys.argv) != 2: + print(__doc__) + sys.exit(1) + + context = router.ssh(hostname=sys.argv[1]) + context.call(install_app) From aa15975ad464ffcc4d4e2d267f6171b6c552e6ec Mon Sep 17 00:00:00 2001 From: Stefane Fermigier Date: Wed, 31 Jul 2019 10:42:18 +0200 Subject: [PATCH 229/383] Fix for sample in doc `log_to_file()` expects a filename. --- docs/getting_started.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 945e243f..12056c55 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -265,7 +265,7 @@ We must therefore continue by writing our code as a script:: print(local.call(my_first_function)) if __name__ == '__main__': - mitogen.utils.log_to_file(main) + mitogen.utils.log_to_file("mitogen.log") mitogen.utils.run_with_router(main) Let's try running it: From e87e41e69ebbe214994dddd2798e0b7d52203964 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 1 Aug 2019 11:00:50 +0100 Subject: [PATCH 230/383] docs: finished Changelog locking note --- docs/changelog.rst | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index 23489766..aab49020 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -49,11 +49,12 @@ Enhancements * `#419 `_: 2 network round-trips were removed from early connection setup. -* `? `_, - `? `_, - `? `_, - `? `_: locking is avoided in - some hot paths, and locks that must be taken are held for less time. +* `d6faff06 `_, + `807cbef9 `_, + `e93762b3 `_, + `50bfe4c7 `_: locking is + avoided on hot paths, and some locks are released earlier, before waking a + thread that must immediately take the same lock. 
Mitogen for Ansible From e4321f81a0e5d4f2fe616ebd2cd96ba1abc39c0e Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 1 Aug 2019 12:11:26 +0100 Subject: [PATCH 231/383] issue #600: /etc/environment may be non-ASCII in an unknown encoding --- ansible_mitogen/runner.py | 51 +++++++++----- tests/ansible/tests/env_file_watcher_test.py | 74 ++++++++++++++++++++ tests/image_prep/_container_setup.yml | 5 ++ 3 files changed, 111 insertions(+), 19 deletions(-) create mode 100644 tests/ansible/tests/env_file_watcher_test.py diff --git a/ansible_mitogen/runner.py b/ansible_mitogen/runner.py index 843ffe19..ce5d335e 100644 --- a/ansible_mitogen/runner.py +++ b/ansible_mitogen/runner.py @@ -52,7 +52,6 @@ import mitogen.core import ansible_mitogen.target # TODO: circular import from mitogen.core import b from mitogen.core import bytes_partition -from mitogen.core import str_partition from mitogen.core import str_rpartition from mitogen.core import to_text @@ -104,12 +103,20 @@ iteritems = getattr(dict, 'iteritems', dict.items) LOG = logging.getLogger(__name__) -if mitogen.core.PY3: - shlex_split = shlex.split -else: - def shlex_split(s, comments=False): - return [mitogen.core.to_text(token) - for token in shlex.split(str(s), comments=comments)] +def shlex_split_b(s): + """ + Use shlex.split() to split characters in some single-byte encoding, without + knowing what that encoding is. The input is bytes, the output is a list of + bytes. + """ + assert isinstance(s, mitogen.core.BytesType) + if mitogen.core.PY3: + return [ + t.encode('latin1') + for t in shlex.split(s.decode('latin1'), comments=True) + ] + + return [t for t in shlex.split(s, comments=True)] class TempFileWatcher(object): @@ -165,13 +172,19 @@ class EnvironmentFileWatcher(object): A more robust future approach may simply be to arrange for the persistent interpreter to restart when a change is detected. """ + # We know nothing about the character set of /etc/environment or the + # process environment. + environ = getattr(os, 'environb', os.environ) + def __init__(self, path): self.path = os.path.expanduser(path) #: Inode data at time of last check. self._st = self._stat() #: List of inherited keys appearing to originated from this file. 
- self._keys = [key for key, value in self._load() - if value == os.environ.get(key)] + self._keys = [ + key for key, value in self._load() + if value == self.environ.get(key) + ] LOG.debug('%r installed; existing keys: %r', self, self._keys) def __repr__(self): @@ -185,7 +198,7 @@ class EnvironmentFileWatcher(object): def _load(self): try: - fp = codecs.open(self.path, 'r', encoding='utf-8') + fp = open(self.path, 'rb') try: return list(self._parse(fp)) finally: @@ -199,36 +212,36 @@ class EnvironmentFileWatcher(object): """ for line in fp: # ' #export foo=some var ' -> ['#export', 'foo=some var '] - bits = shlex_split(line, comments=True) - if (not bits) or bits[0].startswith('#'): + bits = shlex_split_b(line) + if (not bits) or bits[0].startswith(b('#')): continue - if bits[0] == u'export': + if bits[0] == b('export'): bits.pop(0) - key, sep, value = str_partition(u' '.join(bits), u'=') + key, sep, value = bytes_partition(b(' ').join(bits), b('=')) if key and sep: yield key, value def _on_file_changed(self): LOG.debug('%r: file changed, reloading', self) for key, value in self._load(): - if key in os.environ: + if key in self.environ: LOG.debug('%r: existing key %r=%r exists, not setting %r', - self, key, os.environ[key], value) + self, key, self.environ[key], value) else: LOG.debug('%r: setting key %r to %r', self, key, value) self._keys.append(key) - os.environ[key] = value + self.environ[key] = value def _remove_existing(self): """ When a change is detected, remove keys that existed in the old file. """ for key in self._keys: - if key in os.environ: + if key in self.environ: LOG.debug('%r: removing old key %r', self, key) - del os.environ[key] + del self.environ[key] self._keys = [] def check(self): diff --git a/tests/ansible/tests/env_file_watcher_test.py b/tests/ansible/tests/env_file_watcher_test.py new file mode 100644 index 00000000..8803a6c2 --- /dev/null +++ b/tests/ansible/tests/env_file_watcher_test.py @@ -0,0 +1,74 @@ +import os +import sys +import tempfile + +import mock +import unittest2 +import testlib + +from mitogen.core import b +import ansible_mitogen.runner + + +klass = ansible_mitogen.runner.EnvironmentFileWatcher +environb = getattr(os, 'environb', os.environ) + + +class WatcherTest(testlib.TestCase): + def setUp(self): + self.original_env = environb.copy() + self.tf = tempfile.NamedTemporaryFile() + + def tearDown(self): + self.tf.close() + environb.clear() + environb.update(self.original_env) + + def test_missing_file(self): + # just ensure it doesn't crash + watcher = klass('/nonexistent') + watcher.check() + + def test_file_becomes_missing(self): + # just ensure it doesn't crash + watcher = klass(self.tf.name) + watcher.check() + os.unlink(self.tf.name) + watcher.check() + open(self.tf.name,'wb').close() + + def test_key_deleted(self): + environb[b('SOMEKEY')] = b('123') + self.tf.write(b('SOMEKEY=123\n')) + self.tf.flush() + watcher = klass(self.tf.name) + self.tf.seek(0) + self.tf.truncate(0) + watcher.check() + self.assertTrue(b('SOMEKEY') not in environb) + + def test_key_added(self): + watcher = klass(self.tf.name) + self.tf.write(b('SOMEKEY=123\n')) + self.tf.flush() + watcher.check() + self.assertEqual(environb[b('SOMEKEY')], b('123')) + + def test_key_shadowed_nuchange(self): + environb[b('SOMEKEY')] = b('234') + self.tf.write(b('SOMEKEY=123\n')) + self.tf.flush() + watcher = klass(self.tf.name) + watcher.check() + self.assertEqual(environb[b('SOMEKEY')], b('234')) + + def test_binary_key_added(self): + watcher = klass(self.tf.name) + 
self.tf.write(b('SOMEKEY=\xff\xff\xff\n')) + self.tf.flush() + watcher.check() + self.assertEqual(environb[b('SOMEKEY')], b('\xff\xff\xff')) + + +if __name__ == '__main__': + unittest2.main() diff --git a/tests/image_prep/_container_setup.yml b/tests/image_prep/_container_setup.yml index 2a30f49d..65e898a1 100644 --- a/tests/image_prep/_container_setup.yml +++ b/tests/image_prep/_container_setup.yml @@ -91,6 +91,11 @@ shell: locale-gen when: distro == "Debian" + - name: Write Unicode into /etc/environment + copy: + dest: /etc/environment + content: "UNICODE_SNOWMAN=\u2603\n" + - name: Install prebuilt 'doas' binary unarchive: dest: / From cf2b8f1c241661873cff39ed35c2df235dfb4844 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 1 Aug 2019 12:13:58 +0100 Subject: [PATCH 232/383] docs: update Changelog. --- docs/changelog.rst | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index aab49020..2e37e87f 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -89,6 +89,11 @@ Mitogen for Ansible ``mitogen_ssh_keepalive_count`` variables, and the default timeout for an SSH server has been increased from `15*3` seconds to `30*10` seconds. +* `#600 `_: functionality to reflect + changes to ``/etc/environment`` in the running interpreter did not account + for Unicode file contents. Now the file may contain data in any single byte + encoding. + * `7ae926b3 `_: the ``lineinfile`` module began leaking writable temporary file descriptors since Ansible 2.7.0. When ``lineinfile`` was used to create or modify a script, and @@ -159,8 +164,9 @@ bug reports, testing, features and fixes in this release contributed by `Szabó Dániel Ernő `_, `Ulrich Schreiner `_, `Yuki Nishida `_, -`@ghp-rr `_, and -`@rizzly `_. +`@ghp-rr `_, +`@rizzly `_, and +`@tho86 `_. v0.2.7 (2019-05-19) From 619f4dee07346454d754b069dba48f9134317932 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Fri, 2 Aug 2019 04:05:19 +0100 Subject: [PATCH 233/383] [linear2] merge fallout: restore optimization from #491 / 7b129e857 --- ansible_mitogen/connection.py | 9 --------- ansible_mitogen/process.py | 11 ++++++++++- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/ansible_mitogen/connection.py b/ansible_mitogen/connection.py index ef21f606..532ef042 100644 --- a/ansible_mitogen/connection.py +++ b/ansible_mitogen/connection.py @@ -405,15 +405,6 @@ CONNECTION_METHOD = { } -class Broker(mitogen.master.Broker): - """ - WorkerProcess maintains at most 2 file descriptors, therefore does not need - the exuberant syscall expense of EpollPoller, so override it and restore - the poll() poller. - """ - poller_class = mitogen.core.Poller - - class CallChain(mitogen.parent.CallChain): """ Extend :class:`mitogen.parent.CallChain` to additionally cause the diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index 1f7741f6..b09b54eb 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -294,6 +294,15 @@ def get_cpu_count(default=None): return cpu_count +class Broker(mitogen.master.Broker): + """ + WorkerProcess maintains at most 2 file descriptors, therefore does not need + the exuberant syscall expense of EpollPoller, so override it and restore + the poll() poller. + """ + poller_class = mitogen.core.Poller + + class Binding(object): """ Represent a bound connection for a particular inventory hostname. When @@ -530,7 +539,7 @@ class ClassicWorkerModel(WorkerModel): See WorkerModel.get_binding(). 
""" if self.broker is None: - self.broker = mitogen.master.Broker() + self.broker = Broker() path = self._listener_for_name(inventory_name) if path != self.listener_path: From 6b4bcf4fe01fa13113584209af88987b028febb5 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Fri, 2 Aug 2019 04:05:28 +0100 Subject: [PATCH 234/383] ansible: remove cutpasted docstring --- ansible_mitogen/parsing.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/ansible_mitogen/parsing.py b/ansible_mitogen/parsing.py index 525e60cf..27fca7cd 100644 --- a/ansible_mitogen/parsing.py +++ b/ansible_mitogen/parsing.py @@ -26,14 +26,6 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. -""" -Classes to detect each case from [0] and prepare arguments necessary for the -corresponding Runner class within the target, including preloading requisite -files/modules known missing. - -[0] "Ansible Module Architecture", developing_program_flow_modules.html -""" - from __future__ import absolute_import from __future__ import unicode_literals From 33bceb6eb4468ec2df005a4525c403a5f6f8ba11 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Fri, 2 Aug 2019 04:05:34 +0100 Subject: [PATCH 235/383] issue #602: recover task_vars for synchronize and meta: reset_connection --- ansible_mitogen/connection.py | 51 ++++++++++++++++++++++++++++++++--- 1 file changed, 47 insertions(+), 4 deletions(-) diff --git a/ansible_mitogen/connection.py b/ansible_mitogen/connection.py index 532ef042..c08df611 100644 --- a/ansible_mitogen/connection.py +++ b/ansible_mitogen/connection.py @@ -46,6 +46,7 @@ import mitogen.core import mitogen.fork import mitogen.utils +import ansible_mitogen.mixins import ansible_mitogen.parsing import ansible_mitogen.process import ansible_mitogen.services @@ -533,6 +534,47 @@ class Connection(ansible.plugins.connection.ConnectionBase): self.loader_basedir = loader_basedir self._mitogen_reset(mode='put') + def _get_task_vars(self): + """ + More information is needed than normally provided to an Ansible + connection. For proxied connections, intermediary configuration must + be inferred, and for any connection the configured Python interpreter + must be known. + + There is no clean way to access this information that would not deviate + from the running Ansible version. The least invasive method known is to + reuse the running task's task_vars dict. + + This method walks the stack to find task_vars of the Action plugin's + run(), or if no Action is present, from Strategy's _execute_meta(), as + in the case of 'meta: reset_connection'. The stack is walked in + addition to subclassing Action.run()/on_action_run(), as it is possible + for new connections to be constructed in addition to the preconstructed + connection passed into any running action. + """ + f = sys._getframe() + + while f: + if f.f_code.co_name == 'run': + f_locals = f.f_locals + f_self = f_locals.get('self') + if isinstance(f_self, ansible_mitogen.mixins.ActionModuleMixin): + task_vars = f_locals.get('task_vars') + if task_vars: + LOG.debug('recovered task_vars from Action') + return task_vars + elif f.f_code.co_name == '_execute_meta': + f_all_vars = f.f_locals.get('all_vars') + if isinstance(f_all_vars, dict): + LOG.debug('recovered task_vars from meta:') + return f_all_vars + + f = f.f_back + + LOG.warning('could not recover task_vars. This means some connection ' + 'settings may erroneously be reset to their defaults. 
' + 'Please report a bug if you encounter this message.') + def get_task_var(self, key, default=None): """ Fetch the value of a task variable related to connection configuration, @@ -544,12 +586,13 @@ class Connection(ansible.plugins.connection.ConnectionBase): does not make sense to extract connection-related configuration for the delegated-to machine from them. """ - if self._task_vars: + task_vars = self._task_vars or self._get_task_vars() + if task_vars is not None: if self.delegate_to_hostname is None: - if key in self._task_vars: - return self._task_vars[key] + if key in task_vars: + return task_vars[key] else: - delegated_vars = self._task_vars['ansible_delegated_vars'] + delegated_vars = task_vars['ansible_delegated_vars'] if self.delegate_to_hostname in delegated_vars: task_vars = delegated_vars[self.delegate_to_hostname] if key in task_vars: From 7629ff9e6da96d70c9bd12140d1ced53727791d4 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Fri, 2 Aug 2019 04:10:34 +0100 Subject: [PATCH 236/383] issue #602: update Changelog --- docs/changelog.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index 2e37e87f..267bc0db 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -94,6 +94,11 @@ Mitogen for Ansible for Unicode file contents. Now the file may contain data in any single byte encoding. +* `#602 `_: connection configuration + is more accurately inferred for `meta: reset_connection`, the `synchronize` + module, and for any other action plug-ins that establish new connections of + their own. + * `7ae926b3 `_: the ``lineinfile`` module began leaking writable temporary file descriptors since Ansible 2.7.0. When ``lineinfile`` was used to create or modify a script, and @@ -157,6 +162,7 @@ Mitogen would not be possible without the support of users. A huge thanks for bug reports, testing, features and fixes in this release contributed by `Andreas Hubert `_. `Anton Markelov `_, +`Dave Cottlehuber `_, `Nigel Metheringham `_, `Orion Poplawski `_, `Pieter Voet `_, From 395b03a77d79e5edfae2a44f0086ec01a1654d51 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Fri, 2 Aug 2019 22:30:58 +0100 Subject: [PATCH 237/383] issue #549: fix setrlimit() crash and hard-wire OS X default OS X advertised unlimited, but really it means kern.maxfilesperproc. --- ansible_mitogen/process.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index b09b54eb..f9c59e50 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -246,11 +246,22 @@ def increase_open_file_limit(): limit is much higher. """ soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) - if soft < hard: - LOG.debug('raising soft open file limit from %d to %d', soft, hard) - resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard)) - else: - LOG.debug('cannot increase open file limit; existing limit is %d', hard) + LOG.debug('inherited open file limits: soft=%d hard=%d', soft, hard) + if soft >= hard: + LOG.debug('max open files already set to hard limit: %d', hard) + return + + # OS X is limited by kern.maxfilesperproc sysctl, rather than the + # advertised unlimited hard RLIMIT_NOFILE. Just hard-wire known defaults + # for that sysctl, to avoid the mess of querying it. 
+ for value in (hard, 10240): + try: + resource.setrlimit(resource.RLIMIT_NOFILE, (value, hard)) + LOG.debug('raised soft open file limit from %d to %d', soft, value) + break + except ValueError as e: + LOG.debug('could not raise soft open file limit from %d to %d: %s', + soft, value, e) def common_setup(enable_affinity=True, _init_logging=True): From dcfd733e6f8d36b0c21c6a59992dacae4b052c1c Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 3 Aug 2019 05:12:33 +0100 Subject: [PATCH 238/383] issue #549: remove Linux-specific assumptions from create_child_test Some stat fields are implementation-specific, little value even testing them on Linux --- tests/create_child_test.py | 61 ++++++++++++++++++++++++++++++-------- 1 file changed, 48 insertions(+), 13 deletions(-) diff --git a/tests/create_child_test.py b/tests/create_child_test.py index 21591fe8..e7357059 100644 --- a/tests/create_child_test.py +++ b/tests/create_child_test.py @@ -15,7 +15,46 @@ from mitogen.core import b import testlib +def _osx_mode(n): + """ + fstat(2) on UNIX sockets on OSX return different mode bits depending on + which side is being inspected, so zero those bits for comparison. + """ + if sys.platform == 'darwin': + n &= ~int('0777', 8) + return n + + def run_fd_check(func, fd, mode, on_start=None): + """ + Run ``tests/data/fd_check.py`` using `func`. The subprocess writes + information about the `fd` it received to a temporary file. + + :param func: + Function like `create_child()` used to start child. + :param fd: + FD child should read/write from, and report information about. + :param mode: + "read" or "write", depending on whether the FD is readable or writeable + from the perspective of the child. If "read", `on_start()` should write + "TEST" to it and the child reads "TEST" from it, otherwise `on_start()` + should read "TEST" from it and the child writes "TEST" to it. + :param on_start: + Function invoked as `on_start(proc)` + :returns: + Tuple of `(proc, info, on_start_result)`, where: + + * `proc`: the :class:`mitogen.parent.Process` returned by `func`. + * `info`: dict containing information returned by the child: + * `buf`: "TEST" that was read in "read" mode + * `flags`: :attr:`fcntl.F_GETFL` flags for `fd` + * `st_mode`: st_mode field from :func:`os.fstat` + * `st_dev`: st_dev field from :func:`os.fstat` + * `st_ino`: st_ino field from :func:`os.fstat` + * `ttyname`: :func:`os.ttyname` invoked on `fd`. + * `controlling_tty`: :func:os.ttyname` invoked on ``/dev/tty`` + from within the child. 
+ """ tf = tempfile.NamedTemporaryFile() args = [ sys.executable, @@ -61,7 +100,7 @@ class StdinSockMixin(object): st = os.fstat(proc.stdin.fileno()) self.assertTrue(stat.S_ISSOCK(st.st_mode)) self.assertEquals(st.st_dev, info['st_dev']) - self.assertEquals(st.st_mode, info['st_mode']) + self.assertEquals(st.st_mode, _osx_mode(info['st_mode'])) flags = fcntl.fcntl(proc.stdin.fileno(), fcntl.F_GETFL) self.assertTrue(flags & os.O_RDWR) self.assertTrue(info['buf'], 'TEST') @@ -75,7 +114,7 @@ class StdoutSockMixin(object): st = os.fstat(proc.stdout.fileno()) self.assertTrue(stat.S_ISSOCK(st.st_mode)) self.assertEquals(st.st_dev, info['st_dev']) - self.assertEquals(st.st_mode, info['st_mode']) + self.assertEquals(st.st_mode, _osx_mode(info['st_mode'])) flags = fcntl.fcntl(proc.stdout.fileno(), fcntl.F_GETFL) self.assertTrue(flags & os.O_RDWR) self.assertTrue(buf, 'TEST') @@ -105,8 +144,6 @@ class CreateChildMergedTest(StdinSockMixin, StdoutSockMixin, self.assertEquals(None, proc.stderr) st = os.fstat(proc.stdout.fileno()) self.assertTrue(stat.S_ISSOCK(st.st_mode)) - self.assertEquals(st.st_dev, info['st_dev']) - self.assertEquals(st.st_mode, info['st_mode']) flags = fcntl.fcntl(proc.stdout.fileno(), fcntl.F_GETFL) self.assertTrue(flags & os.O_RDWR) self.assertTrue(buf, 'TEST') @@ -145,13 +182,11 @@ class TtyCreateChildTest(testlib.TestCase): self.assertTrue(isinstance(info['ttyname'], mitogen.core.UnicodeType)) - os.ttyname(proc.stdin.fileno()) # crashes if not TTY + self.assertTrue(os.isatty(proc.stdin.fileno())) flags = fcntl.fcntl(proc.stdin.fileno(), fcntl.F_GETFL) self.assertTrue(flags & os.O_RDWR) self.assertTrue(info['flags'] & os.O_RDWR) - - self.assertNotEquals(st.st_dev, info['st_dev']) self.assertTrue(info['buf'], 'TEST') def test_stdout(self): @@ -164,17 +199,18 @@ class TtyCreateChildTest(testlib.TestCase): self.assertTrue(isinstance(info['ttyname'], mitogen.core.UnicodeType)) - os.ttyname(proc.stdout.fileno()) # crashes if wrong + self.assertTrue(os.isatty(proc.stdout.fileno())) flags = fcntl.fcntl(proc.stdout.fileno(), fcntl.F_GETFL) self.assertTrue(flags & os.O_RDWR) self.assertTrue(info['flags'] & os.O_RDWR) - self.assertNotEquals(st.st_dev, info['st_dev']) self.assertTrue(flags & os.O_RDWR) self.assertTrue(buf, 'TEST') def test_stderr(self): + # proc.stderr is None in the parent since there is no separate stderr + # stream. In the child, FD 2/stderr is connected to the TTY. 
proc, info, buf = run_fd_check(self.func, 2, 'write', lambda proc: wait_read(proc.stdout, 4)) @@ -184,13 +220,12 @@ class TtyCreateChildTest(testlib.TestCase): self.assertTrue(isinstance(info['ttyname'], mitogen.core.UnicodeType)) - os.ttyname(proc.stdin.fileno()) # crashes if not TTY + self.assertTrue(os.isatty(proc.stdout.fileno())) flags = fcntl.fcntl(proc.stdout.fileno(), fcntl.F_GETFL) self.assertTrue(flags & os.O_RDWR) self.assertTrue(info['flags'] & os.O_RDWR) - self.assertNotEquals(st.st_dev, info['st_dev']) self.assertTrue(flags & os.O_RDWR) self.assertTrue(buf, 'TEST') @@ -222,6 +257,7 @@ class TtyCreateChildTest(testlib.TestCase): class StderrDiagTtyMixin(object): def test_stderr(self): + # proc.stderr is the PTY master, FD 2 in the child is the PTY slave proc, info, buf = run_fd_check(self.func, 2, 'write', lambda proc: wait_read(proc.stderr, 4)) @@ -231,13 +267,12 @@ class StderrDiagTtyMixin(object): self.assertTrue(isinstance(info['ttyname'], mitogen.core.UnicodeType)) - os.ttyname(proc.stderr.fileno()) # crashes if wrong + self.assertTrue(os.isatty(proc.stderr.fileno())) flags = fcntl.fcntl(proc.stderr.fileno(), fcntl.F_GETFL) self.assertTrue(flags & os.O_RDWR) self.assertTrue(info['flags'] & os.O_RDWR) - self.assertNotEquals(st.st_dev, info['st_dev']) self.assertTrue(flags & os.O_RDWR) self.assertTrue(buf, 'TEST') From 19b259a45f204eb70edfdb2f69d7d43fbeb7596f Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 3 Aug 2019 05:48:47 +0100 Subject: [PATCH 239/383] issue #549: skip Docker tests if Docker is unavailable --- tests/testlib.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/testlib.py b/tests/testlib.py index 673d5ca6..d4387c54 100644 --- a/tests/testlib.py +++ b/tests/testlib.py @@ -427,6 +427,11 @@ class DockerizedSshDaemon(object): raise ValueError('could not find SSH port in: %r' % (s,)) def start_container(self): + try: + subprocess__check_output(['docker']) + except Exception: + raise unittest2.SkipTest('Docker binary is unavailable') + self.container_name = 'mitogen-test-%08x' % (random.getrandbits(64),) args = [ 'docker', From cebccf6f4123e38e6c6aabd4392be10dea02a8e5 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 3 Aug 2019 07:13:12 +0100 Subject: [PATCH 240/383] issue #549 / [stream-refactor]: fix close/poller deregister crash on OSX See source comment. --- mitogen/core.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index be5d7e9c..14fddc0f 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -1628,12 +1628,17 @@ class Protocol(object): self.stream.on_disconnect(broker) def on_disconnect(self, broker): + # Normally both sides an FD, so it is important that tranmit_side is + # deregistered from Poller before closing the receive side, as pollers + # like epoll and kqueue unregister all events on FD close, causing + # subsequent attempt to unregister the transmit side to fail. 
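The ordering constraint described in the comment above is easy to demonstrate. A minimal sketch, Linux-only since it uses ``select.epoll`` (kqueue on BSD/OS X drops registrations on close in the same way): closing a descriptor implicitly removes it from the poller, so deregistration must happen first::

    import select
    import socket

    s1, s2 = socket.socketpair()
    poller = select.epoll()
    poller.register(s1.fileno(), select.EPOLLIN)

    fd = s1.fileno()
    s1.close()                   # kernel silently removes fd from the set
    try:
        poller.unregister(fd)    # too late: raises OSError (EBADF)
    except OSError as e:
        print('unregister after close failed:', e)

    # The safe order is the one used by on_disconnect(): deregister the
    # transmit side first, then close the receive side.
    poller.close()
    s2.close()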
LOG.debug('%r: disconnecting', self) - if self.stream.receive_side: - broker.stop_receive(self.stream) - self.stream.receive_side.close() + broker.stop_receive(self.stream) if self.stream.transmit_side: broker._stop_transmit(self.stream) + + self.stream.receive_side.close() + if self.stream.transmit_side: self.stream.transmit_side.close() From 3ceac2c9eda4e3ea7f430ad692cdb8967e4dd633 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 3 Aug 2019 18:17:24 +0100 Subject: [PATCH 241/383] [linear2] simplify ClassicWorkerModel and fix repeat initialization "self.initialized = False" slipped in a few days ago, on second thoughts that flag is not needed at all, by simply rearranging ClassicWorkerModel to have a regular constructor. This hierarchy is still squishy, it needs more love. Remaining MuxProcess class attributes should eliminated. --- ansible_mitogen/process.py | 114 ++++++++++++++++++------------------- 1 file changed, 54 insertions(+), 60 deletions(-) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index f9c59e50..b8f10ec2 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -399,6 +399,18 @@ class ClassicBinding(Binding): class ClassicWorkerModel(WorkerModel): + #: In the top-level process, this references one end of a socketpair(), + #: whose other end child MuxProcesses block reading from to determine when + #: the master process dies. When the top-level exits abnormally, or + #: normally but where :func:`_on_process_exit` has been called, this socket + #: will be closed, causing all the children to wake. + parent_sock = None + + #: In the mux process, this is the other end of :attr:`cls_parent_sock`. + #: The main thread blocks on a read from it until :attr:`cls_parent_sock` + #: is closed. + child_sock = None + #: mitogen.master.Router for this worker. router = None @@ -414,8 +426,34 @@ class ClassicWorkerModel(WorkerModel): parent = None def __init__(self, _init_logging=True): - self._init_logging = _init_logging - self.initialized = False + """ + Arrange for classic model multiplexers to be started, if they are not + already running. + + The parent process picks a UNIX socket path each child will use prior + to fork, creates a socketpair used essentially as a semaphore, then + blocks waiting for the child to indicate the UNIX socket is ready for + use. + + :param bool _init_logging: + For testing, if :data:`False`, don't initialize logging. + """ + common_setup(_init_logging=_init_logging) + + self.parent_sock, self.child_sock = socket.socketpair() + mitogen.core.set_cloexec(self.parent_sock.fileno()) + mitogen.core.set_cloexec(self.child_sock.fileno()) + + self._muxes = [ + MuxProcess(self, index) + for index in range(get_cpu_count(default=1)) + ] + for mux in self._muxes: + mux.start() + + atexit.register(self._on_process_exit, self.parent_sock) + self.child_sock.close() + self.child_sock = None def _listener_for_name(self, name): """ @@ -449,7 +487,7 @@ class ClassicWorkerModel(WorkerModel): self.listener_path = path - def on_process_exit(self, sock): + def _on_process_exit(self, sock): """ This is an :mod:`atexit` handler installed in the top-level process. @@ -467,7 +505,7 @@ class ClassicWorkerModel(WorkerModel): sock.shutdown(socket.SHUT_WR) except socket.error: # Already closed. This is possible when tests are running. 
- LOG.debug('on_process_exit: ignoring duplicate call') + LOG.debug('_on_process_exit: ignoring duplicate call') return mitogen.core.io_op(sock.recv, 1) @@ -479,46 +517,15 @@ class ClassicWorkerModel(WorkerModel): LOG.debug('mux %d PID %d %s', mux.index, mux.pid, mitogen.parent.returncode_to_str(status)) - def _initialize(self): - """ - Arrange for classic model multiplexers to be started, if they are not - already running. - - The parent process picks a UNIX socket path each child will use prior - to fork, creates a socketpair used essentially as a semaphore, then - blocks waiting for the child to indicate the UNIX socket is ready for - use. - - :param bool _init_logging: - For testing, if :data:`False`, don't initialize logging. - """ - common_setup(_init_logging=self._init_logging) - - MuxProcess.cls_parent_sock, \ - MuxProcess.cls_child_sock = socket.socketpair() - mitogen.core.set_cloexec(MuxProcess.cls_parent_sock.fileno()) - mitogen.core.set_cloexec(MuxProcess.cls_child_sock.fileno()) - - self._muxes = [ - MuxProcess(index) - for index in range(get_cpu_count(default=1)) - ] - for mux in self._muxes: - mux.start() - - atexit.register(self.on_process_exit, MuxProcess.cls_parent_sock) - MuxProcess.cls_child_sock.close() - MuxProcess.cls_child_sock = None - def _test_reset(self): """ Used to clean up in unit tests. """ # TODO: split this up a bit. global _classic_worker_model - assert MuxProcess.cls_parent_sock is not None - MuxProcess.cls_parent_sock.close() - MuxProcess.cls_parent_sock = None + assert self.parent_sock is not None + self.parent_sock.close() + self.parent_sock = None self.listener_path = None self.router = None self.parent = None @@ -536,9 +543,6 @@ class ClassicWorkerModel(WorkerModel): """ See WorkerModel.on_strategy_start(). """ - if not self.initialized: - self._initialize() - self.initialized = True def on_strategy_complete(self): """ @@ -567,7 +571,6 @@ class ClassicWorkerModel(WorkerModel): self.router = None self.broker = None self.listener_path = None - self.initialized = False # #420: Ansible executes "meta" actions in the top-level process, # meaning "reset_connection" will cause :class:`mitogen.core.Latch` FDs @@ -598,25 +601,16 @@ class MuxProcess(object): See https://bugs.python.org/issue6721 for a thorough description of the class of problems this worker is intended to avoid. """ - #: In the top-level process, this references one end of a socketpair(), - #: whose other end child MuxProcesses block reading from to determine when - #: the master process dies. When the top-level exits abnormally, or - #: normally but where :func:`on_process_exit` has been called, this socket - #: will be closed, causing all the children to wake. - cls_parent_sock = None - - #: In the mux process, this is the other end of :attr:`cls_parent_sock`. - #: The main thread blocks on a read from it until :attr:`cls_parent_sock` - #: is closed. - cls_child_sock = None - #: A copy of :data:`os.environ` at the time the multiplexer process was #: started. It's used by mitogen_local.py to find changes made to the #: top-level environment (e.g. vars plugins -- issue #297) that must be #: applied to locally executed commands and modules. cls_original_env = None - def __init__(self, index): + def __init__(self, model, index): + #: :class:`ClassicWorkerModel` instance we were created by. + self.model = model + #: MuxProcess CPU index. self.index = index #: Individual path of this process. 
self.path = mitogen.unix.make_socket_path() @@ -625,7 +619,7 @@ class MuxProcess(object): self.pid = os.fork() if self.pid: # Wait for child to boot before continuing. - mitogen.core.io_op(MuxProcess.cls_parent_sock.recv, 1) + mitogen.core.io_op(self.model.parent_sock.recv, 1) return ansible_mitogen.logging.set_process_name('mux:' + str(self.index)) @@ -635,8 +629,8 @@ class MuxProcess(object): os.path.basename(self.path), )) - MuxProcess.cls_parent_sock.close() - MuxProcess.cls_parent_sock = None + self.model.parent_sock.close() + self.model.parent_sock = None try: try: self.worker_main() @@ -660,9 +654,9 @@ class MuxProcess(object): try: # Let the parent know our listening socket is ready. - mitogen.core.io_op(self.cls_child_sock.send, b('1')) + mitogen.core.io_op(self.model.child_sock.send, b('1')) # Block until the socket is closed, which happens on parent exit. - mitogen.core.io_op(self.cls_child_sock.recv, 1) + mitogen.core.io_op(self.model.child_sock.recv, 1) finally: self.broker.shutdown() self.broker.join() From d408caccf504410d3e113863dafce977bb4e8291 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 3 Aug 2019 18:32:12 +0100 Subject: [PATCH 242/383] issue #573: guard against a forked top-level Ansible process See comment. --- ansible_mitogen/process.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index b8f10ec2..99e07aee 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -438,6 +438,12 @@ class ClassicWorkerModel(WorkerModel): :param bool _init_logging: For testing, if :data:`False`, don't initialize logging. """ + # #573: The process ID that installed the :mod:`atexit` handler. If + # some unknown Ansible plug-in forks the Ansible top-level process and + # later performs a graceful Python exit, it may try to wait for child + # PIDs it never owned, causing a crash. We want to avoid that. + self._pid = os.getpid() + common_setup(_init_logging=_init_logging) self.parent_sock, self.child_sock = socket.socketpair() @@ -451,7 +457,7 @@ class ClassicWorkerModel(WorkerModel): for mux in self._muxes: mux.start() - atexit.register(self._on_process_exit, self.parent_sock) + atexit.register(self._on_process_exit) self.child_sock.close() self.child_sock = None @@ -487,7 +493,7 @@ class ClassicWorkerModel(WorkerModel): self.listener_path = path - def _on_process_exit(self, sock): + def _on_process_exit(self): """ This is an :mod:`atexit` handler installed in the top-level process. @@ -501,15 +507,18 @@ class ClassicWorkerModel(WorkerModel): MuxProcess, debug logs may appear on the user's terminal *after* the prompt has been printed. """ + if self._pid != os.getpid(): + return + try: - sock.shutdown(socket.SHUT_WR) + self.parent_sock.shutdown(socket.SHUT_WR) except socket.error: # Already closed. This is possible when tests are running. 
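The ``self._pid`` check introduced above is a general recipe for fork-safe :mod:`atexit` handlers. A stand-alone sketch of the same idea (the names are illustrative, not part of ansible_mitogen)::

    import atexit
    import os

    class Cleanup(object):
        def __init__(self):
            # Remember which process registered the handler.
            self._owner_pid = os.getpid()
            atexit.register(self._at_exit)

        def _at_exit(self):
            # A forked copy inherits this registration; bail out there,
            # otherwise we might wait on children we never created.
            if os.getpid() != self._owner_pid:
                return
            print('cleaning up in the owning process only')

    Cleanup()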
LOG.debug('_on_process_exit: ignoring duplicate call') return - mitogen.core.io_op(sock.recv, 1) - sock.close() + mitogen.core.io_op(self.parent_sock.recv, 1) + self.parent_sock.close() for mux in self._muxes: _, status = os.waitpid(mux.pid, 0) From edde251d58ae9a2a4b9d80f143b0a285f60957b1 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 3 Aug 2019 21:40:57 +0100 Subject: [PATCH 243/383] issue #549: ansible: reduce risk by capping RLIM_INFINITY --- ansible_mitogen/process.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index 99e07aee..0a183e6a 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -246,7 +246,14 @@ def increase_open_file_limit(): limit is much higher. """ soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) - LOG.debug('inherited open file limits: soft=%d hard=%d', soft, hard) + if hard == resource.RLIM_INFINITY: + hard_s = '(infinity)' + # cap in case of O(RLIMIT_NOFILE) algorithm in some subprocess. + hard = 524288 + else: + hard_s = str(hard) + + LOG.debug('inherited open file limits: soft=%d hard=%s', soft, hard_s) if soft >= hard: LOG.debug('max open files already set to hard limit: %d', hard) return From aa06b960f5641903a64668dceb537f9d8b8bf6fc Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 02:33:02 +0100 Subject: [PATCH 244/383] parent: define Connection behaviour during Broker.shutdown() - Connection attempt fails reliably, and it fails with CancelledError - Add new mitogen.core.unlisten() - Add test. --- docs/api.rst | 4 ++ docs/signals.rst | 1 + mitogen/core.py | 25 +++++++++--- mitogen/parent.py | 30 +++++++++++++- tests/connection_test.py | 48 +++++++++++++++++++++++ tests/data/broker_shutdown_test_python.py | 9 +++++ 6 files changed, 109 insertions(+), 8 deletions(-) create mode 100644 tests/connection_test.py create mode 100755 tests/data/broker_shutdown_test_python.py diff --git a/docs/api.rst b/docs/api.rst index 917fc627..2557806e 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -662,3 +662,7 @@ Exceptions .. autoclass:: LatchError .. autoclass:: StreamError .. autoclass:: TimeoutError + +.. currentmodule:: mitogen.parent +.. autoclass:: EofError +.. autoclass:: CancelledError diff --git a/docs/signals.rst b/docs/signals.rst index 19533bb1..9447e529 100644 --- a/docs/signals.rst +++ b/docs/signals.rst @@ -22,6 +22,7 @@ Functions .. currentmodule:: mitogen.core .. autofunction:: listen +.. autofunction:: unlisten .. autofunction:: fire diff --git a/mitogen/core.py b/mitogen/core.py index 14fddc0f..4dc3e7ef 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -406,22 +406,35 @@ def has_parent_authority(msg, _stream=None): msg.auth_id in mitogen.parent_ids) +def _signals(obj, signal): + return ( + obj.__dict__ + .setdefault('_signals', {}) + .setdefault(signal, []) + ) + + def listen(obj, name, func): """ - Arrange for `func(*args, **kwargs)` to be invoked when the named signal is + Arrange for `func()` to be invoked when signal `name` is fired on `obj`. + """ + _signals(obj, name).append(func) + + +def unlisten(obj, name, func): + """ + Remove `func` from the list of functions invoked when signal `name` is fired by `obj`. """ - signals = vars(obj).setdefault('_signals', {}) - signals.setdefault(name, []).append(func) + _signals(obj, name).remove(func) def fire(obj, name, *args, **kwargs): """ Arrange for `func(*args, **kwargs)` to be invoked for every function - registered for the named signal on `obj`. 
+ registered for signal `name` on `obj`. """ - signals = vars(obj).get('_signals', {}) - for func in signals.get(name, ()): + for func in _signals(obj, name): func(*args, **kwargs) diff --git a/mitogen/parent.py b/mitogen/parent.py index 90926aa4..f8c5d95f 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -91,6 +91,10 @@ try: except ValueError: SC_OPEN_MAX = 1024 +BROKER_SHUTDOWN_MSG = ( + 'Connection cancelled because the associated Broker began to shut down.' +) + OPENPTY_MSG = ( "Failed to create a PTY: %s. It is likely the maximum number of PTYs has " "been reached. Consider increasing the 'kern.tty.ptmx_max' sysctl on OS " @@ -737,13 +741,21 @@ def returncode_to_str(n): class EofError(mitogen.core.StreamError): """ - Raised by :func:`iter_read` and :func:`write_all` when EOF is detected by - the child process. + Raised by :class:`Connection` when an empty read is detected from the + remote process before bootstrap completes. """ # inherits from StreamError to maintain compatibility. pass +class CancelledError(mitogen.core.StreamError): + """ + Raised by :class:`Connection` when :meth:`mitogen.core.Broker.shutdown` is + called before bootstrap completes. + """ + pass + + class Argv(object): """ Wrapper to defer argv formatting when debug logging is disabled. @@ -1427,6 +1439,8 @@ class Connection(object): def _complete_connection(self): self.timer.cancel() if not self.exception: + mitogen.core.unlisten(self._router.broker, 'shutdown', + self._on_broker_shutdown) self._router.register(self.context, self.stdio_stream) self.stdio_stream.set_protocol( MitogenProtocol( @@ -1445,6 +1459,8 @@ class Connection(object): if self.exception is None: self._adorn_eof_error(exc) self.exception = exc + mitogen.core.unlisten(self._router.broker, 'shutdown', + self._on_broker_shutdown) for stream in self.stdio_stream, self.stderr_stream: if stream and not stream.receive_side.closed: stream.on_disconnect(self._router.broker) @@ -1492,6 +1508,13 @@ class Connection(object): )) self.proc._async_reap(self, self._router) + def _on_broker_shutdown(self): + """ + Respond to broker.shutdown() being called by failing the connection + attempt. + """ + self._fail_connection(CancelledError(BROKER_SHUTDOWN_MSG)) + def _start_timer(self): self.timer = self._router.broker.timers.schedule( when=self.options.connect_deadline, @@ -1535,6 +1558,9 @@ class Connection(object): return stream def _async_connect(self): + mitogen.core.listen(self._router.broker, 'shutdown', + self._on_broker_shutdown) + self._start_timer() self.stdio_stream = self._setup_stdio_stream() if self.context.name is None: diff --git a/tests/connection_test.py b/tests/connection_test.py new file mode 100644 index 00000000..a66428e4 --- /dev/null +++ b/tests/connection_test.py @@ -0,0 +1,48 @@ + +import time +import tempfile +import sys +import os +import threading + +import unittest2 +import testlib + +import mitogen.core +import mitogen.parent + + +class ConnectionTest(testlib.RouterMixin, testlib.TestCase): + def test_broker_shutdown_while_connect_in_progress(self): + # if Broker.shutdown() is called while a connection attempt is in + # progress, the connection should be torn down. 
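The new :func:`mitogen.core.unlisten` is the counterpart to :func:`listen`, and the Connection changes above use the pair to subscribe to the broker's ``shutdown`` signal only for the duration of a connection attempt. A rough usage sketch::

    import mitogen.core
    import mitogen.master

    broker = mitogen.master.Broker()

    def on_shutdown():
        # Runs on the broker thread once Broker.shutdown() begins; a pending
        # connection attempt uses this to fail itself with CancelledError.
        print('broker is shutting down')

    mitogen.core.listen(broker, 'shutdown', on_shutdown)
    # ... once the work completes, stop listening so the callback cannot
    # fire against an already-finished operation:
    mitogen.core.unlisten(broker, 'shutdown', on_shutdown)

    broker.shutdown()
    broker.join()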
+ + path = tempfile.mktemp(prefix='broker_shutdown_sem_') + open(path, 'wb').close() + + os.environ['BROKER_SHUTDOWN_SEMAPHORE'] = path + result = [] + + def thread(): + python_path = testlib.data_path('broker_shutdown_test_python.py') + try: + result.append(self.router.local(python_path=python_path)) + except Exception: + result.append(sys.exc_info()[1]) + + th = threading.Thread(target=thread) + th.start() + + while os.path.exists(path): + time.sleep(0.05) + + self.broker.shutdown() + th.join() + + exc, = result + self.assertTrue(isinstance(exc, mitogen.parent.CancelledError)) + self.assertEquals(mitogen.parent.BROKER_SHUTDOWN_MSG, exc.args[0]) + + +if __name__ == '__main__': + unittest2.main() diff --git a/tests/data/broker_shutdown_test_python.py b/tests/data/broker_shutdown_test_python.py new file mode 100755 index 00000000..f1e20c16 --- /dev/null +++ b/tests/data/broker_shutdown_test_python.py @@ -0,0 +1,9 @@ +#!/usr/bin/env python +# Delete a semaphore file to allow the main thread to wake up, then sleep for +# 30 seconds before starting the real Python. +import os +import time +import sys +os.unlink(os.environ['BROKER_SHUTDOWN_SEMAPHORE']) +time.sleep(30) +os.execl(sys.executable, sys.executable, *sys.argv[1:]) From f4ca926b21b26b998e3a005e6e1529806c6340a6 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 02:40:56 +0100 Subject: [PATCH 245/383] ansible: cleanup various docstrings --- ansible_mitogen/process.py | 21 ++++++++++----- ansible_mitogen/strategy.py | 53 +++++++++++++++++++++---------------- 2 files changed, 44 insertions(+), 30 deletions(-) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index 0a183e6a..4d3a9b5e 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -359,6 +359,11 @@ class Binding(object): class WorkerModel(object): + """ + Interface used by StrategyMixin to manage various Mitogen services, by + default running in one or more connection multiplexer subprocesses spawned + off the top-level Ansible process. + """ def on_strategy_start(self): """ Called prior to strategy start in the top-level process. Responsible @@ -375,6 +380,11 @@ class WorkerModel(object): raise NotImplementedError() def get_binding(self, inventory_name): + """ + Return a :class:`Binding` to access Mitogen services for + `inventory_name`. Usually called from worker processes, but may also be + called from top-level process to handle "meta: reset_connection". + """ raise NotImplementedError() @@ -434,13 +444,10 @@ class ClassicWorkerModel(WorkerModel): def __init__(self, _init_logging=True): """ - Arrange for classic model multiplexers to be started, if they are not - already running. - - The parent process picks a UNIX socket path each child will use prior - to fork, creates a socketpair used essentially as a semaphore, then - blocks waiting for the child to indicate the UNIX socket is ready for - use. + Arrange for classic model multiplexers to be started. The parent choses + UNIX socket paths each child will use prior to fork, creates a + socketpair used essentially as a semaphore, then blocks waiting for the + child to indicate the UNIX socket is ready for use. :param bool _init_logging: For testing, if :data:`False`, don't initialize logging. diff --git a/ansible_mitogen/strategy.py b/ansible_mitogen/strategy.py index a1315cd9..755b9113 100644 --- a/ansible_mitogen/strategy.py +++ b/ansible_mitogen/strategy.py @@ -124,7 +124,7 @@ def wrap_action_loader__get(name, *args, **kwargs): the use of shell fragments wherever possible. 
This is used instead of static subclassing as it generalizes to third party - action modules outside the Ansible tree. + action plugins outside the Ansible tree. """ get_kwargs = {'class_only': True} if ansible.__version__ >= '2.8': @@ -141,8 +141,8 @@ def wrap_action_loader__get(name, *args, **kwargs): def wrap_connection_loader__get(name, *args, **kwargs): """ - While the strategy is active, rewrite connection_loader.get() calls for - some transports into requests for a compatible Mitogen transport. + While a Mitogen strategy is active, rewrite connection_loader.get() calls + for some transports into requests for a compatible Mitogen transport. """ if name in ('buildah', 'docker', 'kubectl', 'jail', 'local', 'lxc', 'lxd', 'machinectl', 'setns', 'ssh'): @@ -152,8 +152,10 @@ def wrap_connection_loader__get(name, *args, **kwargs): def wrap_worker__run(self): """ - While the strategy is active, rewrite connection_loader.get() calls for - some transports into requests for a compatible Mitogen transport. + While a Mitogen strategy is active, trap WorkerProcess.run() calls and use + the opportunity to set the worker's name in the process list and log + output, activate profiling if requested, and bind the worker to a specific + CPU. """ if setproctitle: setproctitle.setproctitle('worker:%s task:%s' % ( @@ -225,10 +227,14 @@ class AnsibleWrappers(object): class StrategyMixin(object): """ - This mix-in enhances any built-in strategy by arranging for various Mitogen - services to be initialized in the Ansible top-level process, and for worker - processes to grow support for using those top-level services to communicate - with and execute modules on remote hosts. + This mix-in enhances any built-in strategy by arranging for an appropriate + WorkerModel instance to be constructed as necessary, or for the existing + one to be reused. + + The WorkerModel in turn arranges for a connection multiplexer to be started + somewhere (by default in an external process), and for WorkerProcesses to + grow support for using those top-level services to communicate with remote + hosts. Mitogen: @@ -246,18 +252,19 @@ class StrategyMixin(object): services, review the Standard Handles section of the How It Works guide in the documentation. - A ContextService is installed as a message handler in the master - process and run on a private thread. It is responsible for accepting - requests to establish new SSH connections from worker processes, and - ensuring precisely one connection exists and is reused for subsequent - playbook steps. The service presently runs in a single thread, so to - begin with, new SSH connections are serialized. + A ContextService is installed as a message handler in the connection + mutliplexer subprocess and run on a private thread. It is responsible + for accepting requests to establish new SSH connections from worker + processes, and ensuring precisely one connection exists and is reused + for subsequent playbook steps. The service presently runs in a single + thread, so to begin with, new SSH connections are serialized. Finally a mitogen.unix listener is created through which WorkerProcess - can establish a connection back into the master process, in order to - avail of ContextService. A UNIX listener socket is necessary as there - is no more sane mechanism to arrange for IPC between the Router in the - master process, and the corresponding Router in the worker process. + can establish a connection back into the connection multiplexer, in + order to avail of ContextService. 
A UNIX listener socket is necessary + as there is no more sane mechanism to arrange for IPC between the + Router in the connection multiplexer, and the corresponding Router in + the worker process. Ansible: @@ -265,10 +272,10 @@ class StrategyMixin(object): connection and action plug-ins. For connection plug-ins, if the desired method is "local" or "ssh", it - is redirected to the "mitogen" connection plug-in. That plug-in - implements communication via a UNIX socket connection to the top-level - Ansible process, and uses ContextService running in the top-level - process to actually establish and manage the connection. + is redirected to one of the "mitogen_*" connection plug-ins. That + plug-in implements communication via a UNIX socket connection to the + connection multiplexer process, and uses ContextService running there + to establish a persistent connection to the target. For action plug-ins, the original class is looked up as usual, but a new subclass is created dynamically in order to mix-in From 711f46aee91e47a40e075b977578ca3c9e9d08c6 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 02:41:05 +0100 Subject: [PATCH 246/383] tests: move tty_create_child tests together --- tests/create_child_test.py | 27 +++++++++++++++++++++++++++ tests/parent_test.py | 31 ------------------------------- 2 files changed, 27 insertions(+), 31 deletions(-) diff --git a/tests/create_child_test.py b/tests/create_child_test.py index e7357059..1c2f526a 100644 --- a/tests/create_child_test.py +++ b/tests/create_child_test.py @@ -173,6 +173,33 @@ class CreateChildStderrPipeTest(StdinSockMixin, StdoutSockMixin, class TtyCreateChildTest(testlib.TestCase): func = staticmethod(mitogen.parent.tty_create_child) + def test_dev_tty_open_succeeds(self): + # In the early days of UNIX, a process that lacked a controlling TTY + # would acquire one simply by opening an existing TTY. Linux and OS X + # continue to follow this behaviour, however at least FreeBSD moved to + # requiring an explicit ioctl(). Linux supports it, but we don't yet + # use it there and anyway the behaviour will never change, so no point + # in fixing things that aren't broken. Below we test that + # getpass-loving apps like sudo and ssh get our slave PTY when they + # attempt to open /dev/tty, which is what they both do on attempting to + # read a password. + tf = tempfile.NamedTemporaryFile() + try: + proc = self.func([ + 'bash', '-c', 'exec 2>%s; echo hi > /dev/tty' % (tf.name,) + ]) + deadline = time.time() + 5.0 + mitogen.core.set_block(proc.stdin.fileno()) + # read(3) below due to https://bugs.python.org/issue37696 + self.assertEquals(mitogen.core.b('hi\n'), proc.stdin.read(3)) + waited_pid, status = os.waitpid(proc.pid, 0) + self.assertEquals(proc.pid, waited_pid) + self.assertEquals(0, status) + self.assertEquals(mitogen.core.b(''), tf.read()) + proc.stdout.close() + finally: + tf.close() + def test_stdin(self): proc, info, _ = run_fd_check(self.func, 0, 'read', lambda proc: proc.stdin.write(b('TEST'))) diff --git a/tests/parent_test.py b/tests/parent_test.py index 7ac482c5..b314d472 100644 --- a/tests/parent_test.py +++ b/tests/parent_test.py @@ -211,37 +211,6 @@ class OpenPtyTest(testlib.TestCase): slave_fp.close() -class TtyCreateChildTest(testlib.TestCase): - func = staticmethod(mitogen.parent.tty_create_child) - - def test_dev_tty_open_succeeds(self): - # In the early days of UNIX, a process that lacked a controlling TTY - # would acquire one simply by opening an existing TTY. 
Linux and OS X - # continue to follow this behaviour, however at least FreeBSD moved to - # requiring an explicit ioctl(). Linux supports it, but we don't yet - # use it there and anyway the behaviour will never change, so no point - # in fixing things that aren't broken. Below we test that - # getpass-loving apps like sudo and ssh get our slave PTY when they - # attempt to open /dev/tty, which is what they both do on attempting to - # read a password. - tf = tempfile.NamedTemporaryFile() - try: - proc = self.func([ - 'bash', '-c', 'exec 2>%s; echo hi > /dev/tty' % (tf.name,) - ]) - deadline = time.time() + 5.0 - mitogen.core.set_block(proc.stdin.fileno()) - # read(3) below due to https://bugs.python.org/issue37696 - self.assertEquals(mitogen.core.b('hi\n'), proc.stdin.read(3)) - waited_pid, status = os.waitpid(proc.pid, 0) - self.assertEquals(proc.pid, waited_pid) - self.assertEquals(0, status) - self.assertEquals(mitogen.core.b(''), tf.read()) - proc.stdout.close() - finally: - tf.close() - - class DisconnectTest(testlib.RouterMixin, testlib.TestCase): def test_child_disconnected(self): # Easy mode: process notices its own directly connected child is From c68dbdd569d9c0604e7fd7636337fc3e2d8e646c Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 02:57:32 +0100 Subject: [PATCH 247/383] ansible: stop relying on SIGTERM to shut down service pool It's no longer necessary, since connection attempts are no longer truly blocking. When CTRL+C is hit in the top-level process, broker will begin shutdown, which will cancel all pending connection attempts, causing pool threads to wake. The pool can't block during shutdown anymore. --- ansible_mitogen/process.py | 54 ++++++++++++++------------------------ 1 file changed, 19 insertions(+), 35 deletions(-) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index 4d3a9b5e..2b29904e 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -544,23 +544,12 @@ class ClassicWorkerModel(WorkerModel): """ Used to clean up in unit tests. """ - # TODO: split this up a bit. 
- global _classic_worker_model - assert self.parent_sock is not None - self.parent_sock.close() - self.parent_sock = None - self.listener_path = None - self.router = None - self.parent = None - - for mux in self._muxes: - pid, status = os.waitpid(mux.pid, 0) - status = mitogen.fork._convert_exit_status(status) - LOG.debug('mux PID %d %s', pid, - mitogen.parent.returncode_to_str(status)) + self.on_binding_close() + self._on_process_exit() + set_worker_model(None) + global _classic_worker_model _classic_worker_model = None - set_worker_model(None) def on_strategy_start(self): """ @@ -593,6 +582,7 @@ class ClassicWorkerModel(WorkerModel): self.broker.join() self.router = None self.broker = None + self.parent = None self.listener_path = None # #420: Ansible executes "meta" actions in the top-level process, @@ -708,8 +698,8 @@ class MuxProcess(object): max_message_size=4096 * 1048576, ) _setup_responder(self.router.responder) - mitogen.core.listen(self.broker, 'shutdown', self.on_broker_shutdown) - mitogen.core.listen(self.broker, 'exit', self.on_broker_exit) + mitogen.core.listen(self.broker, 'shutdown', self._on_broker_shutdown) + mitogen.core.listen(self.broker, 'exit', self._on_broker_exit) self.listener = mitogen.unix.Listener.build_stream( router=self.router, path=self.path, @@ -729,26 +719,20 @@ class MuxProcess(object): ) setup_pool(self.pool) - def on_broker_shutdown(self): + def _on_broker_shutdown(self): """ - Respond to broker shutdown by beginning service pool shutdown. Do not - join on the pool yet, since that would block the broker thread which - then cannot clean up pending handlers, which is required for the - threads to exit gracefully. + Respond to broker shutdown by shutting down the pool. Do not join on it + yet, since that would block the broker thread which then cannot clean + up pending handlers and connections, which is required for the threads + to exit gracefully. """ - # In normal operation we presently kill the process because there is - # not yet any way to cancel connect(). - self.pool.stop(join=self.profiling) + self.pool.stop(join=False) - def on_broker_exit(self): + def _on_broker_exit(self): """ - Respond to the broker thread about to exit by sending SIGTERM to - ourself. In future this should gracefully join the pool, but TERM is - fine for now. + Respond to the broker thread about to exit by finally joining on the + pool. This is safe since pools only block in connection attempts, and + connection attempts fail with CancelledError when broker shutdown + begins. """ - if not os.environ.get('MITOGEN_PROFILING'): - # In normal operation we presently kill the process because there is - # not yet any way to cancel connect(). When profiling, threads - # including the broker must shut down gracefully, otherwise pstats - # won't be written. - os.kill(os.getpid(), signal.SIGTERM) + self.pool.join() From 5811909c8dd89c0efdc08a8261885e8e1469df32 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 11:46:53 +0100 Subject: [PATCH 248/383] [linear2] simplify _listener_for_name() --- ansible_mitogen/process.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index 2b29904e..0c59e833 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -480,12 +480,9 @@ class ClassicWorkerModel(WorkerModel): Given an inventory hostname, return the UNIX listener that should communicate with it. This is a simple hash of the inventory name. 
""" - if len(self._muxes) == 1: - return self._muxes[0].path - - idx = abs(hash(name)) % len(self._muxes) - LOG.debug('Picked worker %d: %s', idx, self._muxes[idx].path) - return self._muxes[idx].path + mux = self._muxes[abs(hash(name)) % len(self._muxes)] + LOG.debug('Picked worker %d: %s', mux.index, mux.path) + return mux.path def _reconnect(self, path): if self.router is not None: From 4f051a38a7c62866c9cf4702df5f62a3623f1e2e Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 11:47:54 +0100 Subject: [PATCH 249/383] ansible: improve docstring --- ansible_mitogen/process.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index 0c59e833..acf6cf36 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -509,9 +509,9 @@ class ClassicWorkerModel(WorkerModel): This is an :mod:`atexit` handler installed in the top-level process. Shut the write end of `sock`, causing the receive side of the socket in - every worker process to return 0-byte reads, and causing their main - threads to wake and initiate shutdown. After shutting the socket down, - wait on each child to finish exiting. + every :class:`MuxProcess` to return 0-byte reads, and causing their + main threads to wake and initiate shutdown. After shutting the socket + down, wait on each child to finish exiting. This is done using :mod:`atexit` since Ansible lacks any better hook to run code during exit, and unless some synchronization exists with From 16ba1aacce4736476cd578770e7dbca3e0a37776 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 12:37:50 +0100 Subject: [PATCH 250/383] ci: log failed command line, and try enabling stdout line buffering --- .ci/ci_lib.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/.ci/ci_lib.py b/.ci/ci_lib.py index e1cb84d5..5a03b906 100644 --- a/.ci/ci_lib.py +++ b/.ci/ci_lib.py @@ -57,8 +57,10 @@ def have_docker(): # ----------------- -# Force stdout FD 1 to be a pipe, so tools like pip don't spam progress bars. +# Force line buffering on stdout. +sys.stdout = os.fdopen(1, 'w', 1) +# Force stdout FD 1 to be a pipe, so tools like pip don't spam progress bars. if 'TRAVIS_HOME' in os.environ: proc = subprocess.Popen( args=['stdbuf', '-oL', 'cat'], @@ -86,8 +88,13 @@ def _argv(s, *args): def run(s, *args, **kwargs): argv = ['/usr/bin/time', '--'] + _argv(s, *args) print('Running: %s' % (argv,)) - ret = subprocess.check_call(argv, **kwargs) - print('Finished running: %s' % (argv,)) + try: + ret = subprocess.check_call(argv, **kwargs) + print('Finished running: %s' % (argv,)) + except Exception: + print('Exception occurred while running: %s' % (argv,)) + raise + return ret From 0f23a90d5081691dd276ef59ec24b05f23ef9a8d Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 14:37:59 +0100 Subject: [PATCH 251/383] ansible: log affinity assignments --- ansible_mitogen/affinity.py | 27 +++++++++++++++++---------- ansible_mitogen/process.py | 6 +++--- 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/ansible_mitogen/affinity.py b/ansible_mitogen/affinity.py index eeeb4d38..9eb6597a 100644 --- a/ansible_mitogen/affinity.py +++ b/ansible_mitogen/affinity.py @@ -73,7 +73,9 @@ necessarily involves preventing the scheduler from making load balancing decisions. 
""" +from __future__ import absolute_import import ctypes +import logging import mmap import multiprocessing import os @@ -83,6 +85,9 @@ import mitogen.core import mitogen.parent +LOG = logging.getLogger(__name__) + + try: _libc = ctypes.CDLL(None, use_errno=True) _strerror = _libc.strerror @@ -207,11 +212,13 @@ class FixedPolicy(Policy): self._reserve_mask = 3 self._reserve_shift = 2 - def _set_affinity(self, mask): + def _set_affinity(self, descr, mask): + if descr: + LOG.debug('CPU mask for %s: %#08x', descr, mask) mitogen.parent._preexec_hook = self._clear self._set_cpu_mask(mask) - def _balance(self): + def _balance(self, descr): self.state.lock.acquire() try: n = self.state.counter @@ -219,28 +226,28 @@ class FixedPolicy(Policy): finally: self.state.lock.release() - self._set_cpu(self._reserve_shift + ( + self._set_cpu(descr, self._reserve_shift + ( (n % (self.cpu_count - self._reserve_shift)) )) - def _set_cpu(self, cpu): - self._set_affinity(1 << (cpu % self.cpu_count)) + def _set_cpu(self, descr, cpu): + self._set_affinity(descr, 1 << (cpu % self.cpu_count)) def _clear(self): all_cpus = (1 << self.cpu_count) - 1 - self._set_affinity(all_cpus & ~self._reserve_mask) + self._set_affinity(None, all_cpus & ~self._reserve_mask) def assign_controller(self): if self._reserve_controller: - self._set_cpu(1) + self._set_cpu('Ansible top-level process', 1) else: - self._balance() + self._balance('Ansible top-level process') def assign_muxprocess(self, index): - self._set_cpu(index) + self._set_cpu('MuxProcess %d' % (index,), index) def assign_worker(self): - self._balance() + self._balance('WorkerProcess') def assign_subprocess(self): self._clear() diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index acf6cf36..7e74c36a 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -275,13 +275,13 @@ def common_setup(enable_affinity=True, _init_logging=True): save_pid('controller') ansible_mitogen.logging.set_process_name('top') + if _init_logging: + ansible_mitogen.logging.setup() + if enable_affinity: ansible_mitogen.affinity.policy.assign_controller() mitogen.utils.setup_gil() - if _init_logging: - ansible_mitogen.logging.setup() - if faulthandler is not None: faulthandler.enable() From 5298e87548f1dd014fb641dfa32ef87bd9bf5c8c Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 14:41:47 +0100 Subject: [PATCH 252/383] Split out and make readable more log messages across both packages --- ansible_mitogen/process.py | 5 ++-- ansible_mitogen/services.py | 2 +- mitogen/core.py | 43 ++++++++++++++--------------- mitogen/master.py | 18 ++++++------- mitogen/parent.py | 54 ++++++++++++++++++++++--------------- mitogen/service.py | 8 ++++-- mitogen/unix.py | 19 +++++++------ 7 files changed, 84 insertions(+), 65 deletions(-) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index 7e74c36a..93b72f3f 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -481,7 +481,8 @@ class ClassicWorkerModel(WorkerModel): communicate with it. This is a simple hash of the inventory name. 
""" mux = self._muxes[abs(hash(name)) % len(self._muxes)] - LOG.debug('Picked worker %d: %s', mux.index, mux.path) + LOG.debug('will use multiplexer %d (%s) to connect to "%s"', + mux.index, mux.path, name) return mux.path def _reconnect(self, path): @@ -534,7 +535,7 @@ class ClassicWorkerModel(WorkerModel): for mux in self._muxes: _, status = os.waitpid(mux.pid, 0) status = mitogen.fork._convert_exit_status(status) - LOG.debug('mux %d PID %d %s', mux.index, mux.pid, + LOG.debug('multiplexer %d PID %d %s', mux.index, mux.pid, mitogen.parent.returncode_to_str(status)) def _test_reset(self): diff --git a/ansible_mitogen/services.py b/ansible_mitogen/services.py index a8fde265..fa55f2ec 100644 --- a/ansible_mitogen/services.py +++ b/ansible_mitogen/services.py @@ -180,7 +180,7 @@ class ContextService(mitogen.service.Service): Return a reference, making it eligable for recycling once its reference count reaches zero. """ - LOG.debug('%r.put(%r)', self, context) + LOG.debug('decrementing reference count for %r', context) self._lock.acquire() try: if self._refs_by_context.get(context, 0) == 0: diff --git a/mitogen/core.py b/mitogen/core.py index 4dc3e7ef..aca7972f 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -875,7 +875,8 @@ class Message(object): if msg.handle: (self.router or router).route(msg) else: - LOG.debug('Message.reply(): discarding due to zero handle: %r', msg) + LOG.debug('dropping reply to message with no return address: %r', + msg) if PY3: UNPICKLER_KWARGS = {'encoding': 'bytes'} @@ -1224,6 +1225,7 @@ class Importer(object): ALWAYS_BLACKLIST += ['cStringIO'] def __init__(self, router, context, core_src, whitelist=(), blacklist=()): + self._log = LOG.getChild('importer') self._context = context self._present = {'mitogen': self.MITOGEN_PKG_CONTENT} self._lock = threading.Lock() @@ -1272,7 +1274,7 @@ class Importer(object): ) def __repr__(self): - return 'Importer()' + return 'Importer' def builtin_find_module(self, fullname): # imp.find_module() will always succeed for __main__, because it is a @@ -1297,18 +1299,18 @@ class Importer(object): _tls.running = True try: - _v and LOG.debug('%r.find_module(%r)', self, fullname) + #_v and self._log.debug('Python requested %r', fullname) fullname = to_text(fullname) pkgname, dot, _ = str_rpartition(fullname, '.') pkg = sys.modules.get(pkgname) if pkgname and getattr(pkg, '__loader__', None) is not self: - LOG.debug('%r: %r is submodule of a package we did not load', - self, fullname) + self._log.debug('%s is submodule of a locally loaded package', + fullname) return None suffix = fullname[len(pkgname+dot):] if pkgname and suffix not in self._present.get(pkgname, ()): - LOG.debug('%r: master doesn\'t know %r', self, fullname) + self._log.debug('%s has no submodule %s', pkgname, suffix) return None # #114: explicitly whitelisted prefixes override any @@ -1319,10 +1321,9 @@ class Importer(object): try: self.builtin_find_module(fullname) - _vv and IOLOG.debug('%r: %r is available locally', - self, fullname) + _vv and self._log.debug('%r is available locally', fullname) except ImportError: - _vv and IOLOG.debug('find_module(%r) returning self', fullname) + _vv and self._log.debug('we will try to load %r', fullname) return self finally: del _tls.running @@ -1373,7 +1374,7 @@ class Importer(object): tup = msg.unpickle() fullname = tup[0] - _v and LOG.debug('importer: received %s', fullname) + _v and self._log.debug('received %s', fullname) self._lock.acquire() try: @@ -1397,12 +1398,12 @@ class Importer(object): if not present: funcs = 
self._callbacks.get(fullname) if funcs is not None: - _v and LOG.debug('%s: existing request for %s in flight', - self, fullname) + _v and self._log.debug('existing request for %s in flight', + fullname) funcs.append(callback) else: - _v and LOG.debug('%s: requesting %s from parent', - self, fullname) + _v and self._log.debug('sending new %s request to parent', + fullname) self._callbacks[fullname] = [callback] self._context.send( Message(data=b(fullname), handle=GET_MODULE) @@ -1415,7 +1416,7 @@ class Importer(object): def load_module(self, fullname): fullname = to_text(fullname) - _v and LOG.debug('importer: requesting %s', fullname) + _v and self._log.debug('requesting %s', fullname) self._refuse_imports(fullname) event = threading.Event() @@ -1679,7 +1680,7 @@ class DelimitedProtocol(Protocol): _trailer = b('') def on_receive(self, broker, buf): - IOLOG.debug('%r.on_receive()', self) + _vv and IOLOG.debug('%r.on_receive()', self) self._trailer, cont = mitogen.core.iter_split( buf=self._trailer + buf, delim=self.delimiter, @@ -1743,13 +1744,13 @@ class BufferedWriter(object): buf = self._buf.popleft() written = self._protocol.stream.transmit_side.write(buf) if not written: - _v and LOG.debug('%r.on_transmit(): disconnection detected', self) + _v and LOG.debug('disconnected during write to %r', self) self._protocol.stream.on_disconnect(broker) return elif written != len(buf): self._buf.appendleft(BufferType(buf, written)) - _vv and IOLOG.debug('%r.on_transmit() -> len %d', self, written) + _vv and IOLOG.debug('transmitted %d bytes to %r', written, self) self._len -= written if not self._buf: @@ -2068,13 +2069,13 @@ class Context(object): msg.dst_id = self.context_id msg.reply_to = receiver.handle - _v and LOG.debug('%r.send_async(%r)', self, msg) + _v and LOG.debug('sending message to %r: %r', self, msg) self.send(msg) return receiver def call_service_async(self, service_name, method_name, **kwargs): - _v and LOG.debug('%r.call_service_async(%r, %r, %r)', - self, service_name, method_name, kwargs) + _v and LOG.debug('calling service %s.%s of %r, args: %r', + service_name, method_name, self, kwargs) if isinstance(service_name, BytesType): service_name = service_name.encode('utf-8') elif not isinstance(service_name, UnicodeType): diff --git a/mitogen/master.py b/mitogen/master.py index 909c3cef..cb4452a1 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -796,6 +796,7 @@ class ModuleFinder(object): class ModuleResponder(object): def __init__(self, router): + self._log = logging.getLogger('mitogen.responder') self._router = router self._finder = ModuleFinder() self._cache = {} # fullname -> pickled @@ -863,7 +864,7 @@ class ModuleResponder(object): if b('mitogen.main(') in src: return src - LOG.error(self.main_guard_msg, path) + self._log.error(self.main_guard_msg, path) raise ImportError('refused') def _make_negative_response(self, fullname): @@ -882,8 +883,7 @@ class ModuleResponder(object): if path and is_stdlib_path(path): # Prevent loading of 2.x<->3.x stdlib modules! This costs one # RTT per hit, so a client-side solution is also required. - LOG.debug('%r: refusing to serve stdlib module %r', - self, fullname) + self._log.debug('refusing to serve stdlib module %r', fullname) tup = self._make_negative_response(fullname) self._cache[fullname] = tup return tup @@ -891,7 +891,7 @@ class ModuleResponder(object): if source is None: # TODO: make this .warning() or similar again once importer has its # own logging category. 
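The "existing request for %s in flight" branch above is a small, reusable pattern: coalesce duplicate requests for the same key so only the first caller pays the round trip. A stand-alone sketch of the idea (this is not the Importer itself)::

    import threading

    class Coalescer(object):
        def __init__(self, send):
            self._send = send            # issues the real request for a key
            self._lock = threading.Lock()
            self._callbacks = {}         # key -> [callback, ...]

        def request(self, key, callback):
            with self._lock:
                waiters = self._callbacks.get(key)
                if waiters is not None:  # already in flight: just queue up
                    waiters.append(callback)
                    return
                self._callbacks[key] = [callback]
            self._send(key)              # only the first caller sends

        def deliver(self, key, value):
            with self._lock:
                waiters = self._callbacks.pop(key, [])
            for cb in waiters:
                cb(value)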
- LOG.debug('_build_tuple(%r): could not locate source', fullname) + self._log.debug('could not find source for %r', fullname) tup = self._make_negative_response(fullname) self._cache[fullname] = tup return tup @@ -904,8 +904,8 @@ class ModuleResponder(object): if is_pkg: pkg_present = get_child_modules(path) - LOG.debug('_build_tuple(%r, %r) -> %r', - path, fullname, pkg_present) + self._log.debug('%s is a package at %s with submodules %r', + fullname, path, pkg_present) else: pkg_present = None @@ -936,8 +936,8 @@ class ModuleResponder(object): dst_id=stream.protocol.remote_id, handle=mitogen.core.LOAD_MODULE, ) - LOG.debug('%s: sending %s (%.2f KiB) to %s', - self, fullname, len(msg.data) / 1024.0, stream.name) + self._log.debug('sending %s (%.2f KiB) to %s', + fullname, len(msg.data) / 1024.0, stream.name) self._router._async_route(msg) stream.protocol.sent_modules.add(fullname) if tup[2] is not None: @@ -983,7 +983,7 @@ class ModuleResponder(object): return fullname = msg.data.decode() - LOG.debug('%s requested module %s', stream.name, fullname) + self._log.debug('%s requested module %s', stream.name, fullname) self.get_module_count += 1 if fullname in stream.protocol.sent_modules: LOG.warning('_on_get_module(): dup request for %r from %r', diff --git a/mitogen/parent.py b/mitogen/parent.py index f8c5d95f..851882da 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -41,6 +41,7 @@ import getpass import heapq import inspect import logging +import logging import os import re import signal @@ -65,9 +66,12 @@ except ImportError: import mitogen.core from mitogen.core import b from mitogen.core import bytes_partition -from mitogen.core import LOG from mitogen.core import IOLOG + +LOG = logging.getLogger(__name__) + + try: next except NameError: @@ -663,7 +667,7 @@ def _upgrade_broker(broker): root.setLevel(old_level) broker.timers = TimerList() - LOG.debug('replaced %r with %r (new: %d readers, %d writers; ' + LOG.debug('upgraded %r with %r (new: %d readers, %d writers; ' 'old: %d readers, %d writers)', old, new, len(new.readers), len(new.writers), len(old.readers), len(old.writers)) @@ -1141,7 +1145,7 @@ class BootstrapProtocol(RegexProtocol): self._writer.write(self.stream.conn.get_preamble()) def _on_ec1_received(self, line, match): - LOG.debug('%r: first stage received bootstrap', self) + LOG.debug('%r: first stage received mitogen.core source', self) def _on_ec2_received(self, line, match): LOG.debug('%r: new child booted successfully', self) @@ -1454,8 +1458,8 @@ class Connection(object): """ Fail the connection attempt. 
""" - LOG.debug('%s: failing connection due to %r', - self.stdio_stream.name, exc) + LOG.debug('failing connection %s due to %r', + self.stdio_stream and self.stdio_stream.name, exc) if self.exception is None: self._adorn_eof_error(exc) self.exception = exc @@ -1558,9 +1562,10 @@ class Connection(object): return stream def _async_connect(self): + LOG.debug('creating connection to context %d using %s', + self.context.context_id, self.__class__.__module__) mitogen.core.listen(self._router.broker, 'shutdown', self._on_broker_shutdown) - self._start_timer() self.stdio_stream = self._setup_stdio_stream() if self.context.name is None: @@ -1570,7 +1575,6 @@ class Connection(object): self.stderr_stream = self._setup_stderr_stream() def connect(self, context): - LOG.debug('%r.connect()', self) self.context = context self.proc = self.start_child() LOG.debug('%r.connect(): pid:%r stdin:%r stdout:%r stderr:%r', @@ -1759,7 +1763,9 @@ class CallChain(object): pipelining is disabled, the exception will be logged to the target context's logging framework. """ - LOG.debug('%r.call_no_reply(): %r', self, CallSpec(fn, args, kwargs)) + LOG.debug('starting no-reply function call to %r: %r', + self.context.name or self.context.context_id, + CallSpec(fn, args, kwargs)) self.context.send(self.make_msg(fn, *args, **kwargs)) def call_async(self, fn, *args, **kwargs): @@ -1815,7 +1821,9 @@ class CallChain(object): contexts and consumed as they complete using :class:`mitogen.select.Select`. """ - LOG.debug('%r.call_async(): %r', self, CallSpec(fn, args, kwargs)) + LOG.debug('starting function call to %s: %r', + self.context.name or self.context.context_id, + CallSpec(fn, args, kwargs)) return self.context.send_async(self.make_msg(fn, *args, **kwargs)) def call(self, fn, *args, **kwargs): @@ -1946,6 +1954,7 @@ class RouteMonitor(object): def __init__(self, router, parent=None): self.router = router self.parent = parent + self._log = logging.getLogger('mitogen.route_monitor') #: Mapping of Stream instance to integer context IDs reachable via the #: stream; used to cleanup routes during disconnection. 
self._routes_by_stream = {} @@ -2066,8 +2075,8 @@ class RouteMonitor(object): if routes is None: return - LOG.debug('%r: %r is gone; propagating DEL_ROUTE for %r', - self, stream, routes) + self._log.debug('stream %s is gone; propagating DEL_ROUTE for %r', + stream.name, routes) for target_id in routes: self.router.del_route(target_id) self._propagate_up(mitogen.core.DEL_ROUTE, target_id) @@ -2093,12 +2102,12 @@ class RouteMonitor(object): stream = self.router.stream_by_id(msg.auth_id) current = self.router.stream_by_id(target_id) if current and current.protocol.remote_id != mitogen.parent_id: - LOG.error('Cannot add duplicate route to %r via %r, ' - 'already have existing route via %r', - target_id, stream, current) + self._log.error('Cannot add duplicate route to %r via %r, ' + 'already have existing route via %r', + target_id, stream, current) return - LOG.debug('Adding route to %d via %r', target_id, stream) + self._log.debug('Adding route to %d via %r', target_id, stream) self._routes_by_stream[stream].add(target_id) self.router.add_route(target_id, stream) self._propagate_up(mitogen.core.ADD_ROUTE, target_id, target_name) @@ -2120,16 +2129,16 @@ class RouteMonitor(object): stream = self.router.stream_by_id(msg.auth_id) if registered_stream != stream: - LOG.error('%r: received DEL_ROUTE for %d from %r, expected %r', - self, target_id, stream, registered_stream) + self._log.error('received DEL_ROUTE for %d from %r, expected %r', + target_id, stream, registered_stream) return context = self.router.context_by_id(target_id, create=False) if context: - LOG.debug('%r: firing local disconnect for %r', self, context) + self._log.debug('firing local disconnect signal for %r', context) mitogen.core.fire(context, 'disconnect') - LOG.debug('%r: deleting route to %d via %r', self, target_id, stream) + self._log.debug('deleting route to %d via %r', target_id, stream) routes = self._routes_by_stream.get(stream) if routes: routes.discard(target_id) @@ -2151,7 +2160,7 @@ class Router(mitogen.core.Router): route_monitor = None def upgrade(self, importer, parent): - LOG.debug('%r.upgrade()', self) + LOG.debug('upgrading %r with capabilities to start new children', self) self.id_allocator = ChildIdAllocator(router=self) self.responder = ModuleForwarder( router=self, @@ -2211,7 +2220,8 @@ class Router(mitogen.core.Router): but remains public while the design has not yet settled, and situations may arise where routing is not fully automatic. 
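RouteMonitor's per-stream bookkeeping shown above reduces to a dictionary of sets: remember which context IDs were learned via each stream, and forget them all at once when the stream dies so DEL_ROUTE can be propagated for each. A toy model::

    class RouteTable(object):
        def __init__(self):
            self._routes_by_stream = {}

        def add(self, stream, target_id):
            self._routes_by_stream.setdefault(stream, set()).add(target_id)

        def on_disconnect(self, stream):
            # Return the IDs reachable via the dead stream so DEL_ROUTE can
            # be sent upstream for each of them.
            return sorted(self._routes_by_stream.pop(stream, ()))

    table = RouteTable()
    table.add('ssh.host1', 2)
    table.add('ssh.host1', 3)
    print(table.on_disconnect('ssh.host1'))   # [2, 3]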
""" - LOG.debug('%r.add_route(%r, %r)', self, target_id, stream) + LOG.debug('%r: adding route to context %r via %r', + self, target_id, stream) assert isinstance(target_id, int) assert isinstance(stream, mitogen.core.Stream) @@ -2480,7 +2490,7 @@ class ModuleForwarder(object): return fullname = msg.data.decode('utf-8') - LOG.debug('%r: %s requested by %d', self, fullname, msg.src_id) + LOG.debug('%r: %s requested by context %d', self, fullname, msg.src_id) callback = lambda: self._on_cache_callback(msg, fullname) self.importer._request_module(fullname, callback) diff --git a/mitogen/service.py b/mitogen/service.py index b9332780..da48521f 100644 --- a/mitogen/service.py +++ b/mitogen/service.py @@ -688,10 +688,12 @@ class PushFileService(Service): def _forward(self, context, path): stream = self.router.stream_by_id(context.context_id) - child = mitogen.core.Context(self.router, stream.protocol.remote_id) + child = self.router.context_by_id(stream.protocol.remote_id) sent = self._sent_by_stream.setdefault(stream, set()) if path in sent: if child.context_id != context.context_id: + LOG.debug('requesting %s forward small file to %s: %s', + child, context, path) child.call_service_async( service_name=self.name(), method_name='forward', @@ -699,6 +701,8 @@ class PushFileService(Service): context=context ).close() else: + LOG.debug('requesting %s cache and forward small file to %s: %s', + child, context, path) child.call_service_async( service_name=self.name(), method_name='store_and_forward', @@ -729,8 +733,8 @@ class PushFileService(Service): 'path': mitogen.core.FsPathTypes, }) def propagate_to(self, context, path): - LOG.debug('%r.propagate_to(%r, %r)', self, context, path) if path not in self._cache: + LOG.debug('caching small file %s', path) fp = open(path, 'rb') try: self._cache[path] = mitogen.core.Blob(fp.read()) diff --git a/mitogen/unix.py b/mitogen/unix.py index c34dc064..645b061d 100644 --- a/mitogen/unix.py +++ b/mitogen/unix.py @@ -36,6 +36,7 @@ have the same privilege (auth_id) as the current process. 
""" import errno +import logging import os import socket import struct @@ -45,7 +46,8 @@ import tempfile import mitogen.core import mitogen.master -from mitogen.core import LOG + +LOG = logging.getLogger(__name__) class Error(mitogen.core.Error): @@ -143,8 +145,8 @@ class Listener(mitogen.core.Protocol): try: pid, = struct.unpack('>L', sock.recv(4)) except (struct.error, socket.error): - LOG.error('%r: failed to read remote identity: %s', - self, sys.exc_info()[1]) + LOG.error('listener: failed to read remote identity: %s', + sys.exc_info()[1]) return context_id = self._router.id_allocator.allocate() @@ -152,8 +154,8 @@ class Listener(mitogen.core.Protocol): sock.send(struct.pack('>LLL', context_id, mitogen.context_id, os.getpid())) except socket.error: - LOG.error('%r: failed to assign identity to PID %d: %s', - self, pid, sys.exc_info()[1]) + LOG.error('listener: failed to assign identity to PID %d: %s', + pid, sys.exc_info()[1]) return context = mitogen.parent.Context(self._router, context_id) @@ -165,7 +167,8 @@ class Listener(mitogen.core.Protocol): stream.protocol.auth_id = mitogen.context_id stream.protocol.is_privileged = True stream.accept(sock, sock) - LOG.debug('%r: accepted %r', self, stream) + LOG.debug('listener: accepted connection from PID %d: %s', + pid, stream.name) self._router.register(context, stream) @@ -186,7 +189,7 @@ def _connect(path, broker, sock): mitogen.parent_id = remote_id mitogen.parent_ids = [remote_id] - LOG.debug('unix.connect(): local ID is %r, remote is %r', + LOG.debug('client: local ID is %r, remote is %r', mitogen.context_id, remote_id) router = mitogen.master.Router(broker=broker) @@ -204,7 +207,7 @@ def _connect(path, broker, sock): def connect(path, broker=None): - LOG.debug('unix.connect(path=%r)', path) + LOG.debug('client: connecting to %s', path) sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: return _connect(path, broker, sock) From b76da4698b131a8e56b806c1ddaa1097ae9646d9 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 14:51:16 +0100 Subject: [PATCH 253/383] parent: move subprocess creation to mux thread too Now connect() really is a pure blocking wrapper. 
--- mitogen/parent.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/mitogen/parent.py b/mitogen/parent.py index 851882da..f42d22e6 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -1567,6 +1567,19 @@ class Connection(object): mitogen.core.listen(self._router.broker, 'shutdown', self._on_broker_shutdown) self._start_timer() + + try: + self.proc = self.start_child() + except Exception: + self._fail_connection(sys.exc_info()[1]) + return + + LOG.debug('child for %r started: pid:%r stdin:%r stdout:%r stderr:%r', + self, self.proc.pid, + self.proc.stdin.fileno(), + self.proc.stdout.fileno(), + self.proc.stderr and self.proc.stderr.fileno()) + self.stdio_stream = self._setup_stdio_stream() if self.context.name is None: self.context.name = self.stdio_stream.name @@ -1576,13 +1589,6 @@ class Connection(object): def connect(self, context): self.context = context - self.proc = self.start_child() - LOG.debug('%r.connect(): pid:%r stdin:%r stdout:%r stderr:%r', - self, self.proc.pid, - self.proc.stdin.fileno(), - self.proc.stdout.fileno(), - self.proc.stderr and self.proc.stderr.fileno()) - self.latch = mitogen.core.Latch() self._router.broker.defer(self._async_connect) self.latch.get() From 212f6a35297c0aac6960e0031d7f931596c4c29c Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 15:02:27 +0100 Subject: [PATCH 254/383] tests: stop dumping Docker help output in the log. --- tests/testlib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/testlib.py b/tests/testlib.py index d4387c54..e26c6544 100644 --- a/tests/testlib.py +++ b/tests/testlib.py @@ -428,7 +428,7 @@ class DockerizedSshDaemon(object): def start_container(self): try: - subprocess__check_output(['docker']) + subprocess__check_output(['docker', '--version']) except Exception: raise unittest2.SkipTest('Docker binary is unavailable') From 7379144a1292b5fbdcfb4b3ed574ba3095992569 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 17:07:49 +0100 Subject: [PATCH 255/383] Stop using mitogen root logger in more modules, remove unused loggers --- mitogen/fork.py | 2 +- mitogen/jail.py | 5 ----- mitogen/kubectl.py | 5 ----- mitogen/lxc.py | 5 ----- mitogen/lxd.py | 5 ----- mitogen/ssh.py | 2 +- mitogen/su.py | 2 +- mitogen/utils.py | 1 - 8 files changed, 3 insertions(+), 24 deletions(-) diff --git a/mitogen/fork.py b/mitogen/fork.py index ee990262..25d1b5a6 100644 --- a/mitogen/fork.py +++ b/mitogen/fork.py @@ -41,7 +41,7 @@ import mitogen.parent from mitogen.core import b -LOG = logging.getLogger('mitogen') +LOG = logging.getLogger(__name__) # Python 2.4/2.5 cannot support fork+threads whatsoever, it doesn't even fix up # interpreter state. 
So 2.4/2.5 interpreters start .local() contexts for diff --git a/mitogen/jail.py b/mitogen/jail.py index c7c1f0f9..4da7eb0d 100644 --- a/mitogen/jail.py +++ b/mitogen/jail.py @@ -28,15 +28,10 @@ # !mitogen: minify_safe -import logging - import mitogen.core import mitogen.parent -LOG = logging.getLogger(__name__) - - class Options(mitogen.parent.Options): container = None username = None diff --git a/mitogen/kubectl.py b/mitogen/kubectl.py index acc011b9..374ab747 100644 --- a/mitogen/kubectl.py +++ b/mitogen/kubectl.py @@ -28,15 +28,10 @@ # !mitogen: minify_safe -import logging - import mitogen.core import mitogen.parent -LOG = logging.getLogger(__name__) - - class Options(mitogen.parent.Options): pod = None kubectl_path = 'kubectl' diff --git a/mitogen/lxc.py b/mitogen/lxc.py index 759475c1..a86ce5f0 100644 --- a/mitogen/lxc.py +++ b/mitogen/lxc.py @@ -28,15 +28,10 @@ # !mitogen: minify_safe -import logging - import mitogen.core import mitogen.parent -LOG = logging.getLogger(__name__) - - class Options(mitogen.parent.Options): container = None lxc_attach_path = 'lxc-attach' diff --git a/mitogen/lxd.py b/mitogen/lxd.py index 6fbe0694..675dddcd 100644 --- a/mitogen/lxd.py +++ b/mitogen/lxd.py @@ -28,15 +28,10 @@ # !mitogen: minify_safe -import logging - import mitogen.core import mitogen.parent -LOG = logging.getLogger(__name__) - - class Options(mitogen.parent.Options): container = None lxc_path = 'lxc' diff --git a/mitogen/ssh.py b/mitogen/ssh.py index b3fb7bac..b276dd28 100644 --- a/mitogen/ssh.py +++ b/mitogen/ssh.py @@ -49,7 +49,7 @@ except NameError: from mitogen.core import any -LOG = logging.getLogger('mitogen') +LOG = logging.getLogger(__name__) auth_incorrect_msg = 'SSH authentication is incorrect' password_incorrect_msg = 'SSH password is incorrect' diff --git a/mitogen/su.py b/mitogen/su.py index 9813632a..080c9782 100644 --- a/mitogen/su.py +++ b/mitogen/su.py @@ -81,7 +81,7 @@ class SetupBootstrapProtocol(mitogen.parent.BootstrapProtocol): def _on_password_prompt(self, line, match): LOG.debug('%r: (password prompt): %r', - self.stream.name, line.decode('utf-8', 'replace')) + self.stream.name, line.decode('utf-8', 'replace')) if self.stream.conn.options.password is None: self.stream.conn._fail_connection( diff --git a/mitogen/utils.py b/mitogen/utils.py index 94a171fb..b1347d02 100644 --- a/mitogen/utils.py +++ b/mitogen/utils.py @@ -39,7 +39,6 @@ import mitogen.master import mitogen.parent -LOG = logging.getLogger('mitogen') iteritems = getattr(dict, 'iteritems', dict.items) if mitogen.core.PY3: From f0782ccd422ea37d5218508a7dd946068c8d7acb Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 17:39:31 +0100 Subject: [PATCH 256/383] [stream-refactor] get caught up on internals.rst updates --- docs/internals.rst | 85 +++++++++++++++++++++++++++------------------- mitogen/core.py | 60 ++++++++++++++++++++++++++------ mitogen/parent.py | 12 +++---- mitogen/select.py | 6 ++-- 4 files changed, 109 insertions(+), 54 deletions(-) diff --git a/docs/internals.rst b/docs/internals.rst index 96f9269c..fc6206e0 100644 --- a/docs/internals.rst +++ b/docs/internals.rst @@ -48,46 +48,75 @@ PidfulStreamHandler :members: -Side -==== +Stream & Side +============= + +.. currentmodule:: mitogen.core +.. autoclass:: Stream + :members: .. currentmodule:: mitogen.core .. autoclass:: Side :members: -Stream -====== +Protocol +======== .. currentmodule:: mitogen.core -.. autoclass:: BasicStream +.. autoclass:: Protocol :members: -.. autoclass:: Stream +.. currentmodule:: mitogen.parent +.. 
autoclass:: BootstrapProtocol :members: -.. currentmodule:: mitogen.fork -.. autoclass:: Stream +.. currentmodule:: mitogen.core +.. autoclass:: DelimitedProtocol :members: -.. currentmodule:: mitogen.parent -.. autoclass:: Stream +.. currentmodule:: mitogen.core +.. autoclass:: IoLoggerProtocol :members: -.. currentmodule:: mitogen.ssh -.. autoclass:: Stream +.. currentmodule:: mitogen.core +.. autoclass:: MitogenProtocol :members: -.. currentmodule:: mitogen.sudo -.. autoclass:: Stream +.. currentmodule:: mitogen.parent +.. autoclass:: MitogenProtocol :members: .. currentmodule:: mitogen.core -.. autoclass:: IoLogger +.. autoclass:: Waker :members: -.. currentmodule:: mitogen.core -.. autoclass:: Waker + +Connection / Options +==================== + +.. currentmodule:: mitogen.fork +.. autoclass:: Options + :members: +.. autoclass:: Connection + :members: + +.. currentmodule:: mitogen.parent +.. autoclass:: Options + :members: +.. autoclass:: Connection + :members: + +.. currentmodule:: mitogen.ssh +.. autoclass:: Options + :members: +.. autoclass:: Connection + :members: + +.. currentmodule:: mitogen.sudo +.. autoclass:: Options + :members: +.. autoclass:: Connection :members: @@ -158,21 +187,9 @@ Process Helpers ======= -Blocking I/O ------------- - -These functions exist to support the blocking phase of setting up a new -context. They will eventually be replaced with asynchronous equivalents. - - -.. currentmodule:: mitogen.parent -.. autofunction:: discard_until -.. autofunction:: iter_read -.. autofunction:: write_all - Subprocess Functions ------------- +--------------------- .. currentmodule:: mitogen.parent .. autofunction:: create_child @@ -184,15 +201,15 @@ Helpers ------- .. currentmodule:: mitogen.core -.. autofunction:: to_text .. autofunction:: has_parent_authority +.. autofunction:: io_op +.. autofunction:: pipe +.. autofunction:: set_block .. autofunction:: set_cloexec .. autofunction:: set_nonblock -.. autofunction:: set_block -.. autofunction:: io_op +.. autofunction:: to_text .. currentmodule:: mitogen.parent -.. autofunction:: close_nonstandard_fds .. autofunction:: create_socketpair .. currentmodule:: mitogen.master diff --git a/mitogen/core.py b/mitogen/core.py index aca7972f..c9b1f9df 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -423,8 +423,11 @@ def listen(obj, name, func): def unlisten(obj, name, func): """ - Remove `func` from the list of functions invoked when signal `name` is + Remove `func()` from the list of functions invoked when signal `name` is fired by `obj`. + + :raises ValueError: + `func()` was not on the list. """ _signals(obj, name).remove(func) @@ -946,7 +949,7 @@ class Sender(object): Senders may be serialized, making them convenient to wire up data flows. See :meth:`mitogen.core.Receiver.to_sender` for more information. - :param Context context: + :param mitogen.core.Context context: Context to send messages to. :param int dst_handle: Destination handle to send messages to. @@ -1550,6 +1553,14 @@ class Stream(object): name = u'default' def set_protocol(self, protocol): + """ + Bind a protocol to this stream, by updating :attr:`Protocol.stream` to + refer to this stream, and updating this stream's + :attr:`Stream.protocol` to the refer to the protocol. Any prior + protocol's :attr:`Protocol.stream` is set to :data:`None`. 
+ """ + if self.protocol: + self.protocol.stream = None self.protocol = protocol self.protocol.stream = self @@ -1622,7 +1633,11 @@ class Protocol(object): implementation to be replaced without modifying behavioural logic. """ stream_class = Stream + + #: The :class:`Stream` this protocol is currently bound to, or + #: :data:`None`. stream = None + read_size = CHUNK_SIZE @classmethod @@ -1695,9 +1710,27 @@ class DelimitedProtocol(Protocol): self.stream.protocol.on_receive(broker, self._trailer) def on_line_received(self, line): + """ + Receive a line from the stream. + + :param bytes line: + The encoded line, excluding the delimiter. + :returns: + :data:`False` to indicate this invocation modified the stream's + active protocol, and any remaining buffered data should be passed + to the new protocol's :meth:`on_receive` method. + + Any other return value is ignored. + """ pass def on_partial_line_received(self, line): + """ + Receive a trailing unterminated partial line from the stream. + + :param bytes line: + The encoded partial line. + """ pass @@ -1766,7 +1799,7 @@ class Side(object): underlying FD, preventing erroneous duplicate calls to :func:`os.close` due to duplicate :meth:`Stream.on_disconnect` calls, which would otherwise risk silently succeeding by closing an unrelated descriptor. For this reason, it - is crucial only one :class:`Side` exists per unique descriptor. + is crucial only one file object exists per unique descriptor. :param mitogen.core.Stream stream: The stream this side is associated with. @@ -1794,8 +1827,8 @@ class Side(object): self.fp = fp #: Integer file descriptor to perform IO on, or :data:`None` if #: :meth:`close` has been called. This is saved separately from the - #: file object, since fileno() cannot be called on it after it has been - #: closed. + #: file object, since :meth:`file.fileno` cannot be called on it after + #: it has been closed. self.fd = fp.fileno() #: If :data:`True`, causes presence of this side in #: :class:`Broker`'s active reader set to defer shutdown until the @@ -1822,7 +1855,7 @@ class Side(object): def close(self): """ - Call :func:`os.close` on :attr:`fd` if it is not :data:`None`, + Call :meth:`file.close` on :attr:`fp` if it is not :data:`None`, then set it to :data:`None`. """ _vv and IOLOG.debug('%r.close()', self) @@ -1841,7 +1874,7 @@ class Side(object): in a 0-sized read like a regular file. :returns: - Bytes read, or the empty to string to indicate disconnection was + Bytes read, or the empty string to indicate disconnection was detected. """ if self.closed: @@ -2024,7 +2057,7 @@ class Context(object): explicitly, as that method is deduplicating, and returns the only context instance :ref:`signals` will be raised on. - :param Router router: + :param mitogen.core.Router router: Router to emit messages through. :param int context_id: Context ID. @@ -2669,7 +2702,11 @@ class IoLoggerProtocol(DelimitedProtocol): def on_shutdown(self, broker): """ - Shut down the write end of the logging socket. + Shut down the write end of the socket, preventing any further writes to + it by this process, or subprocess that inherited it. This allows any + remaining kernel-buffered data to be drained during graceful shutdown + without the buffer continuously refilling due to some out of control + child process. 
""" _v and LOG.debug('%r: shutting down', self) if not IS_WSL: @@ -2681,6 +2718,9 @@ class IoLoggerProtocol(DelimitedProtocol): self.stream.transmit_side.close() def on_line_received(self, line): + """ + Decode the received line as UTF-8 and pass it to the logging framework. + """ self._log.info('%s', line.decode('utf-8', 'replace')) @@ -2881,7 +2921,7 @@ class Router(object): If :data:`False`, the handler will be unregistered after a single message has been received. - :param Context respondent: + :param mitogen.core.Context respondent: Context that messages to this handle are expected to be sent from. If specified, arranges for a dead message to be delivered to `fn` when disconnection of the context is detected. diff --git a/mitogen/parent.py b/mitogen/parent.py index f42d22e6..22e40610 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -1121,10 +1121,10 @@ class RegexProtocol(LineLoggingProtocolMixin, mitogen.core.DelimitedProtocol): class BootstrapProtocol(RegexProtocol): """ - Respond to stdout of a child during bootstrap. Wait for EC0_MARKER to be - written by the first stage to indicate it can receive the bootstrap, then - await EC1_MARKER to indicate success, and :class:`MitogenProtocol` can be - enabled. + Respond to stdout of a child during bootstrap. Wait for :attr:`EC0_MARKER` + to be written by the first stage to indicate it can receive the bootstrap, + then await :attr:`EC1_MARKER` to indicate success, and + :class:`MitogenProtocol` can be enabled. """ #: Sentinel value emitted by the first stage to indicate it is ready to #: receive the compressed bootstrap. For :mod:`mitogen.ssh` this must have @@ -1951,9 +1951,9 @@ class RouteMonitor(object): RouteMonitor lives entirely on the broker thread, so its data requires no locking. - :param Router router: + :param mitogen.master.Router router: Router to install handlers on. - :param Context parent: + :param mitogen.core.Context parent: :data:`None` in the master process, or reference to the parent context we should propagate route updates towards. """ diff --git a/mitogen/select.py b/mitogen/select.py index 51aebc22..ca3c32bc 100644 --- a/mitogen/select.py +++ b/mitogen/select.py @@ -57,9 +57,7 @@ class Select(object): If `oneshot` is :data:`True`, then remove each receiver as it yields a result; since :meth:`__iter__` terminates once the final receiver is - removed, this makes it convenient to respond to calls made in parallel: - - .. 
code-block:: python + removed, this makes it convenient to respond to calls made in parallel:: total = 0 recvs = [c.call_async(long_running_operation) for c in contexts] @@ -98,7 +96,7 @@ class Select(object): for msg in mitogen.select.Select(selects): print(msg.unpickle()) - :class:`Select` may be used to mix inter-thread and inter-process IO: + :class:`Select` may be used to mix inter-thread and inter-process IO:: latch = mitogen.core.Latch() start_thread(latch) From 11c7e3f561538fb028fc89273db0ee83be1079b3 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 17:53:34 +0100 Subject: [PATCH 257/383] service: centralize fetching thread name, and tidy up logs --- mitogen/service.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/mitogen/service.py b/mitogen/service.py index da48521f..f58f882c 100644 --- a/mitogen/service.py +++ b/mitogen/service.py @@ -99,6 +99,10 @@ def get_or_create_pool(size=None, router=None): return _pool +def get_thread_name(): + return threading.currentThread().getName() + + def call(service_name, method_name, call_context=None, **kwargs): """ Call a service registered with this pool, using the calling thread as a @@ -611,10 +615,11 @@ class Pool(object): try: event = self._select.get_event() except mitogen.core.LatchError: - LOG.debug('%r: graceful exit', self) + LOG.debug('thread %s exiting gracefully', get_thread_name()) return except mitogen.core.ChannelError: - LOG.debug('%r: exitting: %s', self, sys.exc_info()[1]) + LOG.debug('thread %s exiting with error: %s', + get_thread_name(), sys.exc_info()[1]) return func = self._func_by_source[event.source] @@ -627,16 +632,14 @@ class Pool(object): try: self._worker_run() except Exception: - th = threading.currentThread() - LOG.exception('%r: worker %r crashed', self, th.getName()) + LOG.exception('%r: worker %r crashed', self, get_thread_name()) raise def __repr__(self): - th = threading.currentThread() return 'Pool(%04x, size=%d, th=%r)' % ( id(self) & 0xffff, len(self._threads), - th.getName(), + get_thread_name(), ) @@ -752,7 +755,7 @@ class PushFileService(Service): def store_and_forward(self, path, data, context): LOG.debug('%r.store_and_forward(%r, %r, %r) %r', self, path, data, context, - threading.currentThread().getName()) + get_thread_name()) self._lock.acquire() try: self._cache[path] = data From 22a07a043be8e5dda3701fa538cce612d71f37ec Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 17:54:10 +0100 Subject: [PATCH 258/383] tests: fix error in affinity_test --- tests/ansible/tests/affinity_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ansible/tests/affinity_test.py b/tests/ansible/tests/affinity_test.py index d9618bda..9572717f 100644 --- a/tests/ansible/tests/affinity_test.py +++ b/tests/ansible/tests/affinity_test.py @@ -197,7 +197,7 @@ class LinuxPolicyTest(testlib.TestCase): tf = tempfile.NamedTemporaryFile() try: before = self._get_cpus() - self.policy._set_cpu(3) + self.policy._set_cpu(None, 3) my_cpu = self._get_cpus() proc = mitogen.parent.popen( From feb16543057222b6161daac3e381fa18238fb9fb Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 18:25:28 +0100 Subject: [PATCH 259/383] docs: many more internals.rst tidyups --- docs/internals.rst | 103 ++++++++++++++++++++++++++++++--------------- mitogen/core.py | 20 ++++++--- mitogen/master.py | 34 ++++++++++----- mitogen/parent.py | 35 ++++++++++++++- 4 files changed, 138 insertions(+), 54 deletions(-) diff --git a/docs/internals.rst 
b/docs/internals.rst index fc6206e0..c9021d31 100644 --- a/docs/internals.rst +++ b/docs/internals.rst @@ -2,6 +2,11 @@ Internal API Reference ********************** +.. note:: + + Internal APIs are subject to rapid change even across minor releases. This + page exists to help users modify and extend the library. + .. toctree:: :hidden: @@ -40,16 +45,22 @@ Latch :members: -PidfulStreamHandler -=================== +Logging +======= + +See also :class:`mitogen.core.IoLoggerProtocol`. + +.. currentmodule:: mitogen.master +.. autoclass:: LogForwarder + :members: .. currentmodule:: mitogen.core .. autoclass:: PidfulStreamHandler :members: -Stream & Side -============= +Stream, Side & Protocol +======================= .. currentmodule:: mitogen.core .. autoclass:: Stream @@ -59,10 +70,6 @@ Stream & Side .. autoclass:: Side :members: - -Protocol -======== - .. currentmodule:: mitogen.core .. autoclass:: Protocol :members: @@ -120,72 +127,100 @@ Connection / Options :members: -Importer -======== +Import Mechanism +================ .. currentmodule:: mitogen.core .. autoclass:: Importer :members: - -ModuleResponder -=============== - .. currentmodule:: mitogen.master .. autoclass:: ModuleResponder :members: +.. currentmodule:: mitogen.parent +.. autoclass:: ModuleForwarder + :members: + -RouteMonitor -============ +Module Finders +============== -.. currentmodule:: mitogen.parent -.. autoclass:: RouteMonitor +.. currentmodule:: mitogen.master +.. autoclass:: ModuleFinder :members: +.. currentmodule:: mitogen.master +.. autoclass:: FinderMethod + :members: -TimerList -========= +.. currentmodule:: mitogen.master +.. autoclass:: DefectivePython3xMainMethod + :members: -.. currentmodule:: mitogen.parent -.. autoclass:: TimerList +.. currentmodule:: mitogen.master +.. autoclass:: PkgutilMethod + :members: + +.. currentmodule:: mitogen.master +.. autoclass:: SysModulesMethod :members: +.. currentmodule:: mitogen.master +.. autoclass:: ParentEnumerationMethod + :members: -Timer -===== + +Routing Management +================== .. currentmodule:: mitogen.parent -.. autoclass:: Timer +.. autoclass:: RouteMonitor :members: -Forwarder -========= +Timer Management +================ .. currentmodule:: mitogen.parent -.. autoclass:: ModuleForwarder +.. autoclass:: TimerList + :members: + +.. currentmodule:: mitogen.parent +.. autoclass:: Timer :members: -ExternalContext -=============== +Child Implementation +==================== .. currentmodule:: mitogen.core .. autoclass:: ExternalContext :members: +.. currentmodule:: mitogen.core +.. autoclass:: Dispatcher + :members: -Process -======= + +Process Management +================== + +.. currentmodule:: mitogen.parent +.. autoclass:: Process + :members: .. currentmodule:: mitogen.parent +.. autoclass:: PopenProcess + :members: + +.. currentmodule:: mitogen.fork .. autoclass:: Process :members: -Helpers -======= +Helper Functions +================ Subprocess Functions diff --git a/mitogen/core.py b/mitogen/core.py index c9b1f9df..cf0b97fd 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -3440,30 +3440,36 @@ class ExternalContext(object): """ External context implementation. + This class contains the main program implementation for new children. It is + responsible for setting up everything about the process environment, import + hooks, standard IO redirection, logging, configuring a :class:`Router` and + :class:`Broker`, and finally arranging for :class:`Dispatcher` to take over + the main thread after initialization is complete. + .. 
attribute:: broker + The :class:`mitogen.core.Broker` instance. .. attribute:: context + The :class:`mitogen.core.Context` instance. .. attribute:: channel + The :class:`mitogen.core.Channel` over which :data:`CALL_FUNCTION` requests are received. - .. attribute:: stdout_log - The :class:`mitogen.core.IoLogger` connected to ``stdout``. - .. attribute:: importer + The :class:`mitogen.core.Importer` instance. .. attribute:: stdout_log - The :class:`IoLogger` connected to ``stdout``. + + The :class:`IoLogger` connected to :data:`sys.stdout`. .. attribute:: stderr_log - The :class:`IoLogger` connected to ``stderr``. - .. method:: _dispatch_calls - Implementation for the main thread in every child context. + The :class:`IoLogger` connected to :data:`sys.stderr`. """ detached = False diff --git a/mitogen/master.py b/mitogen/master.py index cb4452a1..69a6a7db 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -430,8 +430,8 @@ class FinderMethod(object): def find(self, fullname): """ - Accept a canonical module name and return `(path, source, is_pkg)` - tuples, where: + Accept a canonical module name as would be found in :data:`sys.modules` + and return a `(path, source, is_pkg)` tuple, where: * `path`: Unicode string containing path to source file. * `source`: Bytestring containing source file's content. @@ -447,10 +447,13 @@ class DefectivePython3xMainMethod(FinderMethod): """ Recent versions of Python 3.x introduced an incomplete notion of importer specs, and in doing so created permanent asymmetry in the - :mod:`pkgutil` interface handling for the `__main__` module. Therefore - we must handle `__main__` specially. + :mod:`pkgutil` interface handling for the :mod:`__main__` module. Therefore + we must handle :mod:`__main__` specially. """ def find(self, fullname): + """ + Find :mod:`__main__` using its :data:`__file__` attribute. + """ if fullname != '__main__': return None @@ -477,6 +480,9 @@ class PkgutilMethod(FinderMethod): be the only required implementation of get_module(). """ def find(self, fullname): + """ + Find `fullname` using :func:`pkgutil.find_loader`. + """ try: # Pre-'import spec' this returned None, in Python3.6 it raises # ImportError. @@ -522,10 +528,13 @@ class PkgutilMethod(FinderMethod): class SysModulesMethod(FinderMethod): """ - Attempt to fetch source code via sys.modules. This is specifically to - support __main__, but it may catch a few more cases. + Attempt to fetch source code via :data:`sys.modules`. This was originally + specifically to support :mod:`__main__`, but it may catch a few more cases. """ def find(self, fullname): + """ + Find `fullname` using its :data:`__file__` attribute. + """ module = sys.modules.get(fullname) LOG.debug('_get_module_via_sys_modules(%r) -> %r', fullname, module) if getattr(module, '__name__', None) != fullname: @@ -566,14 +575,17 @@ class ParentEnumerationMethod(FinderMethod): """ Attempt to fetch source code by examining the module's (hopefully less insane) parent package. Required for older versions of - ansible.compat.six and plumbum.colors, and Ansible 2.8 - ansible.module_utils.distro. + :mod:`ansible.compat.six`, :mod:`plumbum.colors`, and Ansible 2.8 + :mod:`ansible.module_utils.distro`. - For cases like module_utils.distro, this must handle cases where a package - transmuted itself into a totally unrelated module during import and vice - versa. + For cases like :mod:`ansible.module_utils.distro`, this must handle cases + where a package transmuted itself into a totally unrelated module during + import and vice versa. 
""" def find(self, fullname): + """ + See implementation for a description of how this works. + """ if fullname not in sys.modules: # Don't attempt this unless a module really exists in sys.modules, # else we could return junk. diff --git a/mitogen/parent.py b/mitogen/parent.py index 22e40610..bba29e7e 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -2049,8 +2049,8 @@ class RouteMonitor(object): def notice_stream(self, stream): """ When this parent is responsible for a new directly connected child - stream, we're also responsible for broadcasting DEL_ROUTE upstream - if/when that child disconnects. + stream, we're also responsible for broadcasting + :data:`mitogen.core.DEL_ROUTE` upstream when that child disconnects. """ self._routes_by_stream[stream] = set([stream.protocol.remote_id]) self._propagate_up(mitogen.core.ADD_ROUTE, stream.protocol.remote_id, @@ -2357,6 +2357,22 @@ class Router(mitogen.core.Router): class Process(object): + """ + Process objects contain asynchronous logic for reaping children, and + keeping track of their stdio descriptors. + + This base class is extended by :class:`PopenProcess` and + :class:`mitogen.fork.Process`. + + :param int pid: + The process ID. + :param file stdin: + File object attached to standard input. + :param file stdout: + File object attached to standard output. + :param file stderr: + File object attached to standard error, or :data:`None`. + """ _delays = [0.05, 0.15, 0.3, 1.0, 5.0, 10.0] name = None @@ -2376,6 +2392,14 @@ class Process(object): ) def poll(self): + """ + Fetch the child process exit status, or :data:`None` if it is still + running. This should be overridden by subclasses. + + :returns: + Exit status in the style of the :attr:`subprocess.Popen.returncode` + attribute, i.e. with signals represented by a negative integer. + """ raise NotImplementedError() def _signal_child(self, signum): @@ -2430,8 +2454,15 @@ class Process(object): class PopenProcess(Process): + """ + :class:`Process` subclass wrapping a :class:`subprocess.Popen` object. + + :param subprocess.Popen proc: + The subprocess. + """ def __init__(self, proc, stdin, stdout, stderr=None): super(PopenProcess, self).__init__(proc.pid, stdin, stdout, stderr) + #: The subprocess. self.proc = proc def poll(self): From 20532ea59129f186ab251f92a6f22d376ab6bb72 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 18:42:23 +0100 Subject: [PATCH 260/383] master: allow filtering forwarded logs using logging package functions. Given a message sent on "ssh.foo" to "mypkg.mymod", instead of logging it to "mitogen.ctx.ssh.foo" in the master process, with the message prefixed with the original logger name, instead log it to "mypkg.mymod.[ssh.foo]", permitting normal logging package filtering features to work as they usually do. This also helps tidy up logging output a little bit. 
--- mitogen/master.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/mitogen/master.py b/mitogen/master.py index 69a6a7db..91e7c9d2 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -387,26 +387,26 @@ class LogForwarder(object): if msg.is_dead: return - logger = self._cache.get(msg.src_id) - if logger is None: - context = self._router.context_by_id(msg.src_id) - if context is None: - LOG.error('%s: dropping log from unknown context ID %d', - self, msg.src_id) - return - - name = '%s.%s' % (RLOG.name, context.name) - self._cache[msg.src_id] = logger = logging.getLogger(name) + context = self._router.context_by_id(msg.src_id) + if context is None: + LOG.error('%s: dropping log from unknown context %d', + self, msg.src_id) + return name, level_s, s = msg.data.decode('utf-8', 'replace').split('\x00', 2) + logger_name = '%s.[%s]' % (name, context.name) + logger = self._cache.get(logger_name) + if logger is None: + self._cache[logger_name] = logger = logging.getLogger(logger_name) + # See logging.Handler.makeRecord() record = logging.LogRecord( name=logger.name, level=int(level_s), pathname='(unknown file)', lineno=0, - msg=('%s: %s' % (name, s)), + msg=s, args=(), exc_info=None, ) From a79d2bd50b9898d8bfe319bfc6948fcfc8cee353 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 20:32:50 +0100 Subject: [PATCH 261/383] docs: another round of docstring cleanups. --- docs/api.rst | 12 ++++++---- docs/howitworks.rst | 14 +++++++----- docs/internals.rst | 14 +++++++++++- mitogen/core.py | 40 +++++++++++++++++++++++++++++----- mitogen/master.py | 34 ++++++++++++++++++++++++----- mitogen/parent.py | 53 ++++++++++++++++++++++++++++++++++++++++----- 6 files changed, 140 insertions(+), 27 deletions(-) diff --git a/docs/api.rst b/docs/api.rst index 2557806e..2f1f9784 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -96,8 +96,12 @@ Router Class :members: -.. currentmodule:: mitogen.master +.. currentmodule:: mitogen.parent +.. autoclass:: Router + :members: + +.. currentmodule:: mitogen.master .. autoclass:: Router (broker=None) :members: @@ -553,11 +557,11 @@ Context Class .. currentmodule:: mitogen.parent - -.. autoclass:: CallChain +.. autoclass:: Context :members: -.. autoclass:: Context +.. currentmodule:: mitogen.parent +.. autoclass:: CallChain :members: diff --git a/docs/howitworks.rst b/docs/howitworks.rst index b4a53810..e5fdee2f 100644 --- a/docs/howitworks.rst +++ b/docs/howitworks.rst @@ -346,11 +346,15 @@ Masters listen on the following handles: .. currentmodule:: mitogen.core .. data:: ALLOCATE_ID - Replies to any message sent to it with a newly allocated range of context - IDs, to allow children to safely start their own contexts. Presently IDs - are allocated in batches of 1000 from a 32 bit range, allowing up to 4.2 - million parent contexts to be created and destroyed before the associated - Router must be recreated. + Replies to any message sent to it with a newly allocated range of context + IDs, to allow children to safely start their own contexts. Presently IDs are + allocated in batches of 1000 from a 32 bit range, allowing up to 4.2 million + parent contexts to be created and destroyed before the associated Router + must be recreated. + + This is handled by :class:`mitogen.master.IdAllocator` in the master + process, and messages are sent to it from + :class:`mitogen.parent.ChildIdAllocator` in children. 
Children listen on the following handles: diff --git a/docs/internals.rst b/docs/internals.rst index c9021d31..e4f7bd91 100644 --- a/docs/internals.rst +++ b/docs/internals.rst @@ -99,7 +99,7 @@ Stream, Side & Protocol :members: -Connection / Options +Connection & Options ==================== .. currentmodule:: mitogen.fork @@ -191,6 +191,18 @@ Timer Management :members: +Context ID Allocation +===================== + +.. currentmodule:: mitogen.master +.. autoclass:: IdAllocator + :members: + +.. currentmodule:: mitogen.parent +.. autoclass:: ChildIdAllocator + :members: + + Child Implementation ==================== diff --git a/mitogen/core.py b/mitogen/core.py index cf0b97fd..3bc4c608 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -2733,7 +2733,12 @@ class Router(object): **Note:** This is the somewhat limited core version of the Router class used by child contexts. The master subclass is documented below this one. """ + #: The :class:`mitogen.core.Context` subclass to use when constructing new + #: :class:`Context` objects in :meth:`myself` and :meth:`context_by_id`. + #: Permits :class:`Router` subclasses to extend the :class:`Context` + #: interface, as done in :class:`mitogen.parent.Router`. context_class = Context + max_message_size = 128 * 1048576 #: When :data:`True`, permit children to only communicate with the current @@ -2829,7 +2834,9 @@ class Router(object): def myself(self): """ - Return a :class:`Context` referring to the current process. + Return a :class:`Context` referring to the current process. Since + :class:`Context` is serializable, this is convenient to use in remote + function call parameter lists. """ return self.context_class( router=self, @@ -2839,8 +2846,25 @@ class Router(object): def context_by_id(self, context_id, via_id=None, create=True, name=None): """ - Messy factory/lookup function to find a context by its ID, or construct - it. This will eventually be replaced by a more sensible interface. + Return or construct a :class:`Context` given its ID. An internal + mapping of ID to the canonical :class:`Context` representing that ID, + so that :ref:`signals` can be raised. + + This may be called from any thread, lookup and construction are atomic. + + :param int context_id: + The context ID to look up. + :param int via_id: + If the :class:`Context` does not already exist, set its + :attr:`Context.via` to the :class:`Context` matching this ID. + :param bool create: + If the :class:`Context` does not already exist, create it. + :param str name: + If the :class:`Context` does not already exist, set its name. + + :returns: + :class:`Context`, or return :data:`None` if `create` is + :data:`False` and no :class:`Context` previously existed. """ context = self._context_by_id.get(context_id) if context: @@ -2885,7 +2909,13 @@ class Router(object): """ Return the :class:`Stream` that should be used to communicate with `dst_id`. If a specific route for `dst_id` is not known, a reference to - the parent context's stream is returned. + the parent context's stream is returned. If the parent is disconnected, + or when running in the master context, return :data:`None` instead. + + This can be used from any thread, but its output is only meaningful + from the context of the :class:`Broker` thread, as disconnection or + replacement could happen in parallel on the broker thread at any + moment. 
""" return ( self._stream_by_id.get(dst_id) or @@ -2996,7 +3026,7 @@ class Router(object): def on_shutdown(self, broker): """ Called during :meth:`Broker.shutdown`, informs callbacks registered - with :meth:`add_handle_cb` the connection is dead. + with :meth:`add_handler` the connection is dead. """ _v and LOG.debug('%r: shutting down', self, broker) fire(self, 'shutdown') diff --git a/mitogen/master.py b/mitogen/master.py index 91e7c9d2..48f82ab1 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -1228,6 +1228,21 @@ class Router(mitogen.parent.Router): class IdAllocator(object): + """ + Allocate IDs for new contexts constructed locally, and blocks of IDs for + children to allocate their own IDs using + :class:`mitogen.parent.ChildIdAllocator` without risk of conflict, and + without necessitating network round-trips for each new context. + + This class responds to :data:`mitogen.core.ALLOCATE_ID` messages received + from children by replying with fresh block ID allocations. + + The master's :class:`IdAllocator` instance can be accessed via + :attr:`mitogen.master.Router.id_allocator`. + """ + #: Block allocations are made in groups of 1000 by default. + BLOCK_SIZE = 1000 + def __init__(self, router): self.router = router self.next_id = 1 @@ -1240,14 +1255,12 @@ class IdAllocator(object): def __repr__(self): return 'IdAllocator(%r)' % (self.router,) - BLOCK_SIZE = 1000 - def allocate(self): """ - Arrange for a unique context ID to be allocated and associated with a - route leading to the active context. In masters, the ID is generated - directly, in children it is forwarded to the master via a - :data:`mitogen.core.ALLOCATE_ID` message. + Allocate a context ID by directly incrementing an internal counter. + + :returns: + The new context ID. """ self.lock.acquire() try: @@ -1258,6 +1271,15 @@ class IdAllocator(object): self.lock.release() def allocate_block(self): + """ + Allocate a block of IDs for use in a child context. + + This function is safe to call from any thread. + + :returns: + Tuple of the form `(id, end_id)` where `id` is the first usable ID + and `end_id` is the last usable ID. + """ self.lock.acquire() try: id_ = self.next_id diff --git a/mitogen/parent.py b/mitogen/parent.py index bba29e7e..f4ed3f5b 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -1252,6 +1252,10 @@ class Options(object): class Connection(object): """ + Manage the lifetime of a set of :class:`Streams ` connecting to a + remote Python interpreter, including bootstrap, disconnection, and external + tool integration. + Base for streams capable of starting children. """ options_class = Options @@ -1597,12 +1601,28 @@ class Connection(object): class ChildIdAllocator(object): + """ + Allocate new context IDs from a block of unique context IDs allocated by + the master process. + """ def __init__(self, router): self.router = router self.lock = threading.Lock() self.it = iter(xrange(0)) def allocate(self): + """ + Allocate an ID, requesting a fresh block from the master if the + existing block is exhausted. + + :returns: + The new context ID. + + .. warning:: + + This method is not safe to call from the :class:`Broker` thread, as + it may block on IO of its own. + """ self.lock.acquire() try: for id_ in self.it: @@ -2193,7 +2213,8 @@ class Router(mitogen.core.Router): def get_streams(self): """ - Return a snapshot of all streams in existence at time of call. + Return an atomic snapshot of all streams in existence at time of call. + This is safe to call from any thread. 
""" self._write_lock.acquire() try: @@ -2220,11 +2241,18 @@ class Router(mitogen.core.Router): def add_route(self, target_id, stream): """ - Arrange for messages whose `dst_id` is `target_id` to be forwarded on - the directly connected stream for `via_id`. This method is called - automatically in response to :data:`mitogen.core.ADD_ROUTE` messages, - but remains public while the design has not yet settled, and situations - may arise where routing is not fully automatic. + Arrange for messages whose `dst_id` is `target_id` to be forwarded on a + directly connected :class:`Stream`. Safe to call from any thread. + + This is called automatically by :class:`RouteMonitor` in response to + :data:`mitogen.core.ADD_ROUTE` messages, but remains public while the + design has not yet settled, and situations may arise where routing is + not fully automatic. + + :param int target_id: + Target context ID to add a route for. + :param mitogen.core.Stream stream: + Stream over which messages to the target should be routed. """ LOG.debug('%r: adding route to context %r via %r', self, target_id, stream) @@ -2238,6 +2266,19 @@ class Router(mitogen.core.Router): self._write_lock.release() def del_route(self, target_id): + """ + Delete any route that exists for `target_id`. It is not an error to + delete a route that does not currently exist. Safe to call from any + thread. + + This is called automatically by :class:`RouteMonitor` in response to + :data:`mitogen.core.DEL_ROUTE` messages, but remains public while the + design has not yet settled, and situations may arise where routing is + not fully automatic. + + :param int target_id: + Target context ID to delete route for. + """ LOG.debug('%r: deleting route to %r', self, target_id) # DEL_ROUTE may be sent by a parent if it knows this context sent # messages to a peer that has now disconnected, to let us raise From 6f1298061196b1709f7f0db8d2b06c7bae6dcd12 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 20:39:51 +0100 Subject: [PATCH 262/383] [linear2] merge fallout: re-enable _send_module_forwards(). --- ansible_mitogen/services.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/ansible_mitogen/services.py b/ansible_mitogen/services.py index fa55f2ec..e6c41e5b 100644 --- a/ansible_mitogen/services.py +++ b/ansible_mitogen/services.py @@ -326,7 +326,6 @@ class ContextService(mitogen.service.Service): ) def _send_module_forwards(self, context): - return self.router.responder.forward_modules(context, self.ALWAYS_PRELOAD) _candidate_temp_dirs = None @@ -383,7 +382,6 @@ class ContextService(mitogen.service.Service): mitogen.core.listen(context, 'disconnect', lambda: self._on_context_disconnect(context)) - #self._send_module_forwards(context) TODO self._send_module_forwards(context) init_child_result = context.call( ansible_mitogen.target.init_child, From b7cdd39044bd292a547b2b3bf3eff646edae4f04 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 1 Aug 2019 13:05:07 +0100 Subject: [PATCH 263/383] docs: tweak Changelog wording --- docs/changelog.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index 267bc0db..75b6a43d 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -46,8 +46,10 @@ Enhancements setup happens on one thread, reducing GIL contention and context switching early in a run. -* `#419 `_: 2 network round-trips - were removed from early connection setup. +* `#419 `_: Connection setup is + pipelined, eliminating several network round-trips. 
Most infrastructure is in + place to support future removal of the final round-trip between a target + fully booting and receiving its first function call. * `d6faff06 `_, `807cbef9 `_, From be330194d5e73fd5c4b4e2bd165c754253405404 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Fri, 2 Aug 2019 16:49:37 +0100 Subject: [PATCH 264/383] docs: udpate Changelog. --- docs/changelog.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index 75b6a43d..ce88ded0 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -24,7 +24,8 @@ To avail of fixes in an unreleased version, please download a ZIP file Enhancements ^^^^^^^^^^^^ -* `#587 `_: Ansible 2.8 is partially +* `#556 `_, + `#587 `_: Ansible 2.8 is partially supported. `Become plugins `_ and `interpreter discovery @@ -165,6 +166,7 @@ bug reports, testing, features and fixes in this release contributed by `Andreas Hubert `_. `Anton Markelov `_, `Dave Cottlehuber `_, +`James Hogarth `_, `Nigel Metheringham `_, `Orion Poplawski `_, `Pieter Voet `_, From 351aca62c9108f7114ba56ec5a4f8b02524f4613 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 3 Aug 2019 19:10:31 +0100 Subject: [PATCH 265/383] Disable Azure pipelines build for docs-master too. --- .ci/azure-pipelines.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.ci/azure-pipelines.yml b/.ci/azure-pipelines.yml index 920e82a1..6d0832da 100644 --- a/.ci/azure-pipelines.yml +++ b/.ci/azure-pipelines.yml @@ -3,6 +3,11 @@ # Add steps that analyze code, save the dist with the build record, publish to a PyPI-compatible index, and more: # https://docs.microsoft.com/azure/devops/pipelines/languages/python +trigger: + branches: + exclude: + - docs-master + jobs: - job: Mac From 0d636af532211409d0bc358d283d20ddb93d9749 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 3 Aug 2019 19:11:00 +0100 Subject: [PATCH 266/383] docs: add changelog thanks --- docs/changelog.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index ce88ded0..4cc89041 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -174,6 +174,7 @@ bug reports, testing, features and fixes in this release contributed by `Szabó Dániel Ernő `_, `Ulrich Schreiner `_, `Yuki Nishida `_, +`@DavidVentura `_, `@ghp-rr `_, `@rizzly `_, and `@tho86 `_. From 87443da82812836927f7cc09b3c39358c38d0b4c Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 3 Aug 2019 21:09:57 +0100 Subject: [PATCH 267/383] docs: remove fakessh from home page, it's been broken forever --- docs/index.rst | 34 ---------------------------------- 1 file changed, 34 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index c11a1d27..17d183aa 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -155,40 +155,6 @@ Common sources of import latency and bandwidth consumption are mitigated: representing 1.7MiB of uncompressed source split across 148 modules. -SSH Client Emulation -#################### - -.. image:: images/fakessh.svg - :class: mitogen-right-300 - -Support is included for starting subprocesses with a modified environment, that -cause their attempt to use SSH to be redirected back into the host program. In -this way tools like `rsync`, `git`, `sftp`, and `scp` can efficiently reuse the -host program's existing connection to the remote machine, including any -firewall/user account hopping in use, with no additional configuration. 
- -Scenarios that were not previously possible with these tools are enabled, such -as running `sftp` and `rsync` over a `sudo` session, to an account the user -cannot otherwise directly log into, including in restrictive environments that -for example enforce an interactive TTY and account password. - -.. raw:: html - -

    - -.. code-block:: python - - bastion = router.ssh(hostname='bastion.mycorp.com') - webserver = router.ssh(via=bastion, hostname='webserver') - webapp = router.sudo(via=webserver, username='webapp') - fileserver = router.ssh(via=bastion, hostname='fileserver') - - # Transparently tunnelled over fileserver -> .. -> sudo.webapp link - fileserver.call(mitogen.fakessh.run, webapp, [ - 'rsync', 'appdata', 'appserver:appdata' - ]) - - Message Routing ############### From ad590f332107de023931045e3f946ea7bfe2d733 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 22:03:20 +0100 Subject: [PATCH 268/383] parent: docstring formatting --- mitogen/parent.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mitogen/parent.py b/mitogen/parent.py index f4ed3f5b..bfd6fef5 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -2512,8 +2512,9 @@ class PopenProcess(Process): class ModuleForwarder(object): """ - Respond to GET_MODULE requests in a slave by forwarding the request to our - parent context, or satisfying the request from our local Importer cache. + Respond to :data:`mitogen.core.GET_MODULE` requests in a child by + forwarding the request to our parent context, or satisfying the request + from our local Importer cache. """ def __init__(self, router, parent_context, importer): self.router = router From ce04fd39c94d6a8e4939cc3657672c075ffb961b Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 23:40:13 +0100 Subject: [PATCH 269/383] core: cache stream reference in DelimitedProtocol Stream.set_protocol() was updated to break the reference on the previous protocol, to encourage a crash should an old protocol continue operating after it's not supposed to be active any more. That broke DelimitedProtocol's protocol switching functionality. --- mitogen/core.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index 3bc4c608..71fbc76a 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -1696,6 +1696,7 @@ class DelimitedProtocol(Protocol): def on_receive(self, broker, buf): _vv and IOLOG.debug('%r.on_receive()', self) + stream = self.stream self._trailer, cont = mitogen.core.iter_split( buf=self._trailer + buf, delim=self.delimiter, @@ -1706,8 +1707,8 @@ class DelimitedProtocol(Protocol): if cont: self.on_partial_line_received(self._trailer) else: - assert self.stream.protocol is not self - self.stream.protocol.on_receive(broker, self._trailer) + assert stream.protocol is not self + stream.protocol.on_receive(broker, self._trailer) def on_line_received(self, line): """ From e8b1bf590942123edd8a85119a3ed6a7548f0d45 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 23:43:05 +0100 Subject: [PATCH 270/383] issue #410: automatically work around SELinux braindamage. --- docs/changelog.rst | 6 +++ mitogen/parent.py | 126 ++++++++++++++++++++++++++++++++++----------- mitogen/sudo.py | 3 ++ 3 files changed, 105 insertions(+), 30 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index 4cc89041..4004d8ae 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -67,6 +67,11 @@ Mitogen for Ansible matching *Permission denied* errors from some versions of ``su`` running on heavily loaded machines. +* `#410 `_: Use of ``AF_UNIX`` + sockets automatically replaced with plain UNIX pipes when SELinux is + detected, to work around a broken heuristic in popular SELinux policies that + prevents inheriting ``AF_UNIX`` sockets across privilege domains. 
+ * `#549 `_: the open file descriptor limit for the Ansible process is increased to the available hard limit. It is common for distributions to ship with a much higher hard limit than their @@ -166,6 +171,7 @@ bug reports, testing, features and fixes in this release contributed by `Andreas Hubert `_. `Anton Markelov `_, `Dave Cottlehuber `_, +`El Mehdi CHAOUKI `_, `James Hogarth `_, `Nigel Metheringham `_, `Orion Poplawski `_, diff --git a/mitogen/parent.py b/mitogen/parent.py index bfd6fef5..c4642a58 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -71,6 +71,16 @@ from mitogen.core import IOLOG LOG = logging.getLogger(__name__) +# #410: we must avoid the use of socketpairs if SELinux is enabled. +try: + fp = open('/sys/fs/selinux/enforce', 'rb') + try: + SELINUX_ENABLED = bool(int(fp.read())) + finally: + fp.close() +except IOError: + SELINUX_ENABLED = False + try: next @@ -278,6 +288,38 @@ def create_socketpair(size=None): return parentfp, childfp +def create_best_pipe(escalates_privilege=False): + """ + By default we prefer to communicate with children over a UNIX socket, as a + single file descriptor can represent bidirectional communication, and a + cross-platform API exists to align buffer sizes with the needs of the + library. + + SELinux prevents us setting up a privileged process to inherit an AF_UNIX + socket, a facility explicitly designed as a better replacement for pipes, + because at some point in the mid 90s it might have been commonly possible + for AF_INET sockets to end up undesirably connected to a privileged + process, so let's make up arbitrary rules breaking all sockets instead. + + If SELinux is detected, fall back to using pipes. + + :returns: + `(parent_rfp, child_wfp, child_rfp, parent_wfp)` + """ + if (not escalates_privilege) or (not SELINUX_ENABLED): + parentfp, childfp = create_socketpair() + return parentfp, childfp, childfp, parentfp + + parent_rfp, child_wfp = mitogen.core.pipe() + try: + child_rfp, parent_wfp = mitogen.core.pipe() + return parent_rfp, child_wfp, child_rfp, parent_wfp + except: + parent_rfp.close() + child_wfp.close() + raise + + def popen(**kwargs): """ Wrap :class:`subprocess.Popen` to ensure any global :data:`_preexec_hook` @@ -292,7 +334,8 @@ def popen(**kwargs): return subprocess.Popen(preexec_fn=preexec_fn, **kwargs) -def create_child(args, merge_stdio=False, stderr_pipe=False, preexec_fn=None): +def create_child(args, merge_stdio=False, stderr_pipe=False, + escalates_privilege=False, preexec_fn=None): """ Create a child process whose stdin/stdout is connected to a socket. @@ -306,22 +349,27 @@ def create_child(args, merge_stdio=False, stderr_pipe=False, preexec_fn=None): :param bool stderr_pipe: If :data:`True` and `merge_stdio` is :data:`False`, arrange for `stderr` to be connected to a separate pipe, to allow any ongoing debug - logs generated by e.g. SSH to be outpu as the session progresses, + logs generated by e.g. SSH to be output as the session progresses, without interfering with `stdout`. + :param bool escalates_privilege: + If :data:`True`, the target program may escalate privileges, causing + SELinux to disconnect AF_UNIX sockets, so avoid those. + :param function preexec_fn: + If not :data:`None`, a function to run within the post-fork child + before executing the target program. :returns: :class:`Process` instance. 
""" + parent_rfp, child_wfp, child_rfp, parent_wfp = create_best_pipe( + escalates_privilege=escalates_privilege + ) + parentfp, childfp = create_socketpair() - # When running under a monkey patches-enabled gevent, the socket module - # yields descriptors who already have O_NONBLOCK, which is persisted across - # fork, totally breaking Python. Therefore, drop O_NONBLOCK from Python's - # future stdin fd. - mitogen.core.set_block(childfp.fileno()) stderr = None stderr_r = None if merge_stdio: - stderr = childfp + stderr = child_wfp elif stderr_pipe: stderr_r, stderr = mitogen.core.pipe() mitogen.core.set_cloexec(stderr_r.fileno()) @@ -329,27 +377,33 @@ def create_child(args, merge_stdio=False, stderr_pipe=False, preexec_fn=None): try: proc = popen( args=args, - stdin=childfp, - stdout=childfp, + stdin=child_rfp, + stdout=child_wfp, stderr=stderr, close_fds=True, preexec_fn=preexec_fn, ) except: - childfp.close() - parentfp.close() + child_rfp.close() + child_wfp.close() + parent_rfp.close() + parent_wfp.close() if stderr_pipe: stderr.close() stderr_r.close() raise - childfp.close() + child_rfp.close() + child_wfp.close() if stderr_pipe: stderr.close() - LOG.debug('create_child() child %d fd %d, parent %d, cmd: %s', - proc.pid, parentfp.fileno(), os.getpid(), Argv(args)) - return PopenProcess(proc, stdin=parentfp, stdout=parentfp, stderr=stderr_r) + return PopenProcess( + proc=proc, + stdin=parent_wfp, + stdout=parent_rfp, + stderr=stderr_r, + ) def _acquire_controlling_tty(): @@ -461,12 +515,14 @@ def tty_create_child(args): raise slave_fp.close() - LOG.debug('tty_create_child() child %d fd %d, parent %d, cmd: %s', - proc.pid, master_fp.fileno(), os.getpid(), Argv(args)) - return PopenProcess(proc, stdin=master_fp, stdout=master_fp) + return PopenProcess( + proc=proc, + stdin=master_fp, + stdout=master_fp, + ) -def hybrid_tty_create_child(args): +def hybrid_tty_create_child(args, escalates_privilege=False): """ Like :func:`tty_create_child`, except attach stdin/stdout to a socketpair like :func:`create_child`, but leave stderr and the controlling TTY @@ -479,20 +535,25 @@ def hybrid_tty_create_child(args): """ master_fp, slave_fp = openpty() try: - parentfp, childfp = create_socketpair() + parent_rfp, child_wfp, child_rfp, parent_wfp = create_best_pipe( + escalates_privilege=escalates_privilege, + ) try: - mitogen.core.set_block(childfp) + mitogen.core.set_block(child_rfp) + mitogen.core.set_block(child_wfp) proc = popen( args=args, - stdin=childfp, - stdout=childfp, + stdin=child_rfp, + stdout=child_wfp, stderr=slave_fp, preexec_fn=_acquire_controlling_tty, close_fds=True, ) except: - parentfp.close() - childfp.close() + parent_rfp.close() + child_wfp.close() + parent_wfp.close() + child_rfp.close() raise except: master_fp.close() @@ -500,10 +561,14 @@ def hybrid_tty_create_child(args): raise slave_fp.close() - childfp.close() - LOG.debug('hybrid_tty_create_child() pid=%d stdio=%d, tty=%d, cmd: %s', - proc.pid, parentfp.fileno(), master_fp.fileno(), Argv(args)) - return PopenProcess(proc, stdin=parentfp, stdout=parentfp, stderr=master_fp) + child_rfp.close() + child_wfp.close() + return PopenProcess( + proc=proc, + stdin=parent_wfp, + stdout=parent_rfp, + stderr=master_fp, + ) class Timer(object): @@ -1425,6 +1490,7 @@ class Connection(object): def start_child(self): args = self.get_boot_command() + LOG.debug('command line for %r: %s', self, Argv(args)) try: return self.create_child(args=args, **self.create_child_args) except OSError: diff --git a/mitogen/sudo.py b/mitogen/sudo.py index 
bcb2e7be..ea07d0c1 100644 --- a/mitogen/sudo.py +++ b/mitogen/sudo.py @@ -244,6 +244,9 @@ class Connection(mitogen.parent.Connection): diag_protocol_class = SetupProtocol options_class = Options create_child = staticmethod(mitogen.parent.hybrid_tty_create_child) + create_child_args = { + 'escalates_privilege': True, + } child_is_immediate_subprocess = False def _get_name(self): From 65bec2244d6883db44d87712bacf986822ebbdd5 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 4 Aug 2019 23:57:50 +0000 Subject: [PATCH 271/383] core: fix Python2.4 crash due to missing Logger.getChild(). --- mitogen/core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mitogen/core.py b/mitogen/core.py index 71fbc76a..96c30fb0 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -1228,7 +1228,7 @@ class Importer(object): ALWAYS_BLACKLIST += ['cStringIO'] def __init__(self, router, context, core_src, whitelist=(), blacklist=()): - self._log = LOG.getChild('importer') + self._log = logging.getLogger('mitogen.importer') self._context = context self._present = {'mitogen': self.MITOGEN_PKG_CONTENT} self._lock = threading.Lock() From dd1fed0a5f1ec95e9fedf81570882d3b456b2487 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 5 Aug 2019 00:18:53 +0100 Subject: [PATCH 272/383] docs: update changelog --- docs/changelog.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index 4004d8ae..1bdb1f6d 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -72,6 +72,9 @@ Mitogen for Ansible detected, to work around a broken heuristic in popular SELinux policies that prevents inheriting ``AF_UNIX`` sockets across privilege domains. +* `#467 `_: an incompatibility + running Mitogen under Molecule was resolved. + * `#549 `_: the open file descriptor limit for the Ansible process is increased to the available hard limit. It is common for distributions to ship with a much higher hard limit than their @@ -170,6 +173,7 @@ Mitogen would not be possible without the support of users. A huge thanks for bug reports, testing, features and fixes in this release contributed by `Andreas Hubert `_. `Anton Markelov `_, +`Dan `_, `Dave Cottlehuber `_, `El Mehdi CHAOUKI `_, `James Hogarth `_, From 9839e6781c1a21bcb0fdefcafc8229e9f2474522 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 6 Aug 2019 04:36:17 +0100 Subject: [PATCH 273/383] core: more descriptive graceful shutdown timeout error Accounts for timers too Tidy up a wordy comment further down the file --- mitogen/core.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index 96c30fb0..9a863078 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -3333,10 +3333,10 @@ class Broker(object): self._loop_once(max(0, deadline - time.time())) if self.keep_alive(): - LOG.error('%r: some streams did not close gracefully. ' - 'The most likely cause for this is one or ' - 'more child processes still connected to ' - 'our stdout/stderr pipes.', self) + LOG.error('%r: pending work still existed %d seconds after ' + 'shutdown began. 
This may be due to a timer that is yet ' + 'to expire, or a child connection that did not fully ' + 'shut down.', self, self.shutdown_timeout) def _do_broker_main(self): """ @@ -3511,11 +3511,11 @@ class ExternalContext(object): if not self.config['profiling']: os.kill(os.getpid(), signal.SIGTERM) - #: On Python >3.4, the global importer lock has been sharded into a - #: per-module lock, meaning there is no guarantee the import statement in - #: service_stub_main will be truly complete before a second thread - #: attempting the same import will see a partially initialized module. - #: Sigh. Therefore serialize execution of the stub itself. + #: On Python >3.4, the global importer lock has split into per-module + #: locks, so there is no guarantee the import statement in + #: service_stub_main will complete before a second thread attempting the + #: same import will see a partially initialized module. Therefore serialize + #: the stub explicitly. service_stub_lock = threading.Lock() def _service_stub_main(self, msg): From 9c0cb44ee9029384826b7fbb4177f28d7e028f2d Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 6 Aug 2019 04:34:25 +0100 Subject: [PATCH 274/383] issue #170: replace Timer.cancelled with Timer.active It's more flexible: False can represent 'cancelled' or 'expired', whereas setting cancelled=True for an expired timer didn't feel right. --- mitogen/parent.py | 11 +++++++---- tests/timer_test.py | 12 ++++++++++++ 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/mitogen/parent.py b/mitogen/parent.py index c4642a58..d026b52b 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -575,7 +575,9 @@ class Timer(object): """ Represents a future event. """ - cancelled = False + #: Set to :data:`False` if :meth:`cancel` has been called, or immediately + #: prior to being executed by :meth:`TimerList.expire`. + active = True def __init__(self, when, func): self.when = when @@ -598,7 +600,7 @@ class Timer(object): Cancel this event. If it has not yet executed, it will not execute during any subsequent :meth:`TimerList.expire` call. """ - self.cancelled = True + self.active = False class TimerList(object): @@ -634,7 +636,7 @@ class TimerList(object): Floating point delay, or 0.0, or :data:`None` if no events are scheduled.
""" - while self._lst and self._lst[0].cancelled: + while self._lst and not self._lst[0].active: heapq.heappop(self._lst) if self._lst: return max(0, self._lst[0].when - self._now()) @@ -662,7 +664,8 @@ class TimerList(object): now = self._now() while self._lst and self._lst[0].when <= now: timer = heapq.heappop(self._lst) - if not timer.cancelled: + if timer.active: + timer.active = False timer.func() diff --git a/tests/timer_test.py b/tests/timer_test.py index 14a9c080..ff3e022f 100644 --- a/tests/timer_test.py +++ b/tests/timer_test.py @@ -98,23 +98,31 @@ class ScheduleTest(TimerListMixin, testlib.TestCase): class ExpireTest(TimerListMixin, testlib.TestCase): def test_in_past(self): timer = self.list.schedule(29, mock.Mock()) + self.assertTrue(timer.active) self.list._now = lambda: 30 self.list.expire() self.assertEquals(1, len(timer.func.mock_calls)) + self.assertFalse(timer.active) def test_in_future(self): timer = self.list.schedule(29, mock.Mock()) + self.assertTrue(timer.active) self.list._now = lambda: 28 self.list.expire() self.assertEquals(0, len(timer.func.mock_calls)) + self.assertTrue(timer.active) def test_same_moment(self): timer = self.list.schedule(29, mock.Mock()) timer2 = self.list.schedule(29, mock.Mock()) + self.assertTrue(timer.active) + self.assertTrue(timer2.active) self.list._now = lambda: 29 self.list.expire() self.assertEquals(1, len(timer.func.mock_calls)) self.assertEquals(1, len(timer2.func.mock_calls)) + self.assertFalse(timer.active) + self.assertFalse(timer2.active) def test_cancelled(self): self.list._now = lambda: 29 @@ -131,7 +139,9 @@ class CancelTest(TimerListMixin, testlib.TestCase): def test_single_cancel(self): self.list._now = lambda: 29 timer = self.list.schedule(29, mock.Mock()) + self.assertTrue(timer.active) timer.cancel() + self.assertFalse(timer.active) self.list.expire() self.assertEquals(0, len(timer.func.mock_calls)) @@ -139,7 +149,9 @@ class CancelTest(TimerListMixin, testlib.TestCase): self.list._now = lambda: 29 timer = self.list.schedule(29, mock.Mock()) timer.cancel() + self.assertFalse(timer.active) timer.cancel() + self.assertFalse(timer.active) self.list.expire() self.assertEquals(0, len(timer.func.mock_calls)) From 709a0c013f682ec2d6ade93e67f362c23c144ed1 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 7 Aug 2019 19:49:47 +0100 Subject: [PATCH 275/383] issue #410: fix test failure due to obsolete parentfp/childfp --- mitogen/parent.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/mitogen/parent.py b/mitogen/parent.py index d026b52b..59ba15d9 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -364,8 +364,6 @@ def create_child(args, merge_stdio=False, stderr_pipe=False, escalates_privilege=escalates_privilege ) - parentfp, childfp = create_socketpair() - stderr = None stderr_r = None if merge_stdio: From f4cee1652671fb96f8763f1f7ff53b6d1137183c Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 7 Aug 2019 19:52:24 +0100 Subject: [PATCH 276/383] parent: zombie reaping v3 Improvements: - Refactored off Process, separately testable without a connection - Don't delay Broker shutdown indefinitely for detached children --- docs/internals.rst | 4 + mitogen/parent.py | 251 ++++++++++++++++++++++++++------------- tests/connection_test.py | 35 +++++- tests/testlib.py | 2 +- 4 files changed, 206 insertions(+), 86 deletions(-) diff --git a/docs/internals.rst b/docs/internals.rst index e4f7bd91..71c6273d 100644 --- a/docs/internals.rst +++ b/docs/internals.rst @@ -218,6 +218,10 @@ Child Implementation Process Management 
================== +.. currentmodule:: mitogen.parent +.. autoclass:: Reaper + :members: + .. currentmodule:: mitogen.parent .. autoclass:: Process :members: diff --git a/mitogen/parent.py b/mitogen/parent.py index 59ba15d9..5a525c15 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -1364,7 +1364,22 @@ class Connection(object): #: Prefix given to default names generated by :meth:`connect`. name_prefix = u'local' - timer = None + #: :class:`Timer` that runs :meth:`_on_timer_expired` when connection + #: timeout occurs. + _timer = None + + #: When disconnection completes, instance of :class:`Reaper` used to wait + #: on the exit status of the subprocess. + _reaper = None + + #: On failure, the exception object that should be propagated back to the + #: user. + exception = None + + #: Extra text appended to :class:`EofError` if that exception is raised on + #: a failed connection attempt. May be used in subclasses to hint at common + #: problems with a particular connection method. + eof_error_hint = None def __init__(self, options, router): #: :class:`Options` @@ -1499,8 +1514,6 @@ class Connection(object): msg = 'Child start failed: %s. Command was: %s' % (e, Argv(args)) raise mitogen.core.StreamError(msg) - eof_error_hint = None - def _adorn_eof_error(self, e): """ Subclasses may provide additional information in the case of a failed @@ -1509,10 +1522,8 @@ class Connection(object): if self.eof_error_hint: e.args = ('%s\n\n%s' % (e.args[0], self.eof_error_hint),) - exception = None - def _complete_connection(self): - self.timer.cancel() + self._timer.cancel() if not self.exception: mitogen.core.unlisten(self._router.broker, 'shutdown', self._on_broker_shutdown) @@ -1569,19 +1580,36 @@ class Connection(object): def _on_streams_disconnected(self): """ - When disconnection has been detected for both our streams, cancel the + When disconnection has been detected for both streams, cancel the connection timer, mark the connection failed, and reap the child process. Do nothing if the timer has already been cancelled, indicating some existing failure has already been noticed. """ - if not self.timer.cancelled: - self.timer.cancel() + if self._timer.active: + self._timer.cancel() self._fail_connection(EofError( self.eof_error_msg + get_history( [self.stdio_stream, self.stderr_stream] ) )) - self.proc._async_reap(self, self._router) + + if self._reaper: + return + + self._reaper = Reaper( + broker=self._router.broker, + proc=self.proc, + kill=not ( + (self.detached and self.child_is_immediate_subprocess) or + # Avoid killing so child has chance to write cProfile data + self._router.profiling + ), + # Don't delay shutdown waiting for a detached child, since the + # detached child may expect to live indefinitely after its parent + # exited. 
+ wait_on_shutdown=(not self.detached), + ) + self._reaper.reap() def _on_broker_shutdown(self): """ @@ -1590,20 +1618,6 @@ class Connection(object): """ self._fail_connection(CancelledError(BROKER_SHUTDOWN_MSG)) - def _start_timer(self): - self.timer = self._router.broker.timers.schedule( - when=self.options.connect_deadline, - func=self._on_timer_expired, - ) - - def _on_timer_expired(self): - self._fail_connection( - mitogen.core.TimeoutError( - 'Failed to setup connection after %.2f seconds', - self.options.connect_timeout, - ) - ) - def stream_factory(self): return self.stream_protocol_class.build_stream( broker=self._router.broker, @@ -1632,12 +1646,23 @@ class Connection(object): self._router.broker.start_receive(stream) return stream + def _on_timer_expired(self): + self._fail_connection( + mitogen.core.TimeoutError( + 'Failed to setup connection after %.2f seconds', + self.options.connect_timeout, + ) + ) + def _async_connect(self): LOG.debug('creating connection to context %d using %s', self.context.context_id, self.__class__.__module__) mitogen.core.listen(self._router.broker, 'shutdown', self._on_broker_shutdown) - self._start_timer() + self._timer = self._router.broker.timers.schedule( + when=self.options.connect_deadline, + func=self._on_timer_expired, + ) try: self.proc = self.start_child() @@ -2464,12 +2489,121 @@ class Router(mitogen.core.Router): return self.connect(u'ssh', **kwargs) -class Process(object): +class Reaper(object): + """ + Asynchronous logic for reaping :class:`Process` objects. This is necessary + to prevent uncontrolled buildup of zombie processes in long-lived parents + that will eventually reach an OS limit, preventing creation of new threads + and processes, and to log the exit status of the child in the case of an + error. + + To avoid modifying process-global state such as with + :func:`signal.set_wakeup_fd` or installing a :data:`signal.SIGCHLD` handler + that might interfere with the user's ability to use those facilities, + Reaper polls for exit with backoff using timers installed on an associated + :class:`Broker`. + + :param mitogen.core.Broker broker: + The :class:`Broker` on which to install timers + :param Process proc: + The process to reap. + :param bool kill: + If :data:`True`, send ``SIGTERM`` and ``SIGKILL`` to the process. + :param bool wait_on_shutdown: + If :data:`True`, delay :class:`Broker` shutdown if child has not yet + exited. If :data:`False` simply forget the child. """ - Process objects contain asynchronous logic for reaping children, and - keeping track of their stdio descriptors. + #: :class:`Timer` that invokes :meth:`reap` after some polling delay. + _timer = None + + def __init__(self, broker, proc, kill, wait_on_shutdown): + self.broker = broker + self.proc = proc + self.kill = kill + self.wait_on_shutdown = wait_on_shutdown + self._tries = 0 + + def _signal_child(self, signum): + # For processes like sudo we cannot actually send sudo a signal, + # because it is setuid, so this is best-effort only. + LOG.debug('%r: sending %s', self.proc, SIGNAL_BY_NUM[signum]) + try: + os.kill(self.proc.pid, signum) + except OSError: + e = sys.exc_info()[1] + if e.args[0] != errno.EPERM: + raise - This base class is extended by :class:`PopenProcess` and + def _calc_delay(self, count): + """ + Calculate a poll delay given `count` attempts have already been made. + These constants have no principle, they just produce rapid but still + relatively conservative retries. 
+ """ + delay = 0.05 + for _ in xrange(count): + delay *= 1.72 + return delay + + def _on_broker_shutdown(self): + """ + Respond to :class:`Broker` shutdown by cancelling the reap timer if + :attr:`Router.await_children_at_shutdown` is disabled. Otherwise + shutdown is delayed for up to :attr:`Broker.shutdown_timeout` for + subprocesses may have no intention of exiting any time soon. + """ + if not self.wait_on_shutdown: + self._timer.cancel() + + def _install_timer(self, delay): + new = self._timer is None + self._timer = self.broker.timers.schedule( + when=time.time() + delay, + func=self.reap, + ) + if new: + mitogen.core.listen(self.broker, 'shutdown', + self._on_broker_shutdown) + + def _remove_timer(self): + if self._timer and self._timer.active: + self._timer.cancel() + mitogen.core.unlisten(self.broker, 'shutdown', + self._on_broker_shutdown) + + def reap(self): + """ + Reap the child process during disconnection. + """ + status = self.proc.poll() + if status is not None: + LOG.debug('%r: %s', self.proc, returncode_to_str(status)) + self._remove_timer() + return + + self._tries += 1 + if self._tries > 20: + LOG.warning('%r: child will not exit, giving up', self) + self._remove_timer() + return + + delay = self._calc_delay(self._tries - 1) + LOG.debug('%r still running after IO disconnect, recheck in %.03fs', + self.proc, delay) + self._install_timer(delay) + + if not self.kill: + pass + elif self._tries == 1: + self._signal_child(signal.SIGTERM) + elif self._tries == 5: # roughly 4 seconds + self._signal_child(signal.SIGKILL) + + +class Process(object): + """ + Process objects provide a uniform interface to the :mod:`subprocess` and + :mod:`mitogen.fork`. This class is extended by :class:`PopenProcess` and :class:`mitogen.fork.Process`. :param int pid: @@ -2481,16 +2615,19 @@ class Process(object): :param file stderr: File object attached to standard error, or :data:`None`. """ - _delays = [0.05, 0.15, 0.3, 1.0, 5.0, 10.0] + #: Name of the process used in logs. Set to the stream/context name by + #: :class:`Connection`. name = None def __init__(self, pid, stdin, stdout, stderr=None): + #: The process ID. self.pid = pid + #: File object attached to standard input. self.stdin = stdin + #: File object attached to standard output. self.stdout = stdout + #: File object attached to standard error. self.stderr = stderr - self._returncode = None - self._reap_count = 0 def __repr__(self): return '%s %s pid %d' % ( @@ -2510,56 +2647,6 @@ class Process(object): """ raise NotImplementedError() - def _signal_child(self, signum): - # For processes like sudo we cannot actually send sudo a signal, - # because it is setuid, so this is best-effort only. - LOG.debug('%r: child process still alive, sending %s', - self, SIGNAL_BY_NUM[signum]) - try: - os.kill(self.pid, signum) - except OSError: - e = sys.exc_info()[1] - if e.args[0] != errno.EPERM: - raise - - def _async_reap(self, conn, router): - """ - Reap the child process during disconnection. - """ - if self._returncode is not None: - # on_disconnect() may be invoked more than once, for example, if - # there is still a pending message to be sent after the first - # on_disconnect() call. 
- return - - if conn.detached and conn.child_is_immediate_subprocess: - LOG.debug('%r: immediate child is detached, won\'t reap it', self) - return - - if router.profiling: - LOG.info('%r: wont kill child because profiling=True', self) - return - - self._reap_count += 1 - status = self.poll() - if status is not None: - LOG.debug('%r: %s', self, returncode_to_str(status)) - return - - i = self._reap_count - 1 - if i >= len(self._delays): - LOG.warning('%r: child will not die, abandoning it', self) - return - elif i == 0: - self._signal_child(signal.SIGTERM) - elif i == 1: - self._signal_child(signal.SIGKILL) - - router.broker.timers.schedule( - when=time.time() + self._delays[i], - func=lambda: self._async_reap(conn, router), - ) - class PopenProcess(Process): """ diff --git a/tests/connection_test.py b/tests/connection_test.py index a66428e4..619594d9 100644 --- a/tests/connection_test.py +++ b/tests/connection_test.py @@ -1,9 +1,10 @@ -import time -import tempfile -import sys import os +import signal +import sys +import tempfile import threading +import time import unittest2 import testlib @@ -44,5 +45,33 @@ class ConnectionTest(testlib.RouterMixin, testlib.TestCase): self.assertEquals(mitogen.parent.BROKER_SHUTDOWN_MSG, exc.args[0]) +@mitogen.core.takes_econtext +def do_detach(econtext): + econtext.detach() + while 1: + time.sleep(1) + logging.getLogger('mitogen').error('hi') + + +class DetachReapTest(testlib.RouterMixin, testlib.TestCase): + def test_subprocess_preserved_on_shutdown(self): + c1 = self.router.local() + pid = c1.call(os.getpid) + + l = mitogen.core.Latch() + mitogen.core.listen(c1, 'disconnect', l.put) + c1.call_no_reply(do_detach) + l.get() + + self.broker.shutdown() + self.broker.join() + + os.kill(pid, 0) # succeeds if process still alive + + # now clean up + os.kill(pid, signal.SIGTERM) + os.waitpid(pid, 0) + + if __name__ == '__main__': unittest2.main() diff --git a/tests/testlib.py b/tests/testlib.py index e26c6544..f856bfc1 100644 --- a/tests/testlib.py +++ b/tests/testlib.py @@ -192,7 +192,7 @@ def sync_with_broker(broker, timeout=10.0): """ sem = mitogen.core.Latch() broker.defer(sem.put, None) - sem.get(timeout=10.0) + sem.get(timeout=timeout) def log_fd_calls(): From f304ab8deca9b3d881be3ae2bdec88ef44793b13 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 7 Aug 2019 20:51:25 +0100 Subject: [PATCH 277/383] core: split preserve_tty_fp() out into a function --- mitogen/core.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index 9a863078..d10a90e3 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -3680,13 +3680,15 @@ class ExternalContext(object): os.dup2(fd, stdfd) os.close(fd) - def _setup_stdio(self): - # #481: when stderr is a TTY due to being started via - # tty_create_child()/hybrid_tty_create_child(), and some privilege - # escalation tool like prehistoric versions of sudo exec this process - # over the top of itself, there is nothing left to keep the slave PTY - # open after we replace our stdio. Therefore if stderr is a TTY, keep - # around a permanent dup() to avoid receiving SIGHUP. + def _preserve_tty_fp(self): + """ + #481: when stderr is a TTY due to being started via tty_create_child() + or hybrid_tty_create_child(), and some privilege escalation tool like + prehistoric versions of sudo exec this process over the top of itself, + there is nothing left to keep the slave PTY open after we replace our + stdio. 
Therefore if stderr is a TTY, keep around a permanent dup() to + avoid receiving SIGHUP. + """ try: if os.isatty(2): self.reserve_tty_fp = os.fdopen(os.dup(2), 'r+b', 0) @@ -3694,6 +3696,8 @@ class ExternalContext(object): except OSError: pass + def _setup_stdio(self): + self._preserve_tty_fp() # When sys.stdout was opened by the runtime, overwriting it will not # close FD 1. However when forking from a child that previously used # fdopen(), overwriting it /will/ close FD 1. So we must swallow the From 70deb34bce0f784a635bf13cc0a033981287c6cb Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 7 Aug 2019 20:51:42 +0100 Subject: [PATCH 278/383] [stream-refactor] stop leaking FD 100 for the life of the child This prevents successful detachment since [stream-refactor] landed --- mitogen/core.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mitogen/core.py b/mitogen/core.py index d10a90e3..76ece87c 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -3594,7 +3594,10 @@ class ExternalContext(object): else: self.parent = Context(self.router, parent_id, 'parent') - in_fp = os.fdopen(os.dup(self.config.get('in_fd', 100)), 'rb', 0) + in_fd = self.config.get('in_fd', 100) + in_fp = os.fdopen(os.dup(in_fd), 'rb', 0) + os.close(in_fd) + out_fp = os.fdopen(os.dup(self.config.get('out_fd', 1)), 'wb', 0) self.stream = MitogenProtocol.build_stream(self.router, parent_id) self.stream.accept(in_fp, out_fp) From c0d87c0aa18478292f0fd5b97382d68d73399965 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 7 Aug 2019 20:54:03 +0100 Subject: [PATCH 279/383] testlib: use lsof +E for much clearer leaked FD output --- tests/testlib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/testlib.py b/tests/testlib.py index f856bfc1..3d641892 100644 --- a/tests/testlib.py +++ b/tests/testlib.py @@ -338,7 +338,7 @@ class TestCase(unittest2.TestCase): def _teardown_check_fds(self): mitogen.core.Latch._on_fork() if get_fd_count() != self._fd_count_before: - import os; os.system('lsof -w -p %s' % (os.getpid(),)) + import os; os.system('lsof +E -w -p %s' % (os.getpid(),)) assert 0, "%s leaked FDs. Count before: %s, after: %s" % ( self, self._fd_count_before, get_fd_count(), ) From b33b29af3300bba11cbcf9449e8d92462bfac258 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 8 Aug 2019 01:53:21 +0100 Subject: [PATCH 280/383] core: remove dead Router.on_shutdown() and Router "shutdown" signal Its functionality was duplicated by _on_broker_exit() somewhere along the way, and nothing has referred to it in a long time. I have no idea how this happened. Merge its docstring into _on_broker_exit() and delete it, remove the Router "shutdown" signal after confirming it has no users, and move all the Router-originated error messages together in a block at the top of the class. Already covered by router_test.AddHandlerTest.test_dead_message_sent_at_shutdown --- docs/signals.rst | 4 ---- mitogen/core.py | 42 +++++++++++++++++------------------------- 2 files changed, 17 insertions(+), 29 deletions(-) diff --git a/docs/signals.rst b/docs/signals.rst index 9447e529..7ec0fdac 100644 --- a/docs/signals.rst +++ b/docs/signals.rst @@ -47,10 +47,6 @@ These signals are used internally by Mitogen. - ``disconnect`` - Fired on the Broker thread during shutdown (???) - * - :py:class:`mitogen.core.Router` - - ``shutdown`` - - Fired on the Broker thread after Broker.shutdown() is called. - * - :py:class:`mitogen.core.Broker` - ``shutdown`` - Fired after Broker.shutdown() is called. 
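A minimal sketch of subscribing to one of the Broker signals listed above, assuming an existing ``router`` (illustrative only, not part of this patch):

    import mitogen.core

    def on_broker_shutdown():
        # Runs on the broker thread once Broker.shutdown() has been called.
        pass

    mitogen.core.listen(router.broker, 'shutdown', on_broker_shutdown)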
diff --git a/mitogen/core.py b/mitogen/core.py index 76ece87c..1fb222c6 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -2759,6 +2759,18 @@ class Router(object): #: parameter. unidirectional = False + duplicate_handle_msg = 'cannot register a handle that already exists' + refused_msg = 'refused by policy' + invalid_handle_msg = 'invalid handle' + too_large_msg = 'message too large (max %d bytes)' + respondent_disconnect_msg = 'the respondent Context has disconnected' + broker_exit_msg = 'Broker has exitted' + no_route_msg = 'no route to %r, my ID is %r' + unidirectional_msg = ( + 'routing mode prevents forward of message from context %d via ' + 'context %d' + ) + def __init__(self, broker): self.broker = broker listen(broker, 'exit', self._on_broker_exit) @@ -2826,9 +2838,12 @@ class Router(object): for context in notify: context.on_disconnect() - broker_exit_msg = 'Broker has exitted' - def _on_broker_exit(self): + """ + Called prior to broker exit, informs callbacks registered with + :meth:`add_handler` the connection is dead. + """ + _v and LOG.debug('%r: broker has exitted', self) while self._handle_map: _, (_, func, _, _) = self._handle_map.popitem() func(Message.dead(self.broker_exit_msg)) @@ -3006,35 +3021,12 @@ class Router(object): return handle - duplicate_handle_msg = 'cannot register a handle that already exists' - refused_msg = 'refused by policy' - invalid_handle_msg = 'invalid handle' - too_large_msg = 'message too large (max %d bytes)' - respondent_disconnect_msg = 'the respondent Context has disconnected' - broker_shutdown_msg = 'Broker is shutting down' - no_route_msg = 'no route to %r, my ID is %r' - unidirectional_msg = ( - 'routing mode prevents forward of message from context %d via ' - 'context %d' - ) - def _on_respondent_disconnect(self, context): for handle in self._handles_by_respondent.pop(context, ()): _, fn, _, _ = self._handle_map[handle] fn(Message.dead(self.respondent_disconnect_msg)) del self._handle_map[handle] - def on_shutdown(self, broker): - """ - Called during :meth:`Broker.shutdown`, informs callbacks registered - with :meth:`add_handler` the connection is dead. - """ - _v and LOG.debug('%r: shutting down', self, broker) - fire(self, 'shutdown') - for handle, (persist, fn) in self._handle_map.iteritems(): - _v and LOG.debug('%r.on_shutdown(): killing %r: %r', self, handle, fn) - fn(Message.dead(self.broker_shutdown_msg)) - def _maybe_send_dead(self, msg, reason, *args): if args: reason %= args From 95b067a11469963ea279d11d3ead7ee0b38556f9 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 7 Aug 2019 21:56:38 +0000 Subject: [PATCH 281/383] parent: docstring fixes --- mitogen/parent.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/mitogen/parent.py b/mitogen/parent.py index 5a525c15..235290fb 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -303,6 +303,9 @@ def create_best_pipe(escalates_privilege=False): If SELinux is detected, fall back to using pipes. + :param bool escalates_privilege: + If :data:`True`, the target program may escalate privileges, causing + SELinux to disconnect AF_UNIX sockets, so avoid those. :returns: `(parent_rfp, child_wfp, child_rfp, parent_wfp)` """ @@ -344,7 +347,7 @@ def create_child(args, merge_stdio=False, stderr_pipe=False, :param bool merge_stdio: If :data:`True`, arrange for `stderr` to be connected to the `stdout` socketpair, rather than inherited from the parent process. 
This may be - necessary to ensure that not TTY is connected to any stdio handle, for + necessary to ensure that no TTY is connected to any stdio handle, for instance when using LXC. :param bool stderr_pipe: If :data:`True` and `merge_stdio` is :data:`False`, arrange for @@ -526,6 +529,15 @@ def hybrid_tty_create_child(args, escalates_privilege=False): like :func:`create_child`, but leave stderr and the controlling TTY attached to a TTY. + This permits high throughput communication with programs that are reached + via some program that requires a TTY for password input, like many + configurations of sudo. The UNIX TTY layer tends to have tiny (no more than + 14KiB) buffers, forcing many IO loop iterations when transferring bulk + data, causing significant performance loss. + + :param bool escalates_privilege: + If :data:`True`, the target program may escalate privileges, causing + SELinux to disconnect AF_UNIX sockets, so avoid those. :param list args: Program argument vector. :returns: @@ -765,7 +777,7 @@ def get_connection_class(name): def _proxy_connect(name, method_name, kwargs, econtext): """ Implements the target portion of Router._proxy_connect() by upgrading the - local context to a parent if it was not already, then calling back into + local process to a parent if it was not already, then calling back into Router._connect() using the arguments passed to the parent's Router.connect(). @@ -1146,7 +1158,6 @@ class RegexProtocol(LineLoggingProtocolMixin, mitogen.core.DelimitedProtocol): falling back to :meth:`on_unrecognized_line_received` and :meth:`on_unrecognized_partial_line_received`. """ - #: A sequence of 2-tuples of the form `(compiled pattern, method)` for #: patterns that should be matched against complete (delimited) messages, #: i.e. full lines. From 49a6446af8777afb7da0bf5bbd20556085509f97 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 8 Aug 2019 10:45:18 +0000 Subject: [PATCH 282/383] core/select: add {Select,Latch,Receiver}.size(), deprecate empty() Knowing an estimate of the buffered items is needed for adding a latch/receiver with many existing buffered items via Select.add(). --- mitogen/core.py | 64 +++++++++++++++++++++++++++++++++++------------ mitogen/select.py | 26 ++++++++++++------- 2 files changed, 65 insertions(+), 25 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index 1fb222c6..9ef3bb02 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -1095,13 +1095,32 @@ class Receiver(object): self.handle = None self._latch.close() + def size(self): + """ + Return the number of items currently buffered. + + As with :class:`Queue.Queue`, `0` may be returned even though a + subsequent call to :meth:`get` will succeed, since a message may be + posted at any moment between :meth:`size` and :meth:`get`. + + As with :class:`Queue.Queue`, `>0` may be returned even though a + subsequent call to :meth:`get` will block, since another waiting thread + may be woken at any moment between :meth:`size` and :meth:`get`. + + :raises LatchError: + The underlying latch has already been marked closed. + """ + return self._latch.size() + def empty(self): """ - Return :data:`True` if calling :meth:`get` would block. + Return `size() == 0`. + + .. deprecated:: 0.2.8 + Use :meth:`size` instead. - As with :class:`Queue.Queue`, :data:`True` may be returned even though - a subsequent call to :meth:`get` will succeed, since a message may be - posted at any moment between :meth:`empty` and :meth:`get`. + :raises LatchError: + The latch has already been marked closed. 
""" return self._latch.empty() @@ -1150,7 +1169,10 @@ class Channel(Sender, Receiver): A channel inherits from :class:`mitogen.core.Sender` and `mitogen.core.Receiver` to provide bidirectional functionality. - This class is incomplete and obsolete, it will be removed in Mitogen 0.3. + .. deprecated:: 0.2.0 + This class is incomplete and obsolete, it will be removed in Mitogen + 0.3. + Channels were an early attempt at syntax sugar. It is always easier to pass around unidirectional pairs of senders/receivers, even though the syntax is baroque: @@ -2385,19 +2407,17 @@ class Latch(object): finally: self._lock.release() - def empty(self): + def size(self): """ - Return :data:`True` if calling :meth:`get` would block. + Return the number of items currently buffered. - As with :class:`Queue.Queue`, :data:`True` may be returned even - though a subsequent call to :meth:`get` will succeed, since a - message may be posted at any moment between :meth:`empty` and - :meth:`get`. + As with :class:`Queue.Queue`, `0` may be returned even though a + subsequent call to :meth:`get` will succeed, since a message may be + posted at any moment between :meth:`size` and :meth:`get`. - As with :class:`Queue.Queue`, :data:`False` may be returned even - though a subsequent call to :meth:`get` will block, since another - waiting thread may be woken at any moment between :meth:`empty` and - :meth:`get`. + As with :class:`Queue.Queue`, `>0` may be returned even though a + subsequent call to :meth:`get` will block, since another waiting thread + may be woken at any moment between :meth:`size` and :meth:`get`. :raises LatchError: The latch has already been marked closed. @@ -2406,10 +2426,22 @@ class Latch(object): try: if self.closed: raise LatchError() - return len(self._queue) == 0 + return len(self._queue) finally: self._lock.release() + def empty(self): + """ + Return `size() == 0`. + + .. deprecated:: 0.2.8 + Use :meth:`size` instead. + + :raises LatchError: + The latch has already been marked closed. + """ + return self.size() == 0 + def _get_socketpair(self): """ Return an unused socketpair, creating one if none exist. diff --git a/mitogen/select.py b/mitogen/select.py index ca3c32bc..f03fdae1 100644 --- a/mitogen/select.py +++ b/mitogen/select.py @@ -259,18 +259,26 @@ class Select(object): self.remove(recv) self._latch.close() - def empty(self): + def size(self): + """ + Return the number of items currently buffered. + + As with :class:`Queue.Queue`, `0` may be returned even though a + subsequent call to :meth:`get` will succeed, since a message may be + posted at any moment between :meth:`size` and :meth:`get`. + + As with :class:`Queue.Queue`, `>0` may be returned even though a + subsequent call to :meth:`get` will block, since another waiting thread + may be woken at any moment between :meth:`size` and :meth:`get`. """ - Return :data:`True` if calling :meth:`get` would block. + return sum(recv.size() for recv in self._receivers) - As with :class:`Queue.Queue`, :data:`True` may be returned even though - a subsequent call to :meth:`get` will succeed, since a message may be - posted at any moment between :meth:`empty` and :meth:`get`. + def empty(self): + """ + Return `size() == 0`. - :meth:`empty` may return :data:`False` even when :meth:`get` would - block if another thread has drained a receiver added to this select. - This can be avoided by only consuming each receiver from a single - thread. + .. deprecated:: 0.2.8 + Use :meth:`size` instead. 
""" return self._latch.empty() From ecc570cbdacdd4a8741d4f88444327161b47f152 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 8 Aug 2019 10:47:04 +0000 Subject: [PATCH 283/383] select: make Select.add() handle multiple buffered items. Previously given something like: l = mitogen.core.Latch() l.put(1) l.put(2) s = mitogen.select.Select([l], oneshot=False) assert 1 == s.get(block=False) assert 2 == s.get(block=False) The second call would throw TimeoutError, because Select.add() only queued the receiver/latch once if it was non-empty, rather than once for each item as should happen. --- mitogen/select.py | 14 +++++++++++--- tests/select_test.py | 22 ++++++++++++++++++++++ 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/mitogen/select.py b/mitogen/select.py index f03fdae1..f880fcc3 100644 --- a/mitogen/select.py +++ b/mitogen/select.py @@ -224,8 +224,15 @@ class Select(object): raise Error(self.owned_msg) recv.notify = self._put - # Avoid race by polling once after installation. - if not recv.empty(): + # After installing the notify function, _put() will potentially begin + # receiving calls from other threads immediately, but not for items + # they already had buffered. For those we call _put(), possibly + # duplicating the effect of other _put() being made concurrently, such + # that the Select ends up with more items in its buffer than exist in + # the underlying receivers. We handle the possibility of receivers + # marked notified yet empty inside Select.get(), so this should be + # robust. + for _ in range(recv.size()): self._put(recv) not_present_msg = 'Instance is not a member of this Select' @@ -335,5 +342,6 @@ class Select(object): # A receiver may have been queued with no result if another # thread drained it before we woke up, or because another # thread drained it between add() calling recv.empty() and - # self._put(). In this case just sleep again. + # self._put(), or because Select.add() caused duplicate _put() + # calls. In this case simply retry. 
continue diff --git a/tests/select_test.py b/tests/select_test.py index f08c9f3a..56e7e6cd 100644 --- a/tests/select_test.py +++ b/tests/select_test.py @@ -358,6 +358,18 @@ class GetReceiverTest(testlib.RouterMixin, testlib.TestCase): msg = select.get() self.assertEquals('123', msg.unpickle()) + def test_nonempty_multiple_items_before_add(self): + recv = mitogen.core.Receiver(self.router) + recv._on_receive(mitogen.core.Message.pickled('123')) + recv._on_receive(mitogen.core.Message.pickled('234')) + select = self.klass([recv], oneshot=False) + msg = select.get() + self.assertEquals('123', msg.unpickle()) + msg = select.get() + self.assertEquals('234', msg.unpickle()) + self.assertRaises(mitogen.core.TimeoutError, + lambda: select.get(block=False)) + def test_nonempty_after_add(self): recv = mitogen.core.Receiver(self.router) select = self.klass([recv]) @@ -415,6 +427,16 @@ class GetLatchTest(testlib.RouterMixin, testlib.TestCase): select = self.klass([latch]) self.assertEquals(123, select.get()) + def test_nonempty_multiple_items_before_add(self): + latch = mitogen.core.Latch() + latch.put(123) + latch.put(234) + select = self.klass([latch], oneshot=False) + self.assertEquals(123, select.get()) + self.assertEquals(234, select.get()) + self.assertRaises(mitogen.core.TimeoutError, + lambda: select.get(block=False)) + def test_nonempty_after_add(self): latch = mitogen.core.Latch() select = self.klass([latch]) From 50b2d590fd1eb95a8b01450da23b549964aa34bb Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 8 Aug 2019 10:55:00 +0000 Subject: [PATCH 284/383] docs: update Changelog. --- .ci/ci_lib.py | 1 + docs/changelog.rst | 12 ++++++++++++ 2 files changed, 13 insertions(+) diff --git a/.ci/ci_lib.py b/.ci/ci_lib.py index 5a03b906..34a6faba 100644 --- a/.ci/ci_lib.py +++ b/.ci/ci_lib.py @@ -224,6 +224,7 @@ def start_containers(containers): "docker rm -f %(name)s || true" % container, "docker run " "--rm " + "--cpuset-cpus 0,1 " "--detach " "--privileged " "--cap-add=SYS_PTRACE " diff --git a/docs/changelog.rst b/docs/changelog.rst index 1bdb1f6d..3861e501 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -165,6 +165,18 @@ Core Library buffer management when logging lines received from a child's redirected standard IO. +* `49a6446a `_: the + :meth:`empty` method of :class:`mitogen.core.Latch`, + :class:`mitogen.core.Receiver` and :class:`mitogen.select.Select` has been + replaced by a more general :meth:`size` method. :meth:`empty` will be removed + in Mitogen 0.3 + +* `ecc570cb `_: previously + :meth:`mitogen.select.Select.add` would enqueue a single wake event when + adding an existing receiver, latch or subselect that contained multiple + buffered items, causing future :meth:`get` calls to block or fail even though + data existed that could be returned. + Thanks! ~~~~~~~ From 769a8b2015bafb31cb4f80e3e7e9cf5e5ee3450c Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 8 Aug 2019 11:14:35 +0000 Subject: [PATCH 285/383] issue #547: core/service: race/deadlock-free service pool init The previous method of spinning up a transient thread to import the service pool in a child context could deadlock with use of the importer on the main thread. Therefore wake the main thread to handle import for us, and use a regular Receiver to buffer messages to the stub, which is inherited rather than replaced by the real service pool. 
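In outline, the new path is (a sketch of the diff below, not separate code):

    # 1. Broker thread: the stub CALL_SERVICE Receiver installed by
    #    Dispatcher.__init__() fires _on_call_service(), which only enqueues a
    #    STUB_CALL_SERVICE sentinel on the CALL_FUNCTION receiver.
    # 2. Main thread: _dispatch_calls() sees the sentinel and calls
    #    _init_service_pool(), importing mitogen.service on the main thread,
    #    where the importer can run without risking deadlock.
    # 3. mitogen.service.get_or_create_pool() reuses the stub Receiver, so any
    #    CALL_SERVICE messages buffered before the pool existed are preserved.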
--- mitogen/core.py | 89 +++++++++++++++++++++++++++------------------- mitogen/service.py | 24 ++++++++++--- 2 files changed, 72 insertions(+), 41 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index 9ef3bb02..c09958dc 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -122,6 +122,7 @@ LOAD_MODULE = 107 FORWARD_MODULE = 108 DETACHING = 109 CALL_SERVICE = 110 +STUB_CALL_SERVICE = 111 #: Special value used to signal disconnection or the inability to route a #: message, when it appears in the `reply_to` field. Usually causes @@ -3432,9 +3433,22 @@ class Dispatcher(object): self.econtext = econtext #: Chain ID -> CallError if prior call failed. self._error_by_chain_id = {} - self.recv = Receiver(router=econtext.router, - handle=CALL_FUNCTION, - policy=has_parent_authority) + self.recv = Receiver( + router=econtext.router, + handle=CALL_FUNCTION, + policy=has_parent_authority, + ) + #: The :data:`CALL_SERVICE` :class:`Receiver` that will eventually be + #: reused by :class:`mitogen.service.Pool`, should it ever be loaded. + #: This is necessary for race-free reception of all service requests + #: delivered regardless of whether the stub or real service pool are + #: loaded. See #547 for related sorrows. + Dispatcher._service_recv = Receiver( + router=econtext.router, + handle=CALL_SERVICE, + policy=has_parent_authority, + ) + self._service_recv.notify = self._on_call_service listen(econtext.broker, 'shutdown', self.recv.close) @classmethod @@ -3475,8 +3489,44 @@ class Dispatcher(object): self._error_by_chain_id[chain_id] = e return chain_id, e + def _on_call_service(self, recv): + """ + Notifier for the :data:`CALL_SERVICE` receiver. This is called on the + :class:`Broker` thread for any service messages arriving at this + context, for as long as no real service pool implementation is loaded. + + In order to safely bootstrap the service pool implementation a sentinel + message is enqueued on the :data:`CALL_FUNCTION` receiver in order to + wake the main thread, where the importer can run without any + possibility of suffering deadlock due to concurrent uses of the + importer. + + Should the main thread be blocked indefinitely, preventing the import + from ever running, if it is blocked waiting on a service call, then it + means :mod:`mitogen.service` has already been imported and + :func:`mitogen.service.get_or_create_pool` has already run, meaning the + service pool is already active and the duplicate initialization was not + needed anyway. + + #547: This trickery is needed to avoid the alternate option of spinning + a temporary thread to import the service pool, which could deadlock if + a custom import hook executing on the main thread (under the importer + lock) would block waiting for some data that was in turn received by a + service. Main thread import lock can't be released until service is + running, service cannot satisfy request until import lock is released. 
+ """ + self.recv._on_receive(Message(handle=STUB_CALL_SERVICE)) + + def _init_service_pool(self): + import mitogen.service + mitogen.service.get_or_create_pool(router=self.econtext.router) + def _dispatch_calls(self): for msg in self.recv: + if msg.handle == STUB_CALL_SERVICE: + self._init_service_pool() + continue + chain_id, ret = self._dispatch_one(msg) _v and LOG.debug('%r: %r -> %r', self, msg, ret) if msg.reply_to: @@ -3535,34 +3585,6 @@ class ExternalContext(object): if not self.config['profiling']: os.kill(os.getpid(), signal.SIGTERM) - #: On Python >3.4, the global importer lock has split into per-module - #: locks, so there is no guarantee the import statement in - #: service_stub_main will complete before a second thread attempting the - #: same import will see a partially initialized module. Therefore serialize - #: the stub explicitly. - service_stub_lock = threading.Lock() - - def _service_stub_main(self, msg): - self.service_stub_lock.acquire() - try: - import mitogen.service - pool = mitogen.service.get_or_create_pool(router=self.router) - pool._receiver._on_receive(msg) - finally: - self.service_stub_lock.release() - - def _on_call_service_msg(self, msg): - """ - Stub service handler. Start a thread to import the mitogen.service - implementation from, and deliver the message to the newly constructed - pool. This must be done as CALL_SERVICE for e.g. PushFileService may - race with a CALL_FUNCTION blocking the main thread waiting for a result - from that service. - """ - if not msg.is_dead: - th = threading.Thread(target=self._service_stub_main, args=(msg,)) - th.start() - def _on_shutdown_msg(self, msg): if not msg.is_dead: _v and LOG.debug('shutdown request from context %d', msg.src_id) @@ -3606,11 +3628,6 @@ class ExternalContext(object): handle=SHUTDOWN, policy=has_parent_authority, ) - self.router.add_handler( - fn=self._on_call_service_msg, - handle=CALL_SERVICE, - policy=has_parent_authority, - ) self.master = Context(self.router, 0, 'master') parent_id = self.config['parent_ids'][0] if parent_id == 0: diff --git a/mitogen/service.py b/mitogen/service.py index f58f882c..598de957 100644 --- a/mitogen/service.py +++ b/mitogen/service.py @@ -86,8 +86,13 @@ def get_or_create_pool(size=None, router=None): _pool_lock.acquire() try: if _pool_pid != my_pid: - _pool = Pool(router, [], size=size or DEFAULT_POOL_SIZE, - overwrite=True) + _pool = Pool( + router, + services=[], + size=size or DEFAULT_POOL_SIZE, + overwrite=True, + recv=mitogen.core.Dispatcher._service_recv, + ) # In case of Broker shutdown crash, Pool can cause 'zombie' # processes. mitogen.core.listen(router.broker, 'shutdown', @@ -475,22 +480,31 @@ class Pool(object): program's configuration or its input data. :param mitogen.core.Router router: - Router to listen for ``CALL_SERVICE`` messages on. + :class:`mitogen.core.Router` to listen for + :data:`mitogen.core.CALL_SERVICE` messages. :param list services: Initial list of services to register. + :param mitogen.core.Receiver recv: + :data:`mitogen.core.CALL_SERVICE` receiver to reuse. This is used by + :func:`get_or_create_pool` to hand off a queue of messages from the + Dispatcher stub handler while avoiding a race. 
""" activator_class = Activator - def __init__(self, router, services=(), size=1, overwrite=False): + def __init__(self, router, services=(), size=1, overwrite=False, + recv=None): self.router = router self._activator = self.activator_class() self._ipc_latch = mitogen.core.Latch() - self._receiver = mitogen.core.Receiver( + self._receiver = recv or mitogen.core.Receiver( router=router, handle=mitogen.core.CALL_SERVICE, overwrite=overwrite, ) + # If self._receiver was inherited from mitogen.core.Dispatcher, we must + # remove its stub notification function before adding it to our Select. + self._receiver.notify = None self._select = mitogen.select.Select(oneshot=False) self._select.add(self._receiver) self._select.add(self._ipc_latch) From 41d180495a2116a98655dce7b2279ed3edc97446 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 8 Aug 2019 11:21:17 +0000 Subject: [PATCH 286/383] issue #547: update Changelog. --- docs/changelog.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index 3861e501..707099a8 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -75,6 +75,12 @@ Mitogen for Ansible * `#467 `_: an incompatibility running Mitogen under Molecule was resolved. +* `#547 `_: fix a serious deadlock + possible during initialization of any task executed by forking, such as + ``async`` tasks, tasks using custom :mod:`ansible.module_utils`, + ``mitogen_task_isolation: fork`` modules, and those present on an internal + blacklist of misbehaving modules. + * `#549 `_: the open file descriptor limit for the Ansible process is increased to the available hard limit. It is common for distributions to ship with a much higher hard limit than their From cc02906d2a29819355aa4f3e2028f31388fa1135 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 8 Aug 2019 11:52:17 +0000 Subject: [PATCH 287/383] issue #547: fix service_test failures. --- mitogen/core.py | 3 ++- mitogen/service.py | 15 +++++++++++---- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index c09958dc..8428a479 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -3524,7 +3524,8 @@ class Dispatcher(object): def _dispatch_calls(self): for msg in self.recv: if msg.handle == STUB_CALL_SERVICE: - self._init_service_pool() + if msg.src_id == mitogen.context_id: + self._init_service_pool() continue chain_id, ret = self._dispatch_one(msg) diff --git a/mitogen/service.py b/mitogen/service.py index 598de957..77a1ae1b 100644 --- a/mitogen/service.py +++ b/mitogen/service.py @@ -496,15 +496,12 @@ class Pool(object): self.router = router self._activator = self.activator_class() self._ipc_latch = mitogen.core.Latch() - self._receiver = recv or mitogen.core.Receiver( + self._receiver = mitogen.core.Receiver( router=router, handle=mitogen.core.CALL_SERVICE, overwrite=overwrite, ) - # If self._receiver was inherited from mitogen.core.Dispatcher, we must - # remove its stub notification function before adding it to our Select. - self._receiver.notify = None self._select = mitogen.select.Select(oneshot=False) self._select.add(self._receiver) self._select.add(self._ipc_latch) @@ -516,6 +513,16 @@ class Pool(object): } self._invoker_by_name = {} + if recv is not None: + # When inheriting from mitogen.core.Dispatcher, we must remove its + # stub notification function before adding it to our Select. We + # always overwrite this receiver since the standard service.Pool + # handler policy differs from the one inherited from + # core.Dispatcher. 
+ recv.notify = None + self._select.add(recv) + self._func_by_source[recv] = self._on_service_call + for service in services: self.add(service) self._py_24_25_compat() From 3231c62a66805bc3f06c6907d42e76a187e38f67 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 8 Aug 2019 15:07:57 +0100 Subject: [PATCH 288/383] parent: fix get_log_level() for split out loggers. --- mitogen/parent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mitogen/parent.py b/mitogen/parent.py index 235290fb..99d8d621 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -161,7 +161,7 @@ _core_source_partial = None def get_log_level(): - return (LOG.level or logging.getLogger().level or logging.INFO) + return (LOG.getEffectiveLevel() or logging.INFO) def get_sys_executable(): From e3dcce206922cd4357008af91ecec029745a3994 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 8 Aug 2019 14:14:02 +0000 Subject: [PATCH 289/383] os_fork: do not attempt to cork the active thread. --- mitogen/os_fork.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/mitogen/os_fork.py b/mitogen/os_fork.py index b27cfd5c..da832c65 100644 --- a/mitogen/os_fork.py +++ b/mitogen/os_fork.py @@ -35,6 +35,7 @@ Support for operating in a mixed threading/forking environment. import os import socket import sys +import threading import weakref import mitogen.core @@ -157,6 +158,7 @@ class Corker(object): held. This will not return until each thread acknowledges it has ceased execution. """ + current = threading.currentThread() s = mitogen.core.b('CORK') * ((128 // 4) * 1024) self._rsocks = [] @@ -164,12 +166,14 @@ class Corker(object): # participation of a broker in order to complete. for pool in self.pools: if not pool.closed: - for x in range(pool.size): - self._cork_one(s, pool) + for th in pool._threads: + if th != current: + self._cork_one(s, pool) for broker in self.brokers: if broker._alive: - self._cork_one(s, broker) + if broker._thread != current: + self._cork_one(s, broker) # Pause until we can detect every thread has entered write(). for rsock in self._rsocks: From 5970b041e038e2d3610f10c7fe93f8419e64103e Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 8 Aug 2019 16:15:28 +0100 Subject: [PATCH 290/383] docs: merge signals.rst into internals.rst --- docs/internals.rst | 51 ++++++++++++++++++++++++++++++++++++++++- docs/signals.rst | 57 ---------------------------------------------- 2 files changed, 50 insertions(+), 58 deletions(-) delete mode 100644 docs/signals.rst diff --git a/docs/internals.rst b/docs/internals.rst index 71c6273d..1df1c2ad 100644 --- a/docs/internals.rst +++ b/docs/internals.rst @@ -273,4 +273,53 @@ Helpers Signals ======= -:ref:`Please refer to Signals `. +Mitogen contains a simplistic signal mechanism to decouple its components. When +a signal is fired by an instance of a class, functions registered to receive it +are called back. + +.. warning:: + + As signals execute on the Broker thread, and without exception handling, + they are generally unsafe for consumption by user code, as any bugs could + trigger crashes and hangs for which the broker is unable to forward logs, + or ensure the buggy context always shuts down on disconnect. + + +Functions +--------- + +.. currentmodule:: mitogen.core + +.. autofunction:: listen +.. autofunction:: unlisten +.. autofunction:: fire + + +List +---- + +These signals are used internally by Mitogen. + +.. 
list-table:: + :header-rows: 1 + :widths: auto + + * - Class + - Name + - Description + + * - :py:class:`mitogen.core.Stream` + - ``disconnect`` + - Fired on the Broker thread when disconnection is detected. + + * - :py:class:`mitogen.core.Context` + - ``disconnect`` + - Fired on the Broker thread during shutdown (???) + + * - :py:class:`mitogen.core.Broker` + - ``shutdown`` + - Fired after Broker.shutdown() is called. + + * - :py:class:`mitogen.core.Broker` + - ``exit`` + - Fired immediately prior to the broker thread exit. diff --git a/docs/signals.rst b/docs/signals.rst deleted file mode 100644 index 7ec0fdac..00000000 --- a/docs/signals.rst +++ /dev/null @@ -1,57 +0,0 @@ - -.. _signals: - -Signals -======= - -Mitogen contains a simplistic signal mechanism to help decouple its internal -components. When a signal is fired by a particular instance of a class, any -functions registered to receive it will be called back. - -.. warning:: - - As signals execute on the Broker thread, and without exception handling, - they are generally unsafe for consumption by user code, as any bugs could - trigger crashes and hangs for which the broker is unable to forward logs, - or ensure the buggy context always shuts down on disconnect. - - -Functions ---------- - -.. currentmodule:: mitogen.core - -.. autofunction:: listen -.. autofunction:: unlisten -.. autofunction:: fire - - -List ----- - -These signals are used internally by Mitogen. - -.. list-table:: - :header-rows: 1 - :widths: auto - - * - Class - - Name - - Description - - * - :py:class:`mitogen.core.Stream` - - ``disconnect`` - - Fired on the Broker thread when disconnection is detected. - - * - :py:class:`mitogen.core.Context` - - ``disconnect`` - - Fired on the Broker thread during shutdown (???) - - * - :py:class:`mitogen.core.Broker` - - ``shutdown`` - - Fired after Broker.shutdown() is called. - - * - :py:class:`mitogen.core.Broker` - - ``exit`` - - Fired immediately prior to the broker thread exit. - From 444b7d6d97d2108423ccaa5f1250fa2a6e33534d Mon Sep 17 00:00:00 2001 From: Marc Hartmayer Date: Thu, 8 Aug 2019 18:50:40 +0200 Subject: [PATCH 291/383] parent: use protocol for getting remote_id Fixes 8d1b01d8efba ("Refactor Stream, introduce quasi-asynchronous connect, much more"). --- mitogen/parent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mitogen/parent.py b/mitogen/parent.py index 99d8d621..79b484c2 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -2331,7 +2331,7 @@ class Router(mitogen.core.Router): directly connected. """ stream = self.stream_by_id(context) - if stream.remote_id != context.context_id: + if stream.protocol.remote_id != context.context_id: return l = mitogen.core.Latch() From 0a6c0cd8fb36792efdeeb0f1226389fa69f23ba4 Mon Sep 17 00:00:00 2001 From: Marc Hartmayer Date: Thu, 8 Aug 2019 18:50:40 +0200 Subject: [PATCH 292/383] pkgutil: fix Python3 compatibility Starting with Python3 the `as` clause must be used to associate a name to the exception being passed. 
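For example, a minimal sketch reusing the names from the hunk below; only the ``as`` form parses on Python 3:

    try:
        f = open(pkgfile)
    # Python 2-only spelling, a SyntaxError on Python 3:
    #     except IOError, msg:
    # Portable spelling, accepted by Python 2.6+ and Python 3:
    except IOError as msg:
        sys.stderr.write("Can't open %s: %s\n" % (pkgfile, msg))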
--- mitogen/compat/pkgutil.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mitogen/compat/pkgutil.py b/mitogen/compat/pkgutil.py index 28e2aead..df3983a2 100644 --- a/mitogen/compat/pkgutil.py +++ b/mitogen/compat/pkgutil.py @@ -542,7 +542,7 @@ def extend_path(path, name): if os.path.isfile(pkgfile): try: f = open(pkgfile) - except IOError, msg: + except IOError as msg: sys.stderr.write("Can't open %s: %s\n" % (pkgfile, msg)) else: From 2ed8395d6ce2adc6a252b68c310646707348f3a1 Mon Sep 17 00:00:00 2001 From: Marc Hartmayer Date: Thu, 8 Aug 2019 18:50:40 +0200 Subject: [PATCH 293/383] master: fix TypeError Add a guard for the case `path == None`. This commit fixes `TypeError: stat: path should be string, bytes, os.PathLike or integer, not NoneType` --- mitogen/master.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mitogen/master.py b/mitogen/master.py index 48f82ab1..814f7019 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -462,7 +462,7 @@ class DefectivePython3xMainMethod(FinderMethod): return None path = getattr(mod, '__file__', None) - if not (os.path.exists(path) and _looks_like_script(path)): + if not (path is not None and os.path.exists(path) and _looks_like_script(path)): return None fp = open(path, 'rb') From 5ae6f921772a419f06547e4a7cd58f0b3ad355ae Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 8 Aug 2019 18:17:42 +0000 Subject: [PATCH 294/383] issue #612: update Changelog. --- docs/changelog.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index 707099a8..0622762b 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -167,6 +167,9 @@ Core Library * `#606 `_: fix example code on the documentation front page. +* `#612 `_: fix various errors + introduced by stream refactoring. + * `a5536c35 `_: avoid quadratic buffer management when logging lines received from a child's redirected standard IO. @@ -195,6 +198,7 @@ bug reports, testing, features and fixes in this release contributed by `Dave Cottlehuber `_, `El Mehdi CHAOUKI `_, `James Hogarth `_, +`Marc Hartmayer `_, `Nigel Metheringham `_, `Orion Poplawski `_, `Pieter Voet `_, From 436a4b3b3cfc6f901c66fe79b4f0828149a4312e Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 8 Aug 2019 20:08:51 +0000 Subject: [PATCH 295/383] docs: tidy up Select.all() --- mitogen/select.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mitogen/select.py b/mitogen/select.py index f880fcc3..3875042b 100644 --- a/mitogen/select.py +++ b/mitogen/select.py @@ -122,9 +122,10 @@ class Select(object): @classmethod def all(cls, receivers): """ - Take an iterable of receivers and retrieve a :class:`Message` from - each, returning the result of calling `msg.unpickle()` on each in turn. - Results are returned in the order they arrived. + Take an iterable of receivers and retrieve a :class:`Message + ` from each, returning the result of calling + :meth:`Message.unpickle() ` on each in + turn. Results are returned in the order they arrived. 
This is sugar for handling batch :meth:`Context.call_async ` invocations: From 5924af1566763e48c42028399ea0cd95c457b3dc Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 8 Aug 2019 23:03:43 +0100 Subject: [PATCH 296/383] [security] core: undirectional routing wasn't respected in some cases When creating a context using Router.method(via=somechild), unidirectional mode was set on the new child correctly, however if the child were to call Router.method(), due to a typing mistake the new child would start without it. This doesn't impact the Ansible extension, as only forked tasks are started directly by children, and they are not responsible for routing messages. Add test so it can't happen again. --- mitogen/core.py | 2 +- tests/router_test.py | 48 ++++++++++++++++++++++++++++++++------------ 2 files changed, 36 insertions(+), 14 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index 8428a479..c228079d 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -3623,7 +3623,7 @@ class ExternalContext(object): self.broker = Broker(activate_compat=False) self.router = Router(self.broker) self.router.debug = self.config.get('debug', False) - self.router.undirectional = self.config['unidirectional'] + self.router.unidirectional = self.config['unidirectional'] self.router.add_handler( fn=self._on_shutdown_msg, handle=SHUTDOWN, diff --git a/tests/router_test.py b/tests/router_test.py index 1bd6c26a..2b4a9d78 100644 --- a/tests/router_test.py +++ b/tests/router_test.py @@ -1,9 +1,11 @@ +import sys import time import zlib import unittest2 import testlib +import mitogen.core import mitogen.master import mitogen.parent import mitogen.utils @@ -341,22 +343,42 @@ class NoRouteTest(testlib.RouterMixin, testlib.TestCase): )) +def test_siblings_cant_talk(router): + l1 = router.local() + l2 = router.local() + logs = testlib.LogCapturer() + logs.start() + + try: + l2.call(ping_context, l1) + except mitogen.core.CallError: + e = sys.exc_info()[1] + + msg = mitogen.core.Router.unidirectional_msg % ( + l2.context_id, + l1.context_id, + ) + assert msg in str(e) + assert 'routing mode prevents forward of ' in logs.stop() + + +@mitogen.core.takes_econtext +def test_siblings_cant_talk_remote(econtext): + mitogen.parent.upgrade_router(econtext) + test_siblings_cant_talk(econtext.router) + + class UnidirectionalTest(testlib.RouterMixin, testlib.TestCase): - def test_siblings_cant_talk(self): + def test_siblings_cant_talk_master(self): self.router.unidirectional = True - l1 = self.router.local() - l2 = self.router.local() - logs = testlib.LogCapturer() - logs.start() - e = self.assertRaises(mitogen.core.CallError, - lambda: l2.call(ping_context, l1)) + test_siblings_cant_talk(self.router) - msg = self.router.unidirectional_msg % ( - l2.context_id, - l1.context_id, - ) - self.assertTrue(msg in str(e)) - self.assertTrue('routing mode prevents forward of ' in logs.stop()) + def test_siblings_cant_talk_parent(self): + # ensure 'unidirectional' attribute is respected for contexts started + # by children. + self.router.unidirectional = True + parent = self.router.local() + parent.call(test_siblings_cant_talk_remote) def test_auth_id_can_talk(self): self.router.unidirectional = True From f0138072f10f89694b54f2833f7e5450d41393b3 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 8 Aug 2019 23:09:41 +0100 Subject: [PATCH 297/383] docs: update Changelog. 
--- docs/changelog.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index 0622762b..8f63aa74 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -186,6 +186,16 @@ Core Library buffered items, causing future :meth:`get` calls to block or fail even though data existed that could be returned. +* `5924af15 `_: *[security]* the + unidirectional routing mode, in which contexts may only communicate with + their parents and never siblings (such that a program cannot be used as a + bridge for air-gapped networks) was not inherited when a new child context + was initiated directly from an existing child. + + The bug did not effect the Ansible extension since the top-level controller + process initiates any new context that could be used for routing. Only + forked tasks are started directly from children. + Thanks! ~~~~~~~ From 6fa69955c46d70bfb3c97ec8a3aa346763da7028 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Fri, 9 Aug 2019 04:42:23 +0100 Subject: [PATCH 298/383] issue #586: update Changelog. --- docs/changelog.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index 8f63aa74..08d581e1 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -164,6 +164,10 @@ Core Library `closed` flag, preventing historical bugs where a double close could destroy descriptors belonging to unrelated streams. +* `#586 `_: fix import of the + `__main__` module on later versions of Python 3 when running from the + interactive console. + * `#606 `_: fix example code on the documentation front page. @@ -206,6 +210,7 @@ bug reports, testing, features and fixes in this release contributed by `Anton Markelov `_, `Dan `_, `Dave Cottlehuber `_, +`Denis Krienbühl `_, `El Mehdi CHAOUKI `_, `James Hogarth `_, `Marc Hartmayer `_, From e352b9e5fdeb67de0449060d1b5afad1f9cb4f19 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Fri, 9 Aug 2019 04:48:24 +0100 Subject: [PATCH 299/383] docs: update Changelog. --- docs/changelog.rst | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index 08d581e1..26b2c316 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -164,8 +164,8 @@ Core Library `closed` flag, preventing historical bugs where a double close could destroy descriptors belonging to unrelated streams. -* `#586 `_: fix import of the - `__main__` module on later versions of Python 3 when running from the +* `#586 `_: fix import of + :mod:`__main__` on later versions of Python 3 when running from the interactive console. * `#606 `_: fix example code on the @@ -192,13 +192,11 @@ Core Library * `5924af15 `_: *[security]* the unidirectional routing mode, in which contexts may only communicate with - their parents and never siblings (such that a program cannot be used as a - bridge for air-gapped networks) was not inherited when a new child context - was initiated directly from an existing child. - - The bug did not effect the Ansible extension since the top-level controller - process initiates any new context that could be used for routing. Only - forked tasks are started directly from children. + parents and never siblings (so a program cannot accidentally bridge + air-gapped networks) was not inherited when a child context was initiated + directly from an existing child. This did not effect the Ansible extension, + since the controller initiates any new context used for routing, only forked + tasks are initiated by children. Thanks! 
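
Before the next patch, a condensed sketch of the behaviour the new
UnidirectionalTest cases assert: with router.unidirectional enabled, a call
routed from one child towards a sibling is refused by the parent doing the
routing. The ping_context() body below is an assumption (the test suite's
implementation is not shown in the diff), and the example is illustrative
rather than part of the series:

    import os
    import mitogen.core
    import mitogen.master

    def ping_context(context):
        # Assumed helper body: any call routed towards a sibling will do.
        return context.call(os.getpid)

    broker = mitogen.master.Broker()
    router = mitogen.master.Router(broker)
    try:
        router.unidirectional = True    # children may only talk to parents
        l1 = router.local()
        l2 = router.local()
        try:
            l2.call(ping_context, l1)   # sibling-to-sibling, must be refused
        except mitogen.core.CallError as e:
            print('refused: %s' % (e,))
    finally:
        broker.shutdown()
        broker.join()
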
From 83a86a2ce15e1737f508db746b20d6349de8cad0 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Fri, 9 Aug 2019 14:58:03 +0100 Subject: [PATCH 300/383] issue #482: tests: fail DockerMixin tests if stray processes exist --- tests/testlib.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tests/testlib.py b/tests/testlib.py index 3d641892..672d677b 100644 --- a/tests/testlib.py +++ b/tests/testlib.py @@ -454,6 +454,22 @@ class DockerizedSshDaemon(object): def wait_for_sshd(self): wait_for_port(self.get_host(), self.port, pattern='OpenSSH') + def check_processes(self): + args = ['docker', 'exec', self.container_name, 'ps', '-o', 'comm='] + counts = {} + for comm in subprocess__check_output(args).splitlines(): + comm = comm.strip() + counts[comm] = counts.get(comm, 0) + 1 + + if counts != {'ps': 1, 'sshd': 1}: + assert 0, ( + 'Docker container %r contained extra running processes ' + 'after test completed: %r' % ( + self.container_name, + counts + ) + ) + def close(self): args = ['docker', 'rm', '-f', self.container_name] subprocess__check_output(args) @@ -501,6 +517,7 @@ class DockerMixin(RouterMixin): @classmethod def tearDownClass(cls): + cls.dockerized_ssh.check_processes() cls.dockerized_ssh.close() super(DockerMixin, cls).tearDownClass() From 2ee0e070374982dc13b0bd3a759ec35a102e629d Mon Sep 17 00:00:00 2001 From: David Wilson Date: Fri, 9 Aug 2019 17:10:04 +0100 Subject: [PATCH 301/383] core: MitogenProtocol.is_privileged was not set in children Follow the previous unidirectional routing fix, now errors are occurring where they should not. --- mitogen/core.py | 35 ++++++++++++++++++++++++++--------- mitogen/unix.py | 3 +-- 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index c228079d..a14286f9 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -1937,18 +1937,28 @@ class MitogenProtocol(Protocol): :class:`Protocol` implementing mitogen's :ref:`stream protocol `. """ - #: If not :data:`None`, :class:`Router` stamps this into - #: :attr:`Message.auth_id` of every message received on this stream. - auth_id = None - #: If not :data:`False`, indicates the stream has :attr:`auth_id` set and #: its value is the same as :data:`mitogen.context_id` or appears in #: :data:`mitogen.parent_ids`. is_privileged = False - def __init__(self, router, remote_id): + def __init__(self, router, remote_id, auth_id=None, + local_id=None, parent_ids=None): self._router = router self.remote_id = remote_id + #: If not :data:`None`, :class:`Router` stamps this into + #: :attr:`Message.auth_id` of every message received on this stream. 
+ self.auth_id = auth_id + + if parent_ids is None: + parent_ids = mitogen.parent_ids + if local_id is None: + local_id = mitogen.context_id + + self.is_privileged = ( + (remote_id in parent_ids) or + auth_id in ([local_id] + parent_ids) + ) self.sent_modules = set(['mitogen', 'mitogen.core']) self._input_buf = collections.deque() self._input_buf_len = 0 @@ -2800,8 +2810,8 @@ class Router(object): broker_exit_msg = 'Broker has exitted' no_route_msg = 'no route to %r, my ID is %r' unidirectional_msg = ( - 'routing mode prevents forward of message from context %d via ' - 'context %d' + 'routing mode prevents forward of message from context %d to ' + 'context %d via context %d' ) def __init__(self, broker): @@ -3152,7 +3162,9 @@ class Router(object): (in_stream.protocol.is_privileged or out_stream.protocol.is_privileged): self._maybe_send_dead(msg, self.unidirectional_msg, - in_stream.protocol.remote_id, out_stream.protocol.remote_id) + in_stream.protocol.remote_id, + out_stream.protocol.remote_id, + mitogen.context_id) return out_stream.protocol._send(msg) @@ -3641,7 +3653,12 @@ class ExternalContext(object): os.close(in_fd) out_fp = os.fdopen(os.dup(self.config.get('out_fd', 1)), 'wb', 0) - self.stream = MitogenProtocol.build_stream(self.router, parent_id) + self.stream = MitogenProtocol.build_stream( + self.router, + parent_id, + local_id=self.config['context_id'], + parent_ids=self.config['parent_ids'] + ) self.stream.accept(in_fp, out_fp) self.stream.name = 'parent' self.stream.receive_side.keep_alive = False diff --git a/mitogen/unix.py b/mitogen/unix.py index 645b061d..1af1c0ec 100644 --- a/mitogen/unix.py +++ b/mitogen/unix.py @@ -162,10 +162,9 @@ class Listener(mitogen.core.Protocol): stream = mitogen.core.MitogenProtocol.build_stream( router=self._router, remote_id=context_id, + auth_id=mitogen.context_id, ) stream.name = u'unix_client.%d' % (pid,) - stream.protocol.auth_id = mitogen.context_id - stream.protocol.is_privileged = True stream.accept(sock, sock) LOG.debug('listener: accepted connection from PID %d: %s', pid, stream.name) From 1e3621a88bc38e8256a02ef1425e5889815d47c3 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Fri, 9 Aug 2019 19:26:52 +0100 Subject: [PATCH 302/383] tests: fix format string error --- tests/router_test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/router_test.py b/tests/router_test.py index 2b4a9d78..e42a065a 100644 --- a/tests/router_test.py +++ b/tests/router_test.py @@ -357,6 +357,7 @@ def test_siblings_cant_talk(router): msg = mitogen.core.Router.unidirectional_msg % ( l2.context_id, l1.context_id, + mitogen.context_id, ) assert msg in str(e) assert 'routing mode prevents forward of ' in logs.stop() From 7ca073cdf8b65c690db428ecb51c667fd84bb4a7 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Fri, 9 Aug 2019 20:19:16 +0100 Subject: [PATCH 303/383] issue #482: ci: add stray process checks to all jobs List of interesting processes can probably expand more over time. 
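
The diff below contains the full machinery; reduced to its essence, the check
is a snapshot-and-compare of process listings taken around the test run. A
rough sketch of the intent, reusing the helper names introduced in
.ci/ci_lib.py (run_the_tests() stands in for whatever the job executes):

    # Snapshot the processes the CI treats as interesting before the run,
    # then flag any PIDs that appeared during the run and never exited.
    before = get_interesting_procs()
    run_the_tests()
    after = get_interesting_procs()

    old_pids = set(pid for pid, _ in before)
    stray = [line for pid, line in after if pid not in old_pids]
    assert not stray, 'stray processes were found:\n%s' % '\n'.join(stray)
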
--- .ci/ansible_tests.py | 9 +++++ .ci/ci_lib.py | 73 ++++++++++++++++++++++++++++++++++++++ .ci/debops_common_tests.py | 6 ++++ .ci/mitogen_install.py | 1 + .ci/mitogen_tests.py | 2 ++ 5 files changed, 91 insertions(+) diff --git a/.ci/ansible_tests.py b/.ci/ansible_tests.py index 51eab874..4df2dc70 100755 --- a/.ci/ansible_tests.py +++ b/.ci/ansible_tests.py @@ -20,11 +20,17 @@ def pause_if_interactive(): signal.pause() +interesting = ci_lib.get_interesting_procs() + + with ci_lib.Fold('unit_tests'): os.environ['SKIP_MITOGEN'] = '1' ci_lib.run('./run_tests -v') +ci_lib.check_stray_processes(interesting) + + with ci_lib.Fold('docker_setup'): containers = ci_lib.make_containers() ci_lib.start_containers(containers) @@ -75,4 +81,7 @@ with ci_lib.Fold('ansible'): pause_if_interactive() raise + +ci_lib.check_stray_processes(interesting, containers) + pause_if_interactive() diff --git a/.ci/ci_lib.py b/.ci/ci_lib.py index 34a6faba..9225723e 100644 --- a/.ci/ci_lib.py +++ b/.ci/ci_lib.py @@ -215,6 +215,44 @@ def make_containers(name_prefix='', port_offset=0): return lst +INTERESTING_COMMS = ('python', 'ssh', 'sudo', 'su', 'doas') + + +def proc_is_docker(pid): + try: + fp = open('/proc/%s/cgroup' % (pid,), 'rb') + except IOError: + return False + + try: + return 'docker' in fp.read() + finally: + fp.close() + + +def get_interesting_procs(container_name=None): + args = ['ps', '-a', '-x', '-oppid=', '-opid=', '-ocomm=', '-ocommand='] + if container_name is not None: + args = ['docker', 'exec', container_name] + args + + out = [] + for line in subprocess__check_output(args).splitlines(): + ppid, pid, comm, rest = line.split(None, 3) + if ( + ( + any(comm.startswith(s) for s in INTERESTING_COMMS) or + 'mitogen:' in rest + ) and + ( + container_name is not None or + (not proc_is_docker(pid)) + ) + ): + out.append((int(pid), line)) + + return sorted(out) + + def start_containers(containers): if os.environ.get('KEEP'): return @@ -236,9 +274,44 @@ def start_containers(containers): ] for container in containers ]) + + for container in containers: + container['interesting'] = get_interesting_procs(container['name']) + return containers +def verify_procs(hostname, old, new): + oldpids = set(pid for pid, _ in old) + if any(pid not in oldpids for pid, _ in new): + print('%r had stray processes running:' % (hostname,)) + for pid, line in new: + if pid not in oldpids: + print('New process:', line) + + print() + return False + + return True + + +def check_stray_processes(old, containers=None): + ok = True + + new = get_interesting_procs() + if old is not None: + ok &= verify_procs('test host machine', old, new) + + for container in containers or (): + ok &= verify_procs( + container['name'], + container['interesting'], + get_interesting_procs(container['name']) + ) + + assert ok, 'stray processes were found' + + def dump_file(path): print() print('--- %s ---' % (path,)) diff --git a/.ci/debops_common_tests.py b/.ci/debops_common_tests.py index 8b35de1e..e8f2907b 100755 --- a/.ci/debops_common_tests.py +++ b/.ci/debops_common_tests.py @@ -68,9 +68,15 @@ with ci_lib.Fold('job_setup'): os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False' +interesting = ci_lib.get_interesting_procs() + with ci_lib.Fold('first_run'): ci_lib.run('debops common %s', ' '.join(sys.argv[1:])) +ci_lib.check_stray_processes(interesting, containers) + with ci_lib.Fold('second_run'): ci_lib.run('debops common %s', ' '.join(sys.argv[1:])) + +ci_lib.check_stray_processes(interesting, containers) diff --git a/.ci/mitogen_install.py 
b/.ci/mitogen_install.py index 72bc75e3..b8862f89 100755 --- a/.ci/mitogen_install.py +++ b/.ci/mitogen_install.py @@ -14,4 +14,5 @@ if ci_lib.have_docker(): 'docker pull %s' % (ci_lib.image_for_distro(ci_lib.DISTRO),), ]) + ci_lib.run_batches(batches) diff --git a/.ci/mitogen_tests.py b/.ci/mitogen_tests.py index 36928ac9..4de94b4c 100755 --- a/.ci/mitogen_tests.py +++ b/.ci/mitogen_tests.py @@ -14,4 +14,6 @@ os.environ.update({ if not ci_lib.have_docker(): os.environ['SKIP_DOCKER_TESTS'] = '1' +interesting = ci_lib.get_interesting_procs() ci_lib.run('./run_tests -v') +ci_lib.check_stray_processes(interesting) From cf23d0dee6f40e5a19b633fd8c4e6744f96a7f72 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Fri, 9 Aug 2019 20:33:36 +0100 Subject: [PATCH 304/383] issue #279: add one more test for max_message_size --- tests/router_test.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/router_test.py b/tests/router_test.py index e42a065a..dba56c9b 100644 --- a/tests/router_test.py +++ b/tests/router_test.py @@ -262,6 +262,13 @@ class MessageSizeTest(testlib.BrokerMixin, testlib.TestCase): size = remote.call(return_router_max_message_size) self.assertEquals(size, 64*1024) + def test_remote_of_remote_configured(self): + router = self.klass(broker=self.broker, max_message_size=64*1024) + remote = router.local() + remote2 = router.local(via=remote) + size = remote2.call(return_router_max_message_size) + self.assertEquals(size, 64*1024) + def test_remote_exceeded(self): # Ensure new contexts receive a router with the same value. router = self.klass(broker=self.broker, max_message_size=64*1024) From faec0158d9ce9b2955867116b02984a5da301c81 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Fri, 9 Aug 2019 21:21:00 +0100 Subject: [PATCH 305/383] ci: Py3 fix --- .ci/ci_lib.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.ci/ci_lib.py b/.ci/ci_lib.py index 9225723e..251c826b 100644 --- a/.ci/ci_lib.py +++ b/.ci/ci_lib.py @@ -220,7 +220,7 @@ INTERESTING_COMMS = ('python', 'ssh', 'sudo', 'su', 'doas') def proc_is_docker(pid): try: - fp = open('/proc/%s/cgroup' % (pid,), 'rb') + fp = open('/proc/%s/cgroup' % (pid,), 'r') except IOError: return False @@ -236,7 +236,7 @@ def get_interesting_procs(container_name=None): args = ['docker', 'exec', container_name] + args out = [] - for line in subprocess__check_output(args).splitlines(): + for line in subprocess__check_output(args).decode().splitlines(): ppid, pid, comm, rest = line.split(None, 3) if ( ( From f2e35be143b1ad97a42bae6de8c446b817d20f4e Mon Sep 17 00:00:00 2001 From: David Wilson Date: Fri, 9 Aug 2019 21:58:54 +0100 Subject: [PATCH 306/383] issue #482: remove 'ssh' from checked processes Can't be used due to regular Ansible behaviour --- .ci/ci_lib.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.ci/ci_lib.py b/.ci/ci_lib.py index 251c826b..971ae5d8 100644 --- a/.ci/ci_lib.py +++ b/.ci/ci_lib.py @@ -215,7 +215,9 @@ def make_containers(name_prefix='', port_offset=0): return lst -INTERESTING_COMMS = ('python', 'ssh', 'sudo', 'su', 'doas') +# ssh removed from here because 'linear' strategy relies on processes that hang +# around after the Ansible run completes +INTERESTING_COMMS = ('python', 'sudo', 'su', 'doas') def proc_is_docker(pid): From 30ae3d85cbffe166e8779b57eab75ec549a6324b Mon Sep 17 00:00:00 2001 From: David Wilson Date: Fri, 9 Aug 2019 22:17:31 +0100 Subject: [PATCH 307/383] compat: fix Py2.4 SyntaxError --- mitogen/compat/pkgutil.py | 3 ++- 1 file changed, 2 insertions(+), 1 
deletion(-) diff --git a/mitogen/compat/pkgutil.py b/mitogen/compat/pkgutil.py index df3983a2..15eb2afa 100644 --- a/mitogen/compat/pkgutil.py +++ b/mitogen/compat/pkgutil.py @@ -542,7 +542,8 @@ def extend_path(path, name): if os.path.isfile(pkgfile): try: f = open(pkgfile) - except IOError as msg: + except IOError: + msg = sys.exc_info()[1] sys.stderr.write("Can't open %s: %s\n" % (pkgfile, msg)) else: From 1cad04185b943afe01c81d96f6333f3b3827c3e0 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Fri, 9 Aug 2019 22:21:11 +0100 Subject: [PATCH 308/383] ci: try removing exclude: to make Azure jobs work again --- .ci/azure-pipelines.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.ci/azure-pipelines.yml b/.ci/azure-pipelines.yml index 6d0832da..920e82a1 100644 --- a/.ci/azure-pipelines.yml +++ b/.ci/azure-pipelines.yml @@ -3,11 +3,6 @@ # Add steps that analyze code, save the dist with the build record, publish to a PyPI-compatible index, and more: # https://docs.microsoft.com/azure/devops/pipelines/languages/python -trigger: - branches: - exclude: - - docs-master - jobs: - job: Mac From 8bac1cf368fb39a0ddace504e952af90a37cd16a Mon Sep 17 00:00:00 2001 From: David Wilson Date: Fri, 9 Aug 2019 22:45:00 +0100 Subject: [PATCH 309/383] issue #482: another Py3 fix --- tests/testlib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/testlib.py b/tests/testlib.py index 672d677b..255fba88 100644 --- a/tests/testlib.py +++ b/tests/testlib.py @@ -457,7 +457,7 @@ class DockerizedSshDaemon(object): def check_processes(self): args = ['docker', 'exec', self.container_name, 'ps', '-o', 'comm='] counts = {} - for comm in subprocess__check_output(args).splitlines(): + for comm in subprocess__check_output(args).decode().splitlines(): comm = comm.strip() counts[comm] = counts.get(comm, 0) + 1 From 5af6c9b26fef2489ed2643b1059921a2c2d4387b Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 10 Aug 2019 00:37:17 +0100 Subject: [PATCH 310/383] issue #615: use FileService for target->controll file transfers --- ansible_mitogen/connection.py | 9 +++++---- mitogen/core.py | 9 +++++++-- mitogen/service.py | 6 +++++- tests/ansible/tests/connection_test.py | 15 +++++++++++++++ 4 files changed, 32 insertions(+), 7 deletions(-) diff --git a/ansible_mitogen/connection.py b/ansible_mitogen/connection.py index c08df611..06a152b2 100644 --- a/ansible_mitogen/connection.py +++ b/ansible_mitogen/connection.py @@ -950,11 +950,12 @@ class Connection(ansible.plugins.connection.ConnectionBase): :param str out_path: Local filesystem path to write. """ - output = self.get_chain().call( - ansible_mitogen.target.read_path, - mitogen.utils.cast(in_path), + self._connect() + ansible_mitogen.target.transfer_file( + context=self.context, + in_path=in_path, + out_path=out_path ) - ansible_mitogen.target.write_path(out_path, output) def put_data(self, out_path, data, mode=None, utimes=None): """ diff --git a/mitogen/core.py b/mitogen/core.py index a14286f9..8b4f135e 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -395,6 +395,12 @@ else: return _partition(s, sep, s.find) or (s, '', '') +def _has_parent_authority(context_id): + return ( + (context_id == mitogen.context_id) or + (context_id in mitogen.parent_ids) + ) + def has_parent_authority(msg, _stream=None): """ Policy function for use with :class:`Receiver` and @@ -403,8 +409,7 @@ def has_parent_authority(msg, _stream=None): ` has been set to that of a parent context or the current context. 
""" - return (msg.auth_id == mitogen.context_id or - msg.auth_id in mitogen.parent_ids) + return _has_parent_authority(msg.auth_id) def _signals(obj, signal): diff --git a/mitogen/service.py b/mitogen/service.py index 77a1ae1b..6654fb32 100644 --- a/mitogen/service.py +++ b/mitogen/service.py @@ -1023,7 +1023,11 @@ class FileService(Service): :raises Error: Unregistered path, or Sender did not match requestee context. """ - if path not in self._paths and not self._prefix_is_authorized(path): + if ( + (path not in self._paths) and + (not self._prefix_is_authorized(path)) and + (not mitogen.core._has_parent_authority(msg.auth_id)) + ): msg.reply(mitogen.core.CallError( Error(self.unregistered_msg % (path,)) )) diff --git a/tests/ansible/tests/connection_test.py b/tests/ansible/tests/connection_test.py index e7646716..36d61f09 100644 --- a/tests/ansible/tests/connection_test.py +++ b/tests/ansible/tests/connection_test.py @@ -99,6 +99,21 @@ class OptionalIntTest(testlib.TestCase): self.assertEquals(None, self.func({1:2})) +class FetchFileTest(ConnectionMixin, testlib.TestCase): + def test_success(self): + with tempfile.NamedTemporaryFile(prefix='mitotest') as ifp, \ + tempfile.NamedTemporaryFile(prefix='mitotest') as ofp: + ifp.write('x' * (1048576 * 4)) + ifp.flush() + ifp.seek(0) + + self.conn.fetch_file(ifp.name, ofp.name) + # transfer_file() uses os.rename rather than direct data overwrite, + # so we must reopen. + with open(ofp.name, 'rb') as fp: + self.assertEquals(ifp.read(), fp.read()) + + class PutDataTest(ConnectionMixin, testlib.TestCase): def test_out_path(self): path = tempfile.mktemp(prefix='mitotest') From c464bb534620bbdef64680dda758f5f879e41b1a Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 10 Aug 2019 00:40:08 +0100 Subject: [PATCH 311/383] issue #615: update Changelog. --- docs/changelog.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index 26b2c316..6155eb43 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -116,6 +116,12 @@ Mitogen for Ansible module, and for any other action plug-ins that establish new connections of their own. +* `#615 `_: streaming file transfer + is implemented for the ``fetch`` and any other action that transfers files + from the target to the controller. Previously the file would be sent as a + single message, requiring the file to fit in RAM and be smaller than internal + limits on the size of a single message. + * `7ae926b3 `_: the ``lineinfile`` module began leaking writable temporary file descriptors since Ansible 2.7.0. When ``lineinfile`` was used to create or modify a script, and @@ -219,6 +225,7 @@ bug reports, testing, features and fixes in this release contributed by `Szabó Dániel Ernő `_, `Ulrich Schreiner `_, `Yuki Nishida `_, +`@alexhexabeam `_, `@DavidVentura `_, `@ghp-rr `_, `@rizzly `_, and From 9e1e1ba0159ed30028e4fd0783a5af57f9619494 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 10 Aug 2019 00:44:49 +0100 Subject: [PATCH 312/383] issue #615: Py3x fix. 
--- tests/ansible/tests/connection_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ansible/tests/connection_test.py b/tests/ansible/tests/connection_test.py index 36d61f09..738aed28 100644 --- a/tests/ansible/tests/connection_test.py +++ b/tests/ansible/tests/connection_test.py @@ -103,7 +103,7 @@ class FetchFileTest(ConnectionMixin, testlib.TestCase): def test_success(self): with tempfile.NamedTemporaryFile(prefix='mitotest') as ifp, \ tempfile.NamedTemporaryFile(prefix='mitotest') as ofp: - ifp.write('x' * (1048576 * 4)) + ifp.write(b'x' * (1048576 * 4)) ifp.flush() ifp.seek(0) From 588859423a499e58ff1585cae75561e1a43379e1 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 10 Aug 2019 09:52:37 +0100 Subject: [PATCH 313/383] issue #615: another Py3x fix. --- tests/ansible/tests/connection_test.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/ansible/tests/connection_test.py b/tests/ansible/tests/connection_test.py index 738aed28..54ea3d99 100644 --- a/tests/ansible/tests/connection_test.py +++ b/tests/ansible/tests/connection_test.py @@ -101,17 +101,17 @@ class OptionalIntTest(testlib.TestCase): class FetchFileTest(ConnectionMixin, testlib.TestCase): def test_success(self): - with tempfile.NamedTemporaryFile(prefix='mitotest') as ifp, \ - tempfile.NamedTemporaryFile(prefix='mitotest') as ofp: - ifp.write(b'x' * (1048576 * 4)) - ifp.flush() - ifp.seek(0) - - self.conn.fetch_file(ifp.name, ofp.name) - # transfer_file() uses os.rename rather than direct data overwrite, - # so we must reopen. - with open(ofp.name, 'rb') as fp: - self.assertEquals(ifp.read(), fp.read()) + with tempfile.NamedTemporaryFile(prefix='mitotest') as ifp: + with tempfile.NamedTemporaryFile(prefix='mitotest') as ofp: + ifp.write(b'x' * (1048576 * 4)) + ifp.flush() + ifp.seek(0) + + self.conn.fetch_file(ifp.name, ofp.name) + # transfer_file() uses os.rename rather than direct data + # overwrite, so we must reopen. 
+ with open(ofp.name, 'rb') as fp: + self.assertEquals(ifp.read(), fp.read()) class PutDataTest(ConnectionMixin, testlib.TestCase): From 7d4ae6cec41e0652280f3766e10c66f73ddb7835 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 10 Aug 2019 10:22:08 +0100 Subject: [PATCH 314/383] issue #615: fix up FileService tests for new logic Can't perform authorization test in the same process so easily any more since it checks is_privileged --- tests/file_service_test.py | 86 +++++++++++++++++++++++++++----------- 1 file changed, 62 insertions(+), 24 deletions(-) diff --git a/tests/file_service_test.py b/tests/file_service_test.py index b9034bb1..45b621ac 100644 --- a/tests/file_service_test.py +++ b/tests/file_service_test.py @@ -22,15 +22,26 @@ class FetchTest(testlib.RouterMixin, testlib.TestCase): return recv, msg def test_unauthorized(self): + l1 = self.router.local() + service = self.klass(self.router) - recv, msg = self.replyable_msg() - service.fetch( - path='/etc/shadow', - sender=None, - msg=msg, + pool = mitogen.service.Pool( + router=self.router, + services=[service], + size=1, ) - e = self.assertRaises(mitogen.core.CallError, - lambda: recv.get().unpickle()) + try: + e = self.assertRaises(mitogen.core.CallError, + lambda: l1.call( + mitogen.service.FileService.get, + context=self.router.myself(), + path='/etc/shadow', + out_fp=None, + ) + ) + finally: + pool.stop() + expect = service.unregistered_msg % ('/etc/shadow',) self.assertTrue(expect in e.args[0]) @@ -85,30 +96,57 @@ class FetchTest(testlib.RouterMixin, testlib.TestCase): self._validate_response(recv.get().unpickle()) def test_prefix_authorized_abspath_bad(self): - recv = mitogen.core.Receiver(self.router) + l1 = self.router.local() + service = self.klass(self.router) service.register_prefix('/etc') - recv, msg = self.replyable_msg() - service.fetch( - path='/etc/foo/bar/../../../passwd', - sender=recv.to_sender(), - msg=msg, + + pool = mitogen.service.Pool( + router=self.router, + services=[service], + size=1, ) - self.assertEquals(None, recv.get().unpickle()) + path = '/etc/foo/bar/../../../passwd' + try: + e = self.assertRaises(mitogen.core.CallError, + lambda: l1.call( + mitogen.service.FileService.get, + context=self.router.myself(), + path=path, + out_fp=None, + ) + ) + finally: + pool.stop() + + expect = service.unregistered_msg % (path,) + self.assertTrue(expect in e.args[0]) + + def test_prefix_authorized_abspath_good(self): + l1 = self.router.local() - def test_prefix_authorized_abspath_bad(self): - recv = mitogen.core.Receiver(self.router) service = self.klass(self.router) service.register_prefix('/etc') - recv, msg = self.replyable_msg() - service.fetch( - path='/etc/../shadow', - sender=recv.to_sender(), - msg=msg, + path = '/etc/../shadow' + + pool = mitogen.service.Pool( + router=self.router, + services=[service], + size=1, ) - e = self.assertRaises(mitogen.core.CallError, - lambda: recv.get().unpickle()) - expect = service.unregistered_msg % ('/etc/../shadow',) + try: + e = self.assertRaises(mitogen.core.CallError, + lambda: l1.call( + mitogen.service.FileService.get, + context=self.router.myself(), + path=path, + out_fp=None + ) + ) + finally: + pool.stop() + + expect = service.unregistered_msg % (path,) self.assertTrue(expect in e.args[0]) From 1d943388b7e19e150a5d794ff58b0e5e150f044b Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 10 Aug 2019 11:04:49 +0100 Subject: [PATCH 315/383] docs: tidy up some Changelog text --- docs/changelog.rst | 41 +++++++++++++++++++---------------------- 1 file changed, 
19 insertions(+), 22 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index 6155eb43..9f3e3546 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -107,20 +107,18 @@ Mitogen for Ansible server has been increased from `15*3` seconds to `30*10` seconds. * `#600 `_: functionality to reflect - changes to ``/etc/environment`` in the running interpreter did not account - for Unicode file contents. Now the file may contain data in any single byte - encoding. + changes to ``/etc/environment`` did not account for Unicode file contents. + The file may now use any single byte encoding. * `#602 `_: connection configuration is more accurately inferred for `meta: reset_connection`, the `synchronize` - module, and for any other action plug-ins that establish new connections of - their own. + module, and for any action plug-ins that establish additional connections. * `#615 `_: streaming file transfer - is implemented for the ``fetch`` and any other action that transfers files - from the target to the controller. Previously the file would be sent as a - single message, requiring the file to fit in RAM and be smaller than internal - limits on the size of a single message. + is implemented for ``fetch`` and other actions that transfer files from the + target to the controller. Previously the file was sent in one message, + requiring it to fit in RAM and be smaller than the internal message size + limit. * `7ae926b3 `_: the ``lineinfile`` module began leaking writable temporary file descriptors since @@ -188,21 +186,20 @@ Core Library :meth:`empty` method of :class:`mitogen.core.Latch`, :class:`mitogen.core.Receiver` and :class:`mitogen.select.Select` has been replaced by a more general :meth:`size` method. :meth:`empty` will be removed - in Mitogen 0.3 + in 0.3 * `ecc570cb `_: previously - :meth:`mitogen.select.Select.add` would enqueue a single wake event when - adding an existing receiver, latch or subselect that contained multiple - buffered items, causing future :meth:`get` calls to block or fail even though - data existed that could be returned. - -* `5924af15 `_: *[security]* the - unidirectional routing mode, in which contexts may only communicate with - parents and never siblings (so a program cannot accidentally bridge - air-gapped networks) was not inherited when a child context was initiated - directly from an existing child. This did not effect the Ansible extension, - since the controller initiates any new context used for routing, only forked - tasks are initiated by children. + :meth:`mitogen.select.Select.add` would enqueue one wake event when adding an + existing receiver, latch or subselect that contained multiple buffered items, + causing :meth:`get` calls to block or fail even though data existed to return. + +* `5924af15 `_: *[security]* + unidirectional routing, where contexts may optionally only communicate with + parents and never siblings (so that air-gapped networks cannot be + unintentionally bridged) was not inherited when a child was initiated + directly from an another child. This did not effect Ansible, since the + controller initiates any new child used for routing, only forked tasks are + initiated by children. Thanks! 
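
For readers unfamiliar with the service behind the #615 entries above, a
minimal end-to-end sketch of FileService streaming a registered file into a
child, mirroring the calling convention used by the updated tests; the paths
and the save_copy() helper are invented for the example:

    import mitogen.master
    import mitogen.service

    def save_copy(master, path, dest):
        # Runs in the child: stream `path` from the master's FileService.
        with open(dest, 'wb') as out_fp:
            ok, metadata = mitogen.service.FileService.get(
                context=master, path=path, out_fp=out_fp,
            )
        return ok, metadata['size']

    broker = mitogen.master.Broker()
    router = mitogen.master.Router(broker)
    try:
        service = mitogen.service.FileService(router)
        service.register('/etc/hostname')
        pool = mitogen.service.Pool(router=router, services=[service], size=1)
        try:
            child = router.local()
            print(child.call(save_copy, router.myself(),
                             '/etc/hostname', '/tmp/hostname.copy'))
        finally:
            pool.stop()
    finally:
        broker.shutdown()
        broker.join()

The same service, combined with the parent-authority check added in patch 310,
is what lets the controller stream unregistered paths back from a target
during fetch.
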
From 93e8d5dfcc44641a337c947f5890ea2977399ca4 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 10 Aug 2019 14:52:24 +0100 Subject: [PATCH 316/383] docs: fix Sphinx warnings, add LogHandler, more docstrings --- docs/api.rst | 5 -- docs/changelog.rst | 10 +-- docs/internals.rst | 15 +++-- mitogen/core.py | 150 ++++++++++++++++++++++++++++++++++++--------- mitogen/master.py | 6 +- mitogen/parent.py | 2 +- preamble_size.py | 4 +- 7 files changed, 143 insertions(+), 49 deletions(-) diff --git a/docs/api.rst b/docs/api.rst index 2f1f9784..51895318 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -2,11 +2,6 @@ API Reference ************* -.. toctree:: - :hidden: - - signals - Package Layout ============== diff --git a/docs/changelog.rst b/docs/changelog.rst index 9f3e3546..eb889daa 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -22,7 +22,7 @@ To avail of fixes in an unreleased version, please download a ZIP file `directly from GitHub `_. Enhancements -^^^^^^^^^^^^ +~~~~~~~~~~~~ * `#556 `_, `#587 `_: Ansible 2.8 is partially @@ -61,7 +61,7 @@ Enhancements Mitogen for Ansible -^^^^^^^^^^^^^^^^^^^ +~~~~~~~~~~~~~~~~~~~ * `#363 `_: fix an obscure race matching *Permission denied* errors from some versions of ``su`` running on @@ -488,7 +488,7 @@ Enhancements `#491 `_, `#493 `_: the interface employed for in-process queues changed from `kqueue - `_ / `epoll + `_ / `epoll `_ to `poll() `_, which requires no setup or teardown, yielding a 38% latency reduction for inter-thread communication. @@ -1034,7 +1034,7 @@ bug reports, testing, features and fixes in this release contributed by `Josh Smift `_, `Luca Nunzi `_, `Orion Poplawski `_, -`Peter V. Saveliev `_, +`Peter V. Saveliev `_, `Pierre-Henry Muller `_, `Pierre-Louis Bonicoli `_, `Prateek Jain `_, @@ -1092,7 +1092,7 @@ Core Library * `#300 `_: the broker could crash on OS X during shutdown due to scheduled `kqueue - `_ filter changes for + `_ filter changes for descriptors that were closed before the IO loop resumes. As a temporary workaround, kqueue's bulk change feature is not used. diff --git a/docs/internals.rst b/docs/internals.rst index 1df1c2ad..d062b6d9 100644 --- a/docs/internals.rst +++ b/docs/internals.rst @@ -7,11 +7,6 @@ Internal API Reference Internal APIs are subject to rapid change even across minor releases. This page exists to help users modify and extend the library. -.. toctree:: - :hidden: - - signals - Constants ========= @@ -50,6 +45,10 @@ Logging See also :class:`mitogen.core.IoLoggerProtocol`. +.. currentmodule:: mitogen.core +.. autoclass:: LogHandler + :members: + .. currentmodule:: mitogen.master .. autoclass:: LogForwarder :members: @@ -270,6 +269,8 @@ Helpers .. autofunction:: minimize_source +.. _signals: + Signals ======= @@ -312,6 +313,10 @@ These signals are used internally by Mitogen. - ``disconnect`` - Fired on the Broker thread when disconnection is detected. + * - :py:class:`mitogen.core.Stream` + - ``shutdown`` + - Fired on the Broker thread when broker shutdown begins. + * - :py:class:`mitogen.core.Context` - ``disconnect`` - Fired on the Broker thread during shutdown (???) diff --git a/mitogen/core.py b/mitogen/core.py index 8b4f135e..aebd337e 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -1024,11 +1024,11 @@ class Receiver(object): routed to the context due to disconnection, and ignores messages that did not originate from the respondent context. """ - #: If not :data:`None`, a reference to a function invoked as - #: `notify(receiver)` when a new message is delivered to this receiver. 
The - #: function is invoked on the broker thread, therefore it must not block. - #: Used by :class:`mitogen.select.Select` to implement waiting on multiple - #: receivers. + #: If not :data:`None`, a function invoked as `notify(receiver)` after a + #: message has been received. The function is invoked on :class:`Broker` + #: thread, therefore it must not block. Used by + #: :class:`mitogen.select.Select` to efficiently implement waiting on + #: multiple event sources. notify = None raise_channelerror = True @@ -1513,6 +1513,22 @@ class Importer(object): class LogHandler(logging.Handler): + """ + A :class:`logging.Handler` subclass that arranges for :data:`FORWARD_LOG` + messages to be sent to a parent context in response to logging messages + generated by the current context. This is installed by default in child + contexts during bootstrap, so that :mod:`logging` events can be viewed and + managed centrally in the master process. + + The handler is initially *corked* after construction, such that it buffers + messages until :meth:`uncork` is called. This allows logging to be + installed prior to communication with the target being available, and + avoids any possible race where early log messages might be dropped. + + :param mitogen.core.Context context: + The context to send log messages towards. At present this is always + the master process. + """ def __init__(self, context): logging.Handler.__init__(self) self.context = context @@ -1549,6 +1565,9 @@ class LogHandler(logging.Handler): self._buffer_lock.release() def emit(self, rec): + """ + Send a :data:`FORWARD_LOG` message towards the target context. + """ if rec.name == 'mitogen.io' or \ getattr(self.local, 'in_emit', False): return @@ -1566,6 +1585,30 @@ class LogHandler(logging.Handler): class Stream(object): + """ + A :class:`Stream` is one readable and optionally one writeable file + descriptor (represented by :class:`Side`) aggregated alongside an + associated :class:`Protocol` that knows how to respond to IO readiness + events for those descriptors. + + Streams are registered with :class:`Broker`, and callbacks are invoked on + the broker thread in response to IO activity. When registered using + :meth:`Broker.start_receive` or :meth:`Broker._start_transmit`, the broker + may call any of :meth:`on_receive`, :meth:`on_transmit`, + :meth:`on_shutdown` or :meth:`on_disconnect`. + + It is expected that the :class:`Protocol` associated with a stream will + change over its life. For example during connection setup, the initial + protocol may be :class:`mitogen.parent.BootstrapProtocol` that knows how to + enter SSH and sudo passwords and transmit the :mod:`mitogen.core` source to + the target, before handing off to :class:`MitogenProtocol` when the target + process is initialized. + + Streams connecting to children are in turn aggregated by + :class:`mitogen.parent.Connection`, which contains additional logic for + managing any child process, and a reference to any separate ``stderr`` + :class:`Stream` connected to that process. + """ #: A :class:`Side` representing the stream's receive file descriptor. receive_side = None @@ -1578,14 +1621,16 @@ class Stream(object): #: In parents, the :class:`mitogen.parent.Connection` instance. conn = None + #: The stream name. This is used in the :meth:`__repr__` output in any log + #: messages, it may be any descriptive string. 
name = u'default' def set_protocol(self, protocol): """ - Bind a protocol to this stream, by updating :attr:`Protocol.stream` to - refer to this stream, and updating this stream's - :attr:`Stream.protocol` to the refer to the protocol. Any prior - protocol's :attr:`Protocol.stream` is set to :data:`None`. + Bind a :class:`Protocol` to this stream, by updating + :attr:`Protocol.stream` to refer to this stream, and updating this + stream's :attr:`Stream.protocol` to the refer to the protocol. Any + prior protocol's :attr:`Protocol.stream` is set to :data:`None`. """ if self.protocol: self.protocol.stream = None @@ -1593,6 +1638,21 @@ class Stream(object): self.protocol.stream = self def accept(self, rfp, wfp): + """ + Attach a pair of file objects to :attr:`receive_side` and + :attr:`transmit_side`, after wrapping them in :class:`Side` instances. + :class:`Side` will call :func:`set_nonblock` and :func:`set_cloexec` + on the underlying file descriptors during construction. + + The same file object may be used for both sides. The default + :meth:`on_disconnect` is handles the possibility that only one + descriptor may need to be closed. + + :param file rfp: + The file object to receive from. + :param file wfp: + The file object to transmit to. + """ self.receive_side = Side(self, rfp) self.transmit_side = Side(self, wfp) @@ -1601,13 +1661,17 @@ class Stream(object): def on_receive(self, broker): """ - Called by :class:`Broker` when the stream's :attr:`receive_side` has + Invoked by :class:`Broker` when the stream's :attr:`receive_side` has been marked readable using :meth:`Broker.start_receive` and the broker has detected the associated file descriptor is ready for reading. - Subclasses must implement this if :meth:`Broker.start_receive` is ever - called on them, and the method must call :meth:`on_disconect` if - reading produces an empty string. + Subclasses must implement this if they are registered using + :meth:`Broker.start_receive`, and the method must invoke + :meth:`on_disconnect` if reading produces an empty string. + + The default implementation reads :attr:`Protocol.read_size` bytes and + passes the resulting bytestring to :meth:`Protocol.on_receive`. If the + bytestring is 0 bytes, invokes :meth:`on_disconnect` instead. """ buf = self.receive_side.read(self.protocol.read_size) if not buf: @@ -1618,30 +1682,39 @@ class Stream(object): def on_transmit(self, broker): """ - Called by :class:`Broker` when the stream's :attr:`transmit_side` - has been marked writeable using :meth:`Broker._start_transmit` and - the broker has detected the associated file descriptor is ready for + Invoked by :class:`Broker` when the stream's :attr:`transmit_side` has + been marked writeable using :meth:`Broker._start_transmit` and the + broker has detected the associated file descriptor is ready for writing. - Subclasses must implement this if :meth:`Broker._start_transmit` is - ever called on them. + Subclasses must implement they are ever registerd with + :meth:`Broker._start_transmit`. + + The default implementation invokes :meth:`Protocol.on_transmit`. """ self.protocol.on_transmit(broker) def on_shutdown(self, broker): """ - Called by :meth:`Broker.shutdown` to allow the stream time to - gracefully shutdown. The base implementation simply called - :meth:`on_disconnect`. + Invoked by :meth:`Broker.shutdown` to allow the stream time to + gracefully shutdown. + + The default implementation emits a ``shutdown`` signal before + invoking :meth:`on_disconnect`. 
""" fire(self, 'shutdown') self.protocol.on_shutdown(broker) def on_disconnect(self, broker): """ - Called by :class:`Broker` to force disconnect the stream. The base - implementation simply closes :attr:`receive_side` and - :attr:`transmit_side` and unregisters the stream from the broker. + Invoked by :class:`Broker` to force disconnect the stream during + shutdown, invoked by the default :meth:`on_shutdown` implementation, + and usually invoked by any subclass :meth:`on_receive` implementation + in response to a 0-byte read. + + The base implementation fires a ``disconnect`` event, then closes + :attr:`receive_side` and :attr:`transmit_side` after unregistering the + stream from the broker. """ fire(self, 'disconnect') self.protocol.on_disconnect(broker) @@ -1666,6 +1739,8 @@ class Protocol(object): #: :data:`None`. stream = None + #: The size of the read buffer used by :class:`Stream` when this is the + #: active protocol for the stream. read_size = CHUNK_SIZE @classmethod @@ -2369,8 +2444,18 @@ class Latch(object): See :ref:`waking-sleeping-threads` for further discussion. """ + #: The :class:`Poller` implementation to use for waiting. Since the poller + #: will be very short-lived, we prefer :class:`mitogen.parent.PollPoller` + #: if it is available, or :class:`mitogen.core.Poller` otherwise, since + #: these implementations require no system calls to create, configure or + #: destroy. poller_class = Poller + #: If not :data:`None`, a function invoked as `notify(latch)` after a + #: successful call to :meth:`put`. The function is invoked on the + #: :meth:`put` caller's thread, which may be the :class:`Broker` thread, + #: therefore it must not block. Used by :class:`mitogen.select.Select` to + #: efficiently implement waiting on multiple event sources. notify = None # The _cls_ prefixes here are to make it crystal clear in the code which @@ -2725,15 +2810,22 @@ class Waker(Protocol): class IoLoggerProtocol(DelimitedProtocol): """ - Handle redirection of standard IO into the :mod:`logging` package. + Attached to one end of a socket pair whose other end overwrites one of the + standard ``stdout`` or ``stderr`` file descriptors in a child context. + Received data is split up into lines, decoded as UTF-8 and logged to the + :mod:`logging` package as either the ``stdout`` or ``stderr`` logger. + + Logging in child contexts is in turn forwarded to the master process using + :class:`LogHandler`. """ @classmethod def build_stream(cls, name, dest_fd): """ - Even though the descriptor `dest_fd` will hold the opposite end of the - socket open, we must keep a separate dup() of it (i.e. wsock) in case - some code decides to overwrite `dest_fd` later, which would thus break - :meth:`on_shutdown`. + Even though the file descriptor `dest_fd` will hold the opposite end of + the socket open, we must keep a separate dup() of it (i.e. wsock) in + case some code decides to overwrite `dest_fd` later, which would + prevent break :meth:`on_shutdown` from calling :meth:`shutdown() + ` on it. """ rsock, wsock = socket.socketpair() os.dup2(wsock.fileno(), dest_fd) diff --git a/mitogen/master.py b/mitogen/master.py index 814f7019..09da775e 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -741,7 +741,7 @@ class ModuleFinder(object): The list is determined by retrieving the source code of `fullname`, compiling it, and examining all IMPORT_NAME ops. 
- :param fullname: Fully qualified name of an _already imported_ module + :param fullname: Fully qualified name of an *already imported* module for which source code can be retrieved :type fullname: str """ @@ -789,7 +789,7 @@ class ModuleFinder(object): This method is like :py:meth:`find_related_imports`, but also recursively searches any modules which are imported by `fullname`. - :param fullname: Fully qualified name of an _already imported_ module + :param fullname: Fully qualified name of an *already imported* module for which source code can be retrieved :type fullname: str """ @@ -841,7 +841,7 @@ class ModuleResponder(object): def add_source_override(self, fullname, path, source, is_pkg): """ - See :meth:`ModuleFinder.add_source_override. + See :meth:`ModuleFinder.add_source_override`. """ self._finder.add_source_override(fullname, path, source, is_pkg) diff --git a/mitogen/parent.py b/mitogen/parent.py index 79b484c2..ec218913 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -2516,7 +2516,7 @@ class Reaper(object): :param mitogen.core.Broker broker: The :class:`Broker` on which to install timers - :param Process proc: + :param mitogen.parent.Process proc: The process to reap. :param bool kill: If :data:`True`, send ``SIGTERM`` and ``SIGKILL`` to the process. diff --git a/preamble_size.py b/preamble_size.py index 692ad7b1..43c10029 100644 --- a/preamble_size.py +++ b/preamble_size.py @@ -24,10 +24,12 @@ conn = mitogen.ssh.Connection(options, router) conn.context = context print('SSH command size: %s' % (len(' '.join(conn.get_boot_command())),)) -print('Preamble size: %s (%.2fKiB)' % ( +print('Bootstrap (mitogen.core) size: %s (%.2fKiB)' % ( len(conn.get_preamble()), len(conn.get_preamble()) / 1024.0, )) +print('') + if '--dump' in sys.argv: print(zlib.decompress(conn.get_preamble())) exit() From a91a8bf19c203f884ca0f8ca134789f8a7b21c0b Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 10 Aug 2019 17:20:30 +0100 Subject: [PATCH 317/383] docs: upgrade Sphinx to 2.1.2, require Python 3 to build docs. --- docs/requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index a93c2140..85f30a2e 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,3 +1,3 @@ -Sphinx==1.7.1 -sphinxcontrib-programoutput==0.11 +Sphinx==2.1.2 +sphinxcontrib-programoutput==0.14 alabaster==0.7.10 From 284dda53e8fe565074e1ed6523012ae596d3c044 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 10 Aug 2019 17:20:46 +0100 Subject: [PATCH 318/383] preamble_size: make it work on Python 3. 
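
The one-line fix that follows exists because zlib.compress() only accepts
bytes on Python 3, while the minified module source is a text string. A tiny
sketch of the failure and the fix:

    import zlib

    minimized = u"def f():\n    pass\n"       # minify returns text
    # zlib.compress(minimized, 9)             # TypeError on Python 3
    compressed = zlib.compress(minimized.encode(), 9)
    print(len(minimized), len(compressed))
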
--- preamble_size.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/preamble_size.py b/preamble_size.py index 43c10029..f0d1e804 100644 --- a/preamble_size.py +++ b/preamble_size.py @@ -59,7 +59,7 @@ for mod in ( original_size = len(original) minimized = mitogen.minify.minimize_source(original) minimized_size = len(minimized) - compressed = zlib.compress(minimized, 9) + compressed = zlib.compress(minimized.encode(), 9) compressed_size = len(compressed) print( '%-25s' From 379dca90b957c84758b890703d19395d2e96ccf1 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 10 Aug 2019 17:21:11 +0100 Subject: [PATCH 319/383] docs: move decorator docs into core.py and use autodecorator --- docs/api.rst | 23 ++--------------------- docs/conf.py | 2 +- mitogen/core.py | 20 ++++++++++++++++++++ 3 files changed, 23 insertions(+), 22 deletions(-) diff --git a/docs/api.rst b/docs/api.rst index 51895318..273e31e3 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -26,29 +26,10 @@ mitogen.core .. automodule:: mitogen.core .. currentmodule:: mitogen.core -.. decorator:: takes_econtext - - Decorator that marks a function or class method to automatically receive a - kwarg named `econtext`, referencing the - :class:`mitogen.core.ExternalContext` active in the context in which the - function is being invoked in. The decorator is only meaningful when the - function is invoked via :data:`CALL_FUNCTION - `. - - When the function is invoked directly, `econtext` must still be passed to - it explicitly. +.. autodecorator:: takes_econtext .. currentmodule:: mitogen.core -.. decorator:: takes_router - - Decorator that marks a function or class method to automatically receive a - kwarg named `router`, referencing the :class:`mitogen.core.Router` - active in the context in which the function is being invoked in. The - decorator is only meaningful when the function is invoked via - :data:`CALL_FUNCTION `. - - When the function is invoked directly, `router` must still be passed to it - explicitly. +.. autodecorator:: takes_router mitogen.master diff --git a/docs/conf.py b/docs/conf.py index 2ee63aa8..aa91c8b8 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -7,7 +7,7 @@ VERSION = '%s.%s.%s' % mitogen.__version__ author = u'Network Genomics' copyright = u'2019, Network Genomics' -exclude_patterns = ['_build'] +exclude_patterns = ['_build', '.venv'] extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinxcontrib.programoutput'] html_show_copyright = False html_show_sourcelink = False diff --git a/mitogen/core.py b/mitogen/core.py index aebd337e..a8de98e2 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -448,11 +448,31 @@ def fire(obj, name, *args, **kwargs): def takes_econtext(func): + """ + Decorator that marks a function or class method to automatically receive a + kwarg named `econtext`, referencing the + :class:`mitogen.core.ExternalContext` active in the context in which the + function is being invoked in. The decorator is only meaningful when the + function is invoked via :data:`CALL_FUNCTION `. + + When the function is invoked directly, `econtext` must still be passed to + it explicitly. + """ func.mitogen_takes_econtext = True return func def takes_router(func): + """ + Decorator that marks a function or class method to automatically receive a + kwarg named `router`, referencing the :class:`mitogen.core.Router` active + in the context in which the function is being invoked in. The decorator is + only meaningful when the function is invoked via :data:`CALL_FUNCTION + `. 
+ + When the function is invoked directly, `router` must still be passed to it + explicitly. + """ func.mitogen_takes_router = True return func From 57012e0f72fb27176fc21deb382779a5e21303fe Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 10 Aug 2019 17:36:10 +0100 Subject: [PATCH 320/383] Add mitogen.core.now() and use it everywhere; closes #614. --- docs/api.rst | 8 ++++++++ mitogen/core.py | 12 ++++++++---- mitogen/master.py | 8 ++++---- mitogen/parent.py | 12 ++++++------ mitogen/service.py | 5 +++-- tests/bench/fork.py | 8 ++++---- tests/bench/large_messages.py | 10 ++++++---- tests/bench/latch_roundtrip.py | 5 +++-- tests/bench/local.py | 9 +++++---- tests/bench/megatime.py | 6 ++++-- tests/bench/roundtrip.py | 5 +++-- tests/bench/service.py | 7 ++++--- tests/bench/ssh-roundtrip.py | 9 +++++---- tests/bench/throughput.py | 5 +++-- tests/create_child_test.py | 3 +-- tests/parent_test.py | 5 +++-- tests/poller_test.py | 8 ++++---- tests/testlib.py | 6 +++--- tests/timer_test.py | 4 ++-- tests/unix_test.py | 4 ++-- 20 files changed, 81 insertions(+), 58 deletions(-) diff --git a/docs/api.rst b/docs/api.rst index 273e31e3..09aa8582 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -603,6 +603,14 @@ Fork Safety Utility Functions ================= +.. currentmodule:: mitogen.core +.. function:: now + + A reference to :func:`time.time` on Python 2, or :func:`time.monotonic` on + Python >3.3. We prefer :func:`time.monotonic` when available to ensure + timers are not impacted by system clock changes. + + .. module:: mitogen.utils A random assortment of utility functions useful on masters and children. diff --git a/mitogen/core.py b/mitogen/core.py index a8de98e2..d6e1739e 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -362,6 +362,10 @@ def to_text(o): return UnicodeType(o) +# Documented in api.rst to work around Sphinx limitation. +now = getattr(time, 'monotonic', time.time) + + # Python 2.4 try: any @@ -636,7 +640,7 @@ def _real_profile_hook(name, func, *args): return func(*args) finally: path = _profile_fmt % { - 'now': int(1e6 * time.time()), + 'now': int(1e6 * now()), 'identity': name, 'pid': os.getpid(), 'ext': '%s' @@ -3482,9 +3486,9 @@ class Broker(object): for _, (side, _) in self.poller.readers + self.poller.writers: self._call(side.stream, side.stream.on_shutdown) - deadline = time.time() + self.shutdown_timeout - while self.keep_alive() and time.time() < deadline: - self._loop_once(max(0, deadline - time.time())) + deadline = now() + self.shutdown_timeout + while self.keep_alive() and now() < deadline: + self._loop_once(max(0, deadline - now())) if self.keep_alive(): LOG.error('%r: pending work still existed %d seconds after ' diff --git a/mitogen/master.py b/mitogen/master.py index 09da775e..b4eb6643 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -910,9 +910,9 @@ class ModuleResponder(object): if self.minify_safe_re.search(source): # If the module contains a magic marker, it's safe to minify. 
- t0 = time.time() + t0 = mitogen.core.now() source = mitogen.minify.minimize_source(source).encode('utf-8') - self.minify_secs += time.time() - t0 + self.minify_secs += mitogen.core.now() - t0 if is_pkg: pkg_present = get_child_modules(path) @@ -1001,11 +1001,11 @@ class ModuleResponder(object): LOG.warning('_on_get_module(): dup request for %r from %r', fullname, stream) - t0 = time.time() + t0 = mitogen.core.now() try: self._send_module_and_related(stream, fullname) finally: - self.get_module_secs += time.time() - t0 + self.get_module_secs += mitogen.core.now() - t0 def _send_forward_module(self, stream, context, fullname): if stream.protocol.remote_id != context.context_id: diff --git a/mitogen/parent.py b/mitogen/parent.py index ec218913..7a94c2b0 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -633,7 +633,7 @@ class TimerList(object): :meth:`expire`. The main user interface to :class:`TimerList` is :meth:`schedule`. """ - _now = time.time + _now = mitogen.core.now def __init__(self): self._lst = [] @@ -1124,7 +1124,7 @@ class LineLoggingProtocolMixin(object): def on_line_received(self, line): self.logged_partial = None - self.logged_lines.append((time.time(), line)) + self.logged_lines.append((mitogen.core.now(), line)) self.logged_lines[:] = self.logged_lines[-100:] return super(LineLoggingProtocolMixin, self).on_line_received(line) @@ -1134,7 +1134,7 @@ class LineLoggingProtocolMixin(object): def on_disconnect(self, broker): if self.logged_partial: - self.logged_lines.append((time.time(), self.logged_partial)) + self.logged_lines.append((mitogen.core.now(), self.logged_partial)) self.logged_partial = None super(LineLoggingProtocolMixin, self).on_disconnect(broker) @@ -1324,7 +1324,7 @@ class Options(object): self.profiling = profiling self.unidirectional = unidirectional self.max_message_size = max_message_size - self.connect_deadline = time.time() + self.connect_timeout + self.connect_deadline = mitogen.core.now() + self.connect_timeout class Connection(object): @@ -1819,7 +1819,7 @@ class CallChain(object): socket.gethostname(), os.getpid(), thread.get_ident(), - int(1e6 * time.time()), + int(1e6 * mitogen.core.now()), ) def __repr__(self): @@ -2569,7 +2569,7 @@ class Reaper(object): def _install_timer(self, delay): new = self._timer is None self._timer = self.broker.timers.schedule( - when=time.time() + delay, + when=mitogen.core.now() + delay, func=self.reap, ) if new: diff --git a/mitogen/service.py b/mitogen/service.py index 6654fb32..8882b30b 100644 --- a/mitogen/service.py +++ b/mitogen/service.py @@ -1109,7 +1109,7 @@ class FileService(Service): :meth:`fetch`. """ LOG.debug('get_file(): fetching %r from %r', path, context) - t0 = time.time() + t0 = mitogen.core.now() recv = mitogen.core.Receiver(router=context.router) metadata = context.call_service( service_name=cls.name(), @@ -1143,5 +1143,6 @@ class FileService(Service): path, metadata['size'], received_bytes) LOG.debug('target.get_file(): fetched %d bytes of %r from %r in %dms', - metadata['size'], path, context, 1000 * (time.time() - t0)) + metadata['size'], path, context, + 1000 * (mitogen.core.now() - t0)) return ok, metadata diff --git a/tests/bench/fork.py b/tests/bench/fork.py index b2f2382c..af5cb3a7 100644 --- a/tests/bench/fork.py +++ b/tests/bench/fork.py @@ -3,13 +3,13 @@ Measure latency of .fork() setup/teardown. 
""" import mitogen -import time +import mitogen.core @mitogen.main() def main(router): - t0 = time.time() + t0 = mitogen.core.now() for x in xrange(200): - t = time.time() + t = mitogen.core.now() ctx = router.fork() ctx.shutdown(wait=True) - print '++', 1000 * ((time.time() - t0) / (1.0+x)) + print '++', 1000 * ((mitogen.core.now() - t0) / (1.0+x)) diff --git a/tests/bench/large_messages.py b/tests/bench/large_messages.py index 24220023..e977e36d 100644 --- a/tests/bench/large_messages.py +++ b/tests/bench/large_messages.py @@ -4,7 +4,9 @@ import subprocess import time import socket + import mitogen +import mitogen.core @mitogen.main() @@ -15,12 +17,12 @@ def main(router): s = ' ' * n print('bytes in %.2fMiB string...' % (n/1048576.0),) - t0 = time.time() + t0 = mitogen.core.now() for x in range(10): - tt0 = time.time() + tt0 = mitogen.core.now() assert n == c.call(len, s) - print('took %dms' % (1000 * (time.time() - tt0),)) - t1 = time.time() + print('took %dms' % (1000 * (mitogen.core.now() - tt0),)) + t1 = mitogen.core.now() print('total %dms / %dms avg / %.2fMiB/sec' % ( 1000 * (t1 - t0), (1000 * (t1 - t0)) / (x + 1), diff --git a/tests/bench/latch_roundtrip.py b/tests/bench/latch_roundtrip.py index 49314fb9..1198aa48 100644 --- a/tests/bench/latch_roundtrip.py +++ b/tests/bench/latch_roundtrip.py @@ -6,6 +6,7 @@ import threading import time import mitogen +import mitogen.core import mitogen.utils import ansible_mitogen.affinity @@ -33,8 +34,8 @@ t2.start() ready.get() ready.get() -t0 = time.time() +t0 = mitogen.core.now() l1.put(None) t1.join() t2.join() -print('++', int(1e6 * ((time.time() - t0) / (1.0+X))), 'usec') +print('++', int(1e6 * ((mitogen.core.now() - t0) / (1.0+X))), 'usec') diff --git a/tests/bench/local.py b/tests/bench/local.py index 2808d803..aefeb84d 100644 --- a/tests/bench/local.py +++ b/tests/bench/local.py @@ -5,6 +5,7 @@ Measure latency of .local() setup. import time import mitogen +import mitogen.core import mitogen.utils import ansible_mitogen.affinity @@ -15,10 +16,10 @@ mitogen.utils.setup_gil() @mitogen.main() def main(router): - t0=time.time() + t0 = mitogen.core.now() for x in range(100): - t = time.time() + t = mitogen.core.now() f = router.local()# debug=True) - tt = time.time() + tt = mitogen.core.now() print(x, 1000 * (tt - t)) - print('%.03f ms' % (1000 * (time.time() - t0) / (1.0 + x))) + print('%.03f ms' % (1000 * (mitogen.core.now() - t0) / (1.0 + x))) diff --git a/tests/bench/megatime.py b/tests/bench/megatime.py index 6f5f3b5d..40cd9986 100755 --- a/tests/bench/megatime.py +++ b/tests/bench/megatime.py @@ -4,12 +4,14 @@ import sys import os import time +import mitogen.core + times = [] for x in range(5): - t0 = time.time() + t0 = mitogen.core.now() os.spawnvp(os.P_WAIT, sys.argv[1], sys.argv[1:]) - t = time.time() - t0 + t = mitogen.core.now() - t0 times.append(t) print('+++', t) diff --git a/tests/bench/roundtrip.py b/tests/bench/roundtrip.py index 8d86d75b..8f31b1a2 100644 --- a/tests/bench/roundtrip.py +++ b/tests/bench/roundtrip.py @@ -5,6 +5,7 @@ Measure latency of local RPC. 
import time import mitogen +import mitogen.core import mitogen.utils import ansible_mitogen.affinity @@ -23,7 +24,7 @@ def do_nothing(): def main(router): f = router.fork() f.call(do_nothing) - t0 = time.time() + t0 = mitogen.core.now() for x in xrange(20000): f.call(do_nothing) - print('++', int(1e6 * ((time.time() - t0) / (1.0+x))), 'usec') + print('++', int(1e6 * ((mitogen.core.now() - t0) / (1.0+x))), 'usec') diff --git a/tests/bench/service.py b/tests/bench/service.py index 6d866b5c..267ae3f6 100644 --- a/tests/bench/service.py +++ b/tests/bench/service.py @@ -4,8 +4,9 @@ Measure latency of local service RPC. import time -import mitogen.service import mitogen +import mitogen.core +import mitogen.service class MyService(mitogen.service.Service): @@ -17,7 +18,7 @@ class MyService(mitogen.service.Service): @mitogen.main() def main(router): f = router.fork() - t0 = time.time() + t0 = mitogen.core.now() for x in range(1000): f.call_service(service_name=MyService, method_name='ping') - print('++', int(1e6 * ((time.time() - t0) / (1.0+x))), 'usec') + print('++', int(1e6 * ((mitogen.core.now() - t0) / (1.0+x))), 'usec') diff --git a/tests/bench/ssh-roundtrip.py b/tests/bench/ssh-roundtrip.py index 8745505d..06c596c0 100644 --- a/tests/bench/ssh-roundtrip.py +++ b/tests/bench/ssh-roundtrip.py @@ -6,6 +6,7 @@ import sys import time import mitogen +import mitogen.core import mitogen.utils import ansible_mitogen.affinity @@ -24,12 +25,12 @@ def do_nothing(): def main(router): f = router.ssh(hostname=sys.argv[1]) f.call(do_nothing) - t0 = time.time() - end = time.time() + 5.0 + t0 = mitogen.core.now() + end = mitogen.core.now() + 5.0 i = 0 - while time.time() < end: + while mitogen.core.now() < end: f.call(do_nothing) i += 1 - t1 = time.time() + t1 = mitogen.core.now() print('++', float(1e3 * (t1 - t0) / (1.0+i)), 'ms') diff --git a/tests/bench/throughput.py b/tests/bench/throughput.py index 42604826..acb51afa 100644 --- a/tests/bench/throughput.py +++ b/tests/bench/throughput.py @@ -8,6 +8,7 @@ import tempfile import time import mitogen +import mitogen.core import mitogen.service import ansible_mitogen.affinity @@ -35,9 +36,9 @@ def run_test(router, fp, s, context): size = fp.tell() print('Testing %s...' 
% (s,)) context.call(prepare) - t0 = time.time() + t0 = mitogen.core.now() context.call(transfer, router.myself(), fp.name) - t1 = time.time() + t1 = mitogen.core.now() print('%s took %.2f ms to transfer %.2f MiB, %.2f MiB/s' % ( s, 1000 * (t1 - t0), size / 1048576.0, (size / (t1 - t0) / 1048576.0), diff --git a/tests/create_child_test.py b/tests/create_child_test.py index 1c2f526a..26f10d57 100644 --- a/tests/create_child_test.py +++ b/tests/create_child_test.py @@ -9,6 +9,7 @@ import tempfile import mock import unittest2 +import mitogen.core import mitogen.parent from mitogen.core import b @@ -188,7 +189,6 @@ class TtyCreateChildTest(testlib.TestCase): proc = self.func([ 'bash', '-c', 'exec 2>%s; echo hi > /dev/tty' % (tf.name,) ]) - deadline = time.time() + 5.0 mitogen.core.set_block(proc.stdin.fileno()) # read(3) below due to https://bugs.python.org/issue37696 self.assertEquals(mitogen.core.b('hi\n'), proc.stdin.read(3)) @@ -271,7 +271,6 @@ class TtyCreateChildTest(testlib.TestCase): proc = self.func([ 'bash', '-c', 'exec 2>%s; echo hi > /dev/tty' % (tf.name,) ]) - deadline = time.time() + 5.0 self.assertEquals(mitogen.core.b('hi\n'), wait_read(proc.stdout, 3)) waited_pid, status = os.waitpid(proc.pid, 0) self.assertEquals(proc.pid, waited_pid) diff --git a/tests/parent_test.py b/tests/parent_test.py index b314d472..d6efe998 100644 --- a/tests/parent_test.py +++ b/tests/parent_test.py @@ -12,6 +12,7 @@ import unittest2 import testlib from testlib import Popen__terminate +import mitogen.core import mitogen.parent try: @@ -21,8 +22,8 @@ except NameError: def wait_for_child(pid, timeout=1.0): - deadline = time.time() + timeout - while timeout < time.time(): + deadline = mitogen.core.now() + timeout + while timeout < mitogen.core.now(): try: target_pid, status = os.waitpid(pid, os.WNOHANG) if target_pid == pid: diff --git a/tests/poller_test.py b/tests/poller_test.py index b05a9b94..3ed59ae3 100644 --- a/tests/poller_test.py +++ b/tests/poller_test.py @@ -164,14 +164,14 @@ class CloseMixin(PollerMixin): class PollMixin(PollerMixin): def test_empty_zero_timeout(self): - t0 = time.time() + t0 = mitogen.core.now() self.assertEquals([], list(self.p.poll(0))) - self.assertTrue((time.time() - t0) < .1) # vaguely reasonable + self.assertTrue((mitogen.core.now() - t0) < .1) # vaguely reasonable def test_empty_small_timeout(self): - t0 = time.time() + t0 = mitogen.core.now() self.assertEquals([], list(self.p.poll(.2))) - self.assertTrue((time.time() - t0) >= .2) + self.assertTrue((mitogen.core.now() - t0) >= .2) class ReadableMixin(PollerMixin, SockMixin): diff --git a/tests/testlib.py b/tests/testlib.py index 255fba88..73d3438d 100644 --- a/tests/testlib.py +++ b/tests/testlib.py @@ -107,11 +107,11 @@ def wait_for_port( If a regex pattern is supplied try to find it in the initial data. Return None on success, or raise on error. 
""" - start = time.time() + start = mitogen.core.now() end = start + overall_timeout addr = (host, port) - while time.time() < end: + while mitogen.core.now() < end: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(connect_timeout) try: @@ -130,7 +130,7 @@ def wait_for_port( sock.settimeout(receive_timeout) data = mitogen.core.b('') found = False - while time.time() < end: + while mitogen.core.now() < end: try: resp = sock.recv(1024) except socket.timeout: diff --git a/tests/timer_test.py b/tests/timer_test.py index ff3e022f..749405a4 100644 --- a/tests/timer_test.py +++ b/tests/timer_test.py @@ -162,7 +162,7 @@ def do_timer_test_econtext(econtext): def do_timer_test(broker): - now = time.time() + now = mitogen.core.now() latch = mitogen.core.Latch() broker.defer(lambda: broker.timers.schedule( @@ -172,7 +172,7 @@ def do_timer_test(broker): ) assert 'hi' == latch.get() - assert time.time() > (now + 0.250) + assert mitogen.core.now() > (now + 0.250) class BrokerTimerTest(testlib.TestCase): diff --git a/tests/unix_test.py b/tests/unix_test.py index ba1ba152..cf3e595f 100644 --- a/tests/unix_test.py +++ b/tests/unix_test.py @@ -86,12 +86,12 @@ class ClientTest(testlib.TestCase): def _try_connect(self, path): # give server a chance to setup listener - timeout = time.time() + 30.0 + timeout = mitogen.core.now() + 30.0 while True: try: return mitogen.unix.connect(path) except mitogen.unix.ConnectError: - if time.time() > timeout: + if mitogen.core.now() > timeout: raise time.sleep(0.1) From 4fa760cd21f5bd0d572a3e90f737ad1f20f0f906 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 10 Aug 2019 20:30:24 +0100 Subject: [PATCH 321/383] issue #613: add tests for all the weird shutdown methods --- docs/internals.rst | 5 +++ mitogen/parent.py | 3 +- tests/router_test.py | 105 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 112 insertions(+), 1 deletion(-) diff --git a/docs/internals.rst b/docs/internals.rst index d062b6d9..40ea33df 100644 --- a/docs/internals.rst +++ b/docs/internals.rst @@ -321,6 +321,11 @@ These signals are used internally by Mitogen. - ``disconnect`` - Fired on the Broker thread during shutdown (???) + * - :py:class:`mitogen.parent.Process` + - ``exit`` + - Fired when :class:`mitogen.parent.Reaper` detects subprocess has fully + exitted. + * - :py:class:`mitogen.core.Broker` - ``shutdown`` - Fired after Broker.shutdown() is called. diff --git a/mitogen/parent.py b/mitogen/parent.py index 7a94c2b0..983df829 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -2331,7 +2331,7 @@ class Router(mitogen.core.Router): directly connected. """ stream = self.stream_by_id(context) - if stream.protocol.remote_id != context.context_id: + if stream is None or stream.protocol.remote_id != context.context_id: return l = mitogen.core.Latch() @@ -2589,6 +2589,7 @@ class Reaper(object): status = self.proc.poll() if status is not None: LOG.debug('%r: %s', self.proc, returncode_to_str(status)) + mitogen.core.fire(self.proc, 'exit') self._remove_timer() return diff --git a/tests/router_test.py b/tests/router_test.py index dba56c9b..a9ad5ae1 100644 --- a/tests/router_test.py +++ b/tests/router_test.py @@ -1,3 +1,5 @@ +import errno +import os import sys import time import zlib @@ -425,5 +427,108 @@ class EgressIdsTest(testlib.RouterMixin, testlib.TestCase): ])) +class ShutdownTest(testlib.RouterMixin, testlib.TestCase): + # 613: tests for all the weird shutdown() variants we ended up with. 
+ + def test_shutdown_wait_false(self): + l1 = self.router.local() + pid = l1.call(os.getpid) + + conn = self.router.stream_by_id(l1.context_id).conn + exitted = mitogen.core.Latch() + mitogen.core.listen(conn.proc, 'exit', exitted.put) + + l1.shutdown(wait=False) + exitted.get() + + e = self.assertRaises(OSError, + lambda: os.waitpid(pid, 0)) + self.assertEquals(e.args[0], errno.ECHILD) + + e = self.assertRaises(mitogen.core.ChannelError, + lambda: l1.call(os.getpid)) + self.assertEquals(e.args[0], mitogen.core.Router.no_route_msg % ( + l1.context_id, + mitogen.context_id, + )) + + def test_shutdown_wait_true(self): + l1 = self.router.local() + pid = l1.call(os.getpid) + + conn = self.router.stream_by_id(l1.context_id).conn + exitted = mitogen.core.Latch() + mitogen.core.listen(conn.proc, 'exit', exitted.put) + + l1.shutdown(wait=True) + exitted.get() + + e = self.assertRaises(OSError, + lambda: os.waitpid(pid, 0)) + self.assertEquals(e.args[0], errno.ECHILD) + + e = self.assertRaises(mitogen.core.ChannelError, + lambda: l1.call(os.getpid)) + self.assertEquals(e.args[0], mitogen.core.Router.no_route_msg % ( + l1.context_id, + mitogen.context_id, + )) + + def test_disconnect_invalid_context(self): + self.router.disconnect( + mitogen.core.Context(self.router, 1234) + ) + + def test_disconnect_valid_context(self): + l1 = self.router.local() + pid = l1.call(os.getpid) + + strm = self.router.stream_by_id(l1.context_id) + + exitted = mitogen.core.Latch() + mitogen.core.listen(strm.conn.proc, 'exit', exitted.put) + self.router.disconnect_stream(strm) + exitted.get() + + e = self.assertRaises(OSError, + lambda: os.waitpid(pid, 0)) + self.assertEquals(e.args[0], errno.ECHILD) + + e = self.assertRaises(mitogen.core.ChannelError, + lambda: l1.call(os.getpid)) + self.assertEquals(e.args[0], mitogen.core.Router.no_route_msg % ( + l1.context_id, + mitogen.context_id, + )) + + def test_disconnet_all(self): + l1 = self.router.local() + l2 = self.router.local() + + pids = [l1.call(os.getpid), l2.call(os.getpid)] + + exitted = mitogen.core.Latch() + for ctx in l1, l2: + strm = self.router.stream_by_id(ctx.context_id) + mitogen.core.listen(strm.conn.proc, 'exit', exitted.put) + + self.router.disconnect_all() + exitted.get() + exitted.get() + + for pid in pids: + e = self.assertRaises(OSError, + lambda: os.waitpid(pid, 0)) + self.assertEquals(e.args[0], errno.ECHILD) + + for ctx in l1, l2: + e = self.assertRaises(mitogen.core.ChannelError, + lambda: ctx.call(os.getpid)) + self.assertEquals(e.args[0], mitogen.core.Router.no_route_msg % ( + ctx.context_id, + mitogen.context_id, + )) + + if __name__ == '__main__': unittest2.main() From f78a5f08c6973496deded69de2edc0e12f9c97ba Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 10 Aug 2019 23:40:36 +0000 Subject: [PATCH 322/383] issue #605: ansible: share a sem_t instead of a pthread_mutex_t The previous version quite reliably causes worker deadlocks within 10 minutes running: # 100 times: - import_playbook: integration/async/runner_one_job.yml # 100 times: - import_playbook: integration/module_utils/adjacent_to_playbook.yml via .ci/soak/mitogen.sh with PLAYBOOK= set to the above playbook. Attaching to the worker with gdb reveals it in an instruction immediately following a futex() call, which likely returned EINTR due to attaching gdb. Examining the pthread_mutex_t state reveals it to be completely unlocked. pthread_mutex_t on Linux should have zero trouble living in shmem, so it's not clear how this deadlock is happening. 
Meanwhile POSIX semaphores are explicitly designed for cross-process use and have a completely different internal implementation, so try those instead. 1 hour of soaking reveals no deadlock. This is about avoiding managing a lockable temporary file on disk to contain our counter, and somehow communicating a reference to it into subprocesses (despite the subprocess module closing inherited fds, etc), somehow deleting it reliably at exit, and somehow avoiding concurrent Ansible runs stepping on the same file. For now ctypes is still less pain. A final possibility would be to abandon a shared counter and instead pick a CPU based on the hash of e.g. the new child's process ID. That would likely balance equally well, and might be worth exploring when making this code work on BSD. --- ansible_mitogen/affinity.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/ansible_mitogen/affinity.py b/ansible_mitogen/affinity.py index 9eb6597a..67e16d8a 100644 --- a/ansible_mitogen/affinity.py +++ b/ansible_mitogen/affinity.py @@ -92,37 +92,37 @@ try: _libc = ctypes.CDLL(None, use_errno=True) _strerror = _libc.strerror _strerror.restype = ctypes.c_char_p - _pthread_mutex_init = _libc.pthread_mutex_init - _pthread_mutex_lock = _libc.pthread_mutex_lock - _pthread_mutex_unlock = _libc.pthread_mutex_unlock + _sem_init = _libc.sem_init + _sem_wait = _libc.sem_wait + _sem_post = _libc.sem_post _sched_setaffinity = _libc.sched_setaffinity except (OSError, AttributeError): _libc = None _strerror = None - _pthread_mutex_init = None - _pthread_mutex_lock = None - _pthread_mutex_unlock = None + _sem_init = None + _sem_wait = None + _sem_post = None _sched_setaffinity = None -class pthread_mutex_t(ctypes.Structure): +class sem_t(ctypes.Structure): """ - Wrap pthread_mutex_t to allow storing a lock in shared memory. + Wrap sem_t to allow storing a lock in shared memory. """ _fields_ = [ - ('data', ctypes.c_uint8 * 512), + ('data', ctypes.c_uint8 * 128), ] def init(self): - if _pthread_mutex_init(self.data, 0): + if _sem_init(self.data, 1, 1): raise Exception(_strerror(ctypes.get_errno())) def acquire(self): - if _pthread_mutex_lock(self.data): + if _sem_wait(self.data): raise Exception(_strerror(ctypes.get_errno())) def release(self): - if _pthread_mutex_unlock(self.data): + if _sem_post(self.data): raise Exception(_strerror(ctypes.get_errno())) @@ -133,7 +133,7 @@ class State(ctypes.Structure): the context of the new child process. """ _fields_ = [ - ('lock', pthread_mutex_t), + ('lock', sem_t), ('counter', ctypes.c_uint8), ] From 240dc84d9455e18932e164e77345f275ba06d2dd Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 10 Aug 2019 23:57:35 +0000 Subject: [PATCH 323/383] issue #605: update Changelog. --- docs/changelog.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index eb889daa..63266113 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -114,6 +114,9 @@ Mitogen for Ansible is more accurately inferred for `meta: reset_connection`, the `synchronize` module, and for any action plug-ins that establish additional connections. +* `#605 `_: fix a deadlock managing a + shared counter used for load balancing. + * `#615 `_: streaming file transfer is implemented for ``fetch`` and other actions that transfer files from the target to the controller. 
Previously the file was sent in one message, From c89f6cbab637c60abe07445a7a9f821807b8778c Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 11 Aug 2019 12:43:43 +0100 Subject: [PATCH 324/383] issue #598: update Changelog. --- docs/changelog.rst | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index 63266113..2f7e35e2 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -75,11 +75,15 @@ Mitogen for Ansible * `#467 `_: an incompatibility running Mitogen under Molecule was resolved. -* `#547 `_: fix a serious deadlock +* `#547 `_, + `#598 `_: fix a serious deadlock possible during initialization of any task executed by forking, such as ``async`` tasks, tasks using custom :mod:`ansible.module_utils`, ``mitogen_task_isolation: fork`` modules, and those present on an internal - blacklist of misbehaving modules. + blacklist of misbehaving modules. This deadlock is relatively easy hit, has + been present since 0.2.0, and is likely to have impacted many users. For new + connections it could manifest as a *Connection timed out* error, for forked + tasks it could manifest as a timeout or an apparent hang. * `#549 `_: the open file descriptor limit for the Ansible process is increased to the available hard limit. It is @@ -114,6 +118,7 @@ Mitogen for Ansible is more accurately inferred for `meta: reset_connection`, the `synchronize` module, and for any action plug-ins that establish additional connections. +* `#598 `_, * `#605 `_: fix a deadlock managing a shared counter used for load balancing. @@ -224,6 +229,7 @@ bug reports, testing, features and fixes in this release contributed by `Stefane Fermigier `_, `Szabó Dániel Ernő `_, `Ulrich Schreiner `_, +`yen `_, `Yuki Nishida `_, `@alexhexabeam `_, `@DavidVentura `_, From 9b45872246434e79e92d02cf53c9df00900ee156 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 11 Aug 2019 12:43:52 +0100 Subject: [PATCH 325/383] issue #598: allow disabling preempt in terraform --- tests/ansible/gcloud/mitogen-load-testing.tf | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/ansible/gcloud/mitogen-load-testing.tf b/tests/ansible/gcloud/mitogen-load-testing.tf index 076722ce..9bab03d4 100644 --- a/tests/ansible/gcloud/mitogen-load-testing.tf +++ b/tests/ansible/gcloud/mitogen-load-testing.tf @@ -2,6 +2,10 @@ variable "node-count" { default = 0 } +variable "preemptible" { + default = true +} + variable "big" { default = false } @@ -93,7 +97,7 @@ resource "google_compute_instance_template" "node" { machine_type = "custom-1-1024" scheduling { - preemptible = true + preemptible = "${var.preemptible}" automatic_restart = false } From 9b9fe57ea87863d9abc497852e8f97e857708c25 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 11 Aug 2019 10:14:47 +0100 Subject: [PATCH 326/383] docs: make Sphinx install soft fail on Python 2. 
--- docs/requirements.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index 85f30a2e..3c4674fd 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,3 +1,3 @@ -Sphinx==2.1.2 -sphinxcontrib-programoutput==0.14 -alabaster==0.7.10 +Sphinx==2.1.2; python_version > '3.0' +sphinxcontrib-programoutput==0.14; python_version > '3.0' +alabaster==0.7.10; python_version > '3.0' From 9cb187c2c4647dd678eccb1a6c170e1c1e721df7 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 11 Aug 2019 12:46:33 +0100 Subject: [PATCH 327/383] formatting error --- docs/changelog.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index 2f7e35e2..39d6a46c 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -119,7 +119,7 @@ Mitogen for Ansible module, and for any action plug-ins that establish additional connections. * `#598 `_, -* `#605 `_: fix a deadlock managing a + `#605 `_: fix a deadlock managing a shared counter used for load balancing. * `#615 `_: streaming file transfer From 1d41adb346262f19618dbb8604320b7fe1a8d302 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 11 Aug 2019 15:33:42 +0100 Subject: [PATCH 328/383] docs: tweaks --- docs/ansible_detailed.rst | 3 +-- docs/changelog.rst | 16 +++++++++------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/docs/ansible_detailed.rst b/docs/ansible_detailed.rst index 4e8dcd27..c3bbad59 100644 --- a/docs/ansible_detailed.rst +++ b/docs/ansible_detailed.rst @@ -82,8 +82,7 @@ Installation
    - Releases occur frequently and often include important fixes. Subscribe - to the mitogen-announce list to stay updated. + Get notified of new releases and important fixes.

    diff --git a/docs/changelog.rst b/docs/changelog.rst index 39d6a46c..37cab2e3 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -72,18 +72,20 @@ Mitogen for Ansible detected, to work around a broken heuristic in popular SELinux policies that prevents inheriting ``AF_UNIX`` sockets across privilege domains. -* `#467 `_: an incompatibility +* `#467 `_: an incompatibility running Mitogen under Molecule was resolved. * `#547 `_, `#598 `_: fix a serious deadlock - possible during initialization of any task executed by forking, such as - ``async`` tasks, tasks using custom :mod:`ansible.module_utils`, + possible while initializing the service pool of any child, such as during + connection, ``async`` tasks, tasks using custom :mod:`module_utils`, ``mitogen_task_isolation: fork`` modules, and those present on an internal - blacklist of misbehaving modules. This deadlock is relatively easy hit, has - been present since 0.2.0, and is likely to have impacted many users. For new - connections it could manifest as a *Connection timed out* error, for forked - tasks it could manifest as a timeout or an apparent hang. + blacklist of misbehaving modules. + + This deadlock is relatively easy hit, has been present since 0.2.0, and is + likely to have impacted many users. For new connections it could manifest as + a *Connection timed out* error, for forked tasks it could manifest as a + timeout or an apparent hang. * `#549 `_: the open file descriptor limit for the Ansible process is increased to the available hard limit. It is From e12f391106bb256741b3315ca626fc15631b9587 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 11 Aug 2019 16:03:52 +0100 Subject: [PATCH 329/383] docs: mention another __main__ safeguard --- docs/howitworks.rst | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/docs/howitworks.rst b/docs/howitworks.rst index e5fdee2f..20c4f948 100644 --- a/docs/howitworks.rst +++ b/docs/howitworks.rst @@ -673,8 +673,12 @@ code occurring after the first conditional that looks like a standard if __name__ == '__main__': run_some_code() -This is a hack, but it's the least annoying hack I've found for the problem -yet. +To further avoid accidental execution, Mitogen will refuse to serve +:mod:`__main__` to children if no execution guard is found, as it is common +that no guard is present during early script prototyping. + +These are hacks, but they are the safest and least annoying found to solve the +problem. Avoiding Negative Imports From 4b9b1ca24d6b92ec3fd9f03a5193a9e4462b5791 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 11 Aug 2019 17:10:51 +0100 Subject: [PATCH 330/383] tests: work around AnsibleModule.run_command() race. See https://github.com/ansible/ansible/issues/51393 --- tests/ansible/integration/async/runner_one_job.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/ansible/integration/async/runner_one_job.yml b/tests/ansible/integration/async/runner_one_job.yml index a576d4ce..871d672f 100644 --- a/tests/ansible/integration/async/runner_one_job.yml +++ b/tests/ansible/integration/async/runner_one_job.yml @@ -9,9 +9,10 @@ # Verify output of a single async job. 
- name: start 2 second op + # Sleep after writing; see https://github.com/ansible/ansible/issues/51393 shell: | + echo alldone; sleep 1; - echo alldone async: 1000 poll: 0 register: job1 @@ -40,9 +41,9 @@ # ansible/b72e989e1837ccad8dcdc926c43ccbc4d8cdfe44 - | (ansible_version.full >= '2.8' and - result1.cmd == "sleep 1;\necho alldone\n") or + result1.cmd == "echo alldone;\nsleep 1;\n") or (ansible_version.full < '2.8' and - result1.cmd == "sleep 1;\n echo alldone") + result1.cmd == "echo alldone;\n sleep 1;") - result1.delta|length == 14 - result1.start|length == 26 - result1.finished == 1 From 3b63da670fe70d4272fcc2963d927a50322749b7 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 12 Aug 2019 11:12:09 +0100 Subject: [PATCH 331/383] Fix up another handful of LGTM errors. --- ansible_mitogen/loaders.py | 9 +++++++++ ansible_mitogen/process.py | 1 - ansible_mitogen/runner.py | 1 - examples/mitogen-fuse.py | 2 +- mitogen/master.py | 1 - mitogen/parent.py | 2 -- mitogen/service.py | 1 - 7 files changed, 10 insertions(+), 7 deletions(-) diff --git a/ansible_mitogen/loaders.py b/ansible_mitogen/loaders.py index ff06c0c5..99294c1f 100644 --- a/ansible_mitogen/loaders.py +++ b/ansible_mitogen/loaders.py @@ -32,6 +32,15 @@ Stable names for PluginLoader instances across Ansible versions. from __future__ import absolute_import +__all__ = [ + 'action_loader', + 'connection_loader', + 'module_loader', + 'module_utils_loader', + 'shell_loader', + 'strategy_loader', +] + try: from ansible.plugins.loader import action_loader from ansible.plugins.loader import connection_loader diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index 93b72f3f..503e9bb7 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -32,7 +32,6 @@ import logging import multiprocessing import os import resource -import signal import socket import sys diff --git a/ansible_mitogen/runner.py b/ansible_mitogen/runner.py index ce5d335e..5cf171b6 100644 --- a/ansible_mitogen/runner.py +++ b/ansible_mitogen/runner.py @@ -37,7 +37,6 @@ how to build arguments for it, preseed related data, etc. """ import atexit -import codecs import imp import os import re diff --git a/examples/mitogen-fuse.py b/examples/mitogen-fuse.py index c1b17032..55b272d9 100644 --- a/examples/mitogen-fuse.py +++ b/examples/mitogen-fuse.py @@ -245,7 +245,7 @@ def main(router): if sys.platform == 'darwin': kwargs['volname'] = '%s (Mitogen)' % (sys.argv[1],) - f = fuse.FUSE( + fuse.FUSE( operations=Operations(sys.argv[1]), mountpoint=sys.argv[2], foreground=True, diff --git a/mitogen/master.py b/mitogen/master.py index b4eb6643..11ef2b00 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -47,7 +47,6 @@ import re import string import sys import threading -import time import types import zlib diff --git a/mitogen/parent.py b/mitogen/parent.py index 983df829..bc4bfc0d 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -41,7 +41,6 @@ import getpass import heapq import inspect import logging -import logging import os import re import signal @@ -52,7 +51,6 @@ import sys import termios import textwrap import threading -import time import zlib # Absolute imports for <2.5. 
diff --git a/mitogen/service.py b/mitogen/service.py index 8882b30b..168f0140 100644 --- a/mitogen/service.py +++ b/mitogen/service.py @@ -37,7 +37,6 @@ import pwd import stat import sys import threading -import time import mitogen.core import mitogen.select From 6af337c3d3adb665ec05388a2cce688c5e7fb46c Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 12 Aug 2019 11:12:21 +0100 Subject: [PATCH 332/383] Import LGTM config to disable some stuff - ignore mitogen/compat/** - switch off unreachable code check - switch off try/finally vs. with - switch off mixed import/import-from --- lgtm.yml | 416 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 416 insertions(+) create mode 100644 lgtm.yml diff --git a/lgtm.yml b/lgtm.yml new file mode 100644 index 00000000..09349570 --- /dev/null +++ b/lgtm.yml @@ -0,0 +1,416 @@ +# ########################################################################################## +# # Customize file classifications. # +# # Results from files under any classifier will be excluded from LGTM # +# # statistics. # +# ########################################################################################## +# +# ########################################################################################## +# # Use the `path_classifiers` block to define changes to the default classification of # +# # files. # +# ########################################################################################## +# +path_classifiers: +# docs: +# # Identify the top-level file called `generate_javadoc.py` as documentation-related. +# - generate_javadoc.py +# test: +# # Override LGTM’s default classification of test files by excluding all files. +# - exclude: / +# # Classify all files in the top-level directories test/ and testsuites/ as test code. +# - test +# - testsuites +# # Classify all files with suffix `.test` as test code. +# # Note: use only forward slash / as a path separator. +# # Use ** to indicate an arbitrary parent path. +# # Use * to indicate any sequence of characters excluding /. +# # Always enclose the expression in double quotes if it includes *. +# - "**/*.test" +# # Refine the classifications above by excluding files in test/util/. +# - exclude: test/util +# # The default behavior is to tag all files created during the +# # build as `generated`. Results are hidden for generated code. You can tag +# # further files as being generated by adding them to the `generated` section. +# generated: +# # Exclude all `*.c` files under the `ui/` directory from classification as +# # generated code. +# # By default, all files not checked into the repository are considered to be +# # 'generated'. +# - exclude: ui/**.c +# # The default behavior is to tag library code as `library`. Results are hidden +# # for library code. You can tag further files as being library code by adding them +# # to the `library` section. + library: + - "mitogen/compat" +# # The default behavior is to tag template files as `template`. Results are hidden +# # for template files. You can tag further files as being template files by adding +# # them to the `template` section. +# template: +# # Define your own category, for example: 'some_custom_category'. +# some_custom_category: +# # Classify all files in the top-level directory tools/ (or the top-level file +# # called tools). +# - tools +# +# ######################################################################################### +# # Use the `queries` block to change the default display of query results. 
# +# ######################################################################################### +# +queries: + # Mitogen 2.4 compatibility trips this query everywhere, so just disable it + - exclude: py/unreachable-statement + - exclude: py/should-use-with + # mitogen.core.b() trips this query everywhere, so just disable it + - exclude: py/import-and-import-from +# # Specifically hide the results of two queries. +# - exclude: cpp/use-of-goto +# - exclude: java/equals-on-unrelated-types +# # Hide the results of all queries. +# - exclude: "*" +# # Refine by including the `java/command-line-injection` query. +# - include: java/command-line-injection +# # Include all queries tagged ‘security’ and ‘correctness', and with a severity of +# # ‘error’. +# - include: +# tags: +# - "security" +# - "correctness" +# severity: "error" +# +# ######################################################################################### +# # Define changes to the default code extraction process. # +# # Each block configures the extraction of a single language, and modifies actions in a # +# # named step. Every named step includes automatic default actions, # +# # except for the ‘prepare’ step. The steps are performed in the following sequence: # +# # prepare # +# # after_prepare # +# # configure (C/C++ only) # +# # python_setup (Python only) # +# # before_index # +# # index # +########################################################################################### +# +# ######################################################################################### +# # Environment variables available to the steps: # +# ######################################################################################### +# +# # LGTM_SRC +# # The root of the source tree. +# # LGTM_WORKSPACE +# # An existing (initially empty) folder outside the source tree. +# # Used for temporary download and setup commands. +# +# ######################################################################################### +# # Use the extraction block to define changes to the default code extraction process # +# # for one or more languages. The settings for each language are defined in a child # +# # block, with one or more steps. # +# ######################################################################################### +# +# extraction: +# # Define settings for C/C++ analysis +# ##################################### +# cpp: +# # The `prepare` step exists for customization on LGTM.com only. +# prepare: +# # The `packages` section is valid for LGTM.com only. It names Ubuntu packages to +# # be installed. +# packages: +# - libxml2-dev +# - libxslt1-dev +# # Add an `after-prepare` step if you need to run commands after the prepare step. +# # Each command should be listed on a separate line. +# # This step is useful for C/C++ analysis where you want to prepare the environment +# # for the `configure` step without changing the default behavior for that step. +# after_prepare: +# - mkdir -p $LGTM_WORKSPACE/latest-gcc-symlinks +# - ln -s /usr/bin/g++-6 $LGTM_WORKSPACE/latest-gcc-symlinks/g++ +# - ln -s /usr/bin/gcc-6 $LGTM_WORKSPACE/latest-gcc-symlinks/gcc +# - export PATH=$LGTM_WORKSPACE/latest-gcc-symlinks:$PATH +# - export GNU_MAKE=make +# - export GIT=true +# # The `configure` step generates build configuration files which the `index` step +# # then uses to build the codebase. +# configure: +# command: +# - ./prepare_deps +# # Optional step. You should add a `before_index` step if you need to run commands +# # before the `index` step. 
+# before_index: +# - export BOOST_DIR=$LGTM_SRC/boost +# - export GTEST_DIR=$LGTM_SRC/googletest +# - export HUNSPELL_DIR=$LGTM_SRC/hunspell +# - export CRYPTOPP_DIR=$LGTM_SRC/cryptopp +# # The `index` step builds the code and extracts information during the build +# # process. +# index: +# # Override the autobuild process by specifying a list of custom build commands +# # to use instead. +# build_command: +# - $GNU_MAKE -j2 -s +# # Specify that all project or solution files should be used for extraction. +# # Default: false. +# all_solutions: true +# # Specify a list of one or more project or solution files for extraction. +# # Default: LGTM chooses the file closest to the root of the repository (this may +# # fail if there are multiple candidates). +# solution: +# - myProject.sln +# # Specify MSBuild settings +# msbuild: +# # Specify a list of additional arguments to MSBuild. Default: empty. +# arguments: /p:Platform=x64 /p:Configuration=Release +# # Specify the MSBuild configuration to use, for example, debug or release. +# # Default: read from the solution file or files. +# configuration: +# # Specify the platform to target, for example: x86, x64, or Any CPU. +# # Default: read from the solution file or files. +# platform: +# # Specify the MSBuild target. Default: rebuild. +# target: +# # Specify whether or not to perform a NuGet restore for extraction. Default: true. +# nuget_restore: false +# # Specify a version of Microsoft Visual Studio to use for MSBuild or any custom +# # build commands (build_command). For example: +# # 10 for Visual Studio 2010 +# # 12 for Visual Studio 2012 +# # 14 for Visual Studio 2015 +# # 15 for Visual Studio 2017 +# # Default: read from project files. +# vstools_version: 10 +# +# # Define settings for C# analysis +# ################################## +# csharp: +# # The `prepare` step exists for customization on LGTM.com only. +# prepare: +# # Add an `after-prepare` step if you need to run commands after the `prepare` step. +# # Each command should be listed on a separate line. +# after_prepare: +# - export PATH=$LGTM_WORKSPACE/tools:$PATH +# # The `index` step builds the code and extracts information during the build +# # process. +# index: +# # Specify that all project or solution files should be used for extraction. +# # Default: false. +# all_solutions: true +# # Specify a list of one or more project or solution files for extraction. +# # Default: LGTM chooses the file closest to the root of the repository (this may +# # fail if there are multiple candidates). +# solution: +# - myProject.sln +# # Override the autobuild process by specifying a list of custom build commands +# # to use instead. +# build_command: +# # By default, LGTM analyzes the code by building it. You can override this, +# # and tell LGTM not to build the code. Beware that this can lead +# # to less accurate results. +# buildless: true +# # Specify .NET Core settings. +# dotnet: +# # Specify additional arguments to `dotnet build`. +# # Default: empty. +# arguments: +# # Specify the version of .NET Core SDK to use. +# # Default: The version installed on the build machine. +# version: 2.1 +# # Specify MSBuild settings. +# msbuild: +# # Specify a list of additional arguments to MSBuild. Default: empty. +# arguments: /P:WarningLevel=2 +# # Specify the MSBuild configuration to use, for example, debug or release. +# # Default: read from the solution file or files. +# configuration: release +# # Specify the platform to target, for example: x86, x64, or Any CPU. 
+# # Default: read from the solution file or files. +# platform: x86 +# # Specify the MSBuild target. Default: rebuild. +# target: notest +# # Specify whether or not to perform a NuGet restore for extraction. Default: true. +# nuget_restore: false +# # Specify a version of Microsoft Visual Studio to use for MSBuild or any custom +# # build commands (build_command). For example: +# # 10 for Visual Studio 2010 +# # 12 for Visual Studio 2012 +# # 14 for Visual Studio 2015 +# # 15 for Visual Studio 2017 +# # Default: read from project files +# vstools_version: 10 +# # Specify additional options for the extractor, +# # for example --fast to perform a faster extraction that produces a smaller +# # database. +# extractor: +# +# # Define settings for COBOL analysis +# ##################################### +# cobol: +# # The `prepare` step exists for customization on LGTM.com only. +# prepare: +# # Add an `after-prepare` step if you need to run commands after the prepare step. +# # Each command should be listed on a separate line. +# after_prepare: +# - export PATH=$LGTM_WORKSPACE/tools:$PATH +# index: +# # Specify the source file format: fixed, free, or variable. +# # Default: fixed. +# format: "variable" +# # Specify the number of spaces in a tab. Default: 4. +# tab_length: 2 +# # Specify whether preprocessing is applied before extraction or not. +# # Default: true (preprocessing on). +# preprocessing: false +# # Specify file extensions that LGTM will identify as belonging to source files. +# # Default: files with a .CBL, .cbl, .COB or .cob extension are considered +# # source files. +# source_globs: +# - "**.cbl" +# - "**.CBL" +# - "**.cob" +# - "**.cobol" +# - "**.COB" +# # Specify file extensions that LGTM will identify as belonging to library files. +# # Default: files with a .CPY, .cpy, .COPY or .copy extension. +# library_globs: +# - "**.CPY" +# - "**.COPY" +# +# # Define settings for Java analysis +# #################################### +# java: +# # The `prepare` step exists for customization on LGTM.com only. +# prepare: +# # Add an `after-prepare` step if you need to run commands after the prepare step. +# # Each command should be listed on a separate line. +# after_prepare: +# - export PATH=$LGTM_WORKSPACE/tools:$PATH +# # The `index` step extracts information from the files in the codebase. +# index: +# # Specify Gradle settings. +# gradle: +# # Specify the required Gradle version. +# # Default: determined automatically. +# version: 4.4 +# # Override the autobuild process by specifying a list of custom build commands +# # to use instead. +# build_command: ./compile-all.sh +# # Specify the Java version required to build the project. +# java_version: 11 +# # Specify Maven settings. +# maven: +# # Specify the path (absolute or relative) of a Maven settings file to use. +# # Default: Maven uses a settings file in the default location, if it exists. +# settings_file: /opt/share/settings.xml +# # Specify the path of a Maven toolchains file. +# # Default: Maven uses a toolchains file in the default location, if it exists. +# toolchains_file: /opt/share/toolchains.xml +# # Specify the required Maven version. +# # Default: the Maven version is determined automatically, where feasible. +# version: 3.5.2 +# # Specify how XML files should be extracted: +# # ALL = extract all XML files. +# # DEFAULT = only extract XML files named `AndroidManifest.xml`. +# # DISABLED = do not extract any XML files. 
+# xml_mode: ALL +# +# # Define settings for JavaScript analysis +# ########################################## +# javascript: +# # The `prepare` step exists for customization on LGTM.com only. +# prepare: +# # Add an `after-prepare` step if you need to run commands after the prepare step. +# # Each command should be listed on a separate line. +# after_prepare: +# - export PATH=$LGTM_WORKSPACE/tools:$PATH +# # The `index` step extracts information from the files in the codebase. +# index: +# # Specify a list of files and folders to extract. +# # Default: The project root directory. +# include: +# - src/js +# # Specify a list of files and folders to exclude from extraction. +# exclude: +# - thirdparty/lib +# - node_modules +# # You can add additional file types for LGTM to extract, by mapping file +# # extensions (including the leading dot) to file types. The usual +# # include/exclude patterns apply, so, for example, `.jsm` files under +# # `thirdparty/lib` and `node_modules` will not be extracted. +# filetypes: +# ".jsm": "js" +# ".tmpl": "html" +# # Specify a list of glob patterns to include/exclude files from extraction; this +# # is applied on top of the include/exclude paths from above; patterns are +# # processed in the same way as for path classifiers above. +# # Default: include all files with known extensions (such as .js, .ts and .html), +# # but exclude files ending in `-min.js` or `.min.js`. +# filters: +# # exclude any node_modules folders anywhere. +# - exclude: "**/node_modules/*" +# # exclude any *.ts files anywhere. +# - exclude: "**/*.ts" +# # but include *.ts files under src/js/typescript. +# - include: "src/js/typescript/**/*.ts" +# # Specify the type of project: module or script. +# # Default: type is automatically detected. +# source_type: module +# # Specify how TypeScript files should be extracted: +# # none = exclude all TypeScript files. +# # basic = extract syntactic information from TypeScript files. +# # full = extract syntactic and type information from TypeScript files. +# # Default: full. +# typescript: basic +# # By default, LGTM doesn't extract any XML files. You can override this by +# # using the `xml_mode` property and setting it to `all`. +# xml_mode: all +# +# # Define settings for Python analysis +# ###################################### +# python: +# # The `prepare` step exists for customization on LGTM.com only. +# prepare: +# # The `packages` section is valid for LGTM.com only. It names packages to +# # be installed. +# packages: libpng-dev +# # This step is useful for Python analysis where you want to prepare the +# # environment for the `python_setup` step without changing the default behavior +# # for that step. +# after_prepare: +# - export PATH=$LGTM_WORKSPACE/tools:$PATH +# # This sets up the Python interpreter and virtual environment, ready for the +# # `index` step to extract the codebase. +# python_setup: +# # Specify packages that should NOT be installed despite being mentioned in the +# # requirements.txt file. +# # Default: no package marked for exclusion. +# exclude_requirements: +# - pywin32 +# # Specify a list of pip packages to install. +# # If any of these packages cannot be installed, the extraction will fail. +# requirements: +# - Pillow +# # Specify a list of requirements text files to use to set up the environment, +# # or false for none. Default: any requirements.txt, test-requirements.txt, +# # and similarly named files identified in the codebase are used. 
+# requirements_files: +# - required-packages.txt +# # Specify a setup.py file to use to set up the environment, or false for none. +# # Default: any setup.py files identified in the codebase are used in preference +# # to any requirements text files. +# setup_py: new-setup.py +# # Override the version of the Python interpreter used for setup and extraction +# # Default: Python 3 if no version is explicitly specified, and if there are no +# # commits to the repository before January 1, 2017. Otherwise Python 2. +# version: 3 +# # Optional step. You should add a `before_index` step if you need to run commands +# # before the `index` step. +# before_index: +# - antlr4 -Dlanguage=Python3 Grammar.g4 +# # The `index` step extracts information from the files in the codebase. +# index: +# # Specify a list of files and folders to exclude from extraction. +# # Default: Git submodules and Subversion externals. +# exclude: +# - legacy-implementation +# - thirdparty/libs +# filters: +# - exclude: "**/documentation/examples/snippets/*.py" +# - include: "**/documentation/examples/test_application/*" +# include: From 0c1d8825473d4f8b7a9fe29b2d6366dbedccd558 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 12 Aug 2019 11:38:24 +0100 Subject: [PATCH 333/383] issue #613: must await 'exit' and 'disconnect' in wait=False test --- tests/router_test.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tests/router_test.py b/tests/router_test.py index a9ad5ae1..1cde016d 100644 --- a/tests/router_test.py +++ b/tests/router_test.py @@ -434,12 +434,21 @@ class ShutdownTest(testlib.RouterMixin, testlib.TestCase): l1 = self.router.local() pid = l1.call(os.getpid) - conn = self.router.stream_by_id(l1.context_id).conn + strm = self.router.stream_by_id(l1.context_id) exitted = mitogen.core.Latch() - mitogen.core.listen(conn.proc, 'exit', exitted.put) + + # It is possible for Process 'exit' signal to fire immediately during + # processing of Stream 'disconnect' signal, so we must wait for both, + # otherwise ChannelError below will return 'respondent context has + # disconnected' rather than 'no route', because RouteMonitor hasn't run + # yet and the Receiver caught Context 'disconnect' signal instead of a + # dead message. + mitogen.core.listen(strm.conn.proc, 'exit', exitted.put) + mitogen.core.listen(strm, 'disconnect', exitted.put) l1.shutdown(wait=False) exitted.get() + exitted.get() e = self.assertRaises(OSError, lambda: os.waitpid(pid, 0)) From 8dfb3966dfbe809855ddd8c1069174aa12bc1737 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 12 Aug 2019 15:32:48 +0100 Subject: [PATCH 334/383] issue #558, #582: preserve remote tmpdir if caller did not supply one The undocumented 'tmp' parameter controls whether _execute_module() would delete anything on 2.3, so mimic that. This means _execute_remove_stat() calls will not blow away the temp directory, which broke the unarchive plugin. 
--- ansible_mitogen/mixins.py | 5 ++--- docs/changelog.rst | 10 +++++++++- tests/ansible/regression/all.yml | 1 + .../regression/issue_558_unarchive_failed.yml | 11 +++++++++++ tests/data/unarchive_test.tar | Bin 0 -> 10240 bytes 5 files changed, 23 insertions(+), 4 deletions(-) create mode 100644 tests/ansible/regression/issue_558_unarchive_failed.yml create mode 100644 tests/data/unarchive_test.tar diff --git a/ansible_mitogen/mixins.py b/ansible_mitogen/mixins.py index 3a5d4c93..eee1ecd7 100644 --- a/ansible_mitogen/mixins.py +++ b/ansible_mitogen/mixins.py @@ -360,11 +360,10 @@ class ActionModuleMixin(ansible.plugins.action.ActionBase): ) ) - if ansible.__version__ < '2.5' and delete_remote_tmp and \ - getattr(self._connection._shell, 'tmpdir', None) is not None: + if tmp and ansible.__version__ < '2.5' and delete_remote_tmp: # Built-in actions expected tmpdir to be cleaned up automatically # on _execute_module(). - self._remove_tmp_path(self._connection._shell.tmpdir) + self._remove_tmp_path(tmp) return result diff --git a/docs/changelog.rst b/docs/changelog.rst index 37cab2e3..31605315 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -93,6 +93,11 @@ Mitogen for Ansible default soft limit, allowing *"too many open files"* errors to be avoided more often in large runs without user configuration. +* `#558 `_, + `#582 `_: on Ansible 2.3 a remote + directory was unconditionally deleted after the first module belonging to an + action plug-in had executed, causing the ``unarchive`` module to fail. + * `#578 `_: the extension could crash while rendering an error message, due to an incorrect format string. @@ -223,6 +228,7 @@ bug reports, testing, features and fixes in this release contributed by `Dave Cottlehuber `_, `Denis Krienbühl `_, `El Mehdi CHAOUKI `_, +`Florent Dutheil `_, `James Hogarth `_, `Marc Hartmayer `_, `Nigel Metheringham `_, @@ -235,8 +241,10 @@ bug reports, testing, features and fixes in this release contributed by `Yuki Nishida `_, `@alexhexabeam `_, `@DavidVentura `_, +`@dbiegunski `_, `@ghp-rr `_, -`@rizzly `_, and +`@rizzly `_, +`@SQGE `_, and `@tho86 `_. diff --git a/tests/ansible/regression/all.yml b/tests/ansible/regression/all.yml index 32852942..f75a050c 100644 --- a/tests/ansible/regression/all.yml +++ b/tests/ansible/regression/all.yml @@ -8,5 +8,6 @@ - include: issue_154__module_state_leaks.yml - include: issue_177__copy_module_failing.yml - include: issue_332_ansiblemoduleerror_first_occurrence.yml +- include: issue_558_unarchive_failed.yml - include: issue_590__sys_modules_crap.yml - include: issue_591__setuptools_cwd_crash.yml diff --git a/tests/ansible/regression/issue_558_unarchive_failed.yml b/tests/ansible/regression/issue_558_unarchive_failed.yml new file mode 100644 index 00000000..1f073d70 --- /dev/null +++ b/tests/ansible/regression/issue_558_unarchive_failed.yml @@ -0,0 +1,11 @@ +# _execute_module() would unconditionally delete shell.tmpdir without +# respecting the passed in 'tmp' parameter on Ansible 2.3. 
+ +- name: regression/issue_558_unarchive_failed.yml + hosts: test-targets + tasks: + - file: state=absent path=/tmp/foo + - file: state=directory path=/tmp/foo + - unarchive: + src: "{{git_basedir}}/tests/data/unarchive_test.tar" + dest: /tmp/foo diff --git a/tests/data/unarchive_test.tar b/tests/data/unarchive_test.tar new file mode 100644 index 0000000000000000000000000000000000000000..97c36407ca2e73bccf7ccd020908e5e7afdf5434 GIT binary patch literal 10240 zcmeIvF$#b%3_#J$o}wp^Myp=IYX}Msil7xezH!n)aB>iSmXxFo;kECEVM^m3<-70} zWe8$PtHR&&*|I1_^CAnGbt@IGYWKzY?^e1duS;xZ=cFCCtN3tw>pwjCL#aOhKl#I^ z?1%sY2q1s}0tg_000IagfB*srAb Date: Mon, 12 Aug 2019 15:51:35 +0100 Subject: [PATCH 335/383] issue #558: disable test on OSX to cope with boundless mediocrity --- tests/ansible/regression/issue_558_unarchive_failed.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/ansible/regression/issue_558_unarchive_failed.yml b/tests/ansible/regression/issue_558_unarchive_failed.yml index 1f073d70..c6b1c9f6 100644 --- a/tests/ansible/regression/issue_558_unarchive_failed.yml +++ b/tests/ansible/regression/issue_558_unarchive_failed.yml @@ -9,3 +9,5 @@ - unarchive: src: "{{git_basedir}}/tests/data/unarchive_test.tar" dest: /tmp/foo + # garbage doesn't work with BSD tar + when: ansible_system != 'Darwin' From 49796e0c3999bc333800b1247a2c4cc5b1e653d6 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 12 Aug 2019 16:13:01 +0100 Subject: [PATCH 336/383] docs: update changelog --- docs/changelog.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index 31605315..77c311c0 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -243,6 +243,7 @@ bug reports, testing, features and fixes in this release contributed by `@DavidVentura `_, `@dbiegunski `_, `@ghp-rr `_, +`@migalsp `_, `@rizzly `_, `@SQGE `_, and `@tho86 `_. From d9cc577a6cb0908a6d1b922f92bace01dea9e4ff Mon Sep 17 00:00:00 2001 From: David Wilson Date: Mon, 12 Aug 2019 16:50:27 +0100 Subject: [PATCH 337/383] issue #440: log Python version during bootstrap. --- mitogen/core.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mitogen/core.py b/mitogen/core.py index d6e1739e..2f52ebe4 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -3919,7 +3919,7 @@ class ExternalContext(object): self.router.register(self.parent, self.stream) self.router._setup_logging() - sys.executable = os.environ.pop('ARGV0', sys.executable) + _v and LOG.debug('Python version is %s', sys.version) _v and LOG.debug('Parent is context %r (%s); my ID is %r', self.parent.context_id, self.parent.name, mitogen.context_id) @@ -3927,6 +3927,8 @@ class ExternalContext(object): os.getpid(), os.getppid(), os.geteuid(), os.getuid(), os.getegid(), os.getgid(), socket.gethostname()) + + sys.executable = os.environ.pop('ARGV0', sys.executable) _v and LOG.debug('Recovered sys.executable: %r', sys.executable) if self.config.get('send_ec2', True): From 866438aec611199a95e3d2c9aae850bc32a79ec7 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 13 Aug 2019 10:16:41 +0100 Subject: [PATCH 338/383] Whoops, merge together lgtm.yml and .lgtm.yml Also add ansible_mitogen/compat. 
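
The ``py/import-and-import-from`` exclusion below exists because of an idiom
used throughout the tree: a module commonly imports mitogen.core and also
imports the ``b()`` bytes helper from it. A purely illustrative example of the
pattern LGTM would otherwise flag on nearly every file:

    import mitogen.core
    from mitogen.core import b

    # b() is the 2.x/3.x bytes-literal helper, while the module itself is
    # still referenced directly elsewhere in the same file.
    data = b('x') * 3
    mitogen.core.LOG.debug('built %d bytes', len(data))
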
--- .lgtm.yml | 11 +- lgtm.yml | 416 ------------------------------------------------------ 2 files changed, 9 insertions(+), 418 deletions(-) delete mode 100644 lgtm.yml diff --git a/.lgtm.yml b/.lgtm.yml index 3e45b21e..a8e91c02 100644 --- a/.lgtm.yml +++ b/.lgtm.yml @@ -1,3 +1,10 @@ path_classifiers: - thirdparty: - - "mitogen/compat/*.py" + library: + - "mitogen/compat" + - "ansible_mitogen/compat" +queries: + # Mitogen 2.4 compatibility trips this query everywhere, so just disable it + - exclude: py/unreachable-statement + - exclude: py/should-use-with + # mitogen.core.b() trips this query everywhere, so just disable it + - exclude: py/import-and-import-from diff --git a/lgtm.yml b/lgtm.yml deleted file mode 100644 index 09349570..00000000 --- a/lgtm.yml +++ /dev/null @@ -1,416 +0,0 @@ -# ########################################################################################## -# # Customize file classifications. # -# # Results from files under any classifier will be excluded from LGTM # -# # statistics. # -# ########################################################################################## -# -# ########################################################################################## -# # Use the `path_classifiers` block to define changes to the default classification of # -# # files. # -# ########################################################################################## -# -path_classifiers: -# docs: -# # Identify the top-level file called `generate_javadoc.py` as documentation-related. -# - generate_javadoc.py -# test: -# # Override LGTM’s default classification of test files by excluding all files. -# - exclude: / -# # Classify all files in the top-level directories test/ and testsuites/ as test code. -# - test -# - testsuites -# # Classify all files with suffix `.test` as test code. -# # Note: use only forward slash / as a path separator. -# # Use ** to indicate an arbitrary parent path. -# # Use * to indicate any sequence of characters excluding /. -# # Always enclose the expression in double quotes if it includes *. -# - "**/*.test" -# # Refine the classifications above by excluding files in test/util/. -# - exclude: test/util -# # The default behavior is to tag all files created during the -# # build as `generated`. Results are hidden for generated code. You can tag -# # further files as being generated by adding them to the `generated` section. -# generated: -# # Exclude all `*.c` files under the `ui/` directory from classification as -# # generated code. -# # By default, all files not checked into the repository are considered to be -# # 'generated'. -# - exclude: ui/**.c -# # The default behavior is to tag library code as `library`. Results are hidden -# # for library code. You can tag further files as being library code by adding them -# # to the `library` section. - library: - - "mitogen/compat" -# # The default behavior is to tag template files as `template`. Results are hidden -# # for template files. You can tag further files as being template files by adding -# # them to the `template` section. -# template: -# # Define your own category, for example: 'some_custom_category'. -# some_custom_category: -# # Classify all files in the top-level directory tools/ (or the top-level file -# # called tools). -# - tools -# -# ######################################################################################### -# # Use the `queries` block to change the default display of query results. 
# -# ######################################################################################### -# -queries: - # Mitogen 2.4 compatibility trips this query everywhere, so just disable it - - exclude: py/unreachable-statement - - exclude: py/should-use-with - # mitogen.core.b() trips this query everywhere, so just disable it - - exclude: py/import-and-import-from -# # Specifically hide the results of two queries. -# - exclude: cpp/use-of-goto -# - exclude: java/equals-on-unrelated-types -# # Hide the results of all queries. -# - exclude: "*" -# # Refine by including the `java/command-line-injection` query. -# - include: java/command-line-injection -# # Include all queries tagged ‘security’ and ‘correctness', and with a severity of -# # ‘error’. -# - include: -# tags: -# - "security" -# - "correctness" -# severity: "error" -# -# ######################################################################################### -# # Define changes to the default code extraction process. # -# # Each block configures the extraction of a single language, and modifies actions in a # -# # named step. Every named step includes automatic default actions, # -# # except for the ‘prepare’ step. The steps are performed in the following sequence: # -# # prepare # -# # after_prepare # -# # configure (C/C++ only) # -# # python_setup (Python only) # -# # before_index # -# # index # -########################################################################################### -# -# ######################################################################################### -# # Environment variables available to the steps: # -# ######################################################################################### -# -# # LGTM_SRC -# # The root of the source tree. -# # LGTM_WORKSPACE -# # An existing (initially empty) folder outside the source tree. -# # Used for temporary download and setup commands. -# -# ######################################################################################### -# # Use the extraction block to define changes to the default code extraction process # -# # for one or more languages. The settings for each language are defined in a child # -# # block, with one or more steps. # -# ######################################################################################### -# -# extraction: -# # Define settings for C/C++ analysis -# ##################################### -# cpp: -# # The `prepare` step exists for customization on LGTM.com only. -# prepare: -# # The `packages` section is valid for LGTM.com only. It names Ubuntu packages to -# # be installed. -# packages: -# - libxml2-dev -# - libxslt1-dev -# # Add an `after-prepare` step if you need to run commands after the prepare step. -# # Each command should be listed on a separate line. -# # This step is useful for C/C++ analysis where you want to prepare the environment -# # for the `configure` step without changing the default behavior for that step. -# after_prepare: -# - mkdir -p $LGTM_WORKSPACE/latest-gcc-symlinks -# - ln -s /usr/bin/g++-6 $LGTM_WORKSPACE/latest-gcc-symlinks/g++ -# - ln -s /usr/bin/gcc-6 $LGTM_WORKSPACE/latest-gcc-symlinks/gcc -# - export PATH=$LGTM_WORKSPACE/latest-gcc-symlinks:$PATH -# - export GNU_MAKE=make -# - export GIT=true -# # The `configure` step generates build configuration files which the `index` step -# # then uses to build the codebase. -# configure: -# command: -# - ./prepare_deps -# # Optional step. You should add a `before_index` step if you need to run commands -# # before the `index` step. 
-# before_index: -# - export BOOST_DIR=$LGTM_SRC/boost -# - export GTEST_DIR=$LGTM_SRC/googletest -# - export HUNSPELL_DIR=$LGTM_SRC/hunspell -# - export CRYPTOPP_DIR=$LGTM_SRC/cryptopp -# # The `index` step builds the code and extracts information during the build -# # process. -# index: -# # Override the autobuild process by specifying a list of custom build commands -# # to use instead. -# build_command: -# - $GNU_MAKE -j2 -s -# # Specify that all project or solution files should be used for extraction. -# # Default: false. -# all_solutions: true -# # Specify a list of one or more project or solution files for extraction. -# # Default: LGTM chooses the file closest to the root of the repository (this may -# # fail if there are multiple candidates). -# solution: -# - myProject.sln -# # Specify MSBuild settings -# msbuild: -# # Specify a list of additional arguments to MSBuild. Default: empty. -# arguments: /p:Platform=x64 /p:Configuration=Release -# # Specify the MSBuild configuration to use, for example, debug or release. -# # Default: read from the solution file or files. -# configuration: -# # Specify the platform to target, for example: x86, x64, or Any CPU. -# # Default: read from the solution file or files. -# platform: -# # Specify the MSBuild target. Default: rebuild. -# target: -# # Specify whether or not to perform a NuGet restore for extraction. Default: true. -# nuget_restore: false -# # Specify a version of Microsoft Visual Studio to use for MSBuild or any custom -# # build commands (build_command). For example: -# # 10 for Visual Studio 2010 -# # 12 for Visual Studio 2012 -# # 14 for Visual Studio 2015 -# # 15 for Visual Studio 2017 -# # Default: read from project files. -# vstools_version: 10 -# -# # Define settings for C# analysis -# ################################## -# csharp: -# # The `prepare` step exists for customization on LGTM.com only. -# prepare: -# # Add an `after-prepare` step if you need to run commands after the `prepare` step. -# # Each command should be listed on a separate line. -# after_prepare: -# - export PATH=$LGTM_WORKSPACE/tools:$PATH -# # The `index` step builds the code and extracts information during the build -# # process. -# index: -# # Specify that all project or solution files should be used for extraction. -# # Default: false. -# all_solutions: true -# # Specify a list of one or more project or solution files for extraction. -# # Default: LGTM chooses the file closest to the root of the repository (this may -# # fail if there are multiple candidates). -# solution: -# - myProject.sln -# # Override the autobuild process by specifying a list of custom build commands -# # to use instead. -# build_command: -# # By default, LGTM analyzes the code by building it. You can override this, -# # and tell LGTM not to build the code. Beware that this can lead -# # to less accurate results. -# buildless: true -# # Specify .NET Core settings. -# dotnet: -# # Specify additional arguments to `dotnet build`. -# # Default: empty. -# arguments: -# # Specify the version of .NET Core SDK to use. -# # Default: The version installed on the build machine. -# version: 2.1 -# # Specify MSBuild settings. -# msbuild: -# # Specify a list of additional arguments to MSBuild. Default: empty. -# arguments: /P:WarningLevel=2 -# # Specify the MSBuild configuration to use, for example, debug or release. -# # Default: read from the solution file or files. -# configuration: release -# # Specify the platform to target, for example: x86, x64, or Any CPU. 
-# # Default: read from the solution file or files. -# platform: x86 -# # Specify the MSBuild target. Default: rebuild. -# target: notest -# # Specify whether or not to perform a NuGet restore for extraction. Default: true. -# nuget_restore: false -# # Specify a version of Microsoft Visual Studio to use for MSBuild or any custom -# # build commands (build_command). For example: -# # 10 for Visual Studio 2010 -# # 12 for Visual Studio 2012 -# # 14 for Visual Studio 2015 -# # 15 for Visual Studio 2017 -# # Default: read from project files -# vstools_version: 10 -# # Specify additional options for the extractor, -# # for example --fast to perform a faster extraction that produces a smaller -# # database. -# extractor: -# -# # Define settings for COBOL analysis -# ##################################### -# cobol: -# # The `prepare` step exists for customization on LGTM.com only. -# prepare: -# # Add an `after-prepare` step if you need to run commands after the prepare step. -# # Each command should be listed on a separate line. -# after_prepare: -# - export PATH=$LGTM_WORKSPACE/tools:$PATH -# index: -# # Specify the source file format: fixed, free, or variable. -# # Default: fixed. -# format: "variable" -# # Specify the number of spaces in a tab. Default: 4. -# tab_length: 2 -# # Specify whether preprocessing is applied before extraction or not. -# # Default: true (preprocessing on). -# preprocessing: false -# # Specify file extensions that LGTM will identify as belonging to source files. -# # Default: files with a .CBL, .cbl, .COB or .cob extension are considered -# # source files. -# source_globs: -# - "**.cbl" -# - "**.CBL" -# - "**.cob" -# - "**.cobol" -# - "**.COB" -# # Specify file extensions that LGTM will identify as belonging to library files. -# # Default: files with a .CPY, .cpy, .COPY or .copy extension. -# library_globs: -# - "**.CPY" -# - "**.COPY" -# -# # Define settings for Java analysis -# #################################### -# java: -# # The `prepare` step exists for customization on LGTM.com only. -# prepare: -# # Add an `after-prepare` step if you need to run commands after the prepare step. -# # Each command should be listed on a separate line. -# after_prepare: -# - export PATH=$LGTM_WORKSPACE/tools:$PATH -# # The `index` step extracts information from the files in the codebase. -# index: -# # Specify Gradle settings. -# gradle: -# # Specify the required Gradle version. -# # Default: determined automatically. -# version: 4.4 -# # Override the autobuild process by specifying a list of custom build commands -# # to use instead. -# build_command: ./compile-all.sh -# # Specify the Java version required to build the project. -# java_version: 11 -# # Specify Maven settings. -# maven: -# # Specify the path (absolute or relative) of a Maven settings file to use. -# # Default: Maven uses a settings file in the default location, if it exists. -# settings_file: /opt/share/settings.xml -# # Specify the path of a Maven toolchains file. -# # Default: Maven uses a toolchains file in the default location, if it exists. -# toolchains_file: /opt/share/toolchains.xml -# # Specify the required Maven version. -# # Default: the Maven version is determined automatically, where feasible. -# version: 3.5.2 -# # Specify how XML files should be extracted: -# # ALL = extract all XML files. -# # DEFAULT = only extract XML files named `AndroidManifest.xml`. -# # DISABLED = do not extract any XML files. 
-# xml_mode: ALL -# -# # Define settings for JavaScript analysis -# ########################################## -# javascript: -# # The `prepare` step exists for customization on LGTM.com only. -# prepare: -# # Add an `after-prepare` step if you need to run commands after the prepare step. -# # Each command should be listed on a separate line. -# after_prepare: -# - export PATH=$LGTM_WORKSPACE/tools:$PATH -# # The `index` step extracts information from the files in the codebase. -# index: -# # Specify a list of files and folders to extract. -# # Default: The project root directory. -# include: -# - src/js -# # Specify a list of files and folders to exclude from extraction. -# exclude: -# - thirdparty/lib -# - node_modules -# # You can add additional file types for LGTM to extract, by mapping file -# # extensions (including the leading dot) to file types. The usual -# # include/exclude patterns apply, so, for example, `.jsm` files under -# # `thirdparty/lib` and `node_modules` will not be extracted. -# filetypes: -# ".jsm": "js" -# ".tmpl": "html" -# # Specify a list of glob patterns to include/exclude files from extraction; this -# # is applied on top of the include/exclude paths from above; patterns are -# # processed in the same way as for path classifiers above. -# # Default: include all files with known extensions (such as .js, .ts and .html), -# # but exclude files ending in `-min.js` or `.min.js`. -# filters: -# # exclude any node_modules folders anywhere. -# - exclude: "**/node_modules/*" -# # exclude any *.ts files anywhere. -# - exclude: "**/*.ts" -# # but include *.ts files under src/js/typescript. -# - include: "src/js/typescript/**/*.ts" -# # Specify the type of project: module or script. -# # Default: type is automatically detected. -# source_type: module -# # Specify how TypeScript files should be extracted: -# # none = exclude all TypeScript files. -# # basic = extract syntactic information from TypeScript files. -# # full = extract syntactic and type information from TypeScript files. -# # Default: full. -# typescript: basic -# # By default, LGTM doesn't extract any XML files. You can override this by -# # using the `xml_mode` property and setting it to `all`. -# xml_mode: all -# -# # Define settings for Python analysis -# ###################################### -# python: -# # The `prepare` step exists for customization on LGTM.com only. -# prepare: -# # The `packages` section is valid for LGTM.com only. It names packages to -# # be installed. -# packages: libpng-dev -# # This step is useful for Python analysis where you want to prepare the -# # environment for the `python_setup` step without changing the default behavior -# # for that step. -# after_prepare: -# - export PATH=$LGTM_WORKSPACE/tools:$PATH -# # This sets up the Python interpreter and virtual environment, ready for the -# # `index` step to extract the codebase. -# python_setup: -# # Specify packages that should NOT be installed despite being mentioned in the -# # requirements.txt file. -# # Default: no package marked for exclusion. -# exclude_requirements: -# - pywin32 -# # Specify a list of pip packages to install. -# # If any of these packages cannot be installed, the extraction will fail. -# requirements: -# - Pillow -# # Specify a list of requirements text files to use to set up the environment, -# # or false for none. Default: any requirements.txt, test-requirements.txt, -# # and similarly named files identified in the codebase are used. 
-# requirements_files: -# - required-packages.txt -# # Specify a setup.py file to use to set up the environment, or false for none. -# # Default: any setup.py files identified in the codebase are used in preference -# # to any requirements text files. -# setup_py: new-setup.py -# # Override the version of the Python interpreter used for setup and extraction -# # Default: Python 3 if no version is explicitly specified, and if there are no -# # commits to the repository before January 1, 2017. Otherwise Python 2. -# version: 3 -# # Optional step. You should add a `before_index` step if you need to run commands -# # before the `index` step. -# before_index: -# - antlr4 -Dlanguage=Python3 Grammar.g4 -# # The `index` step extracts information from the files in the codebase. -# index: -# # Specify a list of files and folders to exclude from extraction. -# # Default: Git submodules and Subversion externals. -# exclude: -# - legacy-implementation -# - thirdparty/libs -# filters: -# - exclude: "**/documentation/examples/snippets/*.py" -# - include: "**/documentation/examples/test_application/*" -# include: From fa8755085a9a1534ff347c7df5a86a723ab4bf99 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 14 Aug 2019 11:47:04 +0100 Subject: [PATCH 339/383] ci: fix procps command line format warning --- .ci/ci_lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/ci_lib.py b/.ci/ci_lib.py index 971ae5d8..dc7a02a8 100644 --- a/.ci/ci_lib.py +++ b/.ci/ci_lib.py @@ -233,7 +233,7 @@ def proc_is_docker(pid): def get_interesting_procs(container_name=None): - args = ['ps', '-a', '-x', '-oppid=', '-opid=', '-ocomm=', '-ocommand='] + args = ['ps', 'ax', '-oppid=', '-opid=', '-ocomm=', '-ocommand='] if container_name is not None: args = ['docker', 'exec', container_name] + args From 7e0c2fd1afb9ff9536b9d171437cb474ddf5fa7d Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 14 Aug 2019 11:47:24 +0100 Subject: [PATCH 340/383] tests: fix sudo_flags_failure for Ansible 2.8.3 --- tests/ansible/integration/become/sudo_flags_failure.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/ansible/integration/become/sudo_flags_failure.yml b/tests/ansible/integration/become/sudo_flags_failure.yml index 52404019..39fbb4b8 100644 --- a/tests/ansible/integration/become/sudo_flags_failure.yml +++ b/tests/ansible/integration/become/sudo_flags_failure.yml @@ -17,5 +17,6 @@ - out.failed - | ('sudo: no such option: --derps' in out.msg) or + ("sudo: invalid option -- '-'" in out.module_stderr) or ("sudo: unrecognized option `--derps'" in out.module_stderr) or ("sudo: unrecognized option '--derps'" in out.module_stderr) From bc275b25264ae27562f2bb7e06f276025c3242f3 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 14 Aug 2019 12:05:13 +0100 Subject: [PATCH 341/383] tests: another random string changed in 2.8.3 --- tests/ansible/integration/become/sudo_password.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/ansible/integration/become/sudo_password.yml b/tests/ansible/integration/become/sudo_password.yml index 128d8aee..f377fead 100644 --- a/tests/ansible/integration/become/sudo_password.yml +++ b/tests/ansible/integration/become/sudo_password.yml @@ -16,6 +16,7 @@ that: | out.failed and ( ('password is required' in out.msg) or + ('Missing sudo password' in out.msg) or ('password is required' in out.module_stderr) ) From db37000dd563f5807d16db3fc3e28442b4a1bc7c Mon Sep 17 00:00:00 2001 From: David Wilson Date: Tue, 30 Jul 2019 19:12:40 +0100 Subject: [PATCH 342/383] ci: update to 
Ansible 2.8.3 --- .travis.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index af637db1..580ced0b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -36,25 +36,25 @@ matrix: include: # Debops tests. - # 2.8.0; 3.6 -> 2.7 + # 2.8.3; 3.6 -> 2.7 - python: "3.6" - env: MODE=debops_common VER=2.8.0 + env: MODE=debops_common VER=2.8.3 # 2.4.6.0; 2.7 -> 2.7 - python: "2.7" env: MODE=debops_common VER=2.4.6.0 # Sanity check against vanilla Ansible. One job suffices. - python: "2.7" - env: MODE=ansible VER=2.8.0 DISTROS=debian STRATEGY=linear + env: MODE=ansible VER=2.8.3 DISTROS=debian STRATEGY=linear # ansible_mitogen tests. - # 2.8.0 -> {debian, centos6, centos7} + # 2.8.3 -> {debian, centos6, centos7} - python: "3.6" - env: MODE=ansible VER=2.8.0 - # 2.8.0 -> {debian, centos6, centos7} + env: MODE=ansible VER=2.8.3 + # 2.8.3 -> {debian, centos6, centos7} - python: "2.7" - env: MODE=ansible VER=2.8.0 + env: MODE=ansible VER=2.8.3 # 2.4.6.0 -> {debian, centos6, centos7} - python: "3.6" From e0d9b8d1e15048f236485b1720b16b1082c61268 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Wed, 14 Aug 2019 12:47:23 +0100 Subject: [PATCH 343/383] docs: a few more internals.rst additions --- docs/internals.rst | 8 ++++++++ mitogen/core.py | 6 +++--- mitogen/parent.py | 3 +++ 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/docs/internals.rst b/docs/internals.rst index 40ea33df..c3247be0 100644 --- a/docs/internals.rst +++ b/docs/internals.rst @@ -65,6 +65,10 @@ Stream, Side & Protocol .. autoclass:: Stream :members: +.. currentmodule:: mitogen.core +.. autoclass:: BufferedWriter + :members: + .. currentmodule:: mitogen.core .. autoclass:: Side :members: @@ -81,6 +85,10 @@ Stream, Side & Protocol .. autoclass:: DelimitedProtocol :members: +.. currentmodule:: mitogen.parent +.. autoclass:: LogProtocol + :members: + .. currentmodule:: mitogen.core .. autoclass:: IoLoggerProtocol :members: diff --git a/mitogen/core.py b/mitogen/core.py index 2f52ebe4..9f7079f5 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -2727,9 +2727,9 @@ class Latch(object): class Waker(Protocol): """ - :class:`BasicStream` subclass implementing the `UNIX self-pipe trick`_. - Used to wake the multiplexer when another thread needs to modify its state - (via a cross-thread function call). + :class:`Protocol` implementing the `UNIX self-pipe trick`_. Used to wake + :class:`Broker` when another thread needs to modify its state, by enqueing + a function call to run on the :class:`Broker` thread. .. _UNIX self-pipe trick: https://cr.yp.to/docs/selfpipe.html """ diff --git a/mitogen/parent.py b/mitogen/parent.py index bc4bfc0d..82b4a7d1 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -1250,6 +1250,9 @@ class LogProtocol(LineLoggingProtocolMixin, mitogen.core.DelimitedProtocol): written to it. """ def on_line_received(self, line): + """ + Read a line, decode it as UTF-8, and log it. + """ super(LogProtocol, self).on_line_received(line) LOG.info(u'%s: %s', self.stream.name, line.decode('utf-8', 'replace')) From 341c453eaad05deecc1ab4fe68a052dcaa8c351e Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 15 Aug 2019 02:00:48 +0100 Subject: [PATCH 344/383] issue #595: add buildah to docs and changelog. 
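
For reference, the documented method can also be exercised directly from the
library API. A minimal sketch, assuming buildah is installed locally and that
a container named "build-env" (an invented name for illustration) already
exists:

    import os
    import mitogen.master

    broker = mitogen.master.Broker()
    router = mitogen.master.Router(broker=broker)
    try:
        # Start a Python interpreter inside the existing Buildah container
        # and run a function in it.
        context = router.buildah(container='build-env')
        print(context.call(os.getpid))
    finally:
        broker.shutdown()
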
--- docs/ansible_detailed.rst | 16 +++++++++++++++- docs/api.rst | 14 ++++++++++++++ docs/changelog.rst | 6 ++++++ 3 files changed, 35 insertions(+), 1 deletion(-) diff --git a/docs/ansible_detailed.rst b/docs/ansible_detailed.rst index c3bbad59..fba7a86a 100644 --- a/docs/ansible_detailed.rst +++ b/docs/ansible_detailed.rst @@ -175,7 +175,8 @@ Noteworthy Differences your_ssh_username = (ALL) NOPASSWD:/usr/bin/python -c* -* The `docker `_, +* The `buildah `_, + `docker `_, `jail `_, `kubectl `_, `local `_, @@ -722,6 +723,19 @@ establishment of additional reuseable interpreters as necessary to match the configuration of each task. +.. _method-buildah: + +Buildah +~~~~~~~ + +Like `buildah +`_ except +connection delegation is supported. + +* ``ansible_host``: Name of Buildah container (default: inventory hostname). +* ``ansible_user``: Name of user within the container to execute as. + + .. _doas: Doas diff --git a/docs/api.rst b/docs/api.rst index 09aa8582..7ab3274e 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -87,6 +87,20 @@ Router Class Connection Methods ================== +.. currentmodule:: mitogen.parent +.. method:: Router.buildah (container=None, buildah_path=None, username=None, \**kwargs) + + Construct a context on the local machine over a ``buildah`` invocation. + Accepts all parameters accepted by :meth:`local`, in addition to: + + :param str container: + The name of the Buildah container to connect to. + :param str doas_path: + Filename or complete path to the ``buildah`` binary. ``PATH`` will be + searched if given as a filename. Defaults to ``buildah``. + :param str username: + Username to use, defaults to unset. + .. currentmodule:: mitogen.parent .. method:: Router.fork (on_fork=None, on_start=None, debug=False, profiling=False, via=None) diff --git a/docs/changelog.rst b/docs/changelog.rst index 77c311c0..8468e7bb 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -52,6 +52,11 @@ Enhancements place to support future removal of the final round-trip between a target fully booting and receiving its first function call. +* `#595 `_: the :meth:`Router.buildah + ` connection method is available to connect + to Buildah containers, and is exposed to Ansible as the ``buildah`` + transport. + * `d6faff06 `_, `807cbef9 `_, `e93762b3 `_, @@ -230,6 +235,7 @@ bug reports, testing, features and fixes in this release contributed by `El Mehdi CHAOUKI `_, `Florent Dutheil `_, `James Hogarth `_, +`Jordan Webb `_, `Marc Hartmayer `_, `Nigel Metheringham `_, `Orion Poplawski `_, From db8f0db5e717ccb2c92b66994c0b0b6a6199be61 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Thu, 15 Aug 2019 02:34:57 +0100 Subject: [PATCH 345/383] docs: lots more changelog --- docs/changelog.rst | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index 8468e7bb..dc848248 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -32,30 +32,32 @@ Enhancements `_ are not yet handled. -* The ``MITOGEN_CPU_COUNT`` environment variable shards the connection - multiplexer into per-CPU workers. This improves throughput for large runs - especially involving file transfer, and is a prerequisite for future - in-process SSH support. One multiplexer starts by default, to match existing - behaviour. +* `Operon `_ no longer requires a custom + installation, both Operon and Ansible are supported by a unified release. 
* `#419 `_, `#470 `_, file descriptor usage during large runs is halved, as it is no longer necessary to manage read and - write sides distinctly in order to work around a design limitation. + write sides distinctly in order to work around a design problem. * `#419 `_: almost all connection - setup happens on one thread, reducing GIL contention and context switching - early in a run. + setup happens on one thread, reducing contention and context switching early + in a run. * `#419 `_: Connection setup is - pipelined, eliminating several network round-trips. Most infrastructure is in - place to support future removal of the final round-trip between a target - fully booting and receiving its first function call. - -* `#595 `_: the :meth:`Router.buildah - ` connection method is available to connect - to Buildah containers, and is exposed to Ansible as the ``buildah`` - transport. + better pipelined, eliminating some network round-trips. Most infrastructure + is in place to support future removal of the final round-trips between a + target fully booting and receiving function calls. + +* `#595 `_: the + :meth:`Router.buildah() ` connection method is + available to manipulate `Buildah `_ containers, and is + exposed to Ansible as the ``buildah`` transport. + +* The ``MITOGEN_CPU_COUNT`` environment variable shards the connection + multiplexer into per-CPU workers. This may improve throughput for runs + involving large file transfers, and is required for future in-process SSH + support. One multiplexer starts by default, to match existing behaviour. * `d6faff06 `_, `807cbef9 `_, From 98832f3b6420e326717362558a9be9d07cd7b9db Mon Sep 17 00:00:00 2001 From: David Wilson Date: Fri, 16 Aug 2019 16:56:50 +0100 Subject: [PATCH 346/383] issue #533: include object identity of Stream in repr() At least one of the causes of the #533 error appears to be that streams with the same name exist --- mitogen/core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mitogen/core.py b/mitogen/core.py index 9f7079f5..45a7f379 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -1681,7 +1681,7 @@ class Stream(object): self.transmit_side = Side(self, wfp) def __repr__(self): - return "" % (self.name,) + return "" % (self.name, id(self) & 0xffff,) def on_receive(self, broker): """ From 069285a588a561018196d44ba3188c095f026ac7 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 02:13:35 +0100 Subject: [PATCH 347/383] issue #615: ansible: import Ansible fetch.py action plug-in From ansible/ansible#9773a1f2896a914d237cb9926e3b5cdc0f004d1a --- .../plugins/action/mitogen_fetch.py | 206 ++++++++++++++++++ 1 file changed, 206 insertions(+) create mode 100644 ansible_mitogen/plugins/action/mitogen_fetch.py diff --git a/ansible_mitogen/plugins/action/mitogen_fetch.py b/ansible_mitogen/plugins/action/mitogen_fetch.py new file mode 100644 index 00000000..515dd017 --- /dev/null +++ b/ansible_mitogen/plugins/action/mitogen_fetch.py @@ -0,0 +1,206 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import base64 + +from ansible.errors import AnsibleError +from ansible.module_utils._text import to_bytes +from ansible.module_utils.six import string_types +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.plugins.action import ActionBase +from ansible.utils.display import Display +from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash +from ansible.utils.path import makedirs_safe + +display = Display() + + +class ActionModule(ActionBase): + + def run(self, tmp=None, task_vars=None): + ''' handler for fetch operations ''' + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + try: + if self._play_context.check_mode: + result['skipped'] = True + result['msg'] = 'check mode not (yet) supported for this module' + return result + + source = self._task.args.get('src', None) + dest = self._task.args.get('dest', None) + flat = boolean(self._task.args.get('flat'), strict=False) + fail_on_missing = boolean(self._task.args.get('fail_on_missing', True), strict=False) + validate_checksum = boolean(self._task.args.get('validate_checksum', True), strict=False) + + # validate source and dest are strings FIXME: use basic.py and module specs + if not isinstance(source, string_types): + result['msg'] = "Invalid type supplied for source option, it must be a string" + + if not isinstance(dest, string_types): + result['msg'] = "Invalid type supplied for dest option, it must be a string" + + if source is None or dest is None: + result['msg'] = "src and dest are required" + + if result.get('msg'): + result['failed'] = True + return result + + source = self._connection._shell.join_path(source) + source = self._remote_expand_user(source) + + remote_checksum = None + if not self._play_context.become: + # calculate checksum for the remote file, don't bother if using become as slurp will be used + # Force remote_checksum to follow symlinks because fetch always follows symlinks + remote_checksum = self._remote_checksum(source, all_vars=task_vars, follow=True) + + # use slurp if permissions are lacking or privilege escalation is needed + remote_data = None + if remote_checksum in ('1', '2', None): + slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars) + if slurpres.get('failed'): + if not fail_on_missing and (slurpres.get('msg').startswith('file not found') or remote_checksum == '1'): + result['msg'] = "the remote file does not exist, not transferring, ignored" + result['file'] = source + result['changed'] = False + else: + result.update(slurpres) + return result + else: + if slurpres['encoding'] == 'base64': + remote_data = base64.b64decode(slurpres['content']) + if remote_data is not None: + remote_checksum = checksum_s(remote_data) + # the source path may have been expanded on the + # target system, so we compare it here and use the + # expanded version if it's different + remote_source = slurpres.get('source') + if remote_source and remote_source != source: + source = remote_source + + # calculate the destination name + if os.path.sep not in self._connection._shell.join_path('a', ''): + source = self._connection._shell._unquote(source) + source_local = source.replace('\\', '/') + else: + source_local = source + + dest 
= os.path.expanduser(dest) + if flat: + if os.path.isdir(to_bytes(dest, errors='surrogate_or_strict')) and not dest.endswith(os.sep): + result['msg'] = "dest is an existing directory, use a trailing slash if you want to fetch src into that directory" + result['file'] = dest + result['failed'] = True + return result + if dest.endswith(os.sep): + # if the path ends with "/", we'll use the source filename as the + # destination filename + base = os.path.basename(source_local) + dest = os.path.join(dest, base) + if not dest.startswith("/"): + # if dest does not start with "/", we'll assume a relative path + dest = self._loader.path_dwim(dest) + else: + # files are saved in dest dir, with a subdir for each host, then the filename + if 'inventory_hostname' in task_vars: + target_name = task_vars['inventory_hostname'] + else: + target_name = self._play_context.remote_addr + dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local) + + dest = dest.replace("//", "/") + + if remote_checksum in ('0', '1', '2', '3', '4', '5'): + result['changed'] = False + result['file'] = source + if remote_checksum == '0': + result['msg'] = "unable to calculate the checksum of the remote file" + elif remote_checksum == '1': + result['msg'] = "the remote file does not exist" + elif remote_checksum == '2': + result['msg'] = "no read permission on remote file" + elif remote_checksum == '3': + result['msg'] = "remote file is a directory, fetch cannot work on directories" + elif remote_checksum == '4': + result['msg'] = "python isn't present on the system. Unable to compute checksum" + elif remote_checksum == '5': + result['msg'] = "stdlib json was not found on the remote machine. Only the raw module can work without those installed" + # Historically, these don't fail because you may want to transfer + # a log file that possibly MAY exist but keep going to fetch other + # log files. Today, this is better achieved by adding + # ignore_errors or failed_when to the task. Control the behaviour + # via fail_when_missing + if fail_on_missing: + result['failed'] = True + del result['changed'] + else: + result['msg'] += ", not transferring, ignored" + return result + + # calculate checksum for the local file + local_checksum = checksum(dest) + + if remote_checksum != local_checksum: + # create the containing directories, if needed + makedirs_safe(os.path.dirname(dest)) + + # fetch the file and check for changes + if remote_data is None: + self._connection.fetch_file(source, dest) + else: + try: + f = open(to_bytes(dest, errors='surrogate_or_strict'), 'wb') + f.write(remote_data) + f.close() + except (IOError, OSError) as e: + raise AnsibleError("Failed to fetch the file: %s" % e) + new_checksum = secure_hash(dest) + # For backwards compatibility. We'll return None on FIPS enabled systems + try: + new_md5 = md5(dest) + except ValueError: + new_md5 = None + + if validate_checksum and new_checksum != remote_checksum: + result.update(dict(failed=True, md5sum=new_md5, + msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, + checksum=new_checksum, remote_checksum=remote_checksum)) + else: + result.update({'changed': True, 'md5sum': new_md5, 'dest': dest, + 'remote_md5sum': None, 'checksum': new_checksum, + 'remote_checksum': remote_checksum}) + else: + # For backwards compatibility. 
We'll return None on FIPS enabled systems + try: + local_md5 = md5(dest) + except ValueError: + local_md5 = None + result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum)) + + finally: + self._remove_tmp_path(self._connection._shell.tmpdir) + + return result From 52c8ed7715f6ce268bd652d736789b14c64cae83 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 02:20:09 +0100 Subject: [PATCH 348/383] issue #615: extricate slurp brainwrong from mitogen_fetch --- .../plugins/action/mitogen_fetch.py | 73 +++++-------------- 1 file changed, 17 insertions(+), 56 deletions(-) diff --git a/ansible_mitogen/plugins/action/mitogen_fetch.py b/ansible_mitogen/plugins/action/mitogen_fetch.py index 515dd017..ffa737e5 100644 --- a/ansible_mitogen/plugins/action/mitogen_fetch.py +++ b/ansible_mitogen/plugins/action/mitogen_fetch.py @@ -18,18 +18,23 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os -import base64 -from ansible.errors import AnsibleError from ansible.module_utils._text import to_bytes from ansible.module_utils.six import string_types from ansible.module_utils.parsing.convert_bool import boolean from ansible.plugins.action import ActionBase -from ansible.utils.display import Display -from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash +from ansible.utils.hashing import checksum, md5, secure_hash from ansible.utils.path import makedirs_safe -display = Display() + +REMOTE_CHECKSUM_ERRORS = { + '0': "unable to calculate the checksum of the remote file", + '1': "the remote file does not exist", + '2': "no read permission on remote file", + '3': "remote file is a directory, fetch cannot work on directories", + '4': "python isn't present on the system. Unable to compute checksum", + '5': "stdlib json was not found on the remote machine. 
Only the raw module can work without those installed", +} class ActionModule(ActionBase): @@ -71,35 +76,10 @@ class ActionModule(ActionBase): source = self._connection._shell.join_path(source) source = self._remote_expand_user(source) - remote_checksum = None - if not self._play_context.become: - # calculate checksum for the remote file, don't bother if using become as slurp will be used - # Force remote_checksum to follow symlinks because fetch always follows symlinks - remote_checksum = self._remote_checksum(source, all_vars=task_vars, follow=True) - - # use slurp if permissions are lacking or privilege escalation is needed - remote_data = None - if remote_checksum in ('1', '2', None): - slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars) - if slurpres.get('failed'): - if not fail_on_missing and (slurpres.get('msg').startswith('file not found') or remote_checksum == '1'): - result['msg'] = "the remote file does not exist, not transferring, ignored" - result['file'] = source - result['changed'] = False - else: - result.update(slurpres) - return result - else: - if slurpres['encoding'] == 'base64': - remote_data = base64.b64decode(slurpres['content']) - if remote_data is not None: - remote_checksum = checksum_s(remote_data) - # the source path may have been expanded on the - # target system, so we compare it here and use the - # expanded version if it's different - remote_source = slurpres.get('source') - if remote_source and remote_source != source: - source = remote_source + # calculate checksum for the remote file, don't bother if using + # become as slurp will be used Force remote_checksum to follow + # symlinks because fetch always follows symlinks + remote_checksum = self._remote_checksum(source, all_vars=task_vars, follow=True) # calculate the destination name if os.path.sep not in self._connection._shell.join_path('a', ''): @@ -133,21 +113,10 @@ class ActionModule(ActionBase): dest = dest.replace("//", "/") - if remote_checksum in ('0', '1', '2', '3', '4', '5'): + if remote_checksum in REMOTE_CHECKSUM_ERRORS: result['changed'] = False result['file'] = source - if remote_checksum == '0': - result['msg'] = "unable to calculate the checksum of the remote file" - elif remote_checksum == '1': - result['msg'] = "the remote file does not exist" - elif remote_checksum == '2': - result['msg'] = "no read permission on remote file" - elif remote_checksum == '3': - result['msg'] = "remote file is a directory, fetch cannot work on directories" - elif remote_checksum == '4': - result['msg'] = "python isn't present on the system. Unable to compute checksum" - elif remote_checksum == '5': - result['msg'] = "stdlib json was not found on the remote machine. Only the raw module can work without those installed" + result['msg'] = REMOTE_CHECKSUM_ERRORS[remote_checksum] # Historically, these don't fail because you may want to transfer # a log file that possibly MAY exist but keep going to fetch other # log files. 
Today, this is better achieved by adding @@ -168,15 +137,7 @@ class ActionModule(ActionBase): makedirs_safe(os.path.dirname(dest)) # fetch the file and check for changes - if remote_data is None: - self._connection.fetch_file(source, dest) - else: - try: - f = open(to_bytes(dest, errors='surrogate_or_strict'), 'wb') - f.write(remote_data) - f.close() - except (IOError, OSError) as e: - raise AnsibleError("Failed to fetch the file: %s" % e) + self._connection.fetch_file(source, dest) new_checksum = secure_hash(dest) # For backwards compatibility. We'll return None on FIPS enabled systems try: From 03d2bc6c593b42ac0898c516ce79f90d8a022c07 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 02:23:46 +0100 Subject: [PATCH 349/383] issue #615: redirect 'fetch' action to 'mitogen_fetch'. --- ansible_mitogen/strategy.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ansible_mitogen/strategy.py b/ansible_mitogen/strategy.py index 755b9113..8f093999 100644 --- a/ansible_mitogen/strategy.py +++ b/ansible_mitogen/strategy.py @@ -127,6 +127,8 @@ def wrap_action_loader__get(name, *args, **kwargs): action plugins outside the Ansible tree. """ get_kwargs = {'class_only': True} + if name in ('fetch',): + name = 'mitogen_' + name if ansible.__version__ >= '2.8': get_kwargs['collection_list'] = kwargs.pop('collection_list', None) From 151b490890e712e480208e0240e42958116ae526 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 02:23:58 +0100 Subject: [PATCH 350/383] issue #615: fetch_file() might be called with AnsibleUnicode. --- ansible_mitogen/connection.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ansible_mitogen/connection.py b/ansible_mitogen/connection.py index 06a152b2..2dd3bfa9 100644 --- a/ansible_mitogen/connection.py +++ b/ansible_mitogen/connection.py @@ -953,7 +953,8 @@ class Connection(ansible.plugins.connection.ConnectionBase): self._connect() ansible_mitogen.target.transfer_file( context=self.context, - in_path=in_path, + # in_path may be AnsibleUnicode + in_path=mitogen.utils.cast(in_path), out_path=out_path ) From 3f5ff17c8ca5f5cfa70630c82b13a5519ca99e0a Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 03:07:12 +0100 Subject: [PATCH 351/383] issue #615: route a dead message to recipients when no reply is expected --- mitogen/core.py | 45 +++++++++++++++++++++++++++++++++++--------- tests/router_test.py | 20 +++++++++++++++++++- 2 files changed, 55 insertions(+), 10 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index 45a7f379..f9099e9a 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -2112,8 +2112,8 @@ class MitogenProtocol(Protocol): return False if msg_len > self._router.max_message_size: - LOG.error('Maximum message size exceeded (got %d, max %d)', - msg_len, self._router.max_message_size) + LOG.error('%r: Maximum message size exceeded (got %d, max %d)', + self, msg_len, self._router.max_message_size) self.stream.on_disconnect(broker) return False @@ -3191,28 +3191,55 @@ class Router(object): fn(Message.dead(self.respondent_disconnect_msg)) del self._handle_map[handle] - def _maybe_send_dead(self, msg, reason, *args): + def _maybe_send_dead(self, unreachable, msg, reason, *args): + """ + Send a dead message to either the original sender or the intended + recipient of `msg`, if the original sender was expecting a reply + (because its `reply_to` was set), otherwise assume the message is a + reply of some sort, and send the dead message to the original + destination. 
+ + :param bool unreachable: + If :data:`True`, the recipient is known to be dead or routing + failed due to a security precaution, so don't attempt to fallback + to sending the dead message to the recipient if the original sender + did not include a reply address. + :param mitogen.core.Message msg: + Message that triggered the dead message. + :param str reason: + Human-readable error reason. + :param tuple args: + Elements to interpolate with `reason`. + """ if args: reason %= args LOG.debug('%r: %r is dead: %r', self, msg, reason) if msg.reply_to and not msg.is_dead: msg.reply(Message.dead(reason=reason), router=self) + elif not unreachable: + self._async_route( + Message.dead( + dst_id=msg.dst_id, + handle=msg.handle, + reason=reason, + ) + ) def _invoke(self, msg, stream): # IOLOG.debug('%r._invoke(%r)', self, msg) try: persist, fn, policy, respondent = self._handle_map[msg.handle] except KeyError: - self._maybe_send_dead(msg, reason=self.invalid_handle_msg) + self._maybe_send_dead(True, msg, reason=self.invalid_handle_msg) return if respondent and not (msg.is_dead or msg.src_id == respondent.context_id): - self._maybe_send_dead(msg, 'reply from unexpected context') + self._maybe_send_dead(True, msg, 'reply from unexpected context') return if policy and not policy(msg, stream): - self._maybe_send_dead(msg, self.refused_msg) + self._maybe_send_dead(True, msg, self.refused_msg) return if not persist: @@ -3240,7 +3267,7 @@ class Router(object): _vv and IOLOG.debug('%r._async_route(%r, %r)', self, msg, in_stream) if len(msg.data) > self.max_message_size: - self._maybe_send_dead(msg, self.too_large_msg % ( + self._maybe_send_dead(False, msg, self.too_large_msg % ( self.max_message_size, )) return @@ -3275,14 +3302,14 @@ class Router(object): out_stream = self._stream_by_id.get(mitogen.parent_id) if out_stream is None: - self._maybe_send_dead(msg, self.no_route_msg, + self._maybe_send_dead(True, msg, self.no_route_msg, msg.dst_id, mitogen.context_id) return if in_stream and self.unidirectional and not \ (in_stream.protocol.is_privileged or out_stream.protocol.is_privileged): - self._maybe_send_dead(msg, self.unidirectional_msg, + self._maybe_send_dead(True, msg, self.unidirectional_msg, in_stream.protocol.remote_id, out_stream.protocol.remote_id, mitogen.context_id) diff --git a/tests/router_test.py b/tests/router_test.py index 1cde016d..ef3fc4d5 100644 --- a/tests/router_test.py +++ b/tests/router_test.py @@ -11,6 +11,7 @@ import mitogen.core import mitogen.master import mitogen.parent import mitogen.utils +from mitogen.core import b try: import Queue @@ -258,6 +259,23 @@ class MessageSizeTest(testlib.BrokerMixin, testlib.TestCase): self.assertTrue(expect in logs.stop()) + def test_remote_dead_message(self): + # Router should send dead message to original recipient when reply_to + # is unset. + router = self.klass(broker=self.broker, max_message_size=4096) + + # Try function call. Receiver should be woken by a dead message sent by + # router due to message size exceeded. 
+ child = router.local() + recv = mitogen.core.Receiver(router) + + recv.to_sender().send(b('x') * 4097) + e = self.assertRaises(mitogen.core.ChannelError, + lambda: recv.get().unpickle() + ) + expect = router.too_large_msg % (4096,) + self.assertEquals(e.args[0], expect) + def test_remote_configured(self): router = self.klass(broker=self.broker, max_message_size=64*1024) remote = router.local() @@ -510,7 +528,7 @@ class ShutdownTest(testlib.RouterMixin, testlib.TestCase): mitogen.context_id, )) - def test_disconnet_all(self): + def test_disconnect_all(self): l1 = self.router.local() l2 = self.router.local() From 3c8c11b36088b852c1520f98b5d041902230ae1d Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 03:16:41 +0100 Subject: [PATCH 352/383] issue #615: update Changelog. --- docs/changelog.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index dc848248..7f0ff234 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -200,6 +200,14 @@ Core Library * `#612 `_: fix various errors introduced by stream refactoring. +* `#615 `_: when routing fails to + deliver a message for some reason other than the sender cannot or should not + reach the recipient, and no reply-to address is present on the message, + instead send a dead message to the original recipient. This ensures a + descriptive messages is delivered to a thread sleeping on the reply to a + function call, where the reply might be dropped due to exceeding the maximum + configured message size. + * `a5536c35 `_: avoid quadratic buffer management when logging lines received from a child's redirected standard IO. From 67759371f9d2b85c922a973d3e31166cd207a5d3 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 03:19:32 +0100 Subject: [PATCH 353/383] issue #615: ensure 4GB max_message_size is configured for task workers. This 4GB limit was already set for MuxProcess and inherited by all descendents including the context running on the target host, but it was not applied to the WorkerProcess router. That explains why the error from the ticket is being raised by the router within the WorkerProcess rather than the router on the original target. --- ansible_mitogen/process.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index 503e9bb7..10d55fdf 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -72,6 +72,8 @@ ANSIBLE_PKG_OVERRIDE = ( u"__author__ = %r\n" ) +MAX_MESSAGE_SIZE = 4096 * 1048576 + worker_model_msg = ( 'Mitogen connection types may only be instantiated when one of the ' '"mitogen_*" or "operon_*" strategies are active.' @@ -502,6 +504,7 @@ class ClassicWorkerModel(WorkerModel): # with_items loops. raise ansible.errors.AnsibleError(shutting_down_msg % (e,)) + self.router.max_message_size = MAX_MESSAGE_SIZE self.listener_path = path def _on_process_exit(self): @@ -692,7 +695,7 @@ class MuxProcess(object): self.broker = mitogen.master.Broker(install_watcher=False) self.router = mitogen.master.Router( broker=self.broker, - max_message_size=4096 * 1048576, + max_message_size=MAX_MESSAGE_SIZE, ) _setup_responder(self.router.responder) mitogen.core.listen(self.broker, 'shutdown', self._on_broker_shutdown) From 207f57537aff88d58187b754c169871ecef02d2f Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 03:32:46 +0100 Subject: [PATCH 354/383] issue #615: update Changelog. 
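
The #615 work also changed how oversized messages are reported to a waiting
receiver. The behaviour the updated changelog describes can be reproduced in
miniature with this sketch, adapted from the new test in tests/router_test.py
(it assumes only a local Python able to import mitogen):

    import mitogen.core
    import mitogen.master
    from mitogen.core import b

    broker = mitogen.master.Broker()
    router = mitogen.master.Router(broker=broker, max_message_size=4096)
    try:
        child = router.local()    # mirror the regression test setup
        recv = mitogen.core.Receiver(router)
        recv.to_sender().send(b('x') * 4097)
        # Rather than sleeping forever, the receiver is woken by a dead
        # message whose reason names the configured limit.
        recv.get().unpickle()     # raises mitogen.core.ChannelError
    except mitogen.core.ChannelError as e:
        print(e)
    finally:
        broker.shutdown()
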
--- docs/changelog.rst | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index 7f0ff234..5e527ea2 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -32,9 +32,6 @@ Enhancements `_ are not yet handled. -* `Operon `_ no longer requires a custom - installation, both Operon and Ansible are supported by a unified release. - * `#419 `_, `#470 `_, file descriptor usage during large runs is halved, as it is no longer necessary to manage read and @@ -54,6 +51,17 @@ Enhancements available to manipulate `Buildah `_ containers, and is exposed to Ansible as the ``buildah`` transport. +* `#615 `_: the ``mitogen_fetch`` + action is included, and the standard Ansible ``fetch`` action is redirected + to it. This implements streaming file transfer in every case, including when + ``become`` is active, preventing excessive CPU usage and memory spikes, and + significantly improving throughput. A copy of 2 files of 512 MiB each drops + from 47 seconds to just under 7 seconds, with peak memory usage dropping from + 10.7 GiB to 64.8 MiB. + +* `Operon `_ no longer requires a custom + installation, both Operon and Ansible are supported by a unified release. + * The ``MITOGEN_CPU_COUNT`` environment variable shards the connection multiplexer into per-CPU workers. This may improve throughput for runs involving large file transfers, and is required for future in-process SSH From 0e489625ed6eb22359248bf5cfd2b93c1b91015b Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 05:25:31 +0100 Subject: [PATCH 355/383] issue #615: regression test --- tests/ansible/regression/all.yml | 1 + .../issue_615__streaming_transfer.yml | 21 +++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100644 tests/ansible/regression/issue_615__streaming_transfer.yml diff --git a/tests/ansible/regression/all.yml b/tests/ansible/regression/all.yml index f75a050c..81780bb3 100644 --- a/tests/ansible/regression/all.yml +++ b/tests/ansible/regression/all.yml @@ -11,3 +11,4 @@ - include: issue_558_unarchive_failed.yml - include: issue_590__sys_modules_crap.yml - include: issue_591__setuptools_cwd_crash.yml +- include: issue_615__streaming_transfer.yml diff --git a/tests/ansible/regression/issue_615__streaming_transfer.yml b/tests/ansible/regression/issue_615__streaming_transfer.yml new file mode 100644 index 00000000..aa7c62c4 --- /dev/null +++ b/tests/ansible/regression/issue_615__streaming_transfer.yml @@ -0,0 +1,21 @@ +# issue #615: 'fetch' with become: was internally using slurp. + +- hosts: target + any_errors_fatal: True + gather_facts: no + become: true + vars: + mitogen_ssh_compression: false + tasks: + - shell: | + dd if=/dev/zero of=/tmp/512mb.zero bs=1048576 count=512; + chmod go= /tmp/512mb.zero + + - fetch: + src: /tmp/512mb.zero + dest: /tmp/fetch-out + + - file: + path: /tmp/fetch-out + state: absent + delegate_to: localhost From 8a870f140255c67fdc42dad5b6a70ea1cc45ece0 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 11:51:25 +0100 Subject: [PATCH 356/383] issue #625: use exec() instead of subprocess in mitogen_ansible_playbook This is just to make CTRL+C handling less confusing. Alternate would be ignoring SIGINT, but this is simpler. 
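
The alternative mentioned above, keeping the subprocess call but ignoring
SIGINT in the wrapper, would have looked roughly like this (sketch only, not
part of the tree):

    import os
    import signal
    import subprocess
    import sys

    os.environ['ANSIBLE_STRATEGY'] = 'mitogen_linear'
    # The wrapper stops reacting to CTRL+C; the foreground child still
    # receives SIGINT from the terminal and exits on its own terms.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    subprocess.check_call(['./run_ansible_playbook.py'] + sys.argv[1:])
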
--- tests/ansible/mitogen_ansible_playbook.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/ansible/mitogen_ansible_playbook.py b/tests/ansible/mitogen_ansible_playbook.py index 3af1791c..54fd4283 100755 --- a/tests/ansible/mitogen_ansible_playbook.py +++ b/tests/ansible/mitogen_ansible_playbook.py @@ -3,4 +3,8 @@ import os import subprocess import sys os.environ['ANSIBLE_STRATEGY'] = 'mitogen_linear' -subprocess.check_call(['./run_ansible_playbook.py'] + sys.argv[1:]) +os.execlp( + './run_ansible_playbook.py', + './run_ansible_playbook.py', + *sys.argv[1:] +) From e02be898799754d6ac4b9ca85db8e7f043d235f9 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 11:56:31 +0100 Subject: [PATCH 357/383] issue #625: ignore SIGINT within MuxProcess Without this, MuxProcess will start dying too early, before Ansible / TaskQueueManager.cleanup() has a chance to wait on worker processes. That would allow WorkerProcess to see ECONNREFUSED from the MuxProcess socket much more easily. --- ansible_mitogen/process.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index 10d55fdf..1fc7bf80 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -33,6 +33,7 @@ import multiprocessing import os import resource import socket +import signal import sys try: @@ -659,6 +660,12 @@ class MuxProcess(object): connected to the parent to be closed (indicating the parent has died). """ save_pid('mux') + + # #623: MuxProcess ignores SIGINT because it wants to live until every + # Ansible worker process has been cleaned up by + # TaskQueueManager.cleanup(), otherwise harmles yet scary warnings + # about being unable connect to MuxProess could be printed. + signal.signal(signal.SIGINT, signal.SIG_IGN) ansible_mitogen.logging.set_process_name('mux') ansible_mitogen.affinity.policy.assign_muxprocess(self.index) From f4cf67f0bdc9bfe2324f76696678965bf0ea7d75 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 12:01:30 +0100 Subject: [PATCH 358/383] issue #615: remove meaningless test It has been dead code since at least 2015 --- ansible_mitogen/plugins/action/mitogen_fetch.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/ansible_mitogen/plugins/action/mitogen_fetch.py b/ansible_mitogen/plugins/action/mitogen_fetch.py index ffa737e5..1844efd8 100644 --- a/ansible_mitogen/plugins/action/mitogen_fetch.py +++ b/ansible_mitogen/plugins/action/mitogen_fetch.py @@ -45,30 +45,25 @@ class ActionModule(ActionBase): task_vars = dict() result = super(ActionModule, self).run(tmp, task_vars) - del tmp # tmp no longer has any effect - try: if self._play_context.check_mode: result['skipped'] = True result['msg'] = 'check mode not (yet) supported for this module' return result - source = self._task.args.get('src', None) - dest = self._task.args.get('dest', None) flat = boolean(self._task.args.get('flat'), strict=False) fail_on_missing = boolean(self._task.args.get('fail_on_missing', True), strict=False) validate_checksum = boolean(self._task.args.get('validate_checksum', True), strict=False) # validate source and dest are strings FIXME: use basic.py and module specs + source = self._task.args.get('src') if not isinstance(source, string_types): result['msg'] = "Invalid type supplied for source option, it must be a string" + dest = self._task.args.get('dest') if not isinstance(dest, string_types): result['msg'] = "Invalid type supplied for dest option, it must be a string" - 
if source is None or dest is None: - result['msg'] = "src and dest are required" - if result.get('msg'): result['failed'] = True return result From 8f99ebdf6ff3f776298d1de8c532d2b249ab654e Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 14:34:25 +0100 Subject: [PATCH 359/383] tests: hide memory-mapped files from lsof output Seems to be no saner way to do this. --- tests/testlib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/testlib.py b/tests/testlib.py index 73d3438d..b702fa05 100644 --- a/tests/testlib.py +++ b/tests/testlib.py @@ -338,7 +338,7 @@ class TestCase(unittest2.TestCase): def _teardown_check_fds(self): mitogen.core.Latch._on_fork() if get_fd_count() != self._fd_count_before: - import os; os.system('lsof +E -w -p %s' % (os.getpid(),)) + import os; os.system('lsof +E -w -p %s | grep -vw mem' % (os.getpid(),)) assert 0, "%s leaked FDs. Count before: %s, after: %s" % ( self, self._fd_count_before, get_fd_count(), ) From 11923431a6e39bbb44b971970a33ce3288217cb6 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 14:35:03 +0100 Subject: [PATCH 360/383] tests: one case from doas_test was invoking su --- tests/doas_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/doas_test.py b/tests/doas_test.py index 560ada99..73758476 100644 --- a/tests/doas_test.py +++ b/tests/doas_test.py @@ -57,7 +57,7 @@ class DoasTest(testlib.DockerMixin, testlib.TestCase): username='mitogen__has_sudo', password='has_sudo_password', ) - context = self.router.su(via=ssh, password='rootpassword') + context = self.router.doas(via=ssh, password='has_sudo_password') self.assertEquals(0, context.call(os.getuid)) From 3d72cf82e3141da781600984d7f1cc6ac2263af9 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 14:35:31 +0100 Subject: [PATCH 361/383] tests: use defer_sync() Rather than defer() + ancient sync_with_broker() --- tests/router_test.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/router_test.py b/tests/router_test.py index ef3fc4d5..fb4da501 100644 --- a/tests/router_test.py +++ b/tests/router_test.py @@ -62,12 +62,12 @@ class SourceVerifyTest(testlib.RouterMixin, testlib.TestCase): recv = mitogen.core.Receiver(self.router) self.child2_msg.handle = recv.handle - self.broker.defer(self.router._async_route, - self.child2_msg, - in_stream=self.child1_stream) - - # Wait for IO loop to finish everything above. - self.sync_with_broker() + self.broker.defer_sync( + lambda: self.router._async_route( + self.child2_msg, + in_stream=self.child1_stream + ) + ) # Ensure message wasn't forwarded. self.assertTrue(recv.empty()) From bcca47df3c8e0fda7734dfbc7f6909d3eb049b74 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 14:48:08 +0100 Subject: [PATCH 362/383] issue #533: update routing to account for DEL_ROUTE propagation race --- docs/changelog.rst | 12 +++++++++++ mitogen/core.py | 48 ++++++++++++++++++++++++++++---------------- tests/router_test.py | 28 ++++++++++++++++++++++++++ 3 files changed, 71 insertions(+), 17 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index 5e527ea2..998821e2 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -198,6 +198,18 @@ Core Library `closed` flag, preventing historical bugs where a double close could destroy descriptors belonging to unrelated streams. 
+* `#533 `_: routing accounts for + a race between a parent sending a message to a child via an intermediary, + where the child had recently disconnected, and ``DEL_ROUTE`` propagating from + the intermediary to the parent, informing it that the child no longer exists. + This condition is detected at the intermediary and a dead message is returned + to the parent. + + Previously since the intermediary had already removed its route for the + child, the *route messages upwards* rule would be triggered, causing the + message (with a privileged ``src_id``/``auth_id``) to be sent upstream, + resulting in a ``bad auth_id`` log message and a hang. + * `#586 `_: fix import of :mod:`__main__` on later versions of Python 3 when running from the interactive console. diff --git a/mitogen/core.py b/mitogen/core.py index f9099e9a..30c0e948 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -3272,34 +3272,48 @@ class Router(object): )) return - # Perform source verification. + parent_stream = self._stream_by_id.get(mitogen.parent_id) + src_stream = self._stream_by_id.get(msg.src_id, parent_stream) + + # When the ingress stream is known, verify the message was received on + # the same as the stream we would expect to receive messages from the + # src_id and auth_id. This is like Reverse Path Filtering in IP, and + # ensures messages from a privileged context cannot be spoofed by a + # child. if in_stream: - parent = self._stream_by_id.get(mitogen.parent_id) - expect = self._stream_by_id.get(msg.auth_id, parent) - if in_stream != expect: + auth_stream = self._stream_by_id.get(msg.auth_id, parent_stream) + if in_stream != auth_stream: LOG.error('%r: bad auth_id: got %r via %r, not %r: %r', - self, msg.auth_id, in_stream, expect, msg) + self, msg.auth_id, in_stream, auth_stream, msg) return - if msg.src_id != msg.auth_id: - expect = self._stream_by_id.get(msg.src_id, parent) - if in_stream != expect: - LOG.error('%r: bad src_id: got %r via %r, not %r: %r', - self, msg.src_id, in_stream, expect, msg) - return + if msg.src_id != msg.auth_id and in_stream != src_stream: + LOG.error('%r: bad src_id: got %r via %r, not %r: %r', + self, msg.src_id, in_stream, src_stream, msg) + return + # If the stream's MitogenProtocol has auth_id set, copy it to the + # message. This allows subtrees to become privileged by stamping a + # parent's context ID. It is used by mitogen.unix to mark client + # streams (like Ansible WorkerProcess) as having the same rights as + # the parent. if in_stream.protocol.auth_id is not None: msg.auth_id = in_stream.protocol.auth_id - # Maintain a set of IDs the source ever communicated with. + # Record the IDs the source ever communicated with. in_stream.protocol.egress_ids.add(msg.dst_id) if msg.dst_id == mitogen.context_id: return self._invoke(msg, in_stream) out_stream = self._stream_by_id.get(msg.dst_id) - if out_stream is None: - out_stream = self._stream_by_id.get(mitogen.parent_id) + if (not out_stream) and (parent_stream != src_stream or not in_stream): + # No downstream route exists. The message could be from a child or + # ourselves for a parent, in which case we must forward it + # upstream, or it could be from a parent for a dead child, in which + # case its src_id/auth_id would fail verification if returned to + # the parent, so in that case reply with a dead message instead. 
+ out_stream = parent_stream

 if out_stream is None:
 self._maybe_send_dead(True, msg, self.no_route_msg,
@@ -3310,9 +3324,9 @@
 (in_stream.protocol.is_privileged or
 out_stream.protocol.is_privileged):
 self._maybe_send_dead(True, msg, self.unidirectional_msg,
- in_stream.protocol.remote_id,
- out_stream.protocol.remote_id,
- mitogen.context_id)
+ in_stream.protocol.remote_id,
+ out_stream.protocol.remote_id,
+ mitogen.context_id)
 return

 out_stream.protocol._send(msg)

diff --git a/tests/router_test.py b/tests/router_test.py
index fb4da501..58ab637a 100644
--- a/tests/router_test.py
+++ b/tests/router_test.py
@@ -76,6 +76,34 @@ class SourceVerifyTest(testlib.RouterMixin, testlib.TestCase):
 expect = 'bad auth_id: got %r via' % (self.child2_msg.auth_id,)
 self.assertTrue(expect in log.stop())

+ def test_parent_unaware_of_disconnect(self):
+ # Parent -> Child A -> Child B. B disconnects concurrent to Parent
+ # sending message. Parent does not yet know B has disconnected, A
+ # receives message from Parent with Parent's auth_id, for a stream that
+ # no longer exists.
+ c1 = self.router.local()
+ strm = self.router.stream_by_id(c1.context_id)
+ recv = mitogen.core.Receiver(self.router)
+
+ self.broker.defer(lambda:
+ strm.protocol._send(
+ mitogen.core.Message(
+ dst_id=1234, # nonexistent child
+ handle=1234,
+ src_id=mitogen.context_id,
+ reply_to=recv.handle,
+ )
+ )
+ )
+
+ e = self.assertRaises(mitogen.core.ChannelError,
+ lambda: recv.get().unpickle()
+ )
+ self.assertEquals(e.args[0], self.router.no_route_msg % (
+ 1234,
+ c1.context_id,
+ ))
+
 def test_bad_src_id(self):
 # Deliver a message locally from child2 with the correct auth_id, but
 # the wrong src_id.

From 01a1914a1f4ff4347d9776e09eeed579cf394399 Mon Sep 17 00:00:00 2001
From: David Wilson
Date: Sat, 17 Aug 2019 15:06:41 +0100
Subject: [PATCH 363/383] docs: tweaks to better explain changelog race

---
 docs/changelog.rst | 15 ++++++++-------
 docs/howitworks.rst | 8 +++++---
 2 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/docs/changelog.rst b/docs/changelog.rst
index 998821e2..2deed36b 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -199,16 +199,17 @@ Core Library
 descriptors belonging to unrelated streams.

 * `#533 `_: routing accounts for
- a race between a parent sending a message to a child via an intermediary,
- where the child had recently disconnected, and ``DEL_ROUTE`` propagating from
- the intermediary to the parent, informing it that the child no longer exists.
- This condition is detected at the intermediary and a dead message is returned
- to the parent.
+ a race between a parent (or cousin) sending a message to a child via an
+ intermediary, where the child had recently disconnected, and
+ :data:`DEL_ROUTE ` propagating from the intermediary
+ to the sender, informing it that the child no longer exists. This condition
+ is detected at the intermediary and a dead message is returned to the sender.

 Previously since the intermediary had already removed its route for the
 child, the *route messages upwards* rule would be triggered, causing the
- message (with a privileged ``src_id``/``auth_id``) to be sent upstream,
- resulting in a ``bad auth_id`` log message and a hang.
+ message (with a privileged :ref:`src_id/auth_id `) to be
+ sent upstream, resulting in a ``bad auth_id`` error logged at the first
+ upstream parent, and a possible hang due to a request message being dropped.
* `#586 `_: fix import of :mod:`__main__` on later versions of Python 3 when running from the diff --git a/docs/howitworks.rst b/docs/howitworks.rst index 20c4f948..05c097e5 100644 --- a/docs/howitworks.rst +++ b/docs/howitworks.rst @@ -434,8 +434,9 @@ also listen on the following handles: Receives `target_id` integer from downstream, verifies a route exists to `target_id` via the stream on which the message was received, removes that - route from its local table, then propagates the message upward towards its - own parent. + route from its local table, triggers the ``disconnect`` signal on any + :class:`mitogen.core.Context` instance in the local process, then + propagates the message upward towards its own parent. .. currentmodule:: mitogen.core .. data:: DETACHING @@ -629,7 +630,8 @@ The `auth_id` field is separate from `src_id` in order to support granting privilege to contexts that do not follow the tree's natural trust chain. This supports cases where siblings are permitted to execute code on one another, or where isolated processes can connect to a listener and communicate with an -already established established tree. +already established established tree, such as where a :mod:`mitogen.unix` +client receives the same privilege as the process it connects to. Differences Between Master And Child Brokers From 6b180a4091f9155d8a653d48479edc5fd5f6e633 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 15:26:16 +0100 Subject: [PATCH 364/383] docs: link IS_DEAD in changelog --- docs/changelog.rst | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index 2deed36b..72888f01 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -203,7 +203,8 @@ Core Library intermediary, where the child had recently disconnected, and :data:`DEL_ROUTE ` propagating from the intermediary to the sender, informing it that the child no longer exists. This condition - is detected at the intermediary and a dead message is returned to the sender. + is detected at the intermediary and a :ref:`dead message ` is + returned to the sender. Previously since the intermediary had already removed its route for the child, the *route messages upwards* rule would be triggered, causing the @@ -224,10 +225,10 @@ Core Library * `#615 `_: when routing fails to deliver a message for some reason other than the sender cannot or should not reach the recipient, and no reply-to address is present on the message, - instead send a dead message to the original recipient. This ensures a - descriptive messages is delivered to a thread sleeping on the reply to a - function call, where the reply might be dropped due to exceeding the maximum - configured message size. + instead send a :ref:`dead message ` to the original recipient. This + ensures a descriptive messages is delivered to a thread sleeping on the reply + to a function call, where the reply might be dropped due to exceeding the + maximum configured message size. 
* `a5536c35 `_: avoid quadratic buffer management when logging lines received from a child's redirected From d75c9cffc39afd0bf3b80d542ba4a9a5182302a3 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 18:25:55 +0100 Subject: [PATCH 365/383] docs: add domainrefs plugin to make link aliases everywhere \o/ PATENT PENDING --- docs/ansible_detailed.rst | 122 +++----- docs/changelog.rst | 633 ++++++++++++++++++-------------------- docs/conf.py | 46 ++- docs/domainrefs.py | 41 +++ docs/index.rst | 6 +- mitogen/fork.py | 4 +- 6 files changed, 439 insertions(+), 413 deletions(-) create mode 100644 docs/domainrefs.py diff --git a/docs/ansible_detailed.rst b/docs/ansible_detailed.rst index fba7a86a..65f68efb 100644 --- a/docs/ansible_detailed.rst +++ b/docs/ansible_detailed.rst @@ -175,16 +175,10 @@ Noteworthy Differences your_ssh_username = (ALL) NOPASSWD:/usr/bin/python -c* -* The `buildah `_, - `docker `_, - `jail `_, - `kubectl `_, - `local `_, - `lxc `_, - `lxd `_, - and `ssh `_ - built-in connection types are supported, along with Mitogen-specific - :ref:`machinectl `, :ref:`mitogen_doas `, +* The :ans:conn:`~buildah`, :ans:conn:`~docker`, :ans:conn:`~jail`, + :ans:conn:`~kubectl`, :ans:conn:`~local`, :ans:conn:`~lxd`, and + :ans:conn:`~ssh` built-in connection types are supported, along with + Mitogen-specific :ref:`machinectl `, :ref:`mitogen_doas `, :ref:`mitogen_su `, :ref:`mitogen_sudo `, and :ref:`setns ` types. File bugs to register interest in others. @@ -199,16 +193,14 @@ Noteworthy Differences artificial serialization, causing slowdown equivalent to `task_duration * num_targets`. This will be addressed soon. -* The Ansible 2.7 `reboot - `_ module - may require a ``pre_reboot_delay`` on systemd hosts, as insufficient time - exists for the reboot command's exit status to be reported before necessary - processes are torn down. +* The Ansible 2.7 :ans:mod:`reboot` may require a ``pre_reboot_delay`` on + systemd hosts, as insufficient time exists for the reboot command's exit + status to be reported before necessary processes are torn down. * On OS X when a SSH password is specified and the default connection type of - ``smart`` is used, Ansible may select the Paramiko plug-in rather than - Mitogen. If you specify a password on OS X, ensure ``connection: ssh`` - appears in your playbook, ``ansible.cfg``, or as ``-c ssh`` on the + :ans:conn:`~smart` is used, Ansible may select the :ans:conn:`paramiko_ssh` + rather than Mitogen. If you specify a password on OS X, ensure ``connection: + ssh`` appears in your playbook, ``ansible.cfg``, or as ``-c ssh`` on the command-line. * Ansible permits up to ``forks`` connections to be setup in parallel, whereas @@ -345,19 +337,12 @@ command line, or as host and group variables. File Transfer ~~~~~~~~~~~~~ -Normally `sftp(1)`_ or `scp(1)`_ are used to copy files by the -`assemble `_, -`copy `_, -`patch `_, -`script `_, -`template `_, and -`unarchive `_ -actions, or when uploading modules with pipelining disabled. With Mitogen -copies are implemented natively using the same interpreters, connection tree, -and routed message bus that carries RPCs. - -.. _scp(1): https://linux.die.net/man/1/scp -.. _sftp(1): https://linux.die.net/man/1/sftp +Normally :linux:man1:`sftp` or :linux:man1:`scp` are used to copy files by the +:ans:mod:`~assemble`, :ans:mod:`~aws_s3`, :ans:mod:`~copy`, :ans:mod:`~patch`, +:ans:mod:`~script`, :ans:mod:`~template`, :ans:mod:`~unarchive`, and +:ans:mod:`~uri` actions, or when uploading modules with pipelining disabled. 
+With Mitogen copies are implemented natively using the same interpreters, +connection tree, and routed message bus that carries RPCs. This permits direct streaming between endpoints regardless of execution environment, without necessitating temporary copies in intermediary accounts or @@ -373,15 +358,15 @@ Safety ^^^^^^ Transfers proceed to a hidden file in the destination directory, with content -and metadata synced using `fsync(2) `_ prior -to rename over any existing file. This ensures the file remains consistent at -all times, in the event of a crash, or when overlapping `ansible-playbook` runs -deploy differing file contents. +and metadata synced using :linux:man2:`fsync` prior to rename over any existing +file. This ensures the file remains consistent at all times, in the event of a +crash, or when overlapping `ansible-playbook` runs deploy differing file +contents. -The `sftp(1)`_ and `scp(1)`_ tools may cause undetected data corruption -in the form of truncated files, or files containing intermingled data segments -from overlapping runs. As part of normal operation, both tools expose a window -where readers may observe inconsistent file contents. +The :linux:man1:`sftp` and :linux:man1:`scp` tools may cause undetected data +corruption in the form of truncated files, or files containing intermingled +data segments from overlapping runs. As part of normal operation, both tools +expose a window where readers may observe inconsistent file contents. Performance @@ -499,11 +484,11 @@ Ansible may: * Create a directory owned by the SSH user either under ``remote_tmp``, or a system-default directory, * Upload action dependencies such as non-new style modules or rendered - templates to that directory via `sftp(1)`_ or `scp(1)`_. + templates to that directory via :linux:man1:`sftp` or :linux:man1:`scp`. * Attempt to modify the directory's access control list to grant access to the - target user using `setfacl(1) `_, - requiring that tool to be installed and a supported filesystem to be in use, - or for the ``allow_world_readable_tmpfiles`` setting to be :data:`True`. + target user using :linux:man1:`setfacl`, requiring that tool to be installed + and a supported filesystem to be in use, or for the + ``allow_world_readable_tmpfiles`` setting to be :data:`True`. * Create a directory owned by the target user either under ``remote_tmp``, or a system-default directory, if a new-style module needs a temporary directory and one was not previously created for a supporting file earlier in the @@ -569,9 +554,9 @@ in regular Ansible: operations relating to modifying the directory to support cross-account access are avoided. -* An explicit work-around is included to avoid the `copy` and `template` - actions needlessly triggering a round-trip to set their temporary file as - executable. +* An explicit work-around is included to avoid the :ans:mod:`~copy` and + :ans:mod:`~template` actions needlessly triggering a round-trip to set their + temporary file as executable. * During task shutdown, it is not necessary to wait to learn if the target has succeeded in deleting a temporary directory, since any error that may occur @@ -601,10 +586,10 @@ DNS Resolution ^^^^^^^^^^^^^^ Modifications to ``/etc/resolv.conf`` cause the glibc resolver configuration to -be reloaded via `res_init(3) `_. This -isn't necessary on some Linux distributions carrying glibc patches to -automatically check ``/etc/resolv.conf`` periodically, however it is necessary -on at least Debian and BSD derivatives. 
+be reloaded via :linux:man3:`res_init`. This isn't necessary on some Linux +distributions carrying glibc patches to automatically check +``/etc/resolv.conf`` periodically, however it is necessary on at least Debian +and BSD derivatives. ``/etc/environment`` @@ -728,9 +713,7 @@ configuration of each task. Buildah ~~~~~~~ -Like `buildah -`_ except -connection delegation is supported. +Like the :ans:conn:`buildah` except connection delegation is supported. * ``ansible_host``: Name of Buildah container (default: inventory hostname). * ``ansible_user``: Name of user within the container to execute as. @@ -771,9 +754,7 @@ When used as the ``mitogen_doas`` connection method: Docker ~~~~~~ -Like `docker -`_ except -connection delegation is supported. +Like the :ans:conn:`docker` except connection delegation is supported. * ``ansible_host``: Name of Docker container (default: inventory hostname). * ``ansible_user``: Name of user within the container to execute as. @@ -789,9 +770,7 @@ connection delegation is supported. FreeBSD Jail ~~~~~~~~~~~~ -Like `jail -`_ except -connection delegation is supported. +Like the :ans:conn:`jail` except connection delegation is supported. * ``ansible_host``: Name of jail (default: inventory hostname). * ``ansible_user``: Name of user within the jail to execute as. @@ -807,9 +786,7 @@ connection delegation is supported. Kubernetes Pod ~~~~~~~~~~~~~~ -Like `kubectl -`_ except -connection delegation is supported. +Like the :ans:conn:`kubectl` except connection delegation is supported. * ``ansible_host``: Name of pod (default: inventory hostname). * ``ansible_user``: Name of user to authenticate to API as. @@ -823,9 +800,7 @@ connection delegation is supported. Local ~~~~~ -Like `local -`_ except -connection delegation is supported. +Like the :ans:conn:`local` except connection delegation is supported. * ``ansible_python_interpreter`` @@ -852,10 +827,9 @@ additional differences exist that may break existing playbooks. LXC ~~~ -Connect to classic LXC containers, like `lxc -`_ except -connection delegation is supported, and ``lxc-attach`` is always used rather -than the LXC Python bindings, as is usual with ``lxc``. +Connect to classic LXC containers, like the :ans:conn:`lxc` except connection +delegation is supported, and ``lxc-attach`` is always used rather than the LXC +Python bindings, as is usual with ``lxc``. * ``ansible_python_interpreter`` * ``ansible_host``: Name of LXC container (default: inventory hostname). @@ -873,10 +847,9 @@ than the LXC Python bindings, as is usual with ``lxc``. LXD ~~~ -Connect to modern LXD containers, like `lxd -`_ except -connection delegation is supported. The ``lxc`` command must be available on -the host machine. +Connect to modern LXD containers, like the :ans:conn:`lxd` except connection +delegation is supported. The ``lxc`` command must be available on the host +machine. * ``ansible_python_interpreter`` * ``ansible_host``: Name of LXC container (default: inventory hostname). @@ -1001,8 +974,7 @@ When used as the ``mitogen_sudo`` connection method: SSH ~~~ -Like `ssh `_ -except connection delegation is supported. +Like the :ans:conn:`ssh` except connection delegation is supported. 
* ``ansible_ssh_timeout`` * ``ansible_host``, ``ansible_ssh_host`` diff --git a/docs/changelog.rst b/docs/changelog.rst index 72888f01..525543b9 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -24,35 +24,32 @@ To avail of fixes in an unreleased version, please download a ZIP file Enhancements ~~~~~~~~~~~~ -* `#556 `_, - `#587 `_: Ansible 2.8 is partially +* :gh:issue:`556`, + :gh:issue:`587`: Ansible 2.8 is partially supported. `Become plugins `_ and `interpreter discovery `_ are not yet handled. -* `#419 `_, - `#470 `_, file descriptor usage - during large runs is halved, as it is no longer necessary to manage read and - write sides distinctly in order to work around a design problem. +* :gh:issue:`419`, :gh:issue:`470`, file descriptor usage during large runs is + halved, as it is no longer necessary to manage read and write sides + distinctly in order to work around a design problem. -* `#419 `_: almost all connection - setup happens on one thread, reducing contention and context switching early - in a run. +* :gh:issue:`419`: almost all connection setup happens on one thread, reducing + contention and context switching early in a run. -* `#419 `_: Connection setup is - better pipelined, eliminating some network round-trips. Most infrastructure - is in place to support future removal of the final round-trips between a - target fully booting and receiving function calls. +* :gh:issue:`419`: Connection setup is better pipelined, eliminating some + network round-trips. Most infrastructure is in place to support future + removal of the final round-trips between a target fully booting and receiving + function calls. -* `#595 `_: the - :meth:`Router.buildah() ` connection method is - available to manipulate `Buildah `_ containers, and is - exposed to Ansible as the ``buildah`` transport. +* :gh:pull:`595`: the :meth:`~mitogen.parent.Router.buildah` connection method + is available to manipulate `Buildah `_ containers, and + is exposed to Ansible as the :ans:conn:`buildah`. -* `#615 `_: the ``mitogen_fetch`` - action is included, and the standard Ansible ``fetch`` action is redirected +* :gh:issue:`615`: the ``mitogen_fetch`` + action is included, and the standard Ansible :ans:mod:`fetch` is redirected to it. This implements streaming file transfer in every case, including when ``become`` is active, preventing excessive CPU usage and memory spikes, and significantly improving throughput. A copy of 2 files of 512 MiB each drops @@ -67,10 +64,10 @@ Enhancements involving large file transfers, and is required for future in-process SSH support. One multiplexer starts by default, to match existing behaviour. -* `d6faff06 `_, - `807cbef9 `_, - `e93762b3 `_, - `50bfe4c7 `_: locking is +* :gh:commit:`d6faff06`, + :gh:commit:`807cbef9`, + :gh:commit:`e93762b3`, + :gh:commit:`50bfe4c7`: locking is avoided on hot paths, and some locks are released earlier, before waking a thread that must immediately take the same lock. @@ -78,20 +75,18 @@ Enhancements Mitogen for Ansible ~~~~~~~~~~~~~~~~~~~ -* `#363 `_: fix an obscure race - matching *Permission denied* errors from some versions of ``su`` running on - heavily loaded machines. +* :gh:issue:`363`: fix an obscure race matching *Permission denied* errors from + some versions of ``su`` running on heavily loaded machines. 
-* `#410 `_: Use of ``AF_UNIX`` - sockets automatically replaced with plain UNIX pipes when SELinux is - detected, to work around a broken heuristic in popular SELinux policies that - prevents inheriting ``AF_UNIX`` sockets across privilege domains. +* :gh:issue:`410`: Uses of :linux:man7:`unix` sockets are replaced with pairs + of plain UNIX pipes when SELinux is detected, to work around a broken + heuristic in popular SELinux policies that prevents inheriting + :linux:man7:`unix` sockets across privilege domains. * `#467 `_: an incompatibility running Mitogen under Molecule was resolved. -* `#547 `_, - `#598 `_: fix a serious deadlock +* :gh:issue:`547`, :gh:issue:`598`: fix a serious deadlock possible while initializing the service pool of any child, such as during connection, ``async`` tasks, tasks using custom :mod:`module_utils`, ``mitogen_task_isolation: fork`` modules, and those present on an internal @@ -102,60 +97,57 @@ Mitogen for Ansible a *Connection timed out* error, for forked tasks it could manifest as a timeout or an apparent hang. -* `#549 `_: the open file descriptor - limit for the Ansible process is increased to the available hard limit. It is - common for distributions to ship with a much higher hard limit than their - default soft limit, allowing *"too many open files"* errors to be avoided - more often in large runs without user configuration. - -* `#558 `_, - `#582 `_: on Ansible 2.3 a remote - directory was unconditionally deleted after the first module belonging to an - action plug-in had executed, causing the ``unarchive`` module to fail. - -* `#578 `_: the extension could crash - while rendering an error message, due to an incorrect format string. - -* `#590 `_: the importer can handle - modules that replace themselves in :mod:`sys.modules` during import. - -* `#591 `_: the target's current - working directory is restored to a known-existent directory between tasks to - ensure :func:`os.getcwd` will not fail when called, in the same way that - :class:`AnsibleModule` restores it during initialization. However this - restore happens before the module ever executes, ensuring any code that calls - :func:`os.getcwd` prior to :class:`AnsibleModule` initialization, such as the - Ansible 2.7 ``pip`` module, cannot fail due to the behavior of a prior task. - -* `#593 `_: the SSH connection method - exposes ``mitogen_ssh_keepalive_interval`` and - ``mitogen_ssh_keepalive_count`` variables, and the default timeout for an SSH - server has been increased from `15*3` seconds to `30*10` seconds. - -* `#600 `_: functionality to reflect - changes to ``/etc/environment`` did not account for Unicode file contents. - The file may now use any single byte encoding. - -* `#602 `_: connection configuration - is more accurately inferred for `meta: reset_connection`, the `synchronize` - module, and for any action plug-ins that establish additional connections. - -* `#598 `_, - `#605 `_: fix a deadlock managing a - shared counter used for load balancing. - -* `#615 `_: streaming file transfer - is implemented for ``fetch`` and other actions that transfer files from the - target to the controller. Previously the file was sent in one message, - requiring it to fit in RAM and be smaller than the internal message size - limit. - -* `7ae926b3 `_: the - ``lineinfile`` module began leaking writable temporary file descriptors since - Ansible 2.7.0. 
When ``lineinfile`` was used to create or modify a script, and - that script was later executed, the execution could fail with "*text file - busy*" due to the leaked descriptor. Temporary descriptors are now tracked - and cleaned up on exit for all modules. +* :gh:issue:`549`: the open file descriptor limit for the Ansible process is + increased to the available hard limit. It is common for distributions to ship + with a much higher hard limit than their default soft limit, allowing *"too + many open files"* errors to be avoided more often in large runs without user + configuration. + +* :gh:issue:`558`, :gh:issue:`582`: on Ansible 2.3 a remote directory was + unconditionally deleted after the first module belonging to an action plug-in + had executed, causing the :ans:mod:`unarchive` module to fail. + +* :gh:issue:`578`: the extension could crash while rendering an error message, + due to an incorrect format string. + +* :gh:issue:`590`: the importer can handle modules that replace themselves in + :mod:`sys.modules` during import. + +* :gh:issue:`591`: the target's current working directory is restored to a + known-existent directory between tasks to ensure :func:`os.getcwd` will not + fail when called, in the same way that :class:`AnsibleModule` restores it + during initialization. However this restore happens before the module ever + executes, ensuring any code that calls :func:`os.getcwd` prior to + :class:`AnsibleModule` initialization, such as the Ansible 2.7 + :ans:mod:`pip`, cannot fail due to the behavior of a prior task. + +* :gh:issue:`593`: the SSH connection method exposes + ``mitogen_ssh_keepalive_interval`` and ``mitogen_ssh_keepalive_count`` + variables, and the default timeout for an SSH server has been increased from + `15*3` seconds to `30*10` seconds. + +* :gh:issue:`600`: functionality to reflect changes to ``/etc/environment`` did + not account for Unicode file contents. The file may now use any single byte + encoding. + +* :gh:issue:`602`: connection configuration is more accurately inferred for + `meta: reset_connection`, the `synchronize` module, and for any action + plug-ins that establish additional connections. + +* :gh:issue:`598`, :gh:issue:`605`: fix a deadlock managing a shared counter + used for load balancing. + +* :gh:issue:`615`: streaming file transfer is implemented for ``fetch`` and + other actions that transfer files from the target to the controller. + Previously the file was sent in one message, requiring it to fit in RAM and + be smaller than the internal message size limit. + +* :gh:commit:`7ae926b3`: the Ansible :ans:mod:`lineinfile` began leaking + writable temporary file descriptors since Ansible 2.7.0. When + :ans:mod:`~lineinfile` was used to create or modify a script, and that script + was later executed, the execution could fail with "*text file busy*" due to + the leaked descriptor. Temporary descriptors are now tracked and cleaned up + on exit for all modules. Core Library @@ -170,38 +162,38 @@ Core Library It was never portable between Python versions, unused, and never made much sense to support. -* `#170 `_: to improve subprocess - management and asynchronous connect, a :class:`mitogen.parent.TimerList` +* :gh:issue:`170`: to improve subprocess + management and asynchronous connect, a :class:`~mitogen.parent.TimerList` interface is available, accessible as :attr:`Broker.timers` in an asynchronous context. 
-* `#419 `_: the internal - :class:`mitogen.core.Stream` has been refactored into 7 new classes, +* :gh:issue:`419`: the internal + :class:`~mitogen.core.Stream` has been refactored into 7 new classes, modularizing protocol behaviour, output buffering, line-oriented input parsing, option handling and connection management. Connection setup is internally asynchronous, laying almost all the groundwork needed for fully asynchronous connect, proxied Ansible become plug-ins, and integrating `libssh `_. -* `#169 `_, - `#419 `_: zombie child reaping has - vastly improved, by using timers to efficiently poll for a slow child to - finish exiting. Polling avoids relying on process-global configuration such - as a `SIGCHLD` handler, or :func:`signal.set_wakeup_fd` available in modern - Python. +* :gh:issue:`169`, + :gh:issue:`419`: zombie subprocess reaping + has vastly improved, by using timers to efficiently poll for a slow child to + finish exiting, and delaying broker shutdown while any subprocess remains. + Polling avoids relying on process-global configuration such as a `SIGCHLD` + handler, or :func:`signal.set_wakeup_fd` available in modern Python. -* `#256 `_, - `#419 `_: most :func:`os.dup` use +* :gh:issue:`256`, + :gh:issue:`419`: most :func:`os.dup` use was eliminated, along with almost all manual file descriptor management. Descriptors are trapped in :func:`os.fdopen` objects at creation, ensuring a leaked object will close itself, and ensuring every descriptor is fused to a `closed` flag, preventing historical bugs where a double close could destroy descriptors belonging to unrelated streams. -* `#533 `_: routing accounts for +* :gh:issue:`533`: routing accounts for a race between a parent (or cousin) sending a message to a child via an intermediary, where the child had recently disconnected, and - :data:`DEL_ROUTE ` propagating from the intermediary + :data:`~mitogen.core.DEL_ROUTE` propagating from the intermediary to the sender, informing it that the child no longer exists. This condition is detected at the intermediary and a :ref:`dead message ` is returned to the sender. @@ -212,17 +204,17 @@ Core Library sent upstream, resulting in a ``bad auth_id`` error logged at the first upstream parent, and a possible hang due to a request message being dropped. -* `#586 `_: fix import of +* :gh:issue:`586`: fix import of :mod:`__main__` on later versions of Python 3 when running from the interactive console. -* `#606 `_: fix example code on the +* :gh:issue:`606`: fix example code on the documentation front page. -* `#612 `_: fix various errors +* :gh:issue:`612`: fix various errors introduced by stream refactoring. -* `#615 `_: when routing fails to +* :gh:issue:`615`: when routing fails to deliver a message for some reason other than the sender cannot or should not reach the recipient, and no reply-to address is present on the message, instead send a :ref:`dead message ` to the original recipient. This @@ -230,22 +222,22 @@ Core Library to a function call, where the reply might be dropped due to exceeding the maximum configured message size. -* `a5536c35 `_: avoid quadratic +* :gh:commit:`a5536c35`: avoid quadratic buffer management when logging lines received from a child's redirected standard IO. -* `49a6446a `_: the - :meth:`empty` method of :class:`mitogen.core.Latch`, - :class:`mitogen.core.Receiver` and :class:`mitogen.select.Select` has been - replaced by a more general :meth:`size` method. 
:meth:`empty` will be removed - in 0.3 +* :gh:commit:`49a6446a`: the + :meth:`empty` methods of :class:`~mitogen.core.Latch`, + :class:`~mitogen.core.Receiver` and :class:`~mitogen.select.Select` are + obsoleted by a more general :meth:`size` method. :meth:`empty` will be + removed in 0.3 -* `ecc570cb `_: previously +* :gh:commit:`ecc570cb`: previously :meth:`mitogen.select.Select.add` would enqueue one wake event when adding an existing receiver, latch or subselect that contained multiple buffered items, causing :meth:`get` calls to block or fail even though data existed to return. -* `5924af15 `_: *[security]* +* :gh:commit:`5924af15`: *[security]* unidirectional routing, where contexts may optionally only communicate with parents and never siblings (so that air-gapped networks cannot be unintentionally bridged) was not inherited when a child was initiated @@ -296,27 +288,27 @@ on Ansible 2.8, which is not yet supported. Fixes ~~~~~ -* `#557 `_: fix a crash when running +* :gh:issue:`557`: fix a crash when running on machines with high CPU counts. -* `#570 `_: the ``firewalld`` module - internally caches a dbus name that changes across ``firewalld`` restarts, - causing a failure if the service is restarted between ``firewalld`` module invocations. +* :gh:issue:`570`: the :ans:mod:`firewalld` internally caches a dbus name that + changes across :ans:mod:`~firewalld` restarts, causing a failure if the + service is restarted between :ans:mod:`~firewalld` module invocations. -* `#575 `_: fix a crash when +* :gh:issue:`575`: fix a crash when rendering an error message to indicate no usable temporary directories could be found. -* `#576 `_: fix a crash during +* :gh:issue:`576`: fix a crash during startup on SuSE Linux 11, due to an incorrect version compatibility check in the Mitogen code. -* `#581 `_: a +* :gh:issue:`581`: a ``mitogen_mask_remote_name`` Ansible variable is exposed, to allow masking the username, hostname and process ID of ``ansible-playbook`` running on the controller machine. -* `#587 `_: display a friendly +* :gh:issue:`587`: display a friendly message when running on an unsupported version of Ansible, to cope with potential influx of 2.8-related bug reports. @@ -338,53 +330,53 @@ v0.2.6 (2019-03-06) Fixes ~~~~~ -* `#542 `_: some versions of OS X +* :gh:issue:`542`: some versions of OS X ship a default Python that does not support :func:`select.poll`. Restore the 0.2.3 behaviour of defaulting to Kqueue in this case, but still prefer :func:`select.poll` if it is available. -* `#545 `_: an optimization - introduced in `#493 `_ caused a +* :gh:issue:`545`: an optimization + introduced in :gh:issue:`493` caused a 64-bit integer to be assigned to a 32-bit field on ARM 32-bit targets, causing runs to fail. -* `#548 `_: `mitogen_via=` could fail +* :gh:issue:`548`: `mitogen_via=` could fail when the selected transport was set to ``smart``. -* `#550 `_: avoid some broken +* :gh:issue:`550`: avoid some broken TTY-related `ioctl()` calls on Windows Subsystem for Linux 2016 Anniversary Update. -* `#554 `_: third party Ansible +* :gh:issue:`554`: third party Ansible action plug-ins that invoked :func:`_make_tmp_path` repeatedly could trigger an assertion failure. -* `#555 `_: work around an old idiom +* :gh:issue:`555`: work around an old idiom that reloaded :mod:`sys` in order to change the interpreter's default encoding. -* `ffae0355 `_: needless +* :gh:commit:`ffae0355`: needless information was removed from the documentation and installation procedure. 
Core Library ~~~~~~~~~~~~ -* `#535 `_: to support function calls +* :gh:issue:`535`: to support function calls on a service pool from another thread, :class:`mitogen.select.Select` additionally permits waiting on :class:`mitogen.core.Latch`. -* `#535 `_: +* :gh:issue:`535`: :class:`mitogen.service.Pool.defer` allows any function to be enqueued for the thread pool from another thread. -* `#535 `_: a new +* :gh:issue:`535`: a new :mod:`mitogen.os_fork` module provides a :func:`os.fork` wrapper that pauses thread activity during fork. On Python<2.6, :class:`mitogen.core.Broker` and :class:`mitogen.service.Pool` automatically record their existence so that a :func:`os.fork` monkey-patch can automatically pause them for any attempt to start a subprocess. -* `ca63c26e `_: +* :gh:commit:`ca63c26e`: :meth:`mitogen.core.Latch.put`'s `obj` argument was made optional. @@ -409,47 +401,47 @@ v0.2.5 (2019-02-14) Fixes ~~~~~ -* `#511 `_, - `#536 `_: changes in 0.2.4 to +* :gh:issue:`511`, + :gh:issue:`536`: changes in 0.2.4 to repair ``delegate_to`` handling broke default ``ansible_python_interpreter`` handling. Test coverage was added. -* `#532 `_: fix a race in the service +* :gh:issue:`532`: fix a race in the service used to propagate Ansible modules, that could easily manifest when starting asynchronous tasks in a loop. -* `#536 `_: changes in 0.2.4 to +* :gh:issue:`536`: changes in 0.2.4 to support Python 2.4 interacted poorly with modules that imported ``simplejson`` from a controller that also loaded an incompatible newer version of ``simplejson``. -* `#537 `_: a swapped operator in the +* :gh:issue:`537`: a swapped operator in the CPU affinity logic meant 2 cores were reserved on 1`_: the source distribution +* :gh:issue:`538`: the source distribution includes a ``LICENSE`` file. -* `#539 `_: log output is no longer +* :gh:issue:`539`: log output is no longer duplicated when the Ansible ``log_path`` setting is enabled. -* `#540 `_: the ``stderr`` stream of +* :gh:issue:`540`: the ``stderr`` stream of async module invocations was previously discarded. -* `#541 `_: Python error logs +* :gh:issue:`541`: Python error logs originating from the ``boto`` package are quiesced, and only appear in ``-vvv`` output. This is since EC2 modules may trigger errors during normal operation, when retrying transiently failing requests. -* `748f5f67 `_, - `21ad299d `_, - `8ae6ca1d `_, - `7fd0d349 `_: +* :gh:commit:`748f5f67`, + :gh:commit:`21ad299d`, + :gh:commit:`8ae6ca1d`, + :gh:commit:`7fd0d349`: the ``ansible_ssh_host``, ``ansible_ssh_user``, ``ansible_user``, ``ansible_become_method``, and ``ansible_ssh_port`` variables more correctly match typical behaviour when ``mitogen_via=`` is active. -* `2a8567b4 `_: fix a race +* :gh:commit:`2a8567b4`: fix a race initializing a child's service thread pool on Python 3.4+, due to a change in locking scheme used by the Python import mechanism. @@ -477,57 +469,54 @@ on the connection multiplexer. Enhancements ^^^^^^^^^^^^ -* `#76 `_, - `#351 `_, - `#352 `_: disconnect propagation +* :gh:issue:`76`, + :gh:issue:`351`, + :gh:issue:`352`: disconnect propagation has improved, allowing Ansible to cancel waits for responses from abruptly disconnected targets. This ensures a task will reliably fail rather than hang, for example on network failure or EC2 instance maintenance. 
-* `#369 `_, - `#407 `_: :meth:`Connection.reset` - is implemented, allowing `meta: reset_connection - `_ to shut +* :gh:issue:`369`, + :gh:issue:`407`: :meth:`Connection.reset` + is implemented, allowing :ans:mod:`meta: reset_connection ` to shut down the remote interpreter as documented, and improving support for the - `reboot - `_ - module. + :ans:mod:`reboot`. -* `09aa27a6 `_: the +* :gh:commit:`09aa27a6`: the ``mitogen_host_pinned`` strategy wraps the ``host_pinned`` strategy introduced in Ansible 2.7. -* `#477 `_: Python 2.4 is fully +* :gh:issue:`477`: Python 2.4 is fully supported by the core library and tested automatically, in any parent/child combination of 2.4, 2.6, 2.7 and 3.6 interpreters. -* `#477 `_: Ansible 2.3 is fully +* :gh:issue:`477`: Ansible 2.3 is fully supported and tested automatically. In combination with the core library Python 2.4 support, this allows Red Hat Enterprise Linux 5 targets to be managed with Mitogen. The ``simplejson`` package need not be installed on such targets, as is usually required by Ansible. -* `#412 `_: to simplify diagnosing +* :gh:issue:`412`: to simplify diagnosing connection configuration problems, Mitogen ships a ``mitogen_get_stack`` action that is automatically added to the action plug-in path. See :ref:`mitogen-get-stack` for more information. -* `152effc2 `_, - `bd4b04ae `_: a CPU affinity +* :gh:commit:`152effc2`, + :gh:commit:`bd4b04ae`: a CPU affinity policy was added for Linux controllers, reducing latency and SMP overhead on hot paths exercised for every task. This yielded a 19% speedup in a 64-target job composed of many short tasks, and should easily be visible as a runtime improvement in many-host runs. -* `2b44d598 `_: work around a +* :gh:commit:`2b44d598`: work around a defective caching mechanism by pre-heating it before spawning workers. This saves 40% runtime on a synthetic repetitive task. -* `0979422a `_: an expensive +* :gh:commit:`0979422a`: an expensive dependency scanning step was redundantly invoked for every task, bottlenecking the connection multiplexer. -* `eaa990a97 `_: a new +* :gh:commit:`eaa990a97`: a new ``mitogen_ssh_compression`` variable is supported, allowing Mitogen's default SSH compression to be disabled. SSH compression is a large contributor to CPU usage in many-target runs, and severely limits file transfer. On a `"shell: @@ -535,124 +524,115 @@ Enhancements task with compression, rising to 3 KiB without. File transfer throughput rises from ~25MiB/s when enabled to ~200MiB/s when disabled. -* `#260 `_, - `a18a083c `_: brokers no +* :gh:issue:`260`, + :gh:commit:`a18a083c`: brokers no longer wait for readiness indication to transmit, and instead assume transmission will succeed. As this is usually true, one loop iteration and two poller reconfigurations are avoided, yielding a significant reduction in interprocess round-trip latency. -* `#415 `_, - `#491 `_, - `#493 `_: the interface employed - for in-process queues changed from `kqueue - `_ / `epoll - `_ to `poll() - `_, which requires no setup - or teardown, yielding a 38% latency reduction for inter-thread communication. +* :gh:issue:`415`, :gh:issue:`491`, :gh:issue:`493`: the interface employed + for in-process queues changed from :freebsd:man2:`kqueue` / + :linux:man7:`epoll` to :linux:man2:`poll`, which requires no setup or + teardown, yielding a 38% latency reduction for inter-thread communication. 
Fixes ^^^^^ -* `#251 `_, - `#359 `_, - `#396 `_, - `#401 `_, - `#404 `_, - `#412 `_, - `#434 `_, - `#436 `_, - `#465 `_: connection delegation and +* :gh:issue:`251`, + :gh:issue:`359`, + :gh:issue:`396`, + :gh:issue:`401`, + :gh:issue:`404`, + :gh:issue:`412`, + :gh:issue:`434`, + :gh:issue:`436`, + :gh:issue:`465`: connection delegation and ``delegate_to:`` handling suffered a major regression in 0.2.3. The 0.2.2 behaviour has been restored, and further work has been made to improve the compatibility of connection delegation's configuration building methods. -* `#323 `_, - `#333 `_: work around a Windows +* :gh:issue:`323`, + :gh:issue:`333`: work around a Windows Subsystem for Linux bug that caused tracebacks to appear during shutdown. -* `#334 `_: the SSH method +* :gh:issue:`334`: the SSH method tilde-expands private key paths using Ansible's logic. Previously the path was passed unmodified to SSH, which expanded it using :func:`pwd.getpwnam`. This differs from :func:`os.path.expanduser`, which uses the ``HOME`` environment variable if it is set, causing behaviour to diverge when Ansible was invoked across user accounts via ``sudo``. -* `#364 `_: file transfers from +* :gh:issue:`364`: file transfers from controllers running Python 2.7.2 or earlier could be interrupted due to a forking bug in the :mod:`tempfile` module. -* `#370 `_: the Ansible - `reboot `_ - module is supported. +* :gh:issue:`370`: the Ansible :ans:mod:`reboot` is supported. -* `#373 `_: the LXC and LXD methods - print a useful hint on failure, as no useful error is normally logged to the - console by these tools. +* :gh:issue:`373`: the LXC and LXD methods print a useful hint on failure, as + no useful error is normally logged to the console by these tools. -* `#374 `_, - `#391 `_: file transfer and module +* :gh:issue:`374`, + :gh:issue:`391`: file transfer and module execution from 2.x controllers to 3.x targets was broken due to a regression - caused by refactoring, and compounded by `#426 - `_. + caused by refactoring, and compounded by :gh:issue:`426`. -* `#400 `_: work around a threading +* :gh:issue:`400`: work around a threading bug in the AWX display callback when running with high verbosity setting. -* `#409 `_: the setns method was +* :gh:issue:`409`: the setns method was silently broken due to missing tests. Basic coverage was added to prevent a recurrence. -* `#409 `_: the LXC and LXD methods +* :gh:issue:`409`: the LXC and LXD methods support ``mitogen_lxc_path`` and ``mitogen_lxc_attach_path`` variables to control the location of third pary utilities. -* `#410 `_: the sudo method supports +* :gh:issue:`410`: the sudo method supports the SELinux ``--type`` and ``--role`` options. -* `#420 `_: if a :class:`Connection` +* :gh:issue:`420`: if a :class:`Connection` was constructed in the Ansible top-level process, for example while executing ``meta: reset_connection``, resources could become undesirably shared in subsequent children. -* `#426 `_: an oversight while +* :gh:issue:`426`: an oversight while porting to Python 3 meant no automated 2->3 tests were running. A significant number of 2->3 bugs were fixed, mostly in the form of Unicode/bytes mismatches. -* `#429 `_: the ``sudo`` method can +* :gh:issue:`429`: the ``sudo`` method can now recognize internationalized password prompts. -* `#362 `_, - `#435 `_: the previous fix for slow +* :gh:issue:`362`, + :gh:issue:`435`: the previous fix for slow Python 2.x subprocess creation on Red Hat caused newly spawned children to have a reduced open files limit. 
A more intrusive fix has been added to directly address the problem without modifying the subprocess environment. -* `#397 `_, - `#454 `_: the previous approach to +* :gh:issue:`397`, + :gh:issue:`454`: the previous approach to handling modern Ansible temporary file cleanup was too aggressive, and could trigger early finalization of Cython-based extension modules, leading to segmentation faults. -* `#499 `_: the ``allow_same_user`` +* :gh:issue:`499`: the ``allow_same_user`` Ansible configuration setting is respected. -* `#527 `_: crashes in modules are +* :gh:issue:`527`: crashes in modules are trapped and reported in a manner that matches Ansible. In particular, a module crash no longer leads to an exception that may crash the corresponding action plug-in. -* `dc1d4251 `_: the - ``synchronize`` module could fail with the Docker transport due to a missing - attribute. +* :gh:commit:`dc1d4251`: the :ans:mod:`synchronize` could fail with the Docker + transport due to a missing attribute. -* `599da068 `_: fix a race +* :gh:commit:`599da068`: fix a race when starting async tasks, where it was possible for the controller to observe no status file on disk before the task had a chance to write one. -* `2c7af9f04 `_: Ansible +* :gh:commit:`2c7af9f04`: Ansible modules were repeatedly re-transferred. The bug was hidden by the previously mandatorily enabled SSH compression. @@ -660,7 +640,7 @@ Fixes Core Library ~~~~~~~~~~~~ -* `#76 `_: routing records the +* :gh:issue:`76`: routing records the destination context IDs ever received on each stream, and when disconnection occurs, propagates :data:`mitogen.core.DEL_ROUTE` messages towards every stream that ever communicated with the disappearing peer, rather than simply @@ -669,166 +649,166 @@ Core Library receivers to wake with :class:`mitogen.core.ChannelError`, even when one participant is not a parent of the other. -* `#109 `_, - `57504ba6 `_: newer Python 3 +* :gh:issue:`109`, + :gh:commit:`57504ba6`: newer Python 3 releases explicitly populate :data:`sys.meta_path` with importer internals, causing Mitogen to install itself at the end of the importer chain rather than the front. -* `#310 `_: support has returned for +* :gh:issue:`310`: support has returned for trying to figure out the real source of non-module objects installed in :data:`sys.modules`, so they can be imported. This is needed to handle syntax sugar used by packages like :mod:`plumbum`. -* `#349 `_: an incorrect format +* :gh:issue:`349`: an incorrect format string could cause large stack traces when attempting to import built-in modules on Python 3. -* `#387 `_, - `#413 `_: dead messages include an +* :gh:issue:`387`, + :gh:issue:`413`: dead messages include an optional reason in their body. This is used to cause :class:`mitogen.core.ChannelError` to report far more useful diagnostics at the point the error occurs that previously would have been buried in debug log output from an unrelated context. -* `#408 `_: a variety of fixes were +* :gh:issue:`408`: a variety of fixes were made to restore Python 2.4 compatibility. -* `#399 `_, - `#437 `_: ignore a +* :gh:issue:`399`, + :gh:issue:`437`: ignore a :class:`DeprecationWarning` to avoid failure of the ``su`` method on Python 3.7. -* `#405 `_: if an oversized message +* :gh:issue:`405`: if an oversized message is rejected, and it has a ``reply_to`` set, a dead message is returned to the sender. This ensures function calls exceeding the configured maximum size crash rather than hang. 
-* `#406 `_: +* :gh:issue:`406`: :class:`mitogen.core.Broker` did not call :meth:`mitogen.core.Poller.close` during shutdown, leaking the underlying poller FD in masters and parents. -* `#406 `_: connections could leak +* :gh:issue:`406`: connections could leak FDs when a child process failed to start. -* `#288 `_, - `#406 `_, - `#417 `_: connections could leave +* :gh:issue:`288`, + :gh:issue:`406`, + :gh:issue:`417`: connections could leave FD wrapper objects that had not been closed lying around to be closed during garbage collection, causing reused FD numbers to be closed at random moments. -* `#411 `_: the SSH method typed +* :gh:issue:`411`: the SSH method typed "``y``" rather than the requisite "``yes``" when `check_host_keys="accept"` was configured. This would lead to connection timeouts due to the hung response. -* `#414 `_, - `#425 `_: avoid deadlock of forked +* :gh:issue:`414`, + :gh:issue:`425`: avoid deadlock of forked children by reinitializing the :mod:`mitogen.service` pool lock. -* `#416 `_: around 1.4KiB of memory +* :gh:issue:`416`: around 1.4KiB of memory was leaked on every RPC, due to a list of strong references keeping alive any handler ever registered for disconnect notification. -* `#418 `_: the +* :gh:issue:`418`: the :func:`mitogen.parent.iter_read` helper would leak poller FDs, because execution of its :keyword:`finally` block was delayed on Python 3. Now callers explicitly close the generator when finished. -* `#422 `_: the fork method could +* :gh:issue:`422`: the fork method could fail to start if :data:`sys.stdout` was opened in block buffered mode, and buffered data was pending in the parent prior to fork. -* `#438 `_: a descriptive error is +* :gh:issue:`438`: a descriptive error is logged when stream corruption is detected. -* `#439 `_: descriptive errors are +* :gh:issue:`439`: descriptive errors are raised when attempting to invoke unsupported function types. -* `#444 `_: messages regarding +* :gh:issue:`444`: messages regarding unforwardable extension module are no longer logged as errors. -* `#445 `_: service pools unregister +* :gh:issue:`445`: service pools unregister the :data:`mitogen.core.CALL_SERVICE` handle at shutdown, ensuring any outstanding messages are either processed by the pool as it shuts down, or have dead messages sent in reply to them, preventing peer contexts from hanging due to a forgotten buffered message. -* `#446 `_: given thread A calling +* :gh:issue:`446`: given thread A calling :meth:`mitogen.core.Receiver.close`, and thread B, C, and D sleeping in :meth:`mitogen.core.Receiver.get`, previously only one sleeping thread would be woken with :class:`mitogen.core.ChannelError` when the receiver was closed. Now all threads are woken per the docstring. -* `#447 `_: duplicate attempts to +* :gh:issue:`447`: duplicate attempts to invoke :meth:`mitogen.core.Router.add_handler` cause an error to be raised, ensuring accidental re-registration of service pools are reported correctly. -* `#448 `_: the import hook +* :gh:issue:`448`: the import hook implementation now raises :class:`ModuleNotFoundError` instead of :class:`ImportError` in Python 3.6 and above, to cope with an upcoming version of the :mod:`subprocess` module requiring this new subclass to be raised. -* `#453 `_: the loggers used in +* :gh:issue:`453`: the loggers used in children for standard IO redirection have propagation disabled, preventing accidental reconfiguration of the :mod:`logging` package in a child from setting up a feedback loop. 
-* `#456 `_: a descriptive error is +* :gh:issue:`456`: a descriptive error is logged when :meth:`mitogen.core.Broker.defer` is called after the broker has shut down, preventing new messages being enqueued that will never be sent, and subsequently producing a program hang. -* `#459 `_: the beginnings of a +* :gh:issue:`459`: the beginnings of a :meth:`mitogen.master.Router.get_stats` call has been added. The initial statistics cover the module loader only. -* `#462 `_: Mitogen could fail to +* :gh:issue:`462`: Mitogen could fail to open a PTY on broken Linux systems due to a bad interaction between the glibc :func:`grantpt` function and an incorrectly mounted ``/dev/pts`` filesystem. Since correct group ownership is not required in most scenarios, when this problem is detected, the PTY is allocated and opened directly by the library. -* `#479 `_: Mitogen could fail to +* :gh:issue:`479`: Mitogen could fail to import :mod:`__main__` on Python 3.4 and newer due to a breaking change in the :mod:`pkgutil` API. The program's main script is now handled specially. -* `#481 `_: the version of `sudo` +* :gh:issue:`481`: the version of `sudo` that shipped with CentOS 5 replaced itself with the program to be executed, and therefore did not hold any child PTY open on our behalf. The child context is updated to preserve any PTY FD in order to avoid the kernel sending `SIGHUP` early during startup. -* `#523 `_: the test suite didn't +* :gh:issue:`523`: the test suite didn't generate a code coverage report if any test failed. -* `#524 `_: Python 3.6+ emitted a +* :gh:issue:`524`: Python 3.6+ emitted a :class:`DeprecationWarning` for :func:`mitogen.utils.run_with_router`. -* `#529 `_: Code coverage of the +* :gh:issue:`529`: Code coverage of the test suite was not measured across all Python versions. -* `16ca111e `_: handle OpenSSH +* :gh:commit:`16ca111e`: handle OpenSSH 7.5 permission denied prompts when ``~/.ssh/config`` rewrites are present. -* `9ec360c2 `_: a new +* :gh:commit:`9ec360c2`: a new :meth:`mitogen.core.Broker.defer_sync` utility function is provided. -* `f20e0bba `_: +* :gh:commit:`f20e0bba`: :meth:`mitogen.service.FileService.register_prefix` permits granting unprivileged access to whole filesystem subtrees, rather than single files at a time. -* `8f85ee03 `_: +* :gh:commit:`8f85ee03`: :meth:`mitogen.core.Router.myself` returns a :class:`mitogen.core.Context` referring to the current process. -* `824c7931 `_: exceptions +* :gh:commit:`824c7931`: exceptions raised by the import hook were updated to include probable reasons for a failure. -* `57b652ed `_: a stray import +* :gh:commit:`57b652ed`: a stray import meant an extra roundtrip and ~4KiB of data was wasted for any context that imported :mod:`mitogen.parent`. @@ -885,51 +865,40 @@ Mitogen for Ansible Enhancements ^^^^^^^^^^^^ -* `#315 `_, - `#392 `_: Ansible 2.6 and 2.7 are +* :gh:pull:`315`, + :gh:issue:`392`: Ansible 2.6 and 2.7 are supported. -* `#321 `_, - `#336 `_: temporary file handling - was simplified, undoing earlier damage caused by compatibility fixes, - improving 2.6 compatibility, and avoiding two network roundtrips for every - related action - (`assemble `_, - `aws_s3 `_, - `copy `_, - `patch `_, - `script `_, - `template `_, - `unarchive `_, - `uri `_). See - :ref:`ansible_tempfiles` for a complete description. - -* `#376 `_, - `#377 `_: the ``kubectl`` connection - type is now supported. Contributed by Yannig Perré. 
- -* `084c0ac0 `_: avoid a - roundtrip in - `copy `_ and - `template `_ - due to an unfortunate default. - -* `7458dfae `_: avoid a +* :gh:issue:`321`, :gh:issue:`336`: temporary file handling was simplified, + undoing earlier damage caused by compatibility fixes, improving 2.6 + compatibility, and avoiding two network roundtrips for every related action + (:ans:mod:`~assemble`, :ans:mod:`~aws_s3`, :ans:mod:`~copy`, + :ans:mod:`~patch`, :ans:mod:`~script`, :ans:mod:`~template`, + :ans:mod:`~unarchive`, :ans:mod:`~uri`). See :ref:`ansible_tempfiles` for a + complete description. + +* :gh:pull:`376`, :gh:pull:`377`: the ``kubectl`` connection type is now + supported. Contributed by Yannig Perré. + +* :gh:commit:`084c0ac0`: avoid a roundtrip in :ans:mod:`~copy` and + :ans:mod:`~template` due to an unfortunate default. + +* :gh:commit:`7458dfae`: avoid a roundtrip when transferring files smaller than 124KiB. Copy and template actions are now 2-RTT, reducing runtime for a 20-iteration template loop over a 250 ms link from 30 seconds to 10 seconds compared to v0.2.2, down from 120 seconds compared to vanilla. -* `#337 `_: To avoid a scaling +* :gh:issue:`337`: To avoid a scaling limitation, a PTY is no longer allocated for an SSH connection unless the configuration specifies a password. -* `d62e6e2a `_: many-target +* :gh:commit:`d62e6e2a`: many-target runs executed the dependency scanner redundantly due to missing synchronization, wasting significant runtime in the connection multiplexer. In one case work was reduced by 95%, which may manifest as faster runs. -* `5189408e `_: threads are +* :gh:commit:`5189408e`: threads are cooperatively scheduled, minimizing `GIL `_ contention, and reducing context switching by around 90%. This manifests as an overall @@ -948,62 +917,62 @@ Enhancements Fixes ^^^^^ -* `#251 `_, - `#340 `_: Connection Delegation +* :gh:issue:`251`, + :gh:issue:`340`: Connection Delegation could establish connections to the wrong target when ``delegate_to:`` is present. -* `#291 `_: when Mitogen had +* :gh:issue:`291`: when Mitogen had previously been installed using ``pip`` or ``setuptools``, the globally installed version could conflict with a newer version bundled with an extension that had been installed using the documented steps. Now the bundled library always overrides over any system-installed copy. -* `#324 `_: plays with a +* :gh:issue:`324`: plays with a `custom module_utils `_ would fail due to fallout from the Python 3 port and related tests being disabled. -* `#331 `_: the connection +* :gh:issue:`331`: the connection multiplexer subprocess always exits before the main Ansible process, ensuring logs generated by it do not overwrite the user's prompt when ``-vvv`` is enabled. -* `#332 `_: support a new +* :gh:issue:`332`: support a new :func:`sys.excepthook`-based module exit mechanism added in Ansible 2.6. -* `#338 `_: compatibility: changes to +* :gh:issue:`338`: compatibility: changes to ``/etc/environment`` and ``~/.pam_environment`` made by a task are reflected in the runtime environment of subsequent tasks. See :ref:`ansible_process_env` for a complete description. -* `#343 `_: the sudo ``--login`` +* :gh:issue:`343`: the sudo ``--login`` option is supported. -* `#344 `_: connections no longer +* :gh:issue:`344`: connections no longer fail when the controller's login username contains slashes. -* `#345 `_: the ``IdentitiesOnly +* :gh:issue:`345`: the ``IdentitiesOnly yes`` option is no longer supplied to OpenSSH by default, better matching Ansible's behaviour. 
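The library-side knob behind the ``IdentitiesOnly yes`` behaviour above is a flag on the SSH method controlling whether that option is passed to OpenSSH. A minimal sketch follows; the keyword name ``identities_only``, its default, and the target details are assumptions here, so check :meth:`mitogen.parent.Router.ssh` in the API documentation for the exact spelling::

    import socket
    import mitogen.utils

    def main(router):
        host = router.ssh(
            hostname='target.example.com',   # placeholder host
            username='deploy',               # placeholder account
            identities_only=False,           # assumed keyword: allow ssh-agent keys
        )
        print(host.call(socket.gethostname))

    mitogen.utils.run_with_router(main)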
-* `#355 `_: tasks configured to run +* :gh:issue:`355`: tasks configured to run in an isolated forked subprocess were forked from the wrong parent context. This meant built-in modules overridden via a custom ``module_utils`` search path may not have had any effect. -* `#362 `_: to work around a slow +* :gh:issue:`362`: to work around a slow algorithm in the :mod:`subprocess` module, the maximum number of open files in processes running on the target is capped to 512, reducing the work required to start a subprocess by >2000x in default CentOS configurations. -* `#397 `_: recent Mitogen master +* :gh:issue:`397`: recent Mitogen master versions could fail to clean up temporary directories in a number of circumstances, and newer Ansibles moved to using :mod:`atexit` to effect temporary directory cleanup in some circumstances. -* `b9112a9c `_, - `2c287801 `_: OpenSSH 7.5 +* :gh:commit:`b9112a9c`, + :gh:commit:`2c287801`: OpenSSH 7.5 permission denied prompts are now recognized. Contributed by Alex Willmer. * A missing check caused an exception traceback to appear when using the @@ -1023,53 +992,53 @@ Core Library related function calls to a target context, cancelling the chain if an exception occurs. -* `#305 `_: fix a long-standing minor +* :gh:issue:`305`: fix a long-standing minor race relating to the logging framework, where *no route for Message..* would frequently appear during startup. -* `#313 `_: +* :gh:issue:`313`: :meth:`mitogen.parent.Context.call` was documented as capable of accepting static methods. While possible on Python 2.x the result is ugly, and in every case it should be trivial to replace with a classmethod. The documentation was fixed. -* `#337 `_: to avoid a scaling +* :gh:issue:`337`: to avoid a scaling limitation, a PTY is no longer allocated for each OpenSSH client if it can be avoided. PTYs are only allocated if a password is supplied, or when `host_key_checking=accept`. This is since Linux has a default of 4096 PTYs (``kernel.pty.max``), while OS X has a default of 127 and an absolute maximum of 999 (``kern.tty.ptmx_max``). -* `#339 `_: the LXD connection method +* :gh:issue:`339`: the LXD connection method was erroneously executing LXC Classic commands. -* `#345 `_: the SSH connection method +* :gh:issue:`345`: the SSH connection method allows optionally disabling ``IdentitiesOnly yes``. -* `#356 `_: if the master Python +* :gh:issue:`356`: if the master Python process does not have :data:`sys.executable` set, the default Python interpreter used for new children on the local machine defaults to ``"/usr/bin/python"``. -* `#366 `_, - `#380 `_: attempts by children to +* :gh:issue:`366`, + :gh:issue:`380`: attempts by children to import :mod:`__main__` where the main program module lacks an execution guard are refused, and an error is logged. This prevents a common and highly confusing error when prototyping new scripts. -* `#371 `_: the LXC connection method +* :gh:pull:`371`: the LXC connection method uses a more compatible method to establish an non-interactive session. Contributed by Brian Candler. -* `af2ded66 `_: add +* :gh:commit:`af2ded66`: add :func:`mitogen.fork.on_fork` to allow non-Mitogen managed process forks to clean up Mitogen resources in the child. -* `d6784242 `_: the setns method +* :gh:commit:`d6784242`: the setns method always resets ``HOME``, ``SHELL``, ``LOGNAME`` and ``USER`` environment variables to an account in the target container, defaulting to ``root``. 
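A couple of entries up, :func:`mitogen.fork.on_fork` is introduced for forks that Mitogen itself did not perform. A minimal sketch of the intended call site, assuming the hook takes no arguments; ``do_child_work`` is a stand-in for application code::

    import os
    import mitogen.fork

    def fork_worker():
        pid = os.fork()
        if pid == 0:
            # Child of an application-level fork: reset Mitogen state
            # (locks, logging internals) inherited from the parent.
            mitogen.fork.on_fork()
            do_child_work()
            os._exit(0)
        return pid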
-* `830966bf `_: the UNIX +* :gh:commit:`830966bf`: the UNIX listener no longer crashes if the peer process disappears in the middle of connection setup. @@ -1110,28 +1079,28 @@ v0.2.2 (2018-07-26) Mitogen for Ansible ~~~~~~~~~~~~~~~~~~~ -* `#291 `_: ``ansible_*_interpreter`` +* :gh:issue:`291`: ``ansible_*_interpreter`` variables are parsed using a restrictive shell-like syntax, supporting a common idiom where ``ansible_python_interpreter`` is set to ``/usr/bin/env python``. -* `#299 `_: fix the ``network_cli`` +* :gh:issue:`299`: fix the ``network_cli`` connection type when the Mitogen strategy is active. Mitogen cannot help network device connections, however it should still be possible to use device connections while Mitogen is active. -* `#301 `_: variables like ``$HOME`` in +* :gh:pull:`301`: variables like ``$HOME`` in the ``remote_tmp`` setting are evaluated correctly. -* `#303 `_: the :ref:`doas` become method +* :gh:pull:`303`: the :ref:`doas` become method is supported. Contributed by `Mike Walker `_. -* `#309 `_: fix a regression to +* :gh:issue:`309`: fix a regression to process environment cleanup, caused by the change in v0.2.1 to run local tasks with the correct environment. -* `#317 `_: respect the verbosity +* :gh:issue:`317`: respect the verbosity setting when writing to Ansible's ``log_path``, if it is enabled. Child log filtering was also incorrect, causing the master to needlessly wake many times. This nets a 3.5% runtime improvement running against the local @@ -1144,32 +1113,32 @@ Mitogen for Ansible Core Library ~~~~~~~~~~~~ -* `#291 `_: the ``python_path`` +* :gh:issue:`291`: the ``python_path`` parameter may specify an argument vector prefix rather than a string program path. -* `#300 `_: the broker could crash on +* :gh:issue:`300`: the broker could crash on OS X during shutdown due to scheduled `kqueue `_ filter changes for descriptors that were closed before the IO loop resumes. As a temporary workaround, kqueue's bulk change feature is not used. -* `#303 `_: the :ref:`doas` become method +* :gh:pull:`303`: the :ref:`doas` become method is now supported. Contributed by `Mike Walker `_. -* `#307 `_: SSH login banner output +* :gh:issue:`307`: SSH login banner output containing the word 'password' is no longer confused for a password prompt. -* `#319 `_: SSH connections would +* :gh:issue:`319`: SSH connections would fail immediately on Windows Subsystem for Linux, due to use of `TCSAFLUSH` with :func:`termios.tcsetattr`. The flag is omitted if WSL is detected. -* `#320 `_: The OS X poller +* :gh:issue:`320`: The OS X poller could spuriously wake up due to ignoring an error bit set on events returned by the kernel, manifesting as a failure to read from an unrelated descriptor. -* `#342 `_: The ``network_cli`` +* :gh:issue:`342`: The ``network_cli`` connection type would fail due to a missing internal SSH plugin method. * Standard IO forwarding accidentally configured the replacement ``stdout`` and @@ -1215,7 +1184,7 @@ v0.2.1 (2018-07-10) Mitogen for Ansible ~~~~~~~~~~~~~~~~~~~ -* `#297 `_: compatibility: local +* :gh:issue:`297`: compatibility: local actions set their working directory to that of their defining playbook, and inherit a process environment as if they were executed as a subprocess of the forked task worker. 
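Both :gh:issue:`291` entries above boil down to ``python_path`` accepting an argument vector prefix, mirroring the common ``ansible_python_interpreter: /usr/bin/env python`` idiom. A short sketch; the hostname is a placeholder::

    import socket
    import mitogen.utils

    def main(router):
        # A list is treated as an argv prefix rather than a single program
        # path, so wrappers such as /usr/bin/env behave as expected.
        child = router.ssh(
            hostname='target.example.com',
            python_path=['/usr/bin/env', 'python'],
        )
        print(child.call(socket.gethostname))

    mitogen.utils.run_with_router(main)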
diff --git a/docs/conf.py b/docs/conf.py index aa91c8b8..86332cd2 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -2,13 +2,14 @@ import os import sys sys.path.append('..') +sys.path.append('.') import mitogen VERSION = '%s.%s.%s' % mitogen.__version__ author = u'Network Genomics' copyright = u'2019, Network Genomics' exclude_patterns = ['_build', '.venv'] -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinxcontrib.programoutput'] +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinxcontrib.programoutput', 'domainrefs'] html_show_copyright = False html_show_sourcelink = False html_show_sphinx = False @@ -36,6 +37,49 @@ templates_path = ['_templates'] todo_include_todos = False version = VERSION +domainrefs = { + 'gh:commit': { + 'text': '%s', + 'url': 'https://github.com/dw/mitogen/commit/%s', + }, + 'gh:issue': { + 'text': '#%s', + 'url': 'https://github.com/dw/mitogen/issues/%s', + }, + 'gh:pull': { + 'text': '#%s', + 'url': 'https://github.com/dw/mitogen/pull/%s', + }, + 'ans:mod': { + 'text': '%s Module', + 'url': 'https://docs.ansible.com/ansible/latest/modules/%s_module.html', + }, + 'ans:conn': { + 'text': '%s Connection Plug-in', + 'url': 'https://docs.ansible.com/ansible/latest/plugins/connection/%s.html', + }, + 'freebsd:man2': { + 'text': '%s(2)', + 'url': 'https://www.freebsd.org/cgi/man.cgi?query=%s', + }, + 'linux:man1': { + 'text': '%s(1)', + 'url': 'http://man7.org/linux/man-pages/man1/%s.1.html', + }, + 'linux:man2': { + 'text': '%s(2)', + 'url': 'http://man7.org/linux/man-pages/man2/%s.2.html', + }, + 'linux:man3': { + 'text': '%s(3)', + 'url': 'http://man7.org/linux/man-pages/man3/%s.3.html', + }, + 'linux:man7': { + 'text': '%s(7)', + 'url': 'http://man7.org/linux/man-pages/man7/%s.7.html', + }, +} + rst_epilog = """ .. |mitogen_version| replace:: %(VERSION)s diff --git a/docs/domainrefs.py b/docs/domainrefs.py new file mode 100644 index 00000000..4ff29dc5 --- /dev/null +++ b/docs/domainrefs.py @@ -0,0 +1,41 @@ + +import functools +import re + +import docutils.nodes +import docutils.utils + + +CUSTOM_RE = re.compile('(.*) <(.*)>') + + +def role(config, role, rawtext, text, lineno, inliner, options={}, content=[]): + template = 'https://docs.ansible.com/ansible/latest/modules/%s_module.html' + + match = CUSTOM_RE.match(text) + if match: # "custom text " + title = match.group(1) + text = match.group(2) + elif text.startswith('~'): # brief + text = text[1:] + title = config.get('brief', '%s') % ( + docutils.utils.unescape(text), + ) + else: + title = config.get('text', '%s') % ( + docutils.utils.unescape(text), + ) + + node = docutils.nodes.reference( + rawsource=rawtext, + text=title, + refuri=config['url'] % (text,), + **options + ) + + return [node], [] + + +def setup(app): + for name, info in app.config._raw_config['domainrefs'].items(): + app.add_role(name, functools.partial(role, info)) diff --git a/docs/index.rst b/docs/index.rst index 17d183aa..3cd53d32 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -27,8 +27,8 @@ and efficient low-level API on which tools like `Salt`_, `Ansible`_, or `Fabric`_, ultimately it is not intended for direct use by consumer software. .. _Salt: https://docs.saltstack.com/en/latest/ -.. _Ansible: http://docs.ansible.com/ -.. _Fabric: http://www.fabfile.org/ +.. _Ansible: https://docs.ansible.com/ +.. 
_Fabric: https://www.fabfile.org/ The focus is to centralize and perfect the intricate dance required to run Python code safely and efficiently on a remote machine, while **avoiding @@ -132,7 +132,7 @@ any tool such as `py2exe`_ that correctly implement the protocols in PEP-302, allowing truly single file applications to run across multiple machines without further effort. -.. _py2exe: http://www.py2exe.org/ +.. _py2exe: https://www.py2exe.org/ Common sources of import latency and bandwidth consumption are mitigated: diff --git a/mitogen/fork.py b/mitogen/fork.py index 25d1b5a6..4172e96f 100644 --- a/mitogen/fork.py +++ b/mitogen/fork.py @@ -73,8 +73,8 @@ def reset_logging_framework(): threads in the parent may have been using the logging package at the moment of fork. - It is not possible to solve this problem in general; see - https://github.com/dw/mitogen/issues/150 for a full discussion. + It is not possible to solve this problem in general; see :gh:issue:`150` + for a full discussion. """ logging._lock = threading.RLock() From 8cbaa98ff95d225237a4eb84de2b05d725eeffaa Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 18:29:20 +0100 Subject: [PATCH 366/383] docs: more hyperlinks --- mitogen/parent.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mitogen/parent.py b/mitogen/parent.py index 82b4a7d1..9f93571b 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -901,9 +901,9 @@ class CallSpec(object): class PollPoller(mitogen.core.Poller): """ - Poller based on the POSIX poll(2) interface. Not available on some versions - of OS X, otherwise it is the preferred poller for small FD counts, as there - is no setup/teardown/configuration system call overhead. + Poller based on the POSIX :linux:man2:`poll` interface. Not available on + some versions of OS X, otherwise it is the preferred poller for small FD + counts, as there is no setup/teardown/configuration system call overhead. """ SUPPORTED = hasattr(select, 'poll') _repr = 'PollPoller()' @@ -949,7 +949,7 @@ class PollPoller(mitogen.core.Poller): class KqueuePoller(mitogen.core.Poller): """ - Poller based on the FreeBSD/Darwin kqueue(2) interface. + Poller based on the FreeBSD/Darwin :freebsd:man2:`kqueue` interface. """ SUPPORTED = hasattr(select, 'kqueue') _repr = 'KqueuePoller()' @@ -1027,7 +1027,7 @@ class KqueuePoller(mitogen.core.Poller): class EpollPoller(mitogen.core.Poller): """ - Poller based on the Linux epoll(2) interface. + Poller based on the Linux :linux:man2:`epoll` interface. """ SUPPORTED = hasattr(select, 'epoll') _repr = 'EpollPoller()' From 26a9fed396a981082e68288f019e82a88e3497f5 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 18:31:50 +0100 Subject: [PATCH 367/383] docs: some more hyperlink joy --- docs/changelog.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index 525543b9..80a74785 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -78,9 +78,9 @@ Mitogen for Ansible * :gh:issue:`363`: fix an obscure race matching *Permission denied* errors from some versions of ``su`` running on heavily loaded machines. 
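Returning to the poller docstrings added in the ``mitogen/parent.py`` hunk above: each class carries a ``SUPPORTED`` flag, so a rough way to pick the best available implementation and wait on a descriptor looks like the following. The ``start_receive``/``poll``/``close`` calls reflect the :class:`mitogen.core.Poller` interface and are worth double-checking against the API docs::

    import os
    import mitogen.core
    import mitogen.parent

    def best_poller():
        # Prefer epoll, then kqueue, then poll(2); fall back to the
        # select()-based default poller.
        for klass in (mitogen.parent.EpollPoller,
                      mitogen.parent.KqueuePoller,
                      mitogen.parent.PollPoller):
            if klass.SUPPORTED:
                return klass()
        return mitogen.core.Poller()

    rfd, wfd = os.pipe()
    poller = best_poller()
    poller.start_receive(rfd, data=rfd)
    os.write(wfd, b'x')
    for data in poller.poll(timeout=1.0):
        print('readable fd:', data)
    poller.close()
    os.close(rfd)
    os.close(wfd)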
-* :gh:issue:`410`: Uses of :linux:man7:`unix` sockets are replaced with pairs - of plain UNIX pipes when SELinux is detected, to work around a broken - heuristic in popular SELinux policies that prevents inheriting +* :gh:issue:`410`: Uses of :linux:man7:`unix` sockets are replaced with + traditional :linux:man7:`pipe` pairs when SELinux is detected, to work around + a broken heuristic in popular SELinux policies that prevents inheriting :linux:man7:`unix` sockets across privilege domains. * `#467 `_: an incompatibility @@ -111,7 +111,7 @@ Mitogen for Ansible due to an incorrect format string. * :gh:issue:`590`: the importer can handle modules that replace themselves in - :mod:`sys.modules` during import. + :data:`sys.modules` during import. * :gh:issue:`591`: the target's current working directory is restored to a known-existent directory between tasks to ensure :func:`os.getcwd` will not From 5d6e20bc2136affb26a73e5ccbb8d15867a4292c Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 19:41:10 +0100 Subject: [PATCH 368/383] tests: add a few extra service tests. --- mitogen/core.py | 2 ++ mitogen/service.py | 2 +- tests/service_test.py | 46 ++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 48 insertions(+), 2 deletions(-) diff --git a/mitogen/core.py b/mitogen/core.py index 30c0e948..87388620 100644 --- a/mitogen/core.py +++ b/mitogen/core.py @@ -3600,6 +3600,8 @@ class Dispatcher(object): mode, any exception that occurs is recorded, and causes all subsequent calls with the same `chain_id` to fail with the same exception. """ + _service_recv = None + def __repr__(self): return 'Dispatcher' diff --git a/mitogen/service.py b/mitogen/service.py index 168f0140..c8022c04 100644 --- a/mitogen/service.py +++ b/mitogen/service.py @@ -80,7 +80,7 @@ def get_or_create_pool(size=None, router=None): global _pool_pid my_pid = os.getpid() - if _pool is None or my_pid != _pool_pid: + if _pool is None or _pool.closed or my_pid != _pool_pid: # Avoid acquiring heavily contended lock if possible. _pool_lock.acquire() try: diff --git a/tests/service_test.py b/tests/service_test.py index 438766f7..a3e75e14 100644 --- a/tests/service_test.py +++ b/tests/service_test.py @@ -15,6 +15,13 @@ class MyService(mitogen.service.Service): self._counter += 1 return self._counter, id(self) + @mitogen.service.expose(policy=mitogen.service.AllowParents()) + @mitogen.service.arg_spec({ + 'foo': int + }) + def test_arg_spec(self, foo): + return foo + @mitogen.service.expose(policy=mitogen.service.AllowParents()) def privileged_op(self): return 'privileged!' @@ -24,7 +31,6 @@ class MyService(mitogen.service.Service): return 'unprivileged!' 
- class MyService2(MyService): """ A uniquely named service that lets us test framework activation and class @@ -36,6 +42,44 @@ def call_service_in(context, service_name, method_name): return context.call_service(service_name, method_name) +class CallTest(testlib.RouterMixin, testlib.TestCase): + def test_local(self): + pool = mitogen.service.get_or_create_pool(router=self.router) + self.assertEquals( + 'privileged!', + mitogen.service.call(MyService, 'privileged_op') + ) + pool.stop() + + def test_remote_bad_arg(self): + c1 = self.router.local() + self.assertRaises( + mitogen.core.CallError, + lambda: mitogen.service.call( + MyService.name(), + 'test_arg_spec', + foo='x', + call_context=c1 + ) + ) + + def test_local_unicode(self): + pool = mitogen.service.get_or_create_pool(router=self.router) + self.assertEquals( + 'privileged!', + mitogen.service.call(MyService.name(), 'privileged_op') + ) + pool.stop() + + def test_remote(self): + c1 = self.router.local() + self.assertEquals( + 'privileged!', + mitogen.service.call(MyService, 'privileged_op', + call_context=c1) + ) + + class ActivationTest(testlib.RouterMixin, testlib.TestCase): def test_parent_can_activate(self): l1 = self.router.local() From 4caca80962e19c4d27576a78a88b48d47c5f3710 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 19:43:52 +0100 Subject: [PATCH 369/383] issue #627: reduce the default pool size in a child to 2. Ansible has no blocking services running, or really any service that would have an outsized benefit from multiple IO waiters. Probably we only need 1, but let's start with 2 just in case. --- docs/changelog.rst | 4 ++++ mitogen/service.py | 3 +-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index 80a74785..51cdd2df 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -222,6 +222,10 @@ Core Library to a function call, where the reply might be dropped due to exceeding the maximum configured message size. +* :gh:issue:`624`: the number of threads used for a child's auto-started thread + pool has been reduced from 16 to 2. This may drop to 1 in future, and become + configurable via a :class:`Router` option. + * :gh:commit:`a5536c35`: avoid quadratic buffer management when logging lines received from a child's redirected standard IO. diff --git a/mitogen/service.py b/mitogen/service.py index c8022c04..6bd64eb0 100644 --- a/mitogen/service.py +++ b/mitogen/service.py @@ -55,7 +55,6 @@ except NameError: LOG = logging.getLogger(__name__) -DEFAULT_POOL_SIZE = 16 _pool = None _pool_pid = None #: Serialize pool construction. @@ -88,7 +87,7 @@ def get_or_create_pool(size=None, router=None): _pool = Pool( router, services=[], - size=size or DEFAULT_POOL_SIZE, + size=size or 2, overwrite=True, recv=mitogen.core.Dispatcher._service_recv, ) From bdf6f1b9a9e55dd264245463ea0b06f2c4d7b498 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 19:46:52 +0100 Subject: [PATCH 370/383] issue #590: rework ParentEnumerationMethod to recursively handle bad modules In the worst case it will start with sys.path and resolve everything from scratch. 
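For context, the "bad modules" handled here are packages that swap themselves out of :data:`sys.modules` at import time, leaving nothing sane to walk back through when a child later asks for one of their submodules — the same pattern exercised by the test data added a little further on. A stripped-down illustration (names are illustrative only)::

    # mypkg/subpkg/__init__.py
    import sys
    import replacement_module          # some unrelated top-level module
    sys.modules[__name__] = replacement_module

    # After this import, sys.modules['mypkg.subpkg'] points at the
    # replacement, so a request for "mypkg.subpkg.child" can no longer be
    # resolved from the in-memory parent.  The reworked finder walks back
    # to the nearest parent that still has a usable __path__ -- or, in the
    # worst case, plain sys.path -- and re-resolves each name component
    # with imp.find_module().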
--- mitogen/master.py | 133 +++++++++++++++++++++++++----------- tests/module_finder_test.py | 39 +++++++++++ tests/responder_test.py | 10 +-- 3 files changed, 140 insertions(+), 42 deletions(-) diff --git a/mitogen/master.py b/mitogen/master.py index 11ef2b00..f9ddf3dd 100644 --- a/mitogen/master.py +++ b/mitogen/master.py @@ -535,18 +535,20 @@ class SysModulesMethod(FinderMethod): Find `fullname` using its :data:`__file__` attribute. """ module = sys.modules.get(fullname) - LOG.debug('_get_module_via_sys_modules(%r) -> %r', fullname, module) - if getattr(module, '__name__', None) != fullname: - LOG.debug('sys.modules[%r].__name__ does not match %r, assuming ' - 'this is a hacky module alias and ignoring it', - fullname, fullname) - return - if not isinstance(module, types.ModuleType): LOG.debug('%r: sys.modules[%r] absent or not a regular module', self, fullname) return + LOG.debug('_get_module_via_sys_modules(%r) -> %r', fullname, module) + alleged_name = getattr(module, '__name__', None) + if alleged_name != fullname: + LOG.debug('sys.modules[%r].__name__ is incorrect, assuming ' + 'this is a hacky module alias and ignoring it. ' + 'Got %r, module object: %r', + fullname, alleged_name, module) + return + path = _py_filename(getattr(module, '__file__', '')) if not path: return @@ -573,43 +575,57 @@ class SysModulesMethod(FinderMethod): class ParentEnumerationMethod(FinderMethod): """ Attempt to fetch source code by examining the module's (hopefully less - insane) parent package. Required for older versions of - :mod:`ansible.compat.six`, :mod:`plumbum.colors`, and Ansible 2.8 - :mod:`ansible.module_utils.distro`. + insane) parent package, and if no insane parents exist, simply use + :mod:`sys.path` to search for it from scratch on the filesystem using the + normal Python lookup mechanism. + + This is required for older versions of :mod:`ansible.compat.six`, + :mod:`plumbum.colors`, Ansible 2.8 :mod:`ansible.module_utils.distro` and + its submodule :mod:`ansible.module_utils.distro._distro`. + + When some package dynamically replaces itself in :data:`sys.modules`, but + only conditionally according to some program logic, it is possible that + children may attempt to load modules and subpackages from it that can no + longer be resolved by examining a (corrupted) parent. For cases like :mod:`ansible.module_utils.distro`, this must handle cases where a package transmuted itself into a totally unrelated module during - import and vice versa. + import and vice versa, where :data:`sys.modules` is replaced with junk that + makes it impossible to discover the loaded module using the in-memory + module object or any parent package's :data:`__path__`, since they have all + been overwritten. Some men just want to watch the world burn. """ - def find(self, fullname): + def _find_sane_parent(self, fullname): """ - See implementation for a description of how this works. + Iteratively search :data:`sys.modules` for the least indirect parent of + `fullname` that is loaded and contains a :data:`__path__` attribute. + + :return: + `(parent_name, path, modpath)` tuple, where: + + * `modname`: canonical name of the found package, or the empty + string if none is found. + * `search_path`: :data:`__path__` attribute of the least + indirect parent found, or :data:`None` if no indirect parent + was found. + * `modpath`: list of module name components leading from `path` + to the target module. 
""" - if fullname not in sys.modules: - # Don't attempt this unless a module really exists in sys.modules, - # else we could return junk. - return - - pkgname, _, modname = str_rpartition(to_text(fullname), u'.') - pkg = sys.modules.get(pkgname) - if pkg is None or not hasattr(pkg, '__file__'): - LOG.debug('%r: %r is not a package or lacks __file__ attribute', - self, pkgname) - return - - pkg_path = [os.path.dirname(pkg.__file__)] - try: - fp, path, (suffix, _, kind) = imp.find_module(modname, pkg_path) - except ImportError: - e = sys.exc_info()[1] - LOG.debug('%r: imp.find_module(%r, %r) -> %s', - self, modname, [pkg_path], e) - return None - - if kind == imp.PKG_DIRECTORY: - return self._found_package(fullname, path) - else: - return self._found_module(fullname, path, fp) + path = None + modpath = [] + while True: + pkgname, _, modname = str_rpartition(to_text(fullname), u'.') + modpath.insert(0, modname) + if not pkgname: + return [], None, modpath + + pkg = sys.modules.get(pkgname) + path = getattr(pkg, '__path__', None) + if pkg and path: + return pkgname.split('.'), path, modpath + + LOG.debug('%r: %r lacks __path__ attribute', self, pkgname) + fullname = pkgname def _found_package(self, fullname, path): path = os.path.join(path, '__init__.py') @@ -638,6 +654,47 @@ class ParentEnumerationMethod(FinderMethod): source = source.encode('utf-8') return path, source, is_pkg + def _find_one_component(self, modname, search_path): + try: + #fp, path, (suffix, _, kind) = imp.find_module(modname, search_path) + return imp.find_module(modname, search_path) + except ImportError: + e = sys.exc_info()[1] + LOG.debug('%r: imp.find_module(%r, %r) -> %s', + self, modname, [search_path], e) + return None + + def find(self, fullname): + """ + See implementation for a description of how this works. + """ + #if fullname not in sys.modules: + # Don't attempt this unless a module really exists in sys.modules, + # else we could return junk. + #return + + fullname = to_text(fullname) + modname, search_path, modpath = self._find_sane_parent(fullname) + while True: + tup = self._find_one_component(modpath.pop(0), search_path) + if tup is None: + return None + + fp, path, (suffix, _, kind) = tup + if modpath: + # Still more components to descent. Result must be a package + if fp: + fp.close() + if kind != imp.PKG_DIRECTORY: + LOG.debug('%r: %r appears to be child of non-package %r', + self, fullname, path) + return None + search_path = [path] + elif kind == imp.PKG_DIRECTORY: + return self._found_package(fullname, path) + else: + return self._found_module(fullname, path, fp) + class ModuleFinder(object): """ diff --git a/tests/module_finder_test.py b/tests/module_finder_test.py index e61c768f..fc3a17de 100644 --- a/tests/module_finder_test.py +++ b/tests/module_finder_test.py @@ -180,6 +180,7 @@ class GetModuleViaParentEnumerationTest(testlib.TestCase): 'pkg_like_ansible.module_utils.distro._distro' ) + # ensure we can resolve the subpackage. path, src, is_pkg = self.call('pkg_like_ansible.module_utils.distro') modpath = os.path.join(MODS_DIR, 'pkg_like_ansible/module_utils/distro/__init__.py') @@ -187,6 +188,44 @@ class GetModuleViaParentEnumerationTest(testlib.TestCase): self.assertEquals(src, open(modpath, 'rb').read()) self.assertEquals(is_pkg, True) + # ensure we can resolve a child of the subpackage. 
+ path, src, is_pkg = self.call( + 'pkg_like_ansible.module_utils.distro._distro' + ) + modpath = os.path.join(MODS_DIR, + 'pkg_like_ansible/module_utils/distro/_distro.py') + self.assertEquals(path, modpath) + self.assertEquals(src, open(modpath, 'rb').read()) + self.assertEquals(is_pkg, False) + + def test_ansible_module_utils_system_distro_succeeds(self): + # #590: a package that turns itself into a module. + # #590: a package that turns itself into a module. + import pkg_like_ansible.module_utils.sys_distro as d + self.assertEquals(d.I_AM, "the system module that replaced the subpackage") + self.assertEquals( + sys.modules['pkg_like_ansible.module_utils.sys_distro'].__name__, + 'system_distro' + ) + + # ensure we can resolve the subpackage. + path, src, is_pkg = self.call('pkg_like_ansible.module_utils.sys_distro') + modpath = os.path.join(MODS_DIR, + 'pkg_like_ansible/module_utils/sys_distro/__init__.py') + self.assertEquals(path, modpath) + self.assertEquals(src, open(modpath, 'rb').read()) + self.assertEquals(is_pkg, True) + + # ensure we can resolve a child of the subpackage. + path, src, is_pkg = self.call( + 'pkg_like_ansible.module_utils.sys_distro._distro' + ) + modpath = os.path.join(MODS_DIR, + 'pkg_like_ansible/module_utils/sys_distro/_distro.py') + self.assertEquals(path, modpath) + self.assertEquals(src, open(modpath, 'rb').read()) + self.assertEquals(is_pkg, False) + class ResolveRelPathTest(testlib.TestCase): klass = mitogen.master.ModuleFinder diff --git a/tests/responder_test.py b/tests/responder_test.py index 285acd6f..60817747 100644 --- a/tests/responder_test.py +++ b/tests/responder_test.py @@ -158,14 +158,16 @@ class BrokenModulesTest(testlib.TestCase): self.assertEquals(1, len(router._async_route.mock_calls)) self.assertEquals(1, responder.get_module_count) - self.assertEquals(0, responder.good_load_module_count) - self.assertEquals(0, responder.good_load_module_size) - self.assertEquals(1, responder.bad_load_module_count) + self.assertEquals(1, responder.good_load_module_count) + self.assertEquals(7642, responder.good_load_module_size) + self.assertEquals(0, responder.bad_load_module_count) call = router._async_route.mock_calls[0] msg, = call[1] self.assertEquals(mitogen.core.LOAD_MODULE, msg.handle) - self.assertIsInstance(msg.unpickle(), tuple) + + tup = msg.unpickle() + self.assertIsInstance(tup, tuple) class ForwardTest(testlib.RouterMixin, testlib.TestCase): From c81f366fc613c5f99f1c2bec088906393bc7f218 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 22:02:32 +0100 Subject: [PATCH 371/383] issue #590: whoops, import missing test modules --- .../pkg_like_ansible/module_utils/sys_distro/__init__.py | 5 +++++ .../pkg_like_ansible/module_utils/sys_distro/_distro.py | 1 + tests/data/importer/system_distro.py | 2 ++ tests/responder_test.py | 1 - 4 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 tests/data/importer/pkg_like_ansible/module_utils/sys_distro/__init__.py create mode 100644 tests/data/importer/pkg_like_ansible/module_utils/sys_distro/_distro.py create mode 100644 tests/data/importer/system_distro.py diff --git a/tests/data/importer/pkg_like_ansible/module_utils/sys_distro/__init__.py b/tests/data/importer/pkg_like_ansible/module_utils/sys_distro/__init__.py new file mode 100644 index 00000000..f757e54c --- /dev/null +++ b/tests/data/importer/pkg_like_ansible/module_utils/sys_distro/__init__.py @@ -0,0 +1,5 @@ +# #590: a subpackage that turns itself into a module from elsewhere on sys.path. 
+I_AM = "the subpackage that was replaced with a system module" +import sys +import system_distro +sys.modules[__name__] = system_distro diff --git a/tests/data/importer/pkg_like_ansible/module_utils/sys_distro/_distro.py b/tests/data/importer/pkg_like_ansible/module_utils/sys_distro/_distro.py new file mode 100644 index 00000000..16c32b2a --- /dev/null +++ b/tests/data/importer/pkg_like_ansible/module_utils/sys_distro/_distro.py @@ -0,0 +1 @@ +I_AM = "the module inside the replaced subpackage" diff --git a/tests/data/importer/system_distro.py b/tests/data/importer/system_distro.py new file mode 100644 index 00000000..78fb1601 --- /dev/null +++ b/tests/data/importer/system_distro.py @@ -0,0 +1,2 @@ +# #590: a system module that replaces some subpackage +I_AM = "the system module that replaced the subpackage" diff --git a/tests/responder_test.py b/tests/responder_test.py index 60817747..2653589c 100644 --- a/tests/responder_test.py +++ b/tests/responder_test.py @@ -159,7 +159,6 @@ class BrokenModulesTest(testlib.TestCase): self.assertEquals(1, responder.get_module_count) self.assertEquals(1, responder.good_load_module_count) - self.assertEquals(7642, responder.good_load_module_size) self.assertEquals(0, responder.bad_load_module_count) call = router._async_route.mock_calls[0] From e86f371b93442d659e01a9bbb1dccddffad64d99 Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 18 Aug 2019 00:59:34 +0100 Subject: [PATCH 372/383] issue #621: send ADD_ROUTE earlier and add test for early logging. Logs were broken because ADD_ROUTE was being sent *after* messages started flowing from the new child. That's an explicit non-goal of the design, so fix it. --- mitogen/parent.py | 2 +- tests/log_handler_test.py | 90 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+), 1 deletion(-) create mode 100644 tests/log_handler_test.py diff --git a/mitogen/parent.py b/mitogen/parent.py index 9f93571b..6e99bb66 100644 --- a/mitogen/parent.py +++ b/mitogen/parent.py @@ -1546,6 +1546,7 @@ class Connection(object): remote_id=self.context.context_id, ) ) + self._router.route_monitor.notice_stream(self.stdio_stream) self.latch.put() def _fail_connection(self, exc): @@ -2423,7 +2424,6 @@ class Router(mitogen.core.Router): except mitogen.core.TimeoutError: raise mitogen.core.StreamError(self.connection_timeout_msg) - self.route_monitor.notice_stream(conn.stdio_stream) return context def connect(self, method_name, name=None, **kwargs): diff --git a/tests/log_handler_test.py b/tests/log_handler_test.py new file mode 100644 index 00000000..c5d257a9 --- /dev/null +++ b/tests/log_handler_test.py @@ -0,0 +1,90 @@ + +import logging +import mock + +import unittest2 +import testlib +import mitogen.core +import mitogen.master +import mitogen.parent +import mitogen.utils +from mitogen.core import b + + +def ping(): + pass + + +class BufferingTest(testlib.TestCase): + klass = mitogen.core.LogHandler + + def record(self): + return logging.LogRecord( + name='name', + level=99, + pathname='pathname', + lineno=123, + msg='msg', + args=(), + exc_info=None, + ) + + def build(self): + context = mock.Mock() + return context, self.klass(context) + + def test_initially_buffered(self): + context, handler = self.build() + rec = self.record() + handler.emit(rec) + self.assertEquals(0, context.send.call_count) + self.assertEquals(1, len(handler._buffer)) + + def test_uncork(self): + context, handler = self.build() + rec = self.record() + handler.emit(rec) + handler.uncork() + + self.assertEquals(1, context.send.call_count) 
+ self.assertEquals(None, handler._buffer) + + _, args, _ = context.send.mock_calls[0] + msg, = args + + self.assertEquals(mitogen.core.FORWARD_LOG, msg.handle) + self.assertEquals(b('name\x0099\x00msg'), msg.data) + + +class StartupTest(testlib.RouterMixin, testlib.TestCase): + def test_earliest_messages_logged(self): + log = testlib.LogCapturer() + log.start() + + c1 = self.router.local() + c1.shutdown(wait=True) + + logs = log.stop() + self.assertTrue('Python version is' in logs) + self.assertTrue('Parent is context 0 (master)' in logs) + + def test_earliest_messages_logged_via(self): + c1 = self.router.local(name='c1') + # ensure any c1-related msgs are processed before beginning capture. + c1.call(ping) + + log = testlib.LogCapturer() + log.start() + + c2 = self.router.local(via=c1, name='c2', debug=True) + c2.shutdown(wait=True) + + logs = log.stop() + self.assertTrue('Python version is' in logs) + + expect = 'Parent is context %s (%s)' % (c1.context_id, 'parent') + self.assertTrue(expect in logs) + + +if __name__ == '__main__': + unittest2.main() From a56930e913958ea4e461193f4b8db30809db1fcd Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 22:15:23 +0100 Subject: [PATCH 373/383] issue #590: update Changelog. --- docs/changelog.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/changelog.rst b/docs/changelog.rst index 51cdd2df..fdc16e2f 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -264,6 +264,7 @@ bug reports, testing, features and fixes in this release contributed by `Florent Dutheil `_, `James Hogarth `_, `Jordan Webb `_, +`Julian Andres Klode `_, `Marc Hartmayer `_, `Nigel Metheringham `_, `Orion Poplawski `_, @@ -271,6 +272,7 @@ bug reports, testing, features and fixes in this release contributed by `Stefane Fermigier `_, `Szabó Dániel Ernő `_, `Ulrich Schreiner `_, +`Vincent S. Cojot `_, `yen `_, `Yuki Nishida `_, `@alexhexabeam `_, From 80bc5c7d8bcf7ebeb394f5204e2401e8bb07ac9d Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 23:10:08 +0100 Subject: [PATCH 374/383] docs: changelog fixes/tweaks --- docs/changelog.rst | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index fdc16e2f..1b47e6ca 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -92,10 +92,9 @@ Mitogen for Ansible ``mitogen_task_isolation: fork`` modules, and those present on an internal blacklist of misbehaving modules. - This deadlock is relatively easy hit, has been present since 0.2.0, and is - likely to have impacted many users. For new connections it could manifest as - a *Connection timed out* error, for forked tasks it could manifest as a - timeout or an apparent hang. + This deadlock is relatively easy hit, has been present since 0.2.0, and + likely impacted many users. For new connections it manifested as a timeout, + for forked tasks it could manifest as a timeout or an apparent hang. * :gh:issue:`549`: the open file descriptor limit for the Ansible process is increased to the available hard limit. It is common for distributions to ship @@ -105,7 +104,7 @@ Mitogen for Ansible * :gh:issue:`558`, :gh:issue:`582`: on Ansible 2.3 a remote directory was unconditionally deleted after the first module belonging to an action plug-in - had executed, causing the :ans:mod:`unarchive` module to fail. + had executed, causing the :ans:mod:`unarchive` to fail. * :gh:issue:`578`: the extension could crash while rendering an error message, due to an incorrect format string. 
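In plain Python terms, the :gh:issue:`549` adjustment described above amounts to raising the soft open-file limit to the hard limit; a sketch of the idea, not the extension's exact code::

    import resource

    def raise_open_file_limit():
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        if soft < hard:
            # Only the soft limit can be raised without extra privileges;
            # lifting it to the hard limit avoids "too many open files"
            # during large runs.
            resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))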
@@ -131,7 +130,7 @@ Mitogen for Ansible encoding. * :gh:issue:`602`: connection configuration is more accurately inferred for - `meta: reset_connection`, the `synchronize` module, and for any action + `meta: reset_connection`, the :ans:mod:`synchronize`, and for any action plug-ins that establish additional connections. * :gh:issue:`598`, :gh:issue:`605`: fix a deadlock managing a shared counter @@ -264,7 +263,7 @@ bug reports, testing, features and fixes in this release contributed by `Florent Dutheil `_, `James Hogarth `_, `Jordan Webb `_, -`Julian Andres Klode `_, +`Julian Andres Klode `_, `Marc Hartmayer `_, `Nigel Metheringham `_, `Orion Poplawski `_, From 9b6792829d948ac240f2d24c403fb575cc9c699b Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sat, 17 Aug 2019 23:21:21 +0100 Subject: [PATCH 375/383] docs: update supported Ansible version, mention unsupported features --- docs/ansible_detailed.rst | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/ansible_detailed.rst b/docs/ansible_detailed.rst index 65f68efb..22d7223f 100644 --- a/docs/ansible_detailed.rst +++ b/docs/ansible_detailed.rst @@ -140,7 +140,7 @@ Testimonials Noteworthy Differences ---------------------- -* Ansible 2.3-2.7 are supported along with Python 2.6, 2.7, 3.6 and 3.7. Verify +* Ansible 2.3-2.8 are supported along with Python 2.6, 2.7, 3.6 and 3.7. Verify your installation is running one of these versions by checking ``ansible --version`` output. @@ -164,6 +164,12 @@ Noteworthy Differences - initech_app - y2k_fix +* Ansible 2.8 `interpreter discovery + `_ + and `become plugins + `_ are not yet + supported. + * The ``doas``, ``su`` and ``sudo`` become methods are available. File bugs to register interest in more. From 8844d5f5cca9a10c9de025607455ba4cffe8147a Mon Sep 17 00:00:00 2001 From: David Wilson Date: Sun, 18 Aug 2019 01:11:38 +0100 Subject: [PATCH 376/383] docs: versionless --- docs/conf.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/conf.py b/docs/conf.py index 86332cd2..dc5c6c93 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -10,6 +10,10 @@ author = u'Network Genomics' copyright = u'2019, Network Genomics' exclude_patterns = ['_build', '.venv'] extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinxcontrib.programoutput', 'domainrefs'] + +# get rid of version from <title>, it messes with piwik +html_title = 'Mitogen Documentation' + html_show_copyright = False html_show_sourcelink = False html_show_sphinx = False From 52c070efc03fe0e5c16681cbaed5b0fd01460fb6 Mon Sep 17 00:00:00 2001 From: David Wilson <dw@botanicus.net> Date: Sun, 18 Aug 2019 01:06:41 +0100 Subject: [PATCH 377/383] docs: reorder chapters --- docs/toc.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/toc.rst b/docs/toc.rst index 2bbd0f9a..e43326f1 100644 --- a/docs/toc.rst +++ b/docs/toc.rst @@ -7,11 +7,11 @@ Table Of Contents index Mitogen for Ansible <ansible_detailed> - contributors changelog + contributors howitworks - getting_started api + getting_started examples internals From efe91092dc19dc8a2dca182948814bd25cd36747 Mon Sep 17 00:00:00 2001 From: David Wilson <dw@botanicus.net> Date: Sun, 18 Aug 2019 02:00:21 +0100 Subject: [PATCH 378/383] docs: more changelog tweaks --- docs/changelog.rst | 61 +++++++++++++++++++++++++++++----------------- 1 file changed, 39 insertions(+), 22 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index 1b47e6ca..ef8cb07d 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -25,8 +25,7 @@ 
Enhancements ~~~~~~~~~~~~ * :gh:issue:`556`, - :gh:issue:`587`: Ansible 2.8 is partially - supported. `Become plugins + :gh:issue:`587`: Ansible 2.8 is supported. `Become plugins <https://docs.ansible.com/ansible/latest/plugins/become.html>`_ and `interpreter discovery <https://docs.ansible.com/ansible/latest/reference_appendices/interpreter_discovery.html>`_ @@ -80,11 +79,12 @@ Mitogen for Ansible * :gh:issue:`410`: Uses of :linux:man7:`unix` sockets are replaced with traditional :linux:man7:`pipe` pairs when SELinux is detected, to work around - a broken heuristic in popular SELinux policies that prevents inheriting + a broken heuristic in common SELinux policies that prevents inheriting :linux:man7:`unix` sockets across privilege domains. * `#467 <httpe://github.com/dw/mitogen/issues/467>`_: an incompatibility - running Mitogen under Molecule was resolved. + running Mitogen under `Molecule + <https://molecule.readthedocs.io/en/stable/>`_ was resolved. * :gh:issue:`547`, :gh:issue:`598`: fix a serious deadlock possible while initializing the service pool of any child, such as during @@ -96,11 +96,11 @@ Mitogen for Ansible likely impacted many users. For new connections it manifested as a timeout, for forked tasks it could manifest as a timeout or an apparent hang. -* :gh:issue:`549`: the open file descriptor limit for the Ansible process is - increased to the available hard limit. It is common for distributions to ship - with a much higher hard limit than their default soft limit, allowing *"too - many open files"* errors to be avoided more often in large runs without user - configuration. +* :gh:issue:`549`: the open file limit for the Ansible process is increased to + the available hard limit. It is common for distributions to ship with a + higher hard limit than the default soft limit, allowing *"too many open + files"* errors to be avoided more often in large runs without user + intervention. * :gh:issue:`558`, :gh:issue:`582`: on Ansible 2.3 a remote directory was unconditionally deleted after the first module belonging to an action plug-in @@ -110,9 +110,10 @@ Mitogen for Ansible due to an incorrect format string. * :gh:issue:`590`: the importer can handle modules that replace themselves in - :data:`sys.modules` during import. + :data:`sys.modules` with completely unrelated modules during import, as in + the case of Ansible 2.8 :mod:`ansible.module_utils.distro`. -* :gh:issue:`591`: the target's current working directory is restored to a +* :gh:issue:`591`: the target's working directory is restored to a known-existent directory between tasks to ensure :func:`os.getcwd` will not fail when called, in the same way that :class:`AnsibleModule` restores it during initialization. However this restore happens before the module ever @@ -130,16 +131,18 @@ Mitogen for Ansible encoding. * :gh:issue:`602`: connection configuration is more accurately inferred for - `meta: reset_connection`, the :ans:mod:`synchronize`, and for any action - plug-ins that establish additional connections. + :ans:mod:`meta: reset_connection <meta>` the :ans:mod:`synchronize`, and for + any action plug-ins that establish additional connections. * :gh:issue:`598`, :gh:issue:`605`: fix a deadlock managing a shared counter - used for load balancing. + used for load balancing, present since 0.2.4. -* :gh:issue:`615`: streaming file transfer is implemented for ``fetch`` and - other actions that transfer files from the target to the controller. 
- Previously the file was sent in one message, requiring it to fit in RAM and - be smaller than the internal message size limit. +* :gh:issue:`615`: streaming file transfer is implemented for the + :ans:mod:`fetch` and other actions that transfer files from the target to the + controller. Previously files delivered from target to controller were sent in + one message, requiring them to fit in RAM and be smaller than an internal + message size sanity check. Transfers from controller to target have been + streaming since 0.2.0. * :gh:commit:`7ae926b3`: the Ansible :ans:mod:`lineinfile` began leaking writable temporary file descriptors since Ansible 2.7.0. When @@ -157,6 +160,20 @@ Core Library human-readable output where possible. For example, *"Stream(ssh:123).connect()"* might be written *"connecting to ssh:123"*. +* In preparation for reducing default log output, many messages are delivered + to per-component loggers, including messages originating from children, + enabling :mod:`logging` aggregation to function as designed. An importer + message like:: + + 12:00:00 D mitogen.ctx.remotehost mitogen: loading module "foo" + + Might instead be logged to the ``mitogen.importer.[remotehost]`` logger:: + + 12:00:00 D mitogen.importer.[remotehost] loading module "foo" + + Allowing a filter or handler for ``mitogen.importer`` to select that logger + in every process. + * :func:`bytearray` was removed from the list of supported serialization types. It was never portable between Python versions, unused, and never made much sense to support. @@ -217,13 +234,13 @@ Core Library deliver a message for some reason other than the sender cannot or should not reach the recipient, and no reply-to address is present on the message, instead send a :ref:`dead message <IS_DEAD>` to the original recipient. This - ensures a descriptive messages is delivered to a thread sleeping on the reply + ensures a descriptive message is delivered to a thread sleeping on the reply to a function call, where the reply might be dropped due to exceeding the maximum configured message size. -* :gh:issue:`624`: the number of threads used for a child's auto-started thread - pool has been reduced from 16 to 2. This may drop to 1 in future, and become - configurable via a :class:`Router` option. +* :gh:issue:`624`: the number of threads used for a child's automatically + initialized service thread pool has been reduced from 16 to 2. This may drop + to 1 in future, and become configurable via a :class:`Router` option. * :gh:commit:`a5536c35`: avoid quadratic buffer management when logging lines received from a child's redirected From 8a11dea07584c07bc59ae9474869f85597192c63 Mon Sep 17 00:00:00 2001 From: David Wilson <dw@botanicus.net> Date: Sun, 18 Aug 2019 13:20:18 +0100 Subject: [PATCH 379/383] docs: changelog concision --- docs/changelog.rst | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/docs/changelog.rst b/docs/changelog.rst index ef8cb07d..9d6dee9d 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -47,13 +47,12 @@ Enhancements is available to manipulate `Buildah <https://buildah.io/>`_ containers, and is exposed to Ansible as the :ans:conn:`buildah`. -* :gh:issue:`615`: the ``mitogen_fetch`` - action is included, and the standard Ansible :ans:mod:`fetch` is redirected - to it. This implements streaming file transfer in every case, including when - ``become`` is active, preventing excessive CPU usage and memory spikes, and - significantly improving throughput. 
-  A copy of 2 files of 512 MiB each drops
-  from 47 seconds to just under 7 seconds, with peak memory usage dropping from
-  10.7 GiB to 64.8 MiB.
+* :gh:issue:`615`: a modified Ansible :ans:mod:`fetch` is included that
+  implement streaming file transfer in every case, including when ``become`` is
+  active, preventing excessive CPU usage and memory spikes, and significantly
+  improving performance. A copy of 2 files of 512 MiB each drops from 47
+  seconds to 7 seconds, with peak memory usage dropping from 10.7 GiB to 64.8
+  MiB.
 
 * `Operon <https://networkgenomics.com/operon/>`_ no longer requires a custom
   installation, both Operon and Ansible are supported by a unified release.

From 4bd0a234ce9d878a166ed32a6e6f0e19fbab1b4b Mon Sep 17 00:00:00 2001
From: David Wilson <dw@botanicus.net>
Date: Sun, 18 Aug 2019 15:49:24 +0100
Subject: [PATCH 380/383] docs: lots more changelog concision

---
 docs/changelog.rst | 144 ++++++++++++++++++++-------------------------
 docs/conf.py       |   4 +-
 2 files changed, 67 insertions(+), 81 deletions(-)

diff --git a/docs/changelog.rst b/docs/changelog.rst
index 9d6dee9d..84e73427 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -31,43 +31,39 @@ Enhancements
   <https://docs.ansible.com/ansible/latest/reference_appendices/interpreter_discovery.html>`_
   are not yet handled.
 
-* :gh:issue:`419`, :gh:issue:`470`, file descriptor usage during large runs is
-  halved, as it is no longer necessary to manage read and write sides
-  distinctly in order to work around a design problem.
+* :gh:issue:`419`, :gh:issue:`470`, file descriptor usage is approximately
+  halved, as it is no longer necessary to separately manage read and write
+  sides to work around a design problem.
 
-* :gh:issue:`419`: almost all connection setup happens on one thread, reducing
-  contention and context switching early in a run.
+* :gh:issue:`419`: setup for all connections happens almost entirely on one
+  thread, reducing contention and context switching early in a run.
 
 * :gh:issue:`419`: Connection setup is better pipelined, eliminating some
   network round-trips. Most infrastructure is in place to support future
-  removal of the final round-trips between a target fully booting and receiving
+  removal of the final round-trips between a target booting and receiving
   function calls.
 
 * :gh:pull:`595`: the :meth:`~mitogen.parent.Router.buildah` connection method
   is available to manipulate `Buildah <https://buildah.io/>`_ containers, and
   is exposed to Ansible as the :ans:conn:`buildah`.
 
-* :gh:issue:`615`: a modified Ansible :ans:mod:`fetch` is included that
-  implement streaming file transfer in every case, including when ``become`` is
-  active, preventing excessive CPU usage and memory spikes, and significantly
-  improving performance. A copy of 2 files of 512 MiB each drops from 47
-  seconds to 7 seconds, with peak memory usage dropping from 10.7 GiB to 64.8
-  MiB.
+* :gh:issue:`615`: a modified :ans:mod:`fetch` implements streaming transfer
+  even when ``become`` is active, avoiding excess CPU usage and memory spikes,
+  and improving performance. A copy of two 512 MiB files drops from 47 seconds
+  to 7 seconds, with peak memory usage dropping from 10.7 GiB to 64.8 MiB.
 
 * `Operon <https://networkgenomics.com/operon/>`_ no longer requires a custom
-  installation, both Operon and Ansible are supported by a unified release.
+  library installation, both Ansible and Operon are supported by a single
+  Mitogen release.

-* The ``MITOGEN_CPU_COUNT`` environment variable shards the connection
-  multiplexer into per-CPU workers. This may improve throughput for runs
-  involving large file transfers, and is required for future in-process SSH
-  support. One multiplexer starts by default, to match existing behaviour.
+* The ``MITOGEN_CPU_COUNT`` variable shards the connection multiplexer into
+  per-CPU workers. This may improve throughput for large runs involving file
+  transfer, and is required for future functionality. One multiplexer starts by
+  default, to match existing behaviour.
 
-* :gh:commit:`d6faff06`,
-  :gh:commit:`807cbef9`,
-  :gh:commit:`e93762b3`,
-  :gh:commit:`50bfe4c7`: locking is
-  avoided on hot paths, and some locks are released earlier, before waking a
-  thread that must immediately take the same lock.
+* :gh:commit:`d6faff06`, :gh:commit:`807cbef9`, :gh:commit:`e93762b3`,
+  :gh:commit:`50bfe4c7`: locking is avoided on hot paths, and some locks are
+  released before waking a thread that must immediately acquire the same lock.
 
 
 Mitogen for Ansible
@@ -85,40 +81,34 @@ Mitogen for Ansible
   running Mitogen under `Molecule
   <https://molecule.readthedocs.io/en/stable/>`_ was resolved.
 
-* :gh:issue:`547`, :gh:issue:`598`: fix a serious deadlock
-  possible while initializing the service pool of any child, such as during
-  connection, ``async`` tasks, tasks using custom :mod:`module_utils`,
-  ``mitogen_task_isolation: fork`` modules, and those present on an internal
-  blacklist of misbehaving modules.
+* :gh:issue:`547`, :gh:issue:`598`: fix a deadlock during initialization of
+  connections, ``async`` tasks, tasks using custom :mod:`module_utils`,
+  ``mitogen_task_isolation: fork`` modules, and modules present on an internal
+  blacklist. This would manifest as a timeout or hang, was easily hit, had been
+  present since 0.2.0, and likely impacted many users.
 
-  This deadlock is relatively easy hit, has been present since 0.2.0, and
-  likely impacted many users. For new connections it manifested as a timeout,
-  for forked tasks it could manifest as a timeout or an apparent hang.
+* :gh:issue:`549`: the open file limit is increased to the permitted hard
+  limit. It is common for distributions to ship with a higher hard limit than
+  the default soft limit, allowing *"too many open files"* errors to be avoided
+  more often in large runs without user intervention.
 
-* :gh:issue:`549`: the open file limit for the Ansible process is increased to
-  the available hard limit. It is common for distributions to ship with a
-  higher hard limit than the default soft limit, allowing *"too many open
-  files"* errors to be avoided more often in large runs without user
-  intervention.
-
-* :gh:issue:`558`, :gh:issue:`582`: on Ansible 2.3 a remote directory was
+* :gh:issue:`558`, :gh:issue:`582`: on Ansible 2.3 a directory was
   unconditionally deleted after the first module belonging to an action plug-in
   had executed, causing the :ans:mod:`unarchive` to fail.
 
-* :gh:issue:`578`: the extension could crash while rendering an error message,
-  due to an incorrect format string.
+* :gh:issue:`578`: the extension could crash while rendering an error due to an
+  incorrect format string.
 
 * :gh:issue:`590`: the importer can handle modules that replace themselves in
   :data:`sys.modules` with completely unrelated modules during import, as in
   the case of Ansible 2.8 :mod:`ansible.module_utils.distro`.

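The pattern the importer must tolerate can be sketched in a few lines of plain
Python; the package and module names below are invented for illustration and
are not part of Mitogen or Ansible::

    # somepkg/distro.py -- a hypothetical self-replacing module.
    import sys
    import platform as _replacement   # any unrelated, already-importable module

    # From this point on, "import somepkg.distro" yields _replacement rather
    # than the module object that was created for this file.
    sys.modules[__name__] = _replacement
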
-* :gh:issue:`591`: the target's working directory is restored to a
-  known-existent directory between tasks to ensure :func:`os.getcwd` will not
-  fail when called, in the same way that :class:`AnsibleModule` restores it
-  during initialization. However this restore happens before the module ever
-  executes, ensuring any code that calls :func:`os.getcwd` prior to
-  :class:`AnsibleModule` initialization, such as the Ansible 2.7
-  :ans:mod:`pip`, cannot fail due to the behavior of a prior task.
+* :gh:issue:`591`: the working directory is reset between tasks to ensure
+  :func:`os.getcwd` cannot fail, in the same way :class:`AnsibleModule`
+  resets it during initialization. However this restore happens before the
+  module executes, ensuring code that calls :func:`os.getcwd` prior to
+  :class:`AnsibleModule` initialization, such as the Ansible 2.7
+  :ans:mod:`pip`, cannot fail due to the actions of a prior task.
 
 * :gh:issue:`593`: the SSH connection method exposes
   ``mitogen_ssh_keepalive_interval`` and ``mitogen_ssh_keepalive_count``
@@ -136,28 +126,26 @@ Mitogen for Ansible
 * :gh:issue:`598`, :gh:issue:`605`: fix a deadlock managing a shared counter
   used for load balancing, present since 0.2.4.
 
-* :gh:issue:`615`: streaming file transfer is implemented for the
-  :ans:mod:`fetch` and other actions that transfer files from the target to the
-  controller. Previously files delivered from target to controller were sent in
-  one message, requiring them to fit in RAM and be smaller than an internal
-  message size sanity check. Transfers from controller to target have been
-  streaming since 0.2.0.
+* :gh:issue:`615`: streaming is implemented for the :ans:mod:`fetch` and other
+  actions that transfer files from targets to the controller. Previously files
+  delivered were sent in one message, requiring them to fit in RAM and be
+  smaller than an internal message size sanity check. Transfers from controller
+  to targets have been streaming since 0.2.0.
 
-* :gh:commit:`7ae926b3`: the Ansible :ans:mod:`lineinfile` began leaking
-  writable temporary file descriptors since Ansible 2.7.0. When
-  :ans:mod:`~lineinfile` was used to create or modify a script, and that script
-  was later executed, the execution could fail with "*text file busy*" due to
-  the leaked descriptor. Temporary descriptors are now tracked and cleaned up
-  on exit for all modules.
+* :gh:commit:`7ae926b3`: the :ans:mod:`lineinfile` leaks writable temporary
+  file descriptors since Ansible 2.7.0. When :ans:mod:`~lineinfile` created or
+  modified a script, and that script was later executed, the execution could
+  fail with "*text file busy*". Temporary descriptors are now tracked and
+  cleaned up on exit for all modules.
 
 
 Core Library
 ~~~~~~~~~~~~
 
-* Log readability is improving, and many :func:`repr` strings are more
-  descriptive. The old pseudo-function-call format is slowly migrating to
-  human-readable output where possible. For example,
-  *"Stream(ssh:123).connect()"* might be written *"connecting to ssh:123"*.
+* Log readability is improving and many :func:`repr` strings are more
+  descriptive. The old pseudo-function-call format is migrating to
+  readable output where possible. For example, *"Stream(ssh:123).connect()"*
+  might be written *"connecting to ssh:123"*.

 * In preparation for reducing default log output, many messages are delivered
   to per-component loggers, including messages originating from children,
   enabling :mod:`logging` aggregation to function as designed. An importer
   message like::
 
       12:00:00 D mitogen.ctx.remotehost mitogen: loading module "foo"
 
   Might instead be logged to the ``mitogen.importer.[remotehost]`` logger::
 
       12:00:00 D mitogen.importer.[remotehost] loading module "foo"
 
   Allowing a filter or handler for ``mitogen.importer`` to select that logger
-  in every process.
+  in every process. This introduces a small risk of leaking memory in
+  long-lived programs, as logger objects are internally persistent.
 
 * :func:`bytearray` was removed from the list of supported serialization types.
   It was never portable between Python versions, unused, and never made much
   sense to support.
@@ -183,27 +172,24 @@ Core Library
   asynchronous context.
 
 * :gh:issue:`419`: the internal
-  :class:`~mitogen.core.Stream` has been refactored into 7 new classes,
+  :class:`~mitogen.core.Stream` has been refactored into many new classes,
   modularizing protocol behaviour, output buffering, line-oriented input
   parsing, option handling and connection management. Connection setup is
-  internally asynchronous, laying almost all the groundwork needed for fully
-  asynchronous connect, proxied Ansible become plug-ins, and integrating
-  `libssh <https://www.libssh.org/>`_.
+  internally asynchronous, laying most groundwork for fully asynchronous
+  connect, proxied Ansible become plug-ins, and in-process SSH.
 
 * :gh:issue:`169`, :gh:issue:`419`: zombie subprocess reaping
-  has vastly improved, by using timers to efficiently poll for a slow child to
-  finish exiting, and delaying broker shutdown while any subprocess remains.
-  Polling avoids relying on process-global configuration such as a `SIGCHLD`
-  handler, or :func:`signal.set_wakeup_fd` available in modern Python.
+  has vastly improved, by using timers to efficiently poll for a child to exit,
+  and delaying shutdown while any subprocess remains. Polling avoids
+  process-global configuration such as a `SIGCHLD` handler, or
+  :func:`signal.set_wakeup_fd` available in modern Python.
 
-* :gh:issue:`256`,
-  :gh:issue:`419`: most :func:`os.dup` use
-  was eliminated, along with almost all manual file descriptor management.
-  Descriptors are trapped in :func:`os.fdopen` objects at creation, ensuring a
-  leaked object will close itself, and ensuring every descriptor is fused to a
-  `closed` flag, preventing historical bugs where a double close could destroy
-  descriptors belonging to unrelated streams.
+* :gh:issue:`256`, :gh:issue:`419`: most :func:`os.dup` use was eliminated,
+  along with most manual file descriptor management. Descriptors are trapped in
+  :func:`os.fdopen` objects at creation, ensuring a leaked object will close
+  itself, and ensuring every descriptor is fused to a `closed` flag, preventing
+  historical bugs where a double close could destroy unrelated descriptors.

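The descriptor-ownership pattern described in the :func:`os.dup` entry above
can be sketched with plain CPython file objects; this illustrates the general
technique only, not Mitogen's internal classes, and the path is chosen purely
for the example::

    import os

    fd = os.open('/etc/hostname', os.O_RDONLY)
    fp = os.fdopen(fd, 'rb')    # the file object now owns fd

    data = fp.read()
    fp.close()                  # releases fd exactly once
    fp.close()                  # a second close is a harmless no-op, because
    assert fp.closed            # the object records that fd is already gone,
                                # unlike a second os.close(fd), which could hit
                                # an unrelated descriptor reused in between.
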
 * :gh:issue:`533`: routing accounts for a race between a parent (or cousin)
   sending a message to a child via an
diff --git a/docs/conf.py b/docs/conf.py
index dc5c6c93..1a6a117b 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -55,11 +55,11 @@ domainrefs = {
         'url': 'https://github.com/dw/mitogen/pull/%s',
     },
     'ans:mod': {
-        'text': '%s Module',
+        'text': '%s module',
         'url': 'https://docs.ansible.com/ansible/latest/modules/%s_module.html',
     },
     'ans:conn': {
-        'text': '%s Connection Plug-in',
+        'text': '%s connection plug-in',
         'url': 'https://docs.ansible.com/ansible/latest/plugins/connection/%s.html',
     },
     'freebsd:man2': {

From b21be478e40e16e17d64a61c524d7c5458c77e8d Mon Sep 17 00:00:00 2001
From: David Wilson <dw@botanicus.net>
Date: Sun, 18 Aug 2019 16:03:30 +0100
Subject: [PATCH 381/383] issue #627: add test and tweak Reaper behaviour.

---
 mitogen/parent.py    |  4 ++--
 tests/reaper_test.py | 54 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 56 insertions(+), 2 deletions(-)
 create mode 100644 tests/reaper_test.py

diff --git a/mitogen/parent.py b/mitogen/parent.py
index 6e99bb66..1c3e1874 100644
--- a/mitogen/parent.py
+++ b/mitogen/parent.py
@@ -2607,9 +2607,9 @@ class Reaper(object):
 
         if not self.kill:
             pass
-        elif self._tries == 1:
+        elif self._tries == 2:
             self._signal_child(signal.SIGTERM)
-        elif self._tries == 5:  # roughly 4 seconds
+        elif self._tries == 6:  # roughly 4 seconds
             self._signal_child(signal.SIGKILL)
 
diff --git a/tests/reaper_test.py b/tests/reaper_test.py
new file mode 100644
index 00000000..e78fdbf2
--- /dev/null
+++ b/tests/reaper_test.py
@@ -0,0 +1,54 @@
+
+import signal
+import unittest2
+import testlib
+import mock
+
+import mitogen.parent
+
+
+class ReaperTest(testlib.TestCase):
+    @mock.patch('os.kill')
+    def test_calc_delay(self, kill):
+        broker = mock.Mock()
+        proc = mock.Mock()
+        proc.poll.return_value = None
+        reaper = mitogen.parent.Reaper(broker, proc, True, True)
+        self.assertEquals(50, int(1000 * reaper._calc_delay(0)))
+        self.assertEquals(86, int(1000 * reaper._calc_delay(1)))
+        self.assertEquals(147, int(1000 * reaper._calc_delay(2)))
+        self.assertEquals(254, int(1000 * reaper._calc_delay(3)))
+        self.assertEquals(437, int(1000 * reaper._calc_delay(4)))
+        self.assertEquals(752, int(1000 * reaper._calc_delay(5)))
+        self.assertEquals(1294, int(1000 * reaper._calc_delay(6)))
+
+    @mock.patch('os.kill')
+    def test_reap_calls(self, kill):
+        broker = mock.Mock()
+        proc = mock.Mock()
+        proc.poll.return_value = None
+
+        reaper = mitogen.parent.Reaper(broker, proc, True, True)
+
+        reaper.reap()
+        self.assertEquals(0, kill.call_count)
+
+        reaper.reap()
+        self.assertEquals(1, kill.call_count)
+
+        reaper.reap()
+        reaper.reap()
+        reaper.reap()
+        self.assertEquals(1, kill.call_count)
+
+        reaper.reap()
+        self.assertEquals(2, kill.call_count)
+
+        self.assertEquals(kill.mock_calls, [
+            mock.call(proc.pid, signal.SIGTERM),
+            mock.call(proc.pid, signal.SIGKILL),
+        ])
+
+
+if __name__ == '__main__':
+    unittest2.main()

From 7a391022ef300fb27c42c14d08e7e9d9885acebd Mon Sep 17 00:00:00 2001
From: David Wilson <dw@botanicus.net>
Date: Sun, 18 Aug 2019 16:23:03 +0100
Subject: [PATCH 382/383] docs: update Changelog for 0.2.8.

---
 docs/changelog.rst | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/docs/changelog.rst b/docs/changelog.rst
index 84e73427..fe15ca27 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -15,12 +15,22 @@ Release Notes
 </style>
 
 
-v0.2.8 (unreleased)
+v0.2.9 (unreleased)
 -------------------
 
 To avail of fixes in an unreleased version, please download a ZIP file
 `directly from GitHub <https://github.com/dw/mitogen/>`_.
 
+*(no changes)*
+
+
+v0.2.8 (2019-08-18)
+-------------------
+
+This release includes Ansible 2.8 and SELinux support, fixes for two deadlocks,
+and major internal design overhauls in preparation for future functionality.
+
+
 Enhancements
 ~~~~~~~~~~~~
 

From c82112a34b057391c7d111c5325c992ac540a111 Mon Sep 17 00:00:00 2001
From: David Wilson <dw@botanicus.net>
Date: Sun, 18 Aug 2019 16:23:42 +0100
Subject: [PATCH 383/383] Bump version for release.

---
 mitogen/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mitogen/__init__.py b/mitogen/__init__.py
index 5e2e29b6..47e570ab 100644
--- a/mitogen/__init__.py
+++ b/mitogen/__init__.py
@@ -35,7 +35,7 @@ be expected. On the slave, it is built dynamically during startup.
 
 
 #: Library version as a tuple.
-__version__ = (0, 2, 7)
+__version__ = (0, 2, 8)
 
 #: This is :data:`False` in slave contexts. Previously it was used to prevent