ansible-test - Replace pytest-forked (#80525)

- Unit tests now report warnings generated during test runs.
- Python 3.12 warnings about `os.fork` usage with threads (due to `pytest-xdist`) are suppressed (a standalone reproduction follows this list).
- Added integration tests to verify forked test behavior.
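For context, a minimal standalone reproduction of the suppressed warning (illustrative only, not part of the change; assumes CPython 3.12+ on a POSIX platform): calling `os.fork()` while a second thread is alive emits a `DeprecationWarning`, and `pytest-xdist` always leaves such a thread running via execnet.

    # Illustrative sketch, not from the commit: reproduce the CPython 3.12
    # fork-with-threads DeprecationWarning that the new plugin suppresses.
    import os
    import threading
    import warnings

    stop = threading.Event()
    thread = threading.Thread(target=stop.wait)  # keep a second thread alive, as pytest-xdist does
    thread.start()

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")

        pid = os.fork()  # warns: "This process ... is multi-threaded, use of fork() may lead to deadlocks in the child."

        if not pid:
            os._exit(0)  # the child does nothing

        os.waitpid(pid, 0)

    stop.set()
    thread.join()

    print([str(w.message) for w in caught])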

@@ -0,0 +1,5 @@
minor_changes:
- ansible-test - Replace the ``pytest-forked`` pytest plugin with a custom plugin.
bugfixes:
- ansible-test - Unit tests now report warnings generated during test runs.
  Previously only warnings generated during test collection were reported.

@@ -8,7 +8,7 @@ options=$("${TEST_DIR}"/../ansible-test/venv-pythons.py --only-versions)
IFS=', ' read -r -a pythons <<< "${options}"
for python in "${pythons[@]}"; do
-    if ansible-test units --color --truncate 0 --python "${python}" --requirements "${@}" 2>&1 | tee pytest.log; then
+    if ansible-test units --truncate 0 --python "${python}" --requirements "${@}" 2>&1 | tee pytest.log; then
        echo "Test did not fail as expected."
        exit 1
    fi

@@ -0,0 +1,5 @@
shippable/posix/group3 # runs in the distro test containers
shippable/generic/group1 # runs in the default test container
context/controller
needs/target/collection
needs/target/ansible-test

@@ -0,0 +1,43 @@
"""Unit tests to verify the functionality of the ansible-forked pytest plugin."""
from __future__ import absolute_import, division, print_function

__metaclass__ = type

import os
import pytest
import signal
import sys
import warnings

warnings.warn("This verifies that warnings generated during test collection are reported.")


@pytest.mark.xfail
def test_kill_xfail():
    os.kill(os.getpid(), signal.SIGKILL)  # causes pytest to report stdout and stderr


def test_kill():
    os.kill(os.getpid(), signal.SIGKILL)  # causes pytest to report stdout and stderr


@pytest.mark.xfail
def test_exception_xfail():
    sys.stdout.write("This stdout message should be hidden due to xfail.")
    sys.stderr.write("This stderr message should be hidden due to xfail.")

    raise Exception("This error is expected, but should be hidden due to xfail.")


def test_exception():
    sys.stdout.write("This stdout message should be reported since we're throwing an exception.")
    sys.stderr.write("This stderr message should be reported since we're throwing an exception.")

    raise Exception("This error is expected and should be visible.")


def test_warning():
    warnings.warn("This verifies that warnings generated at test time are reported.")


def test_passed():
    pass

@@ -0,0 +1,45 @@
#!/usr/bin/env bash

source ../collection/setup.sh

set -x

options=$("${TEST_DIR}"/../ansible-test/venv-pythons.py --only-versions)
IFS=', ' read -r -a pythons <<< "${options}"

for python in "${pythons[@]}"; do
    echo "*** Checking Python ${python} ***"

    if ansible-test units --truncate 0 --target-python "venv/${python}" "${@}" > output.log 2>&1 ; then
        cat output.log
        echo "Unit tests on Python ${python} did not fail as expected. See output above."
        exit 1
    fi

    cat output.log
    echo "Unit tests on Python ${python} failed as expected. See output above. Checking for expected output ..."

    # Verify that the appropriate tests passed, failed or xfailed.
    grep 'PASSED tests/unit/plugins/modules/test_ansible_forked.py::test_passed' output.log
    grep 'PASSED tests/unit/plugins/modules/test_ansible_forked.py::test_warning' output.log
    grep 'XFAIL tests/unit/plugins/modules/test_ansible_forked.py::test_kill_xfail' output.log
    grep 'FAILED tests/unit/plugins/modules/test_ansible_forked.py::test_kill' output.log
    grep 'FAILED tests/unit/plugins/modules/test_ansible_forked.py::test_exception' output.log
    grep 'XFAIL tests/unit/plugins/modules/test_ansible_forked.py::test_exception_xfail' output.log

    # Verify that warnings are properly surfaced.
    grep 'UserWarning: This verifies that warnings generated at test time are reported.' output.log
    grep 'UserWarning: This verifies that warnings generated during test collection are reported.' output.log

    # Verify there are no unexpected warnings.
    grep 'Warning' output.log | grep -v 'UserWarning: This verifies that warnings generated ' && exit 1

    # Verify that details from failed tests are properly surfaced.
    grep "^Test CRASHED with exit code -9.$" output.log
    grep "^This stdout message should be reported since we're throwing an exception.$" output.log
    grep "^This stderr message should be reported since we're throwing an exception.$" output.log
    grep '^> *raise Exception("This error is expected and should be visible.")$' output.log
    grep "^E *Exception: This error is expected and should be visible.$" output.log

    echo "*** Done Checking Python ${python} ***"
done

@@ -2,5 +2,4 @@ mock
pytest
pytest-mock
pytest-xdist
-pytest-forked
pyyaml # required by the collection loader (only needed for collections)

@@ -253,7 +253,6 @@ def command_units(args: UnitsConfig) -> None:
    cmd = [
        'pytest',
-        '--forked',
        '-r', 'a',
        '-n', str(args.num_workers) if args.num_workers else 'auto',
        '--color', 'yes' if args.color else 'no',
@@ -275,6 +274,8 @@ def command_units(args: UnitsConfig) -> None:
    if data_context().content.collection:
        plugins.append('ansible_pytest_collections')

+    plugins.append('ansible_forked')
+
    if plugins:
        env['PYTHONPATH'] += ':%s' % os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'pytest/plugins')
        env['PYTEST_PLUGINS'] = ','.join(plugins)
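For debugging the plugin outside of ansible-test (with pytest-xdist disabled, as the TIP in the plugin header below suggests), a hypothetical sketch of an equivalent manual invocation; the plugin directory path is an assumption based on how ANSIBLE_TEST_TARGET_ROOT is joined above:

    # Hypothetical debugging helper, not part of the change: load the
    # ansible_forked plugin by module name via PYTEST_PLUGINS, with
    # pytest-xdist disabled (-p no:xdist) to simplify debugging.
    import os
    import subprocess

    env = dict(os.environ)
    plugin_dir = "test/lib/ansible_test/_util/target/pytest/plugins"  # assumed path within an ansible checkout
    env["PYTHONPATH"] = os.pathsep.join(filter(None, [env.get("PYTHONPATH"), plugin_dir]))
    env["PYTEST_PLUGINS"] = "ansible_forked"

    subprocess.run(["pytest", "-p", "no:xdist", "-r", "a", "tests/unit/"], env=env, check=False)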

@@ -57,3 +57,5 @@ preferred-modules =
# Listing them here makes it possible to enable the import-error check.
ignored-modules =
    py,
+    pytest,
+    _pytest.runner,

@@ -0,0 +1,103 @@
"""Run each test in its own fork. PYTEST_DONT_REWRITE"""
# MIT License (see licenses/MIT-license.txt or https://opensource.org/licenses/MIT)
# Based on code originally from:
# https://github.com/pytest-dev/pytest-forked
# https://github.com/pytest-dev/py
# TIP: Disable pytest-xdist when debugging internal errors in this plugin.
from __future__ import absolute_import, division, print_function

__metaclass__ = type

import os
import pickle
import tempfile
import warnings

from pytest import Item, hookimpl

try:
    from pytest import TestReport
except ImportError:
    from _pytest.runner import TestReport  # Backwards compatibility with pytest < 7. Remove once Python 2.7 is not supported.

from _pytest.runner import runtestprotocol


@hookimpl(tryfirst=True)
def pytest_runtest_protocol(item, nextitem):  # type: (Item, Item | None) -> object | None
    """Entry point for enabling this plugin."""
    # This is needed because pytest-xdist creates an OS thread (using execnet).
    # See: https://github.com/pytest-dev/execnet/blob/d6aa1a56773c2e887515d63e50b1d08338cb78a7/execnet/gateway_base.py#L51
    warnings.filterwarnings("ignore", "^This process .* is multi-threaded, use of .* may lead to deadlocks in the child.$", DeprecationWarning)

    item_hook = item.ihook
    item_hook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)

    reports = run_item(item, nextitem)

    for report in reports:
        item_hook.pytest_runtest_logreport(report=report)

    item_hook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)

    return True


def run_item(item, nextitem):  # type: (Item, Item | None) -> list[TestReport]
    """Run the item in a child process and return a list of reports."""
    with tempfile.NamedTemporaryFile() as temp_file:
        pid = os.fork()

        if not pid:
            temp_file.delete = False  # do not delete the file during cleanup in the child, since the parent still needs to read it
            run_child(item, nextitem, temp_file.name)

        return run_parent(item, pid, temp_file.name)


def run_child(item, nextitem, result_path):  # type: (Item, Item | None, str) -> None
    """Run the item, record the result and exit. Called in the child process."""
    with warnings.catch_warnings(record=True) as captured_warnings:
        reports = runtestprotocol(item, nextitem=nextitem, log=False)

    with open(result_path, "wb") as result_file:
        pickle.dump((reports, captured_warnings), result_file)

    os._exit(0)  # noqa


def run_parent(item, pid, result_path):  # type: (Item, int, str) -> list[TestReport]
    """Wait for the child process to exit and return the test reports. Called in the parent process."""
    exit_code = waitstatus_to_exitcode(os.waitpid(pid, 0)[1])

    if exit_code:
        reason = "Test CRASHED with exit code {}.".format(exit_code)
        report = TestReport(item.nodeid, item.location, {x: 1 for x in item.keywords}, "failed", reason, "call", user_properties=item.user_properties)

        if item.get_closest_marker("xfail"):
            # The test was expected to fail, so report the crash as an expected failure instead of a hard failure.
            report.outcome = "skipped"
            report.wasxfail = reason

        reports = [report]
    else:
        with open(result_path, "rb") as result_file:
            reports, captured_warnings = pickle.load(result_file)  # type: list[TestReport], list[warnings.WarningMessage]

        for warning in captured_warnings:
            # Replay warnings captured in the child so they are reported by the parent.
            warnings.warn_explicit(warning.message, warning.category, warning.filename, warning.lineno)

    return reports


def waitstatus_to_exitcode(status):  # type: (int) -> int
    """Convert a wait status to an exit code."""
    # This function was added in Python 3.9.
    # See: https://docs.python.org/3/library/os.html#os.waitstatus_to_exitcode
    if os.WIFEXITED(status):
        return os.WEXITSTATUS(status)

    if os.WIFSIGNALED(status):
        return -os.WTERMSIG(status)

    raise ValueError(status)
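As a quick sanity check of the fallback above (illustrative only, not part of the commit; assumes the `waitstatus_to_exitcode` defined in this module is in scope), a child killed by SIGKILL maps to the negative signal number, which is exactly the `Test CRASHED with exit code -9.` line the integration test greps for:

    # Illustrative only: a SIGKILLed child yields -signal.SIGKILL (-9),
    # which run_parent() turns into "Test CRASHED with exit code -9."
    import os
    import signal

    pid = os.fork()

    if not pid:
        os.kill(os.getpid(), signal.SIGKILL)  # the child kills itself and never returns

    status = os.waitpid(pid, 0)[1]
    assert waitstatus_to_exitcode(status) == -signal.SIGKILL  # assumes waitstatus_to_exitcode from above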