Code cleanup in ansible-test. (#60318)

* Move ansible-test units code into separate dir.
* Fix --explain on "code smell" sanity tests.
* Add --strict-markers to pytest invocation (see the sketch after this list).
* Fix classification of units code changes.
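For context on the --strict-markers change: the flag makes pytest reject any marker that is not registered (in pytest.ini or a conftest.py), so a misspelled marker fails collection instead of being silently ignored. A minimal sketch of what that enforces, not part of this commit; the marker name and test below are hypothetical.

# test_marker_sketch.py -- hypothetical example, not taken from ansible-test
import pytest

# Under --strict-markers, "network" must be declared in the markers section of
# pytest.ini (or registered via pytest_configure); an unregistered or misspelled
# marker makes collection fail instead of being accepted silently.
@pytest.mark.network
def test_requires_network():
    assert True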
Matt Clay committed by GitHub
parent 1405e48157
commit 2a9964ede8

@@ -748,6 +748,21 @@ class PathMapper:
                'sanity': 'all',  # test infrastructure, run all sanity checks
            }

        if path.startswith('test/lib/ansible_test/_internal/units/'):
            return {
                'units': 'all',  # test infrastructure, run all unit tests
            }

        if path.startswith('test/lib/ansible_test/_data/units/'):
            return {
                'units': 'all',  # test infrastructure, run all unit tests
            }

        if path.startswith('test/lib/ansible_test/_data/pytest/'):
            return {
                'units': 'all',  # test infrastructure, run all unit tests
            }

        if path.startswith('test/lib/ansible_test/_data/requirements/'):
            if name in (
                'integration',
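A minimal sketch of the classification rule the three new branches implement: any changed file under these ansible-test directories is treated as unit test infrastructure, so the full unit test suite is scheduled. The standalone helper below is hypothetical and only mirrors the prefix matching; the real logic lives inside PathMapper.

# Hypothetical, simplified mirror of the prefix rules above; not part of ansible-test.
UNIT_TEST_INFRA_PREFIXES = (
    'test/lib/ansible_test/_internal/units/',
    'test/lib/ansible_test/_data/units/',
    'test/lib/ansible_test/_data/pytest/',
)

def classify_units_infra(path):
    """Return the 'run all unit tests' mapping when a changed path is unit test infrastructure."""
    if any(path.startswith(prefix) for prefix in UNIT_TEST_INFRA_PREFIXES):
        return {'units': 'all'}  # test infrastructure, run all unit tests
    return None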

@@ -33,7 +33,6 @@ from .executor import (
    command_posix_integration,
    command_network_integration,
    command_windows_integration,
    command_units,
    command_shell,
    SUPPORTED_PYTHON_VERSIONS,
    ApplicationWarning,
@@ -64,6 +63,10 @@ from .sanity import (
    sanity_get_tests,
)

from .units import (
    command_units,
)

from .target import (
    find_target_completion,
    walk_posix_integration_targets,

@@ -10,7 +10,6 @@ import re
import time
import textwrap
import functools
import sys
import hashlib
import difflib
import filecmp
@@ -62,8 +61,6 @@ from .util import (
    cmd_quote,
    ANSIBLE_LIB_ROOT,
    ANSIBLE_TEST_DATA_ROOT,
    get_available_python_versions,
    is_subdir,
)

from .util_common import (
@@ -97,7 +94,6 @@ from .target import (
    walk_posix_integration_targets,
    walk_network_integration_targets,
    walk_windows_integration_targets,
    walk_units_targets,
)

from .changes import (
@@ -120,7 +116,6 @@ from .config import (
    NetworkIntegrationConfig,
    PosixIntegrationConfig,
    ShellConfig,
    UnitsConfig,
    WindowsIntegrationConfig,
)
@@ -134,10 +129,6 @@ from .integration import (
    setup_common_temp_dir,
)

from .coverage_util import (
    coverage_context,
)

from .data import (
    data_context,
)
@@ -1353,108 +1344,6 @@ def command_integration_role(args, target, start_at_task, test_dir, inventory_pa
    intercept_command(args, cmd, target_name=target.name, env=env, cwd=cwd, temp_path=temp_path, module_coverage=module_coverage)


def command_units(args):
    """
    :type args: UnitsConfig
    """
    changes = get_changes_filter(args)
    require = args.require + changes
    include = walk_internal_targets(walk_units_targets(), args.include, args.exclude, require)

    paths = [target.path for target in include]
    remote_paths = [path for path in paths
                    if is_subdir(path, data_context().content.unit_module_path)
                    or is_subdir(path, data_context().content.unit_module_utils_path)]

    if not paths:
        raise AllTargetsSkipped()

    if args.python and args.python in REMOTE_ONLY_PYTHON_VERSIONS and not remote_paths:
        raise AllTargetsSkipped()

    if args.delegate:
        raise Delegate(require=changes, exclude=args.exclude)

    version_commands = []

    available_versions = get_available_python_versions(list(SUPPORTED_PYTHON_VERSIONS))

    for version in SUPPORTED_PYTHON_VERSIONS:
        # run all versions unless version given, in which case run only that version
        if args.python and version != args.python_version:
            continue

        if not args.python and version not in available_versions:
            display.warning("Skipping unit tests on Python %s due to missing interpreter." % version)
            continue

        if args.requirements_mode != 'skip':
            install_command_requirements(args, version)

        env = ansible_environment(args)

        cmd = [
            'pytest',
            '--boxed',
            '-r', 'a',
            '-n', str(args.num_workers) if args.num_workers else 'auto',
            '--color',
            'yes' if args.color else 'no',
            '-p', 'no:cacheprovider',
            '-c', os.path.join(ANSIBLE_TEST_DATA_ROOT, 'pytest.ini'),
            '--junit-xml',
            'test/results/junit/python%s-units.xml' % version,
        ]

        plugins = []

        if args.coverage:
            plugins.append('ansible_pytest_coverage')

        if data_context().content.collection:
            plugins.append('ansible_pytest_collections')

        if plugins:
            env['PYTHONPATH'] += ':%s' % os.path.join(ANSIBLE_TEST_DATA_ROOT, 'pytest/plugins')

            for plugin in plugins:
                cmd.extend(['-p', plugin])

        if args.collect_only:
            cmd.append('--collect-only')

        if args.verbosity:
            cmd.append('-' + ('v' * args.verbosity))

        if version in REMOTE_ONLY_PYTHON_VERSIONS:
            test_paths = remote_paths
        else:
            test_paths = paths

        if not test_paths:
            continue

        cmd.extend(test_paths)

        version_commands.append((version, cmd, env))

    if args.requirements_mode == 'only':
        sys.exit()

    for version, command, env in version_commands:
        check_pyyaml(args, version)

        display.info('Unit test with Python %s' % version)

        try:
            with coverage_context(args):
                intercept_command(args, command, target_name='units', env=env, python_version=version)
        except SubprocessError as ex:
            # pytest exits with status code 5 when all tests are skipped, which isn't an error for our use case
            if ex.status != 5:
                raise


def get_changes_filter(args):
    """
    :type args: TestConfig

@@ -765,6 +765,9 @@ class SanityCodeSmellTest(SanityTest):
            stderr = ex.stderr
            status = ex.status

        if args.explain:
            return SanitySuccess(self.name)

        if stdout and not stderr:
            if pattern:
                matches = parse_to_list_of_dict(pattern, stdout)
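The fix here is an early return: with --explain the underlying command is only displayed, never executed, so there is no output to parse and the check is reported as successful. A rough sketch of the guard pattern with hypothetical names, not the ansible-test implementation:

# Hypothetical sketch of the --explain guard; names and signature are illustrative.
def evaluate_code_smell(explain, name, stdout, stderr, parse_results):
    if explain:
        # nothing was executed in explain mode, so there is nothing to parse
        return ('success', name)
    # normal path: interpret the captured output as test results
    return parse_results(stdout, stderr)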

@@ -0,0 +1,155 @@
"""Execute unit tests using pytest."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import sys

from ..util import (
    ANSIBLE_TEST_DATA_ROOT,
    display,
    get_available_python_versions,
    is_subdir,
    SubprocessError,
)

from ..util_common import (
    intercept_command,
)

from ..ansible_util import (
    ansible_environment,
    check_pyyaml,
)

from ..target import (
    walk_internal_targets,
    walk_units_targets,
)

from ..config import (
    UnitsConfig,
)

from ..coverage_util import (
    coverage_context,
)

from ..data import (
    data_context,
)

from ..executor import (
    AllTargetsSkipped,
    Delegate,
    get_changes_filter,
    install_command_requirements,
    REMOTE_ONLY_PYTHON_VERSIONS,
    SUPPORTED_PYTHON_VERSIONS,
)


def command_units(args):
    """
    :type args: UnitsConfig
    """
    changes = get_changes_filter(args)
    require = args.require + changes
    include = walk_internal_targets(walk_units_targets(), args.include, args.exclude, require)

    paths = [target.path for target in include]
    remote_paths = [path for path in paths
                    if is_subdir(path, data_context().content.unit_module_path)
                    or is_subdir(path, data_context().content.unit_module_utils_path)]

    if not paths:
        raise AllTargetsSkipped()

    if args.python and args.python in REMOTE_ONLY_PYTHON_VERSIONS and not remote_paths:
        raise AllTargetsSkipped()

    if args.delegate:
        raise Delegate(require=changes, exclude=args.exclude)

    version_commands = []

    available_versions = get_available_python_versions(list(SUPPORTED_PYTHON_VERSIONS))

    for version in SUPPORTED_PYTHON_VERSIONS:
        # run all versions unless version given, in which case run only that version
        if args.python and version != args.python_version:
            continue

        if not args.python and version not in available_versions:
            display.warning("Skipping unit tests on Python %s due to missing interpreter." % version)
            continue

        if args.requirements_mode != 'skip':
            install_command_requirements(args, version)

        env = ansible_environment(args)

        cmd = [
            'pytest',
            '--boxed',
            '-r', 'a',
            '-n', str(args.num_workers) if args.num_workers else 'auto',
            '--color',
            'yes' if args.color else 'no',
            '-p', 'no:cacheprovider',
            '-c', os.path.join(ANSIBLE_TEST_DATA_ROOT, 'pytest.ini'),
            '--junit-xml',
            'test/results/junit/python%s-units.xml' % version,
        ]

        if version != '2.6':
            # added in pytest 4.5.0, which requires python 2.7+
            cmd.append('--strict-markers')

        plugins = []

        if args.coverage:
            plugins.append('ansible_pytest_coverage')

        if data_context().content.collection:
            plugins.append('ansible_pytest_collections')

        if plugins:
            env['PYTHONPATH'] += ':%s' % os.path.join(ANSIBLE_TEST_DATA_ROOT, 'pytest/plugins')

            for plugin in plugins:
                cmd.extend(['-p', plugin])

        if args.collect_only:
            cmd.append('--collect-only')

        if args.verbosity:
            cmd.append('-' + ('v' * args.verbosity))

        if version in REMOTE_ONLY_PYTHON_VERSIONS:
            test_paths = remote_paths
        else:
            test_paths = paths

        if not test_paths:
            continue

        cmd.extend(test_paths)

        version_commands.append((version, cmd, env))

    if args.requirements_mode == 'only':
        sys.exit()

    for version, command, env in version_commands:
        check_pyyaml(args, version)

        display.info('Unit test with Python %s' % version)

        try:
            with coverage_context(args):
                intercept_command(args, command, target_name='units', env=env, python_version=version)
        except SubprocessError as ex:
            # pytest exits with status code 5 when all tests are skipped, which isn't an error for our use case
            if ex.status != 5:
                raise