mirror of https://github.com/ansible/ansible.git
Add code coverage target analysis to ansible-test. (#67141)
* Refactor coverage file enumeration. * Relocate sanitize_filename function. * Support sets when writing JSON files. * Generalize setting of info_stderr mode. * Split out coverage path checking. * Split out collection regex logic. * Improve sanitize_filename type hints and docs. * Clean up coverage erase command. * Fix docs and type hints for initialize_coverage. * Update type hints on CoverageConfig. * Split out logic for finding modules. * Split out arc enumeration. * Split out powershell coverage enumeration. * Raise verbosity level of empty coverage warnings. * Add code coverage target analysis to ansible-test.
parent
68b981ae21
commit
5e68bb3d93
@ -0,0 +1,2 @@
|
||||
minor_changes:
|
||||
- "ansible-test - Added an ``ansible-test coverage analyze targets`` command to analyze integration test code coverage by test target."
|
||||
@ -0,0 +1,19 @@
|
||||
"""Common logic for the `coverage analyze` subcommand."""
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from ... import types as t
|
||||
|
||||
from .. import (
|
||||
CoverageConfig,
|
||||
)
|
||||
|
||||
|
||||
class CoverageAnalyzeConfig(CoverageConfig):
    """Configuration shared by all `coverage analyze` subcommands."""
    def __init__(self, args):  # type: (t.Any) -> None
        super(CoverageAnalyzeConfig, self).__init__(args)

        # Route log messages to stderr so they cannot mix with file output when `/dev/stdout` is used as the output file.
        # This could become the default behavior in the future, instead of depending on the command or options used.
        self.info_stderr = True
|
||||
@ -0,0 +1,115 @@
|
||||
"""Analyze integration test target code coverage."""
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
|
||||
from .... import types as t
|
||||
|
||||
from ....io import (
|
||||
read_json_file,
|
||||
write_json_file,
|
||||
)
|
||||
|
||||
from ....util import (
|
||||
ApplicationError,
|
||||
display,
|
||||
)
|
||||
|
||||
from .. import (
|
||||
CoverageAnalyzeConfig,
|
||||
)
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
Arcs = t.Dict[str, t.Dict[t.Tuple[int, int], t.Set[int]]]
|
||||
Lines = t.Dict[str, t.Dict[int, t.Set[int]]]
|
||||
TargetIndexes = t.Dict[str, int]
|
||||
TargetSetIndexes = t.Dict[t.FrozenSet[int], int]
|
||||
|
||||
|
||||
def make_report(target_indexes, arcs, lines):  # type: (TargetIndexes, Arcs, Lines) -> t.Dict[str, t.Any]
    """Condense target indexes, arcs and lines into a compact report."""
    set_indexes = {}  # type: TargetSetIndexes
    arc_refs = {}
    line_refs = {}

    # replace each covered target set with a small integer reference into the shared target_sets table
    for path, data in arcs.items():
        arc_refs[path] = dict((format_arc(arc), get_target_set_index(indexes, set_indexes)) for arc, indexes in data.items())

    for path, data in lines.items():
        line_refs[path] = dict((line, get_target_set_index(indexes, set_indexes)) for line, indexes in data.items())

    # target names and target sets are ordered by their assigned indexes so they can be referenced by position
    report = dict(
        targets=[name for name, index in sorted(target_indexes.items(), key=lambda kvp: kvp[1])],
        target_sets=[sorted(data) for data, index in sorted(set_indexes.items(), key=lambda kvp: kvp[1])],
        arcs=arc_refs,
        lines=line_refs,
    )

    return report
|
||||
|
||||
|
||||
def load_report(report):  # type: (t.Dict[str, t.Any]) -> t.Tuple[t.List[str], Arcs, Lines]
    """Extract target indexes, arcs and lines from an existing report."""
    try:
        target_indexes = report['targets']  # type: t.List[str]
        target_sets = report['target_sets']  # type: t.List[t.List[int]]
        arc_data = report['arcs']  # type: t.Dict[str, t.Dict[str, int]]
        line_data = report['lines']  # type: t.Dict[str, t.Dict[int, int]]
    except KeyError as ex:
        raise ApplicationError('Document is missing key "%s".' % ex.args)
    except TypeError:
        raise ApplicationError('Document is type "%s" instead of "dict".' % type(report).__name__)

    # expand the compact target set references back into sets of target indexes
    arcs = {}  # type: Arcs
    lines = {}  # type: Lines

    for path, data in arc_data.items():
        arcs[path] = dict((parse_arc(arc), set(target_sets[index])) for arc, index in data.items())

    for path, data in line_data.items():
        lines[path] = dict((int(line), set(target_sets[index])) for line, index in data.items())

    return target_indexes, arcs, lines
|
||||
|
||||
|
||||
def read_report(path):  # type: (str) -> t.Tuple[t.List[str], Arcs, Lines]
    """Read a JSON report from disk."""
    try:
        report = read_json_file(path)
    except Exception as ex:
        # surface parse/IO failures as a user-facing error that names the offending file
        raise ApplicationError('File "%s" is not valid JSON: %s' % (path, ex))

    try:
        return load_report(report)
    except ApplicationError as ex:
        # re-raise structural errors from load_report with the file path added for context
        raise ApplicationError('File "%s" is not an aggregated coverage data file. %s' % (path, ex))
|
||||
|
||||
|
||||
def write_report(args, report, path):  # type: (CoverageAnalyzeTargetsConfig, t.Dict[str, t.Any], str) -> None
    """Write a JSON report to disk."""
    if args.explain:
        return

    write_json_file(path, report, formatted=False)

    # a file may appear in the arc data, the line data, or both -- count each path once
    covered_file_count = len(set(report['arcs'].keys()) | set(report['lines'].keys()))

    display.info('Generated %d byte report with %d targets covering %d files.' % (
        os.path.getsize(path), len(report['targets']), covered_file_count,
    ), verbosity=1)
|
||||
|
||||
|
||||
def format_arc(value):  # type: (t.Tuple[int, int]) -> str
    """Format an arc tuple as a string."""
    start, end = value
    return '%d:%d' % (start, end)
|
||||
|
||||
|
||||
def parse_arc(value):  # type: (str) -> t.Tuple[int, int]
    """Parse an arc string into a tuple."""
    start, end = value.split(':')
    return int(start), int(end)
|
||||
|
||||
|
||||
def get_target_set_index(data, target_set_indexes):  # type: (t.Set[int], TargetSetIndexes) -> int
    """Find or add the target set in the result set and return the target set index."""
    key = frozenset(data)  # sets are unhashable; the frozen form is the dictionary key

    if key not in target_set_indexes:
        # new sets are assigned the next sequential index
        target_set_indexes[key] = len(target_set_indexes)

    return target_set_indexes[key]
|
||||
|
||||
|
||||
def get_target_index(name, target_indexes):  # type: (str, TargetIndexes) -> int
    """Find or add the target in the result set and return the target index."""
    if name not in target_indexes:
        # new targets are assigned the next sequential index
        target_indexes[name] = len(target_indexes)

    return target_indexes[name]
|
||||
|
||||
|
||||
class CoverageAnalyzeTargetsConfig(CoverageAnalyzeConfig):
    """Configuration for the `coverage analyze targets` command."""
    def __init__(self, args):  # type: (t.Any) -> None
        super(CoverageAnalyzeTargetsConfig, self).__init__(args)

        # info_stderr is already enabled by the CoverageAnalyzeConfig base class, so it is not set again here
|
||||
@ -0,0 +1,63 @@
|
||||
"""Combine integration test target code coverage reports."""
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from .... import types as t
|
||||
|
||||
from . import (
|
||||
CoverageAnalyzeTargetsConfig,
|
||||
get_target_index,
|
||||
make_report,
|
||||
read_report,
|
||||
write_report,
|
||||
)
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from . import (
|
||||
Arcs,
|
||||
Lines,
|
||||
TargetIndexes,
|
||||
)
|
||||
|
||||
|
||||
def command_coverage_analyze_targets_combine(args):  # type: (CoverageAnalyzeTargetsCombineConfig) -> None
    """Combine integration test target code coverage reports."""
    combined_target_indexes = {}  # type: TargetIndexes
    combined_path_arcs = {}  # type: Arcs
    combined_path_lines = {}  # type: Lines

    # fold every input report into the combined data set, re-mapping target indexes as targets are encountered
    for report_path in args.input_files:
        covered_targets, covered_path_arcs, covered_path_lines = read_report(report_path)

        merge_indexes(covered_path_arcs, covered_targets, combined_path_arcs, combined_target_indexes)
        merge_indexes(covered_path_lines, covered_targets, combined_path_lines, combined_target_indexes)

    report = make_report(combined_target_indexes, combined_path_arcs, combined_path_lines)
    write_report(args, report, args.output_file)
|
||||
|
||||
|
||||
def merge_indexes(
        source_data,  # type: t.Dict[str, t.Dict[t.Any, t.Set[int]]]
        source_index,  # type: t.List[str]
        combined_data,  # type: t.Dict[str, t.Dict[t.Any, t.Set[int]]]
        combined_index,  # type: TargetIndexes
):  # type: (...) -> None
    """Merge indexes from the source into the combined data set (arcs or lines)."""
    for path, points in source_data.items():
        merged_points = combined_data.setdefault(path, {})

        for point, target_indexes in points.items():
            merged_targets = merged_points.setdefault(point, set())
            # translate each source index to the combined index by looking up the target name
            merged_targets.update(get_target_index(source_index[index], combined_index) for index in target_indexes)
|
||||
|
||||
|
||||
class CoverageAnalyzeTargetsCombineConfig(CoverageAnalyzeTargetsConfig):
    """Configuration for the `coverage analyze targets combine` command."""
    def __init__(self, args): # type: (t.Any) -> None
        super(CoverageAnalyzeTargetsCombineConfig, self).__init__(args)

        # the CLI option name is singular (`input_file`) but collects multiple values, hence the plural attribute name
        self.input_files = args.input_file # type: t.List[str]
        self.output_file = args.output_file # type: str
|
||||
@ -0,0 +1,58 @@
|
||||
"""Expand target names in an aggregated coverage file."""
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from .... import types as t
|
||||
|
||||
from ....io import (
|
||||
SortedSetEncoder,
|
||||
write_json_file,
|
||||
)
|
||||
|
||||
from . import (
|
||||
CoverageAnalyzeTargetsConfig,
|
||||
format_arc,
|
||||
read_report,
|
||||
)
|
||||
|
||||
|
||||
def command_coverage_analyze_targets_expand(args):  # type: (CoverageAnalyzeTargetsExpandConfig) -> None
    """Expand target names in an aggregated coverage file."""
    covered_targets, covered_path_arcs, covered_path_lines = read_report(args.input_file)

    report = dict(
        arcs=expand_indexes(covered_path_arcs, covered_targets, format_arc),
        lines=expand_indexes(covered_path_lines, covered_targets, str),
    )

    if args.explain:
        return

    # SortedSetEncoder serializes the sets of target names as sorted lists
    write_json_file(args.output_file, report, encoder=SortedSetEncoder)
|
||||
|
||||
|
||||
def expand_indexes(
        source_data,  # type: t.Dict[str, t.Dict[t.Any, t.Set[int]]]
        source_index,  # type: t.List[str]
        format_func,  # type: t.Callable[[t.Any], str]
):  # type: (...) -> t.Dict[str, t.Dict[t.Any, t.Set[str]]]
    """Expand target indexes in the source data into target names, formatting each data point with format_func (arcs or lines)."""
    # fixes: the Callable type comment used t.Callable[t.Tuple[t.Any], str], which is invalid -- argument types must be a list
    # fixes: the docstring was copied from merge_indexes and described merging rather than expanding
    expanded_data = {}  # type: t.Dict[str, t.Dict[t.Any, t.Set[str]]]

    for covered_path, covered_points in source_data.items():
        expanded_points = expanded_data.setdefault(covered_path, {})

        for covered_point, covered_target_indexes in covered_points.items():
            expanded_point = expanded_points.setdefault(format_func(covered_point), set())

            for covered_target_index in covered_target_indexes:
                # replace each integer index with the target name it refers to
                expanded_point.add(source_index[covered_target_index])

    return expanded_data
|
||||
|
||||
|
||||
class CoverageAnalyzeTargetsExpandConfig(CoverageAnalyzeTargetsConfig):
    """Configuration for the `coverage analyze targets expand` command."""
    def __init__(self, args): # type: (t.Any) -> None
        super(CoverageAnalyzeTargetsExpandConfig, self).__init__(args)

        # path of the aggregated coverage file to expand
        self.input_file = args.input_file # type: str
        # path where the expanded (target names instead of indexes) report is written
        self.output_file = args.output_file # type: str
|
||||
@ -0,0 +1,143 @@
|
||||
"""Analyze code coverage data to determine which integration test targets provide coverage for each arc or line."""
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
|
||||
from .... import types as t
|
||||
|
||||
from ....encoding import (
|
||||
to_text,
|
||||
)
|
||||
|
||||
from ....data import (
|
||||
data_context,
|
||||
)
|
||||
|
||||
from ....util_common import (
|
||||
ResultType,
|
||||
)
|
||||
|
||||
from ... import (
|
||||
enumerate_powershell_lines,
|
||||
enumerate_python_arcs,
|
||||
get_collection_path_regexes,
|
||||
get_powershell_coverage_files,
|
||||
get_python_coverage_files,
|
||||
get_python_modules,
|
||||
initialize_coverage,
|
||||
PathChecker,
|
||||
)
|
||||
|
||||
from . import (
|
||||
CoverageAnalyzeTargetsConfig,
|
||||
get_target_index,
|
||||
make_report,
|
||||
write_report,
|
||||
)
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from . import (
|
||||
Arcs,
|
||||
Lines,
|
||||
TargetIndexes,
|
||||
)
|
||||
|
||||
|
||||
def command_coverage_analyze_targets_generate(args):  # type: (CoverageAnalyzeTargetsGenerateConfig) -> None
    """Analyze code coverage data to determine which integration test targets provide coverage for each arc or line."""
    root = data_context().content.root
    target_indexes = {}  # type: TargetIndexes

    # store paths relative to the content root so reports are portable between systems
    arcs = dict((os.path.relpath(path, root), data) for path, data in analyze_python_coverage(args, target_indexes).items())
    lines = dict((os.path.relpath(path, root), data) for path, data in analyze_powershell_coverage(args, target_indexes).items())

    write_report(args, make_report(target_indexes, arcs, lines), args.output_file)
|
||||
|
||||
|
||||
def analyze_python_coverage(
        args,  # type: CoverageAnalyzeTargetsConfig
        target_indexes,  # type: TargetIndexes
):  # type: (...) -> Arcs
    """Analyze Python code coverage."""
    results = {}  # type: Arcs
    collection_search_re, collection_sub_re = get_collection_path_regexes()
    modules = get_python_modules()
    python_files = get_python_coverage_files()
    coverage = initialize_coverage(args)

    for python_file in python_files:
        # only coverage produced by integration tests is attributable to a test target
        if not is_integration_coverage_file(python_file):
            continue

        target_index = get_target_index(get_target_name(python_file), target_indexes)

        for filename, covered_arcs in enumerate_python_arcs(python_file, coverage, modules, collection_search_re, collection_sub_re):
            file_arcs = results.setdefault(filename, {})

            # record this target against every arc it covered
            for covered_arc in covered_arcs:
                file_arcs.setdefault(covered_arc, set()).add(target_index)

    prune_invalid_filenames(args, results, collection_search_re=collection_search_re)

    return results
|
||||
|
||||
|
||||
def analyze_powershell_coverage(
        args,  # type: CoverageAnalyzeTargetsConfig
        target_indexes,  # type: TargetIndexes
):  # type: (...) -> Lines
    """Analyze PowerShell code coverage."""
    results = {}  # type: Lines
    powershell_files = get_powershell_coverage_files()

    for powershell_file in powershell_files:
        # only coverage produced by integration tests is attributable to a test target
        if not is_integration_coverage_file(powershell_file):
            continue

        target_index = get_target_index(get_target_name(powershell_file), target_indexes)

        for filename, hits in enumerate_powershell_lines(powershell_file):
            file_lines = results.setdefault(filename, {})

            # record this target against every line it covered
            for covered_line in hits:
                file_lines.setdefault(covered_line, set()).add(target_index)

    prune_invalid_filenames(args, results)

    return results
|
||||
|
||||
|
||||
def prune_invalid_filenames(
    args, # type: CoverageAnalyzeTargetsConfig
    results, # type: t.Dict[str, t.Any]
    collection_search_re=None, # type: t.Optional[str]
): # type: (...) -> None
    """Remove invalid filenames from the given result set."""
    # NOTE(review): collection_search_re appears to be a compiled regex (it comes from get_collection_path_regexes), not a str -- confirm the type comment
    path_checker = PathChecker(args, collection_search_re)

    # materialize the key list first since entries are deleted during iteration
    for path in list(results.keys()):
        if not path_checker.check_path(path):
            del results[path]
|
||||
|
||||
|
||||
def get_target_name(path):  # type: (str) -> str
    """Extract the test target name from the given coverage path."""
    # coverage filenames use `=` separated fields with the target name in the second field
    filename = os.path.basename(path)
    return to_text(filename.split('=')[1])
|
||||
|
||||
|
||||
def is_integration_coverage_file(path):  # type: (str) -> bool
    """Returns True if the coverage file came from integration tests, otherwise False."""
    # the first `=` separated field of the coverage filename identifies the test command that produced it
    command = os.path.basename(path).split('=')[0]
    return command in ('integration', 'windows-integration', 'network-integration')
|
||||
|
||||
|
||||
class CoverageAnalyzeTargetsGenerateConfig(CoverageAnalyzeTargetsConfig):
    """Configuration for the `coverage analyze targets generate` command."""
    def __init__(self, args): # type: (t.Any) -> None
        super(CoverageAnalyzeTargetsGenerateConfig, self).__init__(args)

        # directory containing raw coverage files; falls back to the standard coverage result path when not given
        self.input_dir = args.input_dir or ResultType.COVERAGE.path # type: str
        self.output_file = args.output_file # type: str
|
||||
@ -0,0 +1,110 @@
|
||||
"""Identify aggregated coverage in one file missing from another."""
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
import os
|
||||
|
||||
from .... import types as t
|
||||
|
||||
from ....encoding import (
|
||||
to_bytes,
|
||||
)
|
||||
|
||||
from . import (
|
||||
CoverageAnalyzeTargetsConfig,
|
||||
get_target_index,
|
||||
make_report,
|
||||
read_report,
|
||||
write_report,
|
||||
)
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from . import (
|
||||
TargetIndexes,
|
||||
)
|
||||
|
||||
TargetKey = t.TypeVar('TargetKey', int, t.Tuple[int, int])
|
||||
|
||||
|
||||
def command_coverage_analyze_targets_missing(args):  # type: (CoverageAnalyzeTargetsMissingConfig) -> None
    """Identify aggregated coverage in one file missing from another."""
    from_targets, from_path_arcs, from_path_lines = read_report(args.from_file)
    to_targets, to_path_arcs, to_path_lines = read_report(args.to_file)
    target_indexes = {}  # type: TargetIndexes

    if args.only_gaps:
        # report only arcs/lines with no coverage at all in the "to" report
        arcs = find_gaps(from_path_arcs, from_targets, to_path_arcs, target_indexes, args.only_exists)
        lines = find_gaps(from_path_lines, from_targets, to_path_lines, target_indexes, args.only_exists)
    else:
        # report targets covering arcs/lines in the "from" report which do not cover them in the "to" report
        arcs = find_missing(from_path_arcs, from_targets, to_path_arcs, to_targets, target_indexes, args.only_exists)
        lines = find_missing(from_path_lines, from_targets, to_path_lines, to_targets, target_indexes, args.only_exists)

    write_report(args, make_report(target_indexes, arcs, lines), args.output_file)
|
||||
|
||||
|
||||
def find_gaps(
        from_data,  # type: t.Dict[str, t.Dict[TargetKey, t.Set[int]]]
        from_index,  # type: t.List[str]
        to_data,  # type: t.Dict[str, t.Dict[TargetKey, t.Set[int]]]
        target_indexes,  # type: TargetIndexes
        only_exists,  # type: bool
):  # type: (...) -> t.Dict[str, t.Dict[TargetKey, t.Set[int]]]
    """Find gaps in coverage between the from and to data sets."""
    # fixes: the type comment on target_indexes had a stray trailing comma (`TargetIndexes,`), which is not a valid annotation
    target_data = {}  # type: t.Dict[str, t.Dict[TargetKey, t.Set[int]]]

    for from_path, from_points in from_data.items():
        # optionally ignore files which no longer exist on disk
        if only_exists and not os.path.isfile(to_bytes(from_path)):
            continue

        to_points = to_data.get(from_path, {})

        # a gap is an arc or line covered in the from data with no coverage at all in the to data
        gaps = set(from_points.keys()) - set(to_points.keys())

        if gaps:
            gap_points = dict((key, value) for key, value in from_points.items() if key in gaps)
            # re-map target indexes from the from report into the result's own index space
            target_data[from_path] = dict((gap, set(get_target_index(from_index[i], target_indexes) for i in indexes)) for gap, indexes in gap_points.items())

    return target_data
|
||||
|
||||
|
||||
def find_missing(
        from_data,  # type: t.Dict[str, t.Dict[TargetKey, t.Set[int]]]
        from_index,  # type: t.List[str]
        to_data,  # type: t.Dict[str, t.Dict[TargetKey, t.Set[int]]]
        to_index,  # type: t.List[str]
        target_indexes,  # type: TargetIndexes
        only_exists,  # type: bool
):  # type: (...) -> t.Dict[str, t.Dict[TargetKey, t.Set[int]]]
    """Find coverage in from_data not present in to_data (arcs or lines)."""
    # fixes: the type comment on target_indexes had a stray trailing comma (`TargetIndexes,`), which is not a valid annotation
    target_data = {}  # type: t.Dict[str, t.Dict[TargetKey, t.Set[int]]]

    for from_path, from_points in from_data.items():
        # optionally ignore files which no longer exist on disk
        if only_exists and not os.path.isfile(to_bytes(from_path)):
            continue

        to_points = to_data.get(from_path, {})

        for from_point, from_target_indexes in from_points.items():
            to_target_indexes = to_points.get(from_point, set())

            # compare by target name since indexes are not shared between the two reports
            remaining_targets = set(from_index[i] for i in from_target_indexes) - set(to_index[i] for i in to_target_indexes)

            if remaining_targets:
                target_index = target_data.setdefault(from_path, {}).setdefault(from_point, set())
                target_index.update(get_target_index(name, target_indexes) for name in remaining_targets)

    return target_data
|
||||
|
||||
|
||||
class CoverageAnalyzeTargetsMissingConfig(CoverageAnalyzeTargetsConfig):
    """Configuration for the `coverage analyze targets missing` command."""
    def __init__(self, args): # type: (t.Any) -> None
        super(CoverageAnalyzeTargetsMissingConfig, self).__init__(args)

        # reports to compare: coverage present in from_file but absent from to_file is reported
        self.from_file = args.from_file # type: str
        self.to_file = args.to_file # type: str
        self.output_file = args.output_file # type: str

        # when True, report only arcs/lines completely uncovered in to_file instead of per-target differences
        self.only_gaps = args.only_gaps # type: bool
        # when True, limit the report to files that still exist on disk
        self.only_exists = args.only_exists # type: bool
|
||||
Loading…
Reference in New Issue