Overhaul additional sanity tests. (#36803)

* Remove unnecessary sys.exit calls.
* Add files filtering for code-smell tests.
* Enhance test-constraints code-smell test.
* Simplify compile sanity test.
* Pass paths to importer on stdin.
* Pass paths to yamllinter on stdin.
* Add work-around for unicode path filtering.
* Enhance configure-remoting-ps1 code-smell test.
* Enhance integration-aliases code-smell test.
* Enhance azure-requirements code-smell test.
* Enhance no-illegal-filenames code-smell test.
pull/36810/head
Matt Clay 6 years ago committed by GitHub
parent 5b5a79917d
commit ac1698099d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -6,6 +6,7 @@ import glob
import json
import os
import re
import sys
from lib.util import (
ApplicationError,
@ -233,6 +234,8 @@ class SanityCodeSmellTest(SanityTest):
output = config.get('output')
extensions = config.get('extensions')
prefixes = config.get('prefixes')
files = config.get('files')
always = config.get('always')
if output == 'path-line-column-message':
pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
@ -243,18 +246,29 @@ class SanityCodeSmellTest(SanityTest):
paths = sorted(i.path for i in targets.include)
if always:
paths = []
# short-term work-around for paths being str instead of unicode on python 2.x
if sys.version_info[0] == 2:
paths = [p.decode('utf-8') for p in paths]
if extensions:
paths = [p for p in paths if os.path.splitext(p)[1] in extensions or (p.startswith('bin/') and '.py' in extensions)]
if prefixes:
paths = [p for p in paths if any(p.startswith(pre) for pre in prefixes)]
if not paths:
if files:
paths = [p for p in paths if os.path.basename(p) in files]
if not paths and not always:
return SanitySkipped(self.name)
data = '\n'.join(paths)
display.info(data, verbosity=4)
if data:
display.info(data, verbosity=4)
try:
stdout, stderr = run_command(args, cmd, data=data, env=env, capture=True)
status = 0

@ -15,6 +15,7 @@ from lib.sanity import (
from lib.util import (
SubprocessError,
run_command,
display,
)
from lib.config import (
@ -49,10 +50,14 @@ class CompileTest(SanityMultipleVersion):
if not paths:
return SanitySkipped(self.name, python_version=python_version)
cmd = ['python%s' % python_version, 'test/sanity/compile/compile.py'] + paths
cmd = ['python%s' % python_version, 'test/sanity/compile/compile.py']
data = '\n'.join(paths)
display.info(data, verbosity=4)
try:
stdout, stderr = run_command(args, cmd, capture=True)
stdout, stderr = run_command(args, cmd, data=data, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout

@ -17,6 +17,7 @@ from lib.util import (
run_command,
intercept_command,
remove_tree,
display,
)
from lib.ansible_util import (
@ -88,12 +89,17 @@ class ImportTest(SanityMultipleVersion):
run_command(args, ['pip', 'uninstall', '--disable-pip-version-check', '-y', 'setuptools'], env=env)
run_command(args, ['pip', 'uninstall', '--disable-pip-version-check', '-y', 'pip'], env=env)
cmd = ['importer.py'] + paths
cmd = ['importer.py']
data = '\n'.join(paths)
display.info(data, verbosity=4)
results = []
try:
stdout, stderr = intercept_command(args, cmd, target_name=self.name, env=env, capture=True, python_version=python_version, path=env['PATH'])
stdout, stderr = intercept_command(args, cmd, data=data, target_name=self.name, env=env, capture=True, python_version=python_version,
path=env['PATH'])
if stdout or stderr:
raise SubprocessError(cmd, stdout=stdout, stderr=stderr)

@ -15,6 +15,7 @@ from lib.sanity import (
from lib.util import (
SubprocessError,
run_command,
display,
)
from lib.config import (
@ -66,10 +67,14 @@ class YamllintTest(SanitySingleVersion):
cmd = [
'python%s' % args.python_version,
'test/sanity/yamllint/yamllinter.py',
] + paths
]
data = '\n'.join(paths)
display.info(data, verbosity=4)
try:
stdout, stderr = run_command(args, cmd, capture=True)
stdout, stderr = run_command(args, cmd, data=data, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout

@ -0,0 +1,4 @@
{
"always": true,
"output": "path-message"
}

@ -2,11 +2,19 @@
"""Make sure the Azure requirements files match."""
import filecmp
import os
src = 'packaging/requirements/requirements-azure.txt'
dst = 'test/runner/requirements/integration.cloud.azure.txt'
if not filecmp.cmp(src, dst):
print('Update the Azure integration test requirements with the packaging test requirements:')
print('cp %s %s' % (src, dst))
exit(1)
def main():
    """Verify the Azure integration test requirements mirror the packaging file."""
    src = 'packaging/requirements/requirements-azure.txt'
    dst = 'test/runner/requirements/integration.cloud.azure.txt'

    # shallow=False forces a byte-for-byte content comparison; the default
    # shallow mode only compares os.stat() signatures and can report two
    # files as equal even when their contents have drifted.
    if not filecmp.cmp(src, dst, shallow=False):
        print('%s: must be identical to `%s`' % (dst, src))

    if os.path.islink(dst):
        print('%s: must not be a symbolic link' % dst)


if __name__ == '__main__':
    main()

@ -0,0 +1,4 @@
{
"always": true,
"output": "path-message"
}

@ -0,0 +1,31 @@
#!/usr/bin/env python
import os
def main():
# required by external automated processes and should not be moved, renamed or converted to a symbolic link
path = 'examples/scripts/ConfigureRemotingForAnsible.ps1'
directory = path
while True:
directory = os.path.dirname(directory)
if not directory:
break
if not os.path.isdir(directory):
print('%s: must be a directory' % directory)
if os.path.islink(directory):
print('%s: cannot be a symbolic link' % directory)
if not os.path.isfile(path):
print('%s: must be a file' % path)
if os.path.islink(path):
print('%s: cannot be a symbolic link' % path)
if __name__ == '__main__':
main()

@ -1,9 +0,0 @@
#!/bin/sh
FILE='examples/scripts/ConfigureRemotingForAnsible.ps1'
if [ ! -f "${FILE}" ] || [ -h "${FILE}" ]; then
echo 'The file "ConfigureRemotingForAnsible.ps1" is missing or is not a regular file.'
echo 'It is required by external automated processes and should not be moved or renamed.'
exit 1
fi

@ -4,8 +4,8 @@
"lib/ansible/module_utils/",
"test/units/"
],
"extensions": [
".py"
"files": [
"__init__.py"
],
"output": "path-message"
}

@ -19,9 +19,6 @@ def main():
if path in skip:
continue
if os.path.basename(path) != '__init__.py':
continue
if os.path.getsize(path) > 0:
print('%s: empty __init__.py required' % path)

@ -0,0 +1,4 @@
{
"always": true,
"output": "path-message"
}

@ -10,8 +10,6 @@ def main():
with open('test/integration/target-prefixes.network', 'r') as prefixes_fd:
network_prefixes = prefixes_fd.read().splitlines()
missing_aliases = []
for target in sorted(os.listdir(targets_dir)):
target_dir = os.path.join(targets_dir, target)
aliases_path = os.path.join(target_dir, 'aliases')
@ -38,47 +36,7 @@ def main():
if any(target.startswith('%s_' % prefix) for prefix in network_prefixes):
continue
missing_aliases.append(target_dir)
if missing_aliases:
message = textwrap.dedent('''
The following integration target directories are missing `aliases` files:
%s
If these tests cannot be run as part of CI (requires external services, unsupported dependencies, etc.),
then they most likely belong in `test/integration/roles/` instead of `test/integration/targets/`.
In that case, do not add an `aliases` file. Instead, just relocate the tests.
However, if you think that the tests should be able to be supported by CI, please discuss test
organization with @mattclay or @gundalow on GitHub or #ansible-devel on IRC.
If these tests can be run as part of CI, you'll need to add an appropriate CI alias, such as:
posix/ci/group1
windows/ci/group2
The CI groups are used to balance tests across multiple jobs to minimize test run time.
Using the relevant `group1` entry is fine in most cases. Groups can be changed later to redistribute tests.
Aliases can also be used to express test requirements:
needs/privileged
needs/root
needs/ssh
Other aliases are used to skip tests under certain conditions:
skip/freebsd
skip/osx
skip/python3
Take a look at existing `aliases` files to see what aliases are available and how they're used.
''').strip() % '\n'.join(missing_aliases)
print(message)
exit(1)
print('%s: missing integration test `aliases` file' % aliases_path)
if __name__ == '__main__':

@ -8,22 +8,16 @@ ASSERT_RE = re.compile(r'.*(?<![-:a-zA-Z#][ -])\bassert\b(?!:).*')
def main():
    """Scan the given files for `assert` statements and report each one.

    Paths come from the command line, or from stdin when no arguments are
    given. Exits non-zero when any match is found.
    """
    paths = sys.argv[1:] or sys.stdin.read().splitlines()
    found = False

    for path in paths:
        with open(path, 'r') as source_fd:
            for index, text in enumerate(source_fd):
                hits = ASSERT_RE.findall(text)

                if not hits:
                    continue

                found = True
                column = text.index('assert') + 1
                print('%s:%d:%d: raise AssertionError instead of: %s' % (path, index + 1, column, hits[0][column - 1:]))

    if found:
        sys.exit(1)


if __name__ == '__main__':
    main()

@ -0,0 +1,4 @@
{
"always": true,
"output": "path-message"
}

@ -54,45 +54,41 @@ ILLEGAL_END_CHARS = [
def check_path(path, dir=False):
errors = []
type_name = 'directory' if dir else 'file'
parent, file_name = os.path.split(path)
name, ext = os.path.splitext(file_name)
if name.upper() in ILLEGAL_NAMES:
errors.append("Illegal %s name %s: %s" % (type_name, name.upper(), path))
print("%s: illegal %s name %s" % (path, type_name, name.upper()))
if file_name[-1] in ILLEGAL_END_CHARS:
errors.append("Illegal %s name end-char '%s': %s" % (type_name, file_name[-1], path))
print("%s: illegal %s name end-char '%s'" % (path, type_name, file_name[-1]))
bfile = to_bytes(file_name, encoding='utf-8')
for char in ILLEGAL_CHARS:
if char in bfile:
bpath = to_bytes(path, encoding='utf-8')
errors.append("Illegal char %s in %s name: %s" % (char, type_name, bpath))
return errors
print("%s: illegal char '%s' in %s name" % (bpath, char, type_name))
def main():
errors = []
pattern = re.compile("^./test/integration/targets/.*/backup")
pattern = re.compile("^test/integration/targets/.*/backup")
for root, dirs, files in os.walk('.'):
if root == '.':
root = ''
elif root.startswith('./'):
root = root[2:]
# ignore test/integration/targets/*/backup
if pattern.match(root):
continue
for dir_name in dirs:
errors += check_path(os.path.abspath(os.path.join(root, dir_name)), dir=True)
check_path(os.path.join(root, dir_name), dir=True)
for file_name in files:
errors += check_path(os.path.abspath(os.path.join(root, file_name)), dir=False)
if len(errors) > 0:
print('Ansible git repo should not contain any illegal filenames')
for error in errors:
print(error)
exit(1)
check_path(os.path.join(root, file_name), dir=False)
if __name__ == '__main__':

@ -49,8 +49,6 @@ FILTER_RE = re.compile(r'((.+?)\s*(?P<left>[\w \.\'"]+)(\s*)\|(\s*)(?P<filter>\w
def main():
failed = False
for path in sys.argv[1:] or sys.stdin.read().splitlines():
with open(path) as f:
text = f.read()
@ -67,7 +65,6 @@ def main():
if test_name not in TESTS:
continue
failed = True
left = match.group('left').strip()
start = match.start('left')
@ -80,9 +77,6 @@ def main():
print('%s:%d:%d: use `%s is %s` instead of `%s | %s`' % (path, lineno, colno, left, test_name, left, filter_name))
if failed:
sys.exit(1)
if __name__ == '__main__':
main()

@ -0,0 +1,9 @@
{
"prefixes": [
"test/runner/requirements/"
],
"extensions": [
".txt"
],
"output": "path-line-column-message"
}

@ -0,0 +1,27 @@
#!/usr/bin/env python
import re
import sys
def main():
skip = set([
'test/runner/requirements/constraints.txt',
'test/runner/requirements/integration.cloud.azure.txt',
])
for path in sys.argv[1:] or sys.stdin.read().splitlines():
if path in skip:
continue
with open(path, 'r') as path_fd:
for line, text in enumerate(path_fd.readlines()):
match = re.search(r'^[^;#]*?([<>=])(?!.*sanity_ok.*)', text)
if match:
print('%s:%d:%d: put constraints in `test/runner/requirements/constraints.txt`' % (
path, line + 1, match.start(1) + 1))
if __name__ == '__main__':
main()

@ -1,17 +0,0 @@
#!/bin/sh
constraints=$(
grep '.' test/runner/requirements/*.txt \
| grep -v '(sanity_ok)$' \
| sed 's/ *;.*$//; s/ #.*$//' \
| grep -v '/constraints.txt:' \
| grep -v '/integration.cloud.azure.txt:' \
| grep '[<>=]'
)
if [ "${constraints}" ]; then
echo 'Constraints for test requirements should be in "test/runner/requirements/constraints.txt".'
echo 'The following constraints were found outside the "constraints.txt" file:'
echo "${constraints}"
exit 1
fi

@ -1,105 +1,16 @@
#!/usr/bin/env python
"""Python syntax checker with lint friendly output."""
import os
import parser
import re
import sys
def main():
    """Entry point: parse CLI options, filter the paths, then syntax-check them."""
    paths, verbose, skip_patterns = parse_options()
    paths = filter_paths(paths, skip_patterns)
    check(paths, verbose)
def parse_options():
    """Parse sys.argv into a (paths, verbose, skip_patterns) tuple.

    Supported options: `-v` enables verbose output, `-x PATTERN` adds a
    compiled regex used to skip matching paths. Any other dash-prefixed
    argument raises an Exception; everything else is treated as a path.
    """
    paths = []
    skip_patterns = []
    verbose = False
    pending = None  # option still waiting for its argument (only '-x')

    for arg in sys.argv[1:]:
        if pending == '-x':
            skip_patterns.append(re.compile(arg))
            pending = None
        elif arg.startswith('-'):
            if arg == '-v':
                verbose = True
            elif arg == '-x':
                pending = arg
            else:
                raise Exception('Unknown Option: %s' % arg)
        else:
            paths.append(arg)

    if pending:
        raise Exception('Incomplete Option: %s' % pending)

    return paths, verbose, skip_patterns
def filter_paths(paths, skip_patterns):
    """Expand directory arguments into their .py files and drop skipped paths.

    Defaults to the current directory when no paths are given. Hidden
    directories are pruned from the walk. The result is sorted and excludes
    any path matched by one of the compiled `skip_patterns`.
    """
    candidates = paths or ['.']
    collected = []

    for candidate in candidates:
        if not os.path.isdir(candidate):
            collected.append(candidate)
            continue

        for root, directories, files in os.walk(candidate):
            # Prune hidden directories in place so os.walk does not descend.
            directories[:] = [d for d in directories if not d.startswith('.')]
            collected.extend(os.path.join(root, name) for name in files if name.endswith('.py'))

    return [p for p in sorted(collected)
            if not any(pattern.search(p) for pattern in skip_patterns)]
def check(paths, verbose):
status = 0
for path in paths:
if verbose:
sys.stderr.write('%s\n' % path)
sys.stderr.flush()
source_fd = open(path, 'r')
try:
for path in sys.argv[1:] or sys.stdin.read().splitlines():
with open(path, 'r') as source_fd:
source = source_fd.read()
finally:
source_fd.close()
try:
parser.suite(source)

@ -43,7 +43,7 @@ def main():
base_dir = os.getcwd()
messages = set()
for path in sys.argv[1:]:
for path in sys.argv[1:] or sys.stdin.read().splitlines():
test_python_module(path, base_dir, messages, False)
test_python_module(path, base_dir, messages, True)

@ -14,7 +14,7 @@ from yamllint.config import YamlLintConfig
def main():
"""Main program body."""
paths = sys.argv[1:]
paths = sys.argv[1:] or sys.stdin.read().splitlines()
checker = YamlChecker()
checker.check(paths)

Loading…
Cancel
Save