ansible-test - Always use unique container names (#80724)

pull/80592/head
Matt Clay 3 years ago committed by GitHub
parent 2b6d5536d7
commit cdeb607b1d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -0,0 +1,6 @@
bugfixes:
- ansible-test - All containers created by ansible-test now include the current test session ID in their name.
This avoids conflicts between concurrent ansible-test invocations using the same container host.
breaking_changes:
- ansible-test - Test plugins that rely on containers no longer support reusing running containers.
The previous behavior was an undocumented, untested feature.

@ -8,7 +8,6 @@ from ....config import (
)
from ....containers import (
CleanupMode,
run_support_container,
)
@ -22,8 +21,6 @@ from . import (
class ACMEProvider(CloudProvider):
"""ACME plugin. Sets up cloud resources for tests."""
DOCKER_SIMULATOR_NAME = 'acme-simulator'
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
@ -51,17 +48,18 @@ class ACMEProvider(CloudProvider):
14000, # Pebble ACME CA
]
run_support_container(
descriptor = run_support_container(
self.args,
self.platform,
self.image,
self.DOCKER_SIMULATOR_NAME,
'acme-simulator',
ports,
allow_existing=True,
cleanup=CleanupMode.YES,
)
self._set_cloud_config('acme_host', self.DOCKER_SIMULATOR_NAME)
if not descriptor:
return
self._set_cloud_config('acme_host', descriptor.name)
def _setup_static(self) -> None:
raise NotImplementedError()

@ -21,7 +21,6 @@ from ....docker_util import (
)
from ....containers import (
CleanupMode,
run_support_container,
wait_for_file,
)
@ -36,8 +35,6 @@ from . import (
class CsCloudProvider(CloudProvider):
"""CloudStack cloud provider plugin. Sets up cloud resources before delegation."""
DOCKER_SIMULATOR_NAME = 'cloudstack-sim'
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
@ -96,10 +93,8 @@ class CsCloudProvider(CloudProvider):
self.args,
self.platform,
self.image,
self.DOCKER_SIMULATOR_NAME,
'cloudstack-sim',
ports,
allow_existing=True,
cleanup=CleanupMode.YES,
)
if not descriptor:
@ -107,7 +102,7 @@ class CsCloudProvider(CloudProvider):
# apply work-around for OverlayFS issue
# https://github.com/docker/for-linux/issues/72#issuecomment-319904698
docker_exec(self.args, self.DOCKER_SIMULATOR_NAME, ['find', '/var/lib/mysql', '-type', 'f', '-exec', 'touch', '{}', ';'], capture=True)
docker_exec(self.args, descriptor.name, ['find', '/var/lib/mysql', '-type', 'f', '-exec', 'touch', '{}', ';'], capture=True)
if self.args.explain:
values = dict(
@ -115,10 +110,10 @@ class CsCloudProvider(CloudProvider):
PORT=str(self.port),
)
else:
credentials = self._get_credentials(self.DOCKER_SIMULATOR_NAME)
credentials = self._get_credentials(descriptor.name)
values = dict(
HOST=self.DOCKER_SIMULATOR_NAME,
HOST=descriptor.name,
PORT=str(self.port),
KEY=credentials['apikey'],
SECRET=credentials['secretkey'],

@ -25,10 +25,10 @@ from . import (
# We add BasicAuthentication, to make the tasks that deal with
# direct API access easier to deal with across galaxy_ng and pulp
SETTINGS = b'''
CONTENT_ORIGIN = 'http://ansible-ci-pulp:80'
ANSIBLE_API_HOSTNAME = 'http://ansible-ci-pulp:80'
ANSIBLE_CONTENT_HOSTNAME = 'http://ansible-ci-pulp:80/pulp/content'
SETTINGS = '''
CONTENT_ORIGIN = 'http://{host}:80'
ANSIBLE_API_HOSTNAME = 'http://{host}:80'
ANSIBLE_CONTENT_HOSTNAME = 'http://{host}:80/pulp/content'
TOKEN_AUTH_DISABLED = True
GALAXY_REQUIRE_CONTENT_APPROVAL = False
GALAXY_AUTHENTICATION_CLASSES = [
@ -97,7 +97,6 @@ class GalaxyProvider(CloudProvider):
super().setup()
galaxy_port = 80
pulp_host = 'ansible-ci-pulp'
pulp_port = 24817
ports = [
@ -110,32 +109,31 @@ class GalaxyProvider(CloudProvider):
self.args,
self.platform,
self.pulp,
pulp_host,
'galaxy-pulp',
ports,
start=False,
allow_existing=True,
)
if not descriptor:
return
if not descriptor.running:
pulp_id = descriptor.container_id
pulp_id = descriptor.container_id
injected_files = {
'/etc/pulp/settings.py': SETTINGS,
'/etc/cont-init.d/111-postgres': SET_ADMIN_PASSWORD,
'/etc/cont-init.d/000-ansible-test-overrides': OVERRIDES,
}
for path, content in injected_files.items():
with tempfile.NamedTemporaryFile() as temp_fd:
temp_fd.write(content)
temp_fd.flush()
docker_cp_to(self.args, pulp_id, temp_fd.name, path)
injected_files = {
'/etc/pulp/settings.py': SETTINGS.format(host=descriptor.name).encode(),
'/etc/cont-init.d/111-postgres': SET_ADMIN_PASSWORD,
'/etc/cont-init.d/000-ansible-test-overrides': OVERRIDES,
}
descriptor.start(self.args)
for path, content in injected_files.items():
with tempfile.NamedTemporaryFile() as temp_fd:
temp_fd.write(content)
temp_fd.flush()
docker_cp_to(self.args, pulp_id, temp_fd.name, path)
self._set_cloud_config('PULP_HOST', pulp_host)
descriptor.start(self.args)
self._set_cloud_config('PULP_HOST', descriptor.name)
self._set_cloud_config('PULP_PORT', str(pulp_port))
self._set_cloud_config('GALAXY_PORT', str(galaxy_port))
self._set_cloud_config('PULP_USER', 'admin')

@ -13,7 +13,6 @@ from ....config import (
)
from ....containers import (
CleanupMode,
run_support_container,
)
@ -62,8 +61,6 @@ class HttptesterProvider(CloudProvider):
'http-test-container',
ports,
aliases=aliases,
allow_existing=True,
cleanup=CleanupMode.YES,
env={
KRB5_PASSWORD_ENV: generate_password(),
},

@ -8,7 +8,6 @@ from ....config import (
)
from ....containers import (
CleanupMode,
run_support_container,
)
@ -22,8 +21,6 @@ from . import (
class NiosProvider(CloudProvider):
"""Nios plugin. Sets up NIOS mock server for tests."""
DOCKER_SIMULATOR_NAME = 'nios-simulator'
# Default image to run the nios simulator.
#
# The simulator must be pinned to a specific version
@ -65,17 +62,18 @@ class NiosProvider(CloudProvider):
nios_port,
]
run_support_container(
descriptor = run_support_container(
self.args,
self.platform,
self.image,
self.DOCKER_SIMULATOR_NAME,
'nios-simulator',
ports,
allow_existing=True,
cleanup=CleanupMode.YES,
)
self._set_cloud_config('NIOS_HOST', self.DOCKER_SIMULATOR_NAME)
if not descriptor:
return
self._set_cloud_config('NIOS_HOST', descriptor.name)
def _setup_static(self) -> None:
raise NotImplementedError()

@ -16,7 +16,6 @@ from ....config import (
)
from ....containers import (
CleanupMode,
run_support_container,
wait_for_file,
)
@ -31,8 +30,6 @@ from . import (
class OpenShiftCloudProvider(CloudProvider):
"""OpenShift cloud provider plugin. Sets up cloud resources before delegation."""
DOCKER_CONTAINER_NAME = 'openshift-origin'
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args, config_extension='.kubeconfig')
@ -74,10 +71,8 @@ class OpenShiftCloudProvider(CloudProvider):
self.args,
self.platform,
self.image,
self.DOCKER_CONTAINER_NAME,
'openshift-origin',
ports,
allow_existing=True,
cleanup=CleanupMode.YES,
cmd=cmd,
)
@ -87,7 +82,7 @@ class OpenShiftCloudProvider(CloudProvider):
if self.args.explain:
config = '# Unknown'
else:
config = self._get_config(self.DOCKER_CONTAINER_NAME, 'https://%s:%s/' % (self.DOCKER_CONTAINER_NAME, port))
config = self._get_config(descriptor.name, 'https://%s:%s/' % (descriptor.name, port))
self._write_config(config)

@ -4,7 +4,6 @@ from __future__ import annotations
import atexit
import collections.abc as c
import contextlib
import enum
import json
import random
import time
@ -46,6 +45,7 @@ from .docker_util import (
get_docker_container_id,
get_docker_host_ip,
get_podman_host_ip,
get_session_container_name,
require_docker,
detect_host_properties,
)
@ -101,14 +101,6 @@ class HostType:
managed = 'managed'
class CleanupMode(enum.Enum):
"""How container cleanup should be handled."""
YES = enum.auto()
NO = enum.auto()
INFO = enum.auto()
def run_support_container(
args: EnvironmentConfig,
context: str,
@ -117,8 +109,7 @@ def run_support_container(
ports: list[int],
aliases: t.Optional[list[str]] = None,
start: bool = True,
allow_existing: bool = False,
cleanup: t.Optional[CleanupMode] = None,
cleanup: bool = True,
cmd: t.Optional[list[str]] = None,
env: t.Optional[dict[str, str]] = None,
options: t.Optional[list[str]] = None,
@ -128,6 +119,8 @@ def run_support_container(
Start a container used to support tests, but not run them.
Containers created this way will be accessible from tests.
"""
name = get_session_container_name(args, name)
if args.prime_containers:
docker_pull(args, image)
return None
@ -165,46 +158,13 @@ def run_support_container(
options.extend(['--ulimit', 'nofile=%s' % max_open_files])
support_container_id = None
if allow_existing:
try:
container = docker_inspect(args, name)
except ContainerNotFoundError:
container = None
if container:
support_container_id = container.id
if not container.running:
display.info('Ignoring existing "%s" container which is not running.' % name, verbosity=1)
support_container_id = None
elif not container.image:
display.info('Ignoring existing "%s" container which has the wrong image.' % name, verbosity=1)
support_container_id = None
elif publish_ports and not all(port and len(port) == 1 for port in [container.get_tcp_port(port) for port in ports]):
display.info('Ignoring existing "%s" container which does not have the required published ports.' % name, verbosity=1)
support_container_id = None
if not support_container_id:
docker_rm(args, name)
if args.dev_systemd_debug:
options.extend(('--env', 'SYSTEMD_LOG_LEVEL=debug'))
if support_container_id:
display.info('Using existing "%s" container.' % name)
running = True
existing = True
else:
display.info('Starting new "%s" container.' % name)
docker_pull(args, image)
support_container_id = run_container(args, image, name, options, create_only=not start, cmd=cmd)
running = start
existing = False
if cleanup is None:
cleanup = CleanupMode.INFO if existing else CleanupMode.YES
display.info('Starting new "%s" container.' % name)
docker_pull(args, image)
support_container_id = run_container(args, image, name, options, create_only=not start, cmd=cmd)
running = start
descriptor = ContainerDescriptor(
image,
@ -215,7 +175,6 @@ def run_support_container(
aliases,
publish_ports,
running,
existing,
cleanup,
env,
)
@ -694,8 +653,7 @@ class ContainerDescriptor:
aliases: list[str],
publish_ports: bool,
running: bool,
existing: bool,
cleanup: CleanupMode,
cleanup: bool,
env: t.Optional[dict[str, str]],
) -> None:
self.image = image
@ -706,7 +664,6 @@ class ContainerDescriptor:
self.aliases = aliases
self.publish_ports = publish_ports
self.running = running
self.existing = existing
self.cleanup = cleanup
self.env = env
self.details: t.Optional[SupportContainer] = None
@ -805,10 +762,8 @@ def wait_for_file(
def cleanup_containers(args: EnvironmentConfig) -> None:
"""Clean up containers."""
for container in support_containers.values():
if container.cleanup == CleanupMode.YES:
docker_rm(args, container.container_id)
elif container.cleanup == CleanupMode.INFO:
display.notice(f'Remember to run `{require_docker().command} rm -f {container.name}` when finished testing.')
if container.cleanup:
docker_rm(args, container.name)
def create_hosts_entries(context: dict[str, ContainerAccess]) -> list[str]:

@ -300,7 +300,7 @@ def detect_host_properties(args: CommonConfig) -> ContainerHostProperties:
options = ['--volume', '/sys/fs/cgroup:/probe:ro']
cmd = ['sh', '-c', ' && echo "-" && '.join(multi_line_commands)]
stdout = run_utility_container(args, f'ansible-test-probe-{args.session_name}', cmd, options)[0]
stdout = run_utility_container(args, 'ansible-test-probe', cmd, options)[0]
if args.explain:
return ContainerHostProperties(
@ -336,7 +336,7 @@ def detect_host_properties(args: CommonConfig) -> ContainerHostProperties:
cmd = ['sh', '-c', 'ulimit -Hn']
try:
stdout = run_utility_container(args, f'ansible-test-ulimit-{args.session_name}', cmd, options)[0]
stdout = run_utility_container(args, 'ansible-test-ulimit', cmd, options)[0]
except SubprocessError as ex:
display.warning(str(ex))
else:
@ -402,6 +402,11 @@ def detect_host_properties(args: CommonConfig) -> ContainerHostProperties:
return properties
def get_session_container_name(args: CommonConfig, name: str) -> str:
    """Build a session-scoped container name by appending the current test session name to *name*."""
    # '%s' coerces both operands to str, matching the original f-string behavior.
    return '%s-%s' % (name, args.session_name)
def run_utility_container(
args: CommonConfig,
name: str,
@ -410,6 +415,8 @@ def run_utility_container(
data: t.Optional[str] = None,
) -> tuple[t.Optional[str], t.Optional[str]]:
"""Run the specified command using the ansible-test utility container, returning stdout and stderr."""
name = get_session_container_name(args, name)
options = options + [
'--name', name,
'--rm',

@ -99,7 +99,6 @@ from .ansible_util import (
)
from .containers import (
CleanupMode,
HostType,
get_container_database,
run_support_container,
@ -447,7 +446,7 @@ class DockerProfile(ControllerHostProfile[DockerConfig], SshTargetHostProfile[Do
@property
def label(self) -> str:
"""Label to apply to resources related to this profile."""
return f'{"controller" if self.controller else "target"}-{self.args.session_name}'
return f'{"controller" if self.controller else "target"}'
def provision(self) -> None:
"""Provision the host before delegation."""
@ -462,7 +461,7 @@ class DockerProfile(ControllerHostProfile[DockerConfig], SshTargetHostProfile[Do
ports=[22],
publish_ports=not self.controller, # connections to the controller over SSH are not required
options=init_config.options,
cleanup=CleanupMode.NO,
cleanup=False,
cmd=self.build_init_command(init_config, init_probe),
)
@ -838,7 +837,7 @@ class DockerProfile(ControllerHostProfile[DockerConfig], SshTargetHostProfile[Do
"""Check the cgroup v1 systemd hierarchy to verify it is writeable for our container."""
probe_script = (read_text_file(os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'check_systemd_cgroup_v1.sh'))
.replace('@MARKER@', self.MARKER)
.replace('@LABEL@', self.label))
.replace('@LABEL@', f'{self.label}-{self.args.session_name}'))
cmd = ['sh']
@ -853,7 +852,7 @@ class DockerProfile(ControllerHostProfile[DockerConfig], SshTargetHostProfile[Do
def create_systemd_cgroup_v1(self) -> str:
"""Create a unique ansible-test cgroup in the v1 systemd hierarchy and return its path."""
self.cgroup_path = f'/sys/fs/cgroup/systemd/ansible-test-{self.label}'
self.cgroup_path = f'/sys/fs/cgroup/systemd/ansible-test-{self.label}-{self.args.session_name}'
# Privileged mode is required to create the cgroup directories on some hosts, such as Fedora 36 and RHEL 9.0.
# The mkdir command will fail with "Permission denied" otherwise.

@ -76,7 +76,7 @@ def run_pypi_proxy(args: EnvironmentConfig, targets_use_pypi: bool) -> None:
args=args,
context='__pypi_proxy__',
image=image,
name=f'pypi-test-container-{args.session_name}',
name='pypi-test-container',
ports=[port],
)

Loading…
Cancel
Save