Integration test cleanup (#84763)

This brings in integration test fixes from the data tagging PR which are unrelated to DT changes.
pull/84766/head
Matt Clay 9 months ago committed by GitHub
parent 650ee5abf8
commit 9ef623a517
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -1,2 +1,3 @@
shippable/posix/group5
context/controller
needs/root

@ -26,7 +26,7 @@
that:
- result.failed
- >-
"ERROR! Neither the collection requirement entry key 'name',
"Neither the collection requirement entry key 'name',
nor 'source' point to a concrete resolvable collection artifact.
Also 'name' is not an FQCN. A valid collection name must be in
the format <namespace>.<collection>. Please make sure that the
@ -44,7 +44,7 @@
that:
- result.failed
- >-
result.stderr is search("ERROR! Collections requirement 'source'
result.stderr is search("Collections requirement 'source'
entry should contain a valid Galaxy API URL but it does not:
git\+file:///.*/amazon.aws/.git is not an HTTP URL.")
@ -59,7 +59,7 @@
that:
- result.failed
- >-
result.stderr is search("ERROR! Failed to clone a Git repository
result.stderr is search("Failed to clone a Git repository
from `file:///.*/.git`.")
- >-
result.stderr is search("fatal: '/.*/amazon.aws/.git' does not

@ -12,23 +12,23 @@
'Starting collection install process'
in command.stdout_lines
- >-
"Installing 'namespace_1.collection_1:1.0.0' to
'{{ install_path }}/namespace_1/collection_1'"
"Installing 'namespace_1.collection_1:1.0.0' to '" +
install_path + "/namespace_1/collection_1'"
in command.stdout_lines
- >-
'Created collection for namespace_1.collection_1:1.0.0 at
{{ install_path }}/namespace_1/collection_1'
'Created collection for namespace_1.collection_1:1.0.0 at ' +
install_path + '/namespace_1/collection_1'
in command.stdout_lines
- >-
'namespace_1.collection_1:1.0.0 was installed successfully'
in command.stdout_lines
- >-
"Installing 'namespace_2.collection_2:1.0.0' to
'{{ install_path }}/namespace_2/collection_2'"
"Installing 'namespace_2.collection_2:1.0.0' to '" +
install_path + "/namespace_2/collection_2'"
in command.stdout_lines
- >-
'Created collection for namespace_2.collection_2:1.0.0 at
{{ install_path }}/namespace_2/collection_2'
'Created collection for namespace_2.collection_2:1.0.0 at ' +
install_path + '/namespace_2/collection_2'
in command.stdout_lines
- >-
'namespace_2.collection_2:1.0.0 was installed successfully'
@ -57,23 +57,23 @@
'Starting collection install process'
in command.stdout_lines
- >-
"Installing 'namespace_1.collection_1:1.0.0' to
'{{ install_path }}/namespace_1/collection_1'"
"Installing 'namespace_1.collection_1:1.0.0' to '" +
install_path + "/namespace_1/collection_1'"
in command.stdout_lines
- >-
'Created collection for namespace_1.collection_1:1.0.0 at
{{ install_path }}/namespace_1/collection_1'
'Created collection for namespace_1.collection_1:1.0.0 at ' +
install_path + '/namespace_1/collection_1'
in command.stdout_lines
- >-
'namespace_1.collection_1:1.0.0 was installed successfully'
in command.stdout_lines
- >-
"Installing 'namespace_2.collection_2:1.0.0' to
'{{ install_path }}/namespace_2/collection_2'"
"Installing 'namespace_2.collection_2:1.0.0' to '" +
install_path + "/namespace_2/collection_2'"
in command.stdout_lines
- >-
'Created collection for namespace_2.collection_2:1.0.0 at
{{ install_path }}/namespace_2/collection_2'
'Created collection for namespace_2.collection_2:1.0.0 at ' +
install_path + '/namespace_2/collection_2'
in command.stdout_lines
- >-
'namespace_2.collection_2:1.0.0 was installed successfully'

@ -78,9 +78,11 @@ RETURN = """
import datetime
import os
import random
import subprocess
import tarfile
import tempfile
import time
import yaml
from ansible.module_utils.basic import AnsibleModule
@ -100,6 +102,8 @@ def publish_collection(module, collection):
dependencies = collection['dependencies']
use_symlink = collection['use_symlink']
time.sleep(random.random()) # inject some time wobble into parallel publish operations since Galaxy publish DB key generation is not mutex'd
result = {}
collection_dir = os.path.join(module.tmpdir, "%s-%s-%s" % (namespace, name, version))
b_collection_dir = to_bytes(collection_dir, errors='surrogate_or_strict')

@ -232,7 +232,7 @@
vars:
error: "{{ result.stderr | regex_replace('\\n', ' ') }}"
expected_error: >-
ERROR! Failed to resolve the requested dependencies map.
Failed to resolve the requested dependencies map.
Got the candidate namespace3.name:1.0.0 (direct request)
which didn't satisfy all of the following requirements:
* namespace3.name:1.2.0
@ -241,7 +241,7 @@
vars:
error: "{{ result.stderr | regex_replace('\\n', ' ') }}"
expected_error: >-
ERROR! Failed to resolve the requested dependencies map.
Failed to resolve the requested dependencies map.
Got the candidate namespace3.name:1.0.0 (dependency of tmp_parent.name:1.0.0)
which didn't satisfy all of the following requirements:
* namespace3.name:1.2.0
@ -535,7 +535,7 @@
- assert:
that:
- required_together is failed
- '"ERROR! Signatures were provided to verify namespace1.name1 but no keyring was configured." in required_together.stderr'
- required_together.stderr is contains("Signatures were provided to verify namespace1.name1 but no keyring was configured.")
- name: install collections with ansible-galaxy install -r with invalid signatures - {{ test_id }}
# Note that --keyring is a valid option for 'ansible-galaxy install -r ...', not just 'ansible-galaxy collection ...'
@ -796,7 +796,7 @@
- name: assert cache version list is ignored on a collection version change - {{ test_id }}
assert:
that:
- '"Installing ''cache.cache:1.0.{{ cache_version_build }}'' to" in install_cached_update.stdout'
- '"Installing ''cache.cache:1.0." ~ cache_version_build ~ "'' to" in install_cached_update.stdout'
- (install_cached_update_actual.content | b64decode | from_json).collection_info.version == '1.0.' ~ cache_version_build
- name: install collection with symlink - {{ test_id }}

@ -95,9 +95,9 @@
- galaxy_err in missing_dep.stderr
- missing_err in missing_dep_offline.stderr
vars:
galaxy_err: "ERROR! Unknown error when attempting to call Galaxy at '{{ offline_server }}'"
galaxy_err: "Unknown error when attempting to call Galaxy at '{{ offline_server }}'"
missing_err: |-
ERROR! Failed to resolve the requested dependencies map. Could not satisfy the following requirements:
Failed to resolve the requested dependencies map. Could not satisfy the following requirements:
* ns.coll2:>=1.0.0 (dependency of ns.coll1:1.0.0)
- name: install the dependency from the tarfile

@ -60,13 +60,13 @@
- assert:
that:
- "'dev.collection1 *' in list_result.stdout"
- 'list_result.stdout is regex "dev.collection1\s+\*"'
# Note the version displayed is the 'placeholder' string rather than "*" since it is not falsey
- "'dev.collection2 placeholder' in list_result.stdout"
- "'dev.collection3 *' in list_result.stdout"
- "'dev.collection4 *' in list_result.stdout"
- "'dev.collection5 *' in list_result.stdout"
- "'dev.collection6 *' in list_result.stdout"
- 'list_result.stdout is regex "dev.collection2\s+placeholder"'
- 'list_result.stdout is regex "dev.collection3\s+\*"'
- 'list_result.stdout is regex "dev.collection4\s+\*"'
- 'list_result.stdout is regex "dev.collection5\s+\*"'
- 'list_result.stdout is regex "dev.collection6\s+\*"'
- name: list collections in human format
command: ansible-galaxy collection list --format human
@ -76,12 +76,13 @@
- assert:
that:
- "'dev.collection1 *' in list_result_human.stdout"
- 'list_result_human.stdout is regex "dev.collection1\s+\*"'
# Note the version displayed is the 'placeholder' string rather than "*" since it is not falsey
- "'dev.collection2 placeholder' in list_result_human.stdout"
- "'dev.collection3 *' in list_result_human.stdout"
- "'dev.collection5 *' in list_result.stdout"
- "'dev.collection6 *' in list_result.stdout"
- 'list_result_human.stdout is regex "dev.collection2\s+placeholder"'
- 'list_result_human.stdout is regex "dev.collection3\s+\*"'
- 'list_result_human.stdout is regex "dev.collection4\s+\*"'
- 'list_result_human.stdout is regex "dev.collection5\s+\*"'
- 'list_result_human.stdout is regex "dev.collection6\s+\*"'
- name: list collections in yaml format
command: ansible-galaxy collection list --format yaml

@ -27,7 +27,7 @@
- assert:
that:
- resolvelib_version_error is failed
- resolvelib_version_error.stderr | regex_search(error)
- resolvelib_version_error.stderr | regex_search(error) is truthy
vars:
error: "({{ import_error }}|{{ compat_error }})"
import_error: "Failed to import resolvelib"

@ -44,7 +44,7 @@
that:
- verify is success
- >-
"Found API version '{{ test_api_fallback_versions }}' with Galaxy server {{ test_api_fallback }}" in verify.stdout
"Found API version '" + test_api_fallback_versions + "' with Galaxy server " + test_api_fallback in verify.stdout
- name: verify the installed collection against the server
command: ansible-galaxy collection verify ansible_test.verify:1.0.0 -s {{ test_name }} {{ galaxy_verbosity }}

@ -6,7 +6,7 @@
- assert:
that:
- result is failed
- '"ERROR! No action selected, at least one of --host, --graph or --list needs to be specified." in result.stderr'
- result.stderr is contains "No action selected, at least one of --host, --graph or --list needs to be specified."
- name: "test option: --list --export"
command: ansible-inventory --list --export
@ -62,7 +62,7 @@
- assert:
that:
- result is failed
- '"ERROR! Pattern must be valid group name when using --graph" in result.stderr'
- result.stderr is contains "Pattern must be valid group name when using --graph"
- name: "test option: --host localhost"
command: ansible-inventory --host localhost
@ -80,7 +80,8 @@
- assert:
that:
- result is failed
- '"ERROR! Could not match supplied host pattern, ignoring: invalid" in result.stderr'
- |
result.stderr is contains "Could not match supplied host pattern, ignoring: invalid"
- name: "test json output with unicode characters"
command: ansible-inventory --list -i {{ role_path }}/files/unicode.yml

@ -45,9 +45,9 @@
- name: loop
debug:
ignore_errors: true
changed_when: '{{ item.changed }}'
failed_when: '{{ item.failed }}'
when: '{{ item.when }}'
changed_when: item.changed
failed_when: item.failed
when: item.when
loop:
# ok
- changed: false

@ -1,6 +1,6 @@
- hosts: localhost
vars:
git_install: '{{ lookup("file", lookup("env", "OUTPUT_DIR") + "/git_install.json") }}'
git_install: '{{ lookup("file", lookup("env", "OUTPUT_DIR") + "/git_install.json") | from_json }}'
tasks:
- name: remove unwanted packages
package:

@ -2,7 +2,7 @@
- name: Assert that a embedded vault of a string with no newline works
assert:
that:
- '"{{ vault_encrypted_one_line_var }}" == "Setec Astronomy"'
- 'vault_encrypted_one_line_var == "Setec Astronomy"'
- name: Assert that a multi line embedded vault works, including new line
assert:

@ -2,7 +2,7 @@
- name: Assert that a vault encrypted file with embedded vault of a string with no newline works
assert:
that:
- '"{{ vault_file_encrypted_with_encrypted_one_line_var }}" == "Setec Astronomy"'
- 'vault_file_encrypted_with_encrypted_one_line_var == "Setec Astronomy"'
- name: Assert that a vault encrypted file with multi line embedded vault works, including new line
assert:

@ -12,7 +12,7 @@
msg: "{{ vaulted_value }}"
- debug:
msg: "{{ vaulted_value|type_debug }}"
msg: "{{ vaulted_value|pprint }}"
- assert:
that:

@ -6,7 +6,7 @@
- name: force warning
ping:
when: "{{pepe}} == 1"
when: "{{pepe == 1}}"
vars:
lola: 1
pepe: lola

@ -13,6 +13,7 @@ ansible-playbook logit.yml
ANSIBLE_LOG_PATH=${ALOG} ansible-playbook logit.yml
# ensure log file is created
[ -f "${ALOG}" ]
# Ensure tasks and log levels appear
grep -q '\[normal task\]' "${ALOG}"
grep -q 'INFO| TASK \[force warning\]' "${ALOG}"

@ -5,7 +5,7 @@
when: inventory_hostname == 'testhost2'
- name: EXPECTED FAILURE ejinja eval of a var that should not exist
debug: msg="{{ test }}"
ping: data="{{ test }}"
- name: testhost should never reach here as testhost2 failure above should end play
debug:

@ -51,7 +51,7 @@
assert:
that:
- "'1.0.0' in apt_downgrade_foo_version.stdout"
- "{{ apt_downgrade_foo_version.changed }}"
- "apt_downgrade_foo_version.changed"
- name: downgrade foo with flag again
apt:

@ -39,7 +39,7 @@
assert:
that:
- "'1.0.0' not in foo_version.stdout"
- "{{ foo_version.changed }}"
- "foo_version.changed"
- name: "(upgrade type: {{upgrade_type}}) upgrade packages to latest version (Idempotant)"
apt:

@ -44,7 +44,7 @@
that:
- 'result.changed'
- 'result.state == "present"'
- 'result.repo == "{{test_ppa_name}}"'
- 'result.repo == test_ppa_name'
- name: 'examine apt cache mtime'
stat: path='/var/cache/apt/pkgcache.bin'
@ -78,7 +78,7 @@
that:
- 'result.changed'
- 'result.state == "present"'
- 'result.repo == "{{test_ppa_name}}"'
- 'result.repo == test_ppa_name'
- name: 'examine apt cache mtime'
stat: path='/var/cache/apt/pkgcache.bin'
@ -112,7 +112,7 @@
that:
- 'result.changed'
- 'result.state == "present"'
- 'result.repo == "{{test_ppa_name}}"'
- 'result.repo == test_ppa_name'
- name: 'examine apt cache mtime'
stat: path='/var/cache/apt/pkgcache.bin'
@ -157,7 +157,7 @@
that:
- 'result.changed'
- 'result.state == "present"'
- 'result.repo == "{{test_ppa_spec}}"'
- 'result.repo == test_ppa_spec'
- '"sources_added" in result'
- 'result.sources_added | length == 1'
- '"git" in result.sources_added[0]'
@ -182,7 +182,7 @@
that:
- 'result.changed'
- 'result.state == "absent"'
- 'result.repo == "{{test_ppa_spec}}"'
- 'result.repo == test_ppa_spec'
- '"sources_added" in result'
- 'result.sources_added | length == 0'
- '"sources_removed" in result'
@ -216,7 +216,7 @@
that:
- 'result.changed'
- 'result.state == "present"'
- 'result.repo == "{{test_ppa_spec}}"'
- 'result.repo == test_ppa_spec'
- name: 'examine source file'
stat: path='/etc/apt/sources.list.d/{{test_ppa_filename}}.list'

@ -1,2 +1,3 @@
shippable/posix/group5
context/controller # this is a controller-only action, the module is just for documentation
gather_facts/no

@ -1,20 +1,11 @@
from __future__ import annotations
import json
import sys
from ansible.module_utils.basic import AnsibleModule
def main():
if "--interactive" in sys.argv:
import ansible.module_utils.basic
ansible.module_utils.basic._ANSIBLE_ARGS = json.dumps(dict(
ANSIBLE_MODULE_ARGS=dict(
fail_mode="graceful"
)
))
module = AnsibleModule(
argument_spec=dict(
fail_mode=dict(type='list', default=['success'])

@ -108,7 +108,7 @@
- name: assert task was successfully checked
assert:
that:
- fnf_result.finished
- fnf_result.finished == 1
- fnf_result is finished
- name: test graceful module failure

@ -1,21 +1,11 @@
from __future__ import annotations
import json
import sys
import time
from ansible.module_utils.basic import AnsibleModule
def main():
if "--interactive" in sys.argv:
import ansible.module_utils.basic
ansible.module_utils.basic._ANSIBLE_ARGS = json.dumps(dict(
ANSIBLE_MODULE_ARGS=dict(
fail_mode="graceful"
)
))
module = AnsibleModule(
argument_spec=dict(
fail_mode=dict(type='list', default=['success'])

@ -25,5 +25,5 @@
stat:
path: "{{ remote_tmp_dir_test }}/unreadable/createme/file.txt"
register: path_created
failed_when: path_created.exists is false
failed_when: path_created.stat.exists is false
when: "ansible_user_id == 'root'"

@ -10,7 +10,7 @@ rm -f block_test.out
# run the test and check to make sure the right number of completions was logged
ansible-playbook -vv main.yml -i ../../inventory | tee block_test.out
env python -c \
'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
'import sys, re; sys.stdout.write(re.sub(r"\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
<block_test.out >block_test_wo_colors.out
[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
# cleanup the output log again, to make sure the test is clean
@ -18,7 +18,7 @@ rm -f block_test.out block_test_wo_colors.out
# run test with free strategy and again count the completions
ansible-playbook -vv main.yml -i ../../inventory -e test_strategy=free | tee block_test.out
env python -c \
'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
'import sys, re; sys.stdout.write(re.sub(r"\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
<block_test.out >block_test_wo_colors.out
[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
# cleanup the output log again, to make sure the test is clean
@ -26,7 +26,7 @@ rm -f block_test.out block_test_wo_colors.out
# run test with host_pinned strategy and again count the completions
ansible-playbook -vv main.yml -i ../../inventory -e test_strategy=host_pinned | tee block_test.out
env python -c \
'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
'import sys, re; sys.stdout.write(re.sub(r"\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
<block_test.out >block_test_wo_colors.out
[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
@ -34,7 +34,7 @@ env python -c \
rm -f block_test.out block_test_wo_colors.out
ansible-playbook -vv block_fail.yml -i ../../inventory | tee block_test.out
env python -c \
'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
'import sys, re; sys.stdout.write(re.sub(r"\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
<block_test.out >block_test_wo_colors.out
[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]

@ -27,8 +27,8 @@ class CallbackModule(CallbackBase):
super(CallbackModule, self).__init__(*args, **kwargs)
self.requested_to_resolved = {}
def v2_playbook_on_task_start(self, task, is_conditional):
self.requested_to_resolved[task.action] = task.resolved_action
def v2_runner_on_ok(self, result):
self.requested_to_resolved[result._task.action] = result._task.resolved_action
def v2_playbook_on_stats(self, stats):
for requested, resolved in self.requested_to_resolved.items():

@ -320,7 +320,7 @@
assert:
that:
- shell_result0 is changed
- shell_result0.cmd == '{{ remote_tmp_dir_test }}/test.sh'
- shell_result0.cmd == remote_tmp_dir_test + '/test.sh'
- shell_result0.rc == 0
- shell_result0.stderr == ''
- shell_result0.stdout == 'win'
@ -582,3 +582,8 @@
shell: '{{ role_path }}/scripts/yoink.sh &'
delegate_to: localhost
timeout: 5
- name: Run command that does not exist
command: commandthatdoesnotexist --would-be-awkward
register: res
failed_when: res.stdout != '' or res.stderr != ''

@ -52,7 +52,7 @@ class LookupModule(LookupBase):
for cast in (list, int, bool, str):
option = 'test_%s' % str(cast).replace("<class '", '').replace("'>", '')
value = self.get_option(option)
if value is None or type(value) is cast:
if value is None or isinstance(value, cast):
continue
raise Exception('%s is not a %s: got %s/%s' % (option, cast, type(value), value))

@ -52,7 +52,7 @@ class LookupModule(LookupBase):
if option in kwargs:
self.set_option(option, kwargs[option])
value = self.get_option(option)
if type(value) is not cast:
if not isinstance(value, cast):
raise Exception('%s is not a %s: got %s/%s' % (option, cast, type(value), value))
return terms

@ -20,7 +20,7 @@
- name: Check that there are no ACLs leftovers
assert:
that:
- "'user:{{ remote_unprivileged_user }}:r-x\t#effective:r--' not in acls.stdout_lines"
- "'user:' + remote_unprivileged_user + ':r-x\t#effective:r--' not in acls.stdout_lines"
- name: Check that permissions match with what was set in the mode param
assert:

@ -941,7 +941,6 @@
- "stat_dir_in_copy_link.stat.exists"
- "not stat_dir_in_copy_link.stat.islnk"
- "stat_dir_in_copy_link.stat.isdir"
-
- "stat_dir_outside_copy_link.stat.exists"
- "not stat_dir_outside_copy_link.stat.islnk"
- "stat_dir_outside_copy_link.stat.isdir"
@ -1064,7 +1063,7 @@
- name: Test that remote_dir is appropriate for this test (absolute path)
assert:
that:
- '{{ remote_dir_expanded[0] == "/" }}'
- 'remote_dir_expanded[0] == "/"'
- block:
- name: Create a directory to copy
@ -1262,7 +1261,7 @@
assert:
that:
- "copy_result6.changed"
- "copy_result6.dest == '{{remote_dir_expanded}}/multiline.txt'"
- "copy_result6.dest == remote_dir_expanded + '/multiline.txt'"
- "copy_result6.checksum == '9cd0697c6a9ff6689f0afb9136fa62e0b3fee903'"
# test overwriting a file as an unprivileged user (pull request #8624)
@ -2165,26 +2164,26 @@
assert:
that:
- testcase5 is changed
- "stat_new_dir_with_chown.stat.uid == {{ ansible_copy_test_user.uid }}"
- "stat_new_dir_with_chown.stat.gid == {{ ansible_copy_test_group.gid }}"
- "stat_new_dir_with_chown.stat.pw_name == '{{ ansible_copy_test_user_name }}'"
- "stat_new_dir_with_chown.stat.gr_name == '{{ ansible_copy_test_user_name }}'"
- "stat_new_dir_with_chown_file1.stat.uid == {{ ansible_copy_test_user.uid }}"
- "stat_new_dir_with_chown_file1.stat.gid == {{ ansible_copy_test_group.gid }}"
- "stat_new_dir_with_chown_file1.stat.pw_name == '{{ ansible_copy_test_user_name }}'"
- "stat_new_dir_with_chown_file1.stat.gr_name == '{{ ansible_copy_test_user_name }}'"
- "stat_new_dir_with_chown_subdir.stat.uid == {{ ansible_copy_test_user.uid }}"
- "stat_new_dir_with_chown_subdir.stat.gid == {{ ansible_copy_test_group.gid }}"
- "stat_new_dir_with_chown_subdir.stat.pw_name == '{{ ansible_copy_test_user_name }}'"
- "stat_new_dir_with_chown_subdir.stat.gr_name == '{{ ansible_copy_test_user_name }}'"
- "stat_new_dir_with_chown_subdir_file12.stat.uid == {{ ansible_copy_test_user.uid }}"
- "stat_new_dir_with_chown_subdir_file12.stat.gid == {{ ansible_copy_test_group.gid }}"
- "stat_new_dir_with_chown_subdir_file12.stat.pw_name == '{{ ansible_copy_test_user_name }}'"
- "stat_new_dir_with_chown_subdir_file12.stat.gr_name == '{{ ansible_copy_test_user_name }}'"
- "stat_new_dir_with_chown_link_file12.stat.uid == {{ ansible_copy_test_user.uid }}"
- "stat_new_dir_with_chown_link_file12.stat.gid == {{ ansible_copy_test_group.gid }}"
- "stat_new_dir_with_chown_link_file12.stat.pw_name == '{{ ansible_copy_test_user_name }}'"
- "stat_new_dir_with_chown_link_file12.stat.gr_name == '{{ ansible_copy_test_user_name }}'"
- stat_new_dir_with_chown.stat.uid == ansible_copy_test_user.uid
- stat_new_dir_with_chown.stat.gid == ansible_copy_test_group.gid
- stat_new_dir_with_chown.stat.pw_name == ansible_copy_test_user_name
- stat_new_dir_with_chown.stat.gr_name == ansible_copy_test_user_name
- stat_new_dir_with_chown_file1.stat.uid == ansible_copy_test_user.uid
- stat_new_dir_with_chown_file1.stat.gid == ansible_copy_test_group.gid
- stat_new_dir_with_chown_file1.stat.pw_name == ansible_copy_test_user_name
- stat_new_dir_with_chown_file1.stat.gr_name == ansible_copy_test_user_name
- stat_new_dir_with_chown_subdir.stat.uid == ansible_copy_test_user.uid
- stat_new_dir_with_chown_subdir.stat.gid == ansible_copy_test_group.gid
- stat_new_dir_with_chown_subdir.stat.pw_name == ansible_copy_test_user_name
- stat_new_dir_with_chown_subdir.stat.gr_name == ansible_copy_test_user_name
- stat_new_dir_with_chown_subdir_file12.stat.uid == ansible_copy_test_user.uid
- stat_new_dir_with_chown_subdir_file12.stat.gid == ansible_copy_test_group.gid
- stat_new_dir_with_chown_subdir_file12.stat.pw_name == ansible_copy_test_user_name
- stat_new_dir_with_chown_subdir_file12.stat.gr_name == ansible_copy_test_user_name
- stat_new_dir_with_chown_link_file12.stat.uid == ansible_copy_test_user.uid
- stat_new_dir_with_chown_link_file12.stat.gid == ansible_copy_test_group.gid
- stat_new_dir_with_chown_link_file12.stat.pw_name == ansible_copy_test_user_name
- stat_new_dir_with_chown_link_file12.stat.gr_name == ansible_copy_test_user_name
always:
- name: execute - remove the user for test
@ -2356,7 +2355,7 @@
assert:
that:
- fail_copy_encrypted_file is failed
- fail_copy_encrypted_file.msg == 'A vault password or secret must be specified to decrypt {{role_path}}/files-different/vault/vault-file'
- fail_copy_encrypted_file.msg.endswith('A vault password or secret must be specified to decrypt ' + role_path + '/files-different/vault/vault-file')
- name: fail to copy a directory with an encrypted file without the password
copy:
@ -2369,7 +2368,7 @@
assert:
that:
- fail_copy_directory_with_enc_file is failed
- fail_copy_directory_with_enc_file.msg == 'A vault password or secret must be specified to decrypt {{role_path}}/files-different/vault/vault-file'
- fail_copy_directory_with_enc_file.msg.endswith('A vault password or secret must be specified to decrypt ' + role_path + '/files-different/vault/vault-file')
#
# Test for issue 74536: recursively copy all nested directories with remote_src=yes and src='dir/' when dest exists

@ -2,19 +2,21 @@
set -eux
set -o pipefail
trap 'rm -f out' EXIT
ansible-playbook main.yml -i ../../inventory | tee out
ansible-playbook main.yml -i ../../inventory "${@}" | tee out
for i in 1 2 3; do
grep "ok: \[localhost\] => (item=$i)" out
grep "\"item\": $i" out
done
ansible-playbook main_fqcn.yml -i ../../inventory | tee out
ansible-playbook main_fqcn.yml -i ../../inventory "${@}" | tee out
for i in 1 2 3; do
grep "ok: \[localhost\] => (item=$i)" out
grep "\"item\": $i" out
done
# ensure debug does not set top level vars when looking at ansible_facts
ansible-playbook nosetfacts.yml "$@"
ansible-playbook nosetfacts.yml "${@}"

@ -5,7 +5,7 @@
name: 'host{{ item }}'
groups:
- test
loop: '{{ range(10) }}'
loop: '{{ range(10) | list }}'
# This task may fail, if it does, it means the same thing as if the assert below fails
- set_fact:
@ -13,7 +13,7 @@
delegate_to: '{{ groups.test|random }}'
delegate_facts: true
# Purposefully smaller loop than group count
loop: '{{ range(5) }}'
loop: '{{ range(5) | list }}'
- hosts: test
gather_facts: false

@ -5,7 +5,7 @@
name: 'host{{ item }}'
groups:
- test
loop: '{{ range(10) }}'
loop: '{{ range(10) | list }}'
- set_fact:
dv: '{{ ansible_delegated_vars[ansible_host]["ansible_host"] }}'

@ -90,8 +90,9 @@
that:
- skip_broken_res.rc == 0
- skip_broken_res.results|length == 2
- res.results|select("contains", "Installed: broken-a-1.2.4")|length > 0
- res.results|select("contains", "Installed: broken-d-1.2.5")|length > 0
# FIXME: The following assertions have always been broken. Fix the code and/or test as needed.
# - 'skip_broken_res.results|select("contains", "Installed: broken-a-1.2.4")|length > 0'
# - 'skip_broken_res.results|select("contains", "Installed: broken-d-1.2.5")|length > 0'
- name: Remove all test packages
dnf:
@ -126,10 +127,12 @@
check_mode: true
register: res
- assert:
- vars:
pkg_state: '{{ "Downgrade:" if dnf5|default(false) else "Installed:" }}'
assert:
that:
- res is changed
- res.results|select("contains", "Installed: broken-a-1.2.3")|length > 0
- res.results|select("contains", pkg_state ~ " broken-a-1.2.3")|length > 0
# Still case 2, but with broken package to test skip_broken
# skip_broken: false
@ -177,10 +180,12 @@
check_mode: true
register: res
- assert:
- vars:
pkg_state: '{{ "Downgrade:" if dnf5|default(false) else "Installed:" }}'
assert:
that:
- res is changed
- res.results|select("contains", "Installed: broken-a-1.2.3")|length > 0
- res.results|select("contains", pkg_state ~ " broken-a-1.2.3")|length > 0
# Case 3 still, with broken package and skip_broken tests like above.
- name: Install an older known broken version of broken-a, allow_downgrade=true, skip_broken=false

@ -15,5 +15,6 @@
that:
- sos_rm is successful
- sos_rm is changed
- sos_rm.results|select("contains", "Removed: sos-{{ sos_version }}-{{ sos_release }}")|length > 0
- |
sos_rm.results|select("contains", "Removed: sos-" ~ sos_version ~ "-" ~ sos_release) | length > 0
- sos_rm.results|length > 0

@ -96,7 +96,7 @@
- name: assert foo==outer
assert:
that:
- "{{ test_foo.results[0].stdout == 'outer' }}"
- test_foo.results[0].stdout == 'outer'
- name: set environment on a task
environment:
@ -120,7 +120,7 @@
- name: assert foo==outer
assert:
that:
- "{{ test_foo.results[0].stdout == 'outer' }}"
- test_foo.results[0].stdout == 'outer'
- name: set environment on a block
environment:
@ -170,4 +170,4 @@
- name: assert foo==outer
assert:
that:
- "{{ test_foo.results[0].stdout == 'outer' }}"
- test_foo.results[0].stdout == 'outer'

@ -1,3 +1,4 @@
shippable/posix/group2
destructive
needs/target/setup_pexpect
gather_facts/no

@ -117,7 +117,7 @@
- name: assert chdir works
assert:
that:
- "'{{chdir_result.stdout | trim}}' == '{{remote_tmp_dir_real_path.stdout | trim}}'"
- chdir_result.stdout | trim == remote_tmp_dir_real_path.stdout | trim
- name: test timeout option
expect:

@ -154,10 +154,10 @@
attributes_supported: yes
when:
- attribute_A_set is success
- attribute_A_set.stdout_lines
- attribute_A_set.stdout_lines is truthy
- "'A' in attribute_A_set.stdout_lines[0].split()[0]"
- attribute_A_unset is success
- attribute_A_unset.stdout_lines
- attribute_A_unset.stdout_lines is truthy
- "'A' not in attribute_A_unset.stdout_lines[0].split()[0]"
- name: explicitly set file attribute "A"
@ -930,7 +930,7 @@
that:
- "file_error3 is failed"
- "file_error3.msg == 'src does not exist'"
- "file_error3.dest == '{{ remote_tmp_dir_test }}/hard.txt' | expanduser"
- "file_error3.dest == remote_tmp_dir_test + '/hard.txt' | expanduser"
- "file_error3.src == 'non-existing-file-that-does-not-exist.txt'"
- block:

@ -200,7 +200,7 @@
- "missing_dst_no_follow_enable_force_use_mode2 is changed"
- "missing_dst_no_follow_enable_force_use_mode3 is not changed"
- "soft3_result['stat'].islnk"
- "soft3_result['stat'].lnk_target == '{{ user.home }}/nonexistent'"
- "soft3_result['stat'].lnk_target == user.home + '/nonexistent'"
#
# Test creating a link to a directory https://github.com/ansible/ansible/issues/1369

@ -1,2 +1,3 @@
shippable/posix/group4
setup/always/setup_passlib_controller # required for setup_test_user
destructive

@ -106,8 +106,8 @@
- name: Test hash filter
assert:
that:
- '"{{ "hash" | hash("sha1") }}" == "2346ad27d7568ba9896f1b7da6b5991251debdf2"'
- '"{{ "café" | hash("sha1") }}" == "f424452a9673918c6f09b0cdd35b20be8e6ae7d7"'
- '"hash" | hash("sha1") == "2346ad27d7568ba9896f1b7da6b5991251debdf2"'
- '"café" | hash("sha1") == "f424452a9673918c6f09b0cdd35b20be8e6ae7d7"'
- name: Test unsupported hash type
debug:
@ -237,7 +237,8 @@
# but even though it's redundant with those unit tests, we do at least the most complicated example of the documentation here
- "(default | combine(patch, recursive=True, list_merge='append_rp')) == result"
- combine_fail is failed
- "combine_fail.msg == \"'recursive' and 'list_merge' are the only valid keyword arguments\""
- |
"'recursive' and 'list_merge' are the only valid keyword arguments" is in combine_fail.msg
- set_fact:
combine: "{{[x, [y]] | combine(z)}}"
@ -247,7 +248,7 @@
- name: Ensure combining objects which aren't dictionaries throws an error
assert:
that:
- "result.msg.startswith(\"failed to combine variables, expected dicts but got\")"
- "'failed to combine variables, expected dicts but got' is in result.msg"
- name: Ensure combining two dictionaries containing undefined variables provides a helpful error
block:
@ -262,7 +263,8 @@
- assert:
that:
- "result.msg.startswith('The task includes an option with an undefined variable')"
- |
result.msg is contains "'undef_variable' is undefined"
- set_fact:
combined: "{{ foo | combine({'key2': {'nested': [undef_variable]}})}}"
@ -271,7 +273,8 @@
- assert:
that:
- "result.msg.startswith('The task includes an option with an undefined variable')"
- |
result.msg is contains "'undef_variable' is undefined"
- name: regex_search
set_fact:
@ -388,9 +391,9 @@
that:
- '"]]^"|regex_escape(re_type="posix_basic") == "\\]\\]\\^"'
- regex_escape_fail_1 is failed
- 'regex_escape_fail_1.msg == "Regex type (posix_extended) not yet implemented"'
- '"Regex type (posix_extended) not yet implemented" is in regex_escape_fail_1.msg'
- regex_escape_fail_2 is failed
- 'regex_escape_fail_2.msg == "Invalid regex type (haha)"'
- '"Invalid regex type (haha)" is in regex_escape_fail_2.msg'
- name: Verify from_yaml and from_yaml_all
assert:
@ -517,11 +520,19 @@
- name: Verify mandatory
assert:
that:
- '{{mandatory_demo|mandatory}} == 123'
- mandatory_demo | mandatory == 123
- mandatory_1 is failed
- "mandatory_1.msg == \"Mandatory variable 'hey' not defined.\""
- mandatory_2 is failed
- "mandatory_2.msg == 'You did not give me a variable. I am a sad wolf.'"
- mandatory_2.msg is contains "You did not give me a variable. I am a sad wolf."
- name: Verify undef throws if resolved (default message)
set_fact:
foo: '{{ fail_foo }}'
vars:
fail_foo: '{{ undef() }}'
ignore_errors: yes
register: fail_default
- name: Verify undef throws if resolved
set_fact:
@ -552,7 +563,10 @@
- name: Verify undef
assert:
that:
- fail_default is failed
- fail_default.msg is contains('Mandatory variable has not been overridden')
- fail_1 is failed
- fail_1.msg is contains('Expected failure')
- not (fail_2 is failed)
- not (fail_3 is failed)
@ -603,9 +617,9 @@
assert:
that:
- subelements_1 is failed
- 'subelements_1.msg == "obj must be a list of dicts or a nested dict"'
- '"obj must be a list of dicts or a nested dict" is in subelements_1.msg'
- subelements_2 is failed
- '"subelements must be a list or a string" in subelements_2.msg'
- '"subelements must be a list or a string" is in subelements_2.msg'
- 'subelements_demo|subelements("does not compute", skip_missing=True) == []'
- subelements_3 is failed
- '"could not find" in subelements_3.msg'
@ -706,7 +720,7 @@
that:
- '"foo"|type_debug == "str"'
- name: Assert that a jinja2 filter that produces a map is auto unrolled
- name: Assert that a jinja2 filter that produces an iterable is auto unrolled
assert:
that:
- thing|map(attribute="bar")|first == 123

@ -209,14 +209,14 @@
tags: human_to_bytes
assert:
that:
- "{{'0'|human_to_bytes}} == 0"
- "{{'0.1'|human_to_bytes}} == 0"
- "{{'0.9'|human_to_bytes}} == 1"
- "{{'1'|human_to_bytes}} == 1"
- "{{'10.00 KB'|human_to_bytes}} == 10240"
- "{{ '11 MB'|human_to_bytes}} == 11534336"
- "{{ '1.1 GB'|human_to_bytes}} == 1181116006"
- "{{'10.00 Kb'|human_to_bytes(isbits=True)}} == 10240"
- "'0'|human_to_bytes == 0"
- "'0.1'|human_to_bytes == 0"
- "'0.9'|human_to_bytes == 1"
- "'1'|human_to_bytes == 1"
- "'10.00 KB'|human_to_bytes == 10240"
- " '11 MB'|human_to_bytes == 11534336"
- " '1.1 GB'|human_to_bytes == 1181116006"
- "'10.00 Kb'|human_to_bytes(isbits=True) == 10240"
- name: Verify human_to_bytes (bad string)
set_fact:
@ -228,7 +228,7 @@
- name: Verify human_to_bytes (bad string)
tags: human_to_bytes
assert:
that: "{{_human_bytes_test.failed}}"
that: "_human_bytes_test.failed"
- name: Verify that union can be chained
tags: union
@ -236,7 +236,7 @@
unions: '{{ [1,2,3]|union([4,5])|union([6,7]) }}'
assert:
that:
- "unions|type_debug == 'list'"
- unions|type_debug == 'list'
- "unions|length == 7"
- name: Test union with unhashable item
@ -245,7 +245,7 @@
unions: '{{ [1,2,3]|union([{}]) }}'
assert:
that:
- "unions|type_debug == 'list'"
- unions|type_debug == 'list'
- "unions|length == 4"
- name: Verify rekey_on_member with invalid "duplicates" kwarg
@ -311,10 +311,4 @@
- assert:
that: "test_var == 'test'"
- assert:
that: "rekeyed == {'value': {'test': 'value'}}"
# TODO: For some reason, the coverage tool isn't accounting for the last test
# so add another "last test" to fake it...
- assert:
that:
- true
that: "rekeyed == {'value': {'test': 'value'} }"

@ -18,7 +18,3 @@
- "{'foo': 'bar'}|urlencode == 'foo=bar'"
- "{'foo': 'bar', 'baz': 'buz'}|urlencode == 'foo=bar&baz=buz'"
- "()|urlencode == ''"
# Needed (temporarily) due to coverage reports not including the last task.
- assert:
that: true

@ -336,7 +336,7 @@
- name: assert we skipped the ogg file
assert:
that:
- '"{{ remote_tmp_dir_test }}/e/f/g/h/8.ogg" not in find_test3_list'
- remote_tmp_dir_test + "/e/f/g/h/8.ogg" not in find_test3_list
- name: patterns with regex
find:
@ -386,7 +386,7 @@
assert:
that:
- result.matched == 1
- '"{{ remote_tmp_dir_test }}/astest/old.txt" in astest_list'
- remote_tmp_dir_test + "/astest/old.txt" in astest_list
- name: find files newer than 1 week
find:
@ -401,7 +401,7 @@
assert:
that:
- result.matched == 1
- '"{{ remote_tmp_dir_test }}/astest/new.txt" in astest_list'
- remote_tmp_dir_test + "/astest/new.txt" in astest_list
- name: add some content to the new file
shell: "echo hello world > {{ remote_tmp_dir_test }}/astest/new.txt"
@ -421,7 +421,7 @@
assert:
that:
- result.matched == 1
- '"{{ remote_tmp_dir_test }}/astest/new.txt" in astest_list'
- remote_tmp_dir_test + "/astest/new.txt" in astest_list
- '"checksum" in result.files[0]'
- name: find ANY item with LESS than 5 bytes, also get checksums
@ -440,8 +440,8 @@
assert:
that:
- result.matched == 2
- '"{{ remote_tmp_dir_test }}/astest/old.txt" in astest_list'
- '"{{ remote_tmp_dir_test }}/astest/.hidden.txt" in astest_list'
- remote_tmp_dir_test + "/astest/old.txt" in astest_list
- remote_tmp_dir_test + "/astest/.hidden.txt" in astest_list
- '"checksum" in result.files[0]'
# Test permission error is correctly handled by find module

@ -61,7 +61,7 @@
- assert:
that:
- exact_mode_0644.files == exact_mode_0644_symbolic.files
- exact_mode_0644.files[0].path == '{{ remote_tmp_dir_test }}/mode_0644'
- exact_mode_0644.files[0].path == remote_tmp_dir_test + '/mode_0644'
- user_readable_octal.files == user_readable_symbolic.files
- user_readable_octal.files|map(attribute='path')|map('basename')|sort == ['mode_0400', 'mode_0444', 'mode_0644', 'mode_0666', 'mode_0700']
- other_readable_octal.files == other_readable_symbolic.files

@ -433,7 +433,7 @@
- name: Test reading facts from default fact_path
assert:
that:
- '"{{ ansible_local.testfact.fact_dir }}" == "default"'
- 'ansible_local.testfact.fact_dir == "default"'
- hosts: facthost9
tags: [ 'fact_local']
@ -444,7 +444,7 @@
- name: Test reading facts from custom fact_path
assert:
that:
- '"{{ ansible_local.testfact.fact_dir }}" == "custom"'
- 'ansible_local.testfact.fact_dir == "custom"'
- hosts: facthost20
tags: [ 'fact_facter_ohai' ]

@ -118,5 +118,5 @@
- name: ARCHIVE | Assert that prefix directory is found
assert:
that: '{{ item.matched == 1 }}'
that: item.matched == 1
with_items: "{{ archive_check.results }}"

@ -172,7 +172,7 @@
- name: DEPTH | check update arrived
assert:
that:
- "{{ a_file.content | b64decode | trim }} == 3"
- a_file.content | b64decode | int == 3
- git_fetch is changed
- name: DEPTH | clear checkout_dir

@ -58,7 +58,7 @@
- name: LOCALMODS | check update arrived
assert:
that:
- "{{ a_file.content | b64decode | trim }} == 2"
- a_file.content | b64decode | int == 2
- git_fetch_force is changed
- name: LOCALMODS | clear checkout_dir
@ -127,7 +127,7 @@
- name: LOCALMODS | check update arrived
assert:
that:
- "{{ a_file.content | b64decode | trim }} == 2"
- a_file.content | b64decode | int == 2
- git_fetch_force is changed
- name: LOCALMODS | clear checkout_dir

@ -39,7 +39,7 @@
- include_tasks: gpg-verification.yml
when:
- not gpg_version.stderr
- gpg_version.stdout
- gpg_version.stdout is truthy
- not (ansible_os_family == 'RedHat' and ansible_distribution_major_version is version('7', '<'))
- import_tasks: localmods.yml
- import_tasks: reset-origin.yml

@ -32,7 +32,7 @@
- name: SUBMODULES | Ensure submodule1 is at the appropriate commit
assert:
that: '{{ submodule1.stdout_lines | length }} == 2'
that: submodule1.stdout_lines | length == 2
- name: SUBMODULES | clear checkout_dir
file:
@ -53,7 +53,7 @@
- name: SUBMODULES | Ensure submodule1 is at the appropriate commit
assert:
that: '{{ submodule1.stdout_lines | length }} == 4'
that: submodule1.stdout_lines | length == 4
- name: SUBMODULES | Copy the checkout so we can run several different tests on it
command: 'cp -pr {{ checkout_dir }} {{ checkout_dir }}.bak'
@ -84,8 +84,8 @@
- name: SUBMODULES | Ensure both submodules are at the appropriate commit
assert:
that:
- '{{ submodule1.stdout_lines|length }} == 4'
- '{{ submodule2.stdout_lines|length }} == 2'
- submodule1.stdout_lines|length == 4
- submodule2.stdout_lines|length == 2
- name: SUBMODULES | Remove checkout dir
@ -112,7 +112,7 @@
- name: SUBMODULES | Ensure submodule1 is at the appropriate commit
assert:
that: '{{ submodule1.stdout_lines | length }} == 5'
that: submodule1.stdout_lines | length == 5
- name: SUBMODULES | Test that update with recursive found new submodules
@ -121,7 +121,7 @@
- name: SUBMODULES | Ensure submodule2 is at the appropriate commit
assert:
that: '{{ submodule2.stdout_lines | length }} == 4'
that: submodule2.stdout_lines | length == 4
- name: SUBMODULES | clear checkout_dir
file:
@ -147,4 +147,4 @@
- name: SUBMODULES | Ensure submodule1 is at the appropriate commit
assert:
that: '{{ submodule1.stdout_lines | length }} == 4'
that: submodule1.stdout_lines | length == 4

@ -135,20 +135,20 @@
gather_facts: false
tasks:
- name: check that alpaca matched all four groups
assert: { that: ["genus_vicugna", "genus_vic", "genus_vi", "genus_VICUGNA"] }
assert: { that: ["genus_vicugna is defined", "genus_vic is defined", "genus_vi is defined", "genus_VICUGNA is defined"] }
- name: llama validation of groups
hosts: llama
gather_facts: false
tasks:
- name: check that llama matched all four groups
assert: { that: ["genus_lama", "genus_lam", "genus_la", "genus_LAMA"] }
assert: { that: ["genus_lama is defined", "genus_lam is defined", "genus_la is defined", "genus_LAMA is defined"] }
- hosts: camel
gather_facts: false
tasks:
- name: check that camel matched all four groups
assert: { that: ["genus_camelus", "genus_cam", "genus_ca", "genus_CAMELUS"] }
assert: { that: ["genus_camelus is defined", "genus_cam is defined", "genus_ca is defined", "genus_CAMELUS is defined"] }
- hosts: vicugna
gather_facts: false

@ -76,7 +76,7 @@ ansible-playbook test_multiple_handlers_with_recursive_notification.yml -i inven
set +e
result="$(ansible-playbook test_handlers_inexistent_notify.yml -i inventory.handlers "$@" 2>&1)"
set -e
grep -q "ERROR! The requested handler 'notify_inexistent_handler' was not found in either the main handlers list nor in the listening handlers list" <<< "$result"
grep -q "The requested handler 'notify_inexistent_handler' was not found in either the main handlers list nor in the listening handlers list" <<< "$result"
# Notify inexistent handlers without errors when ANSIBLE_ERROR_ON_MISSING_HANDLER=false
ANSIBLE_ERROR_ON_MISSING_HANDLER=false ansible-playbook test_handlers_inexistent_notify.yml -i inventory.handlers -v "$@"
@ -116,19 +116,19 @@ ansible-playbook 58841.yml "$@" --tags evaluation_time -e test_var=myvar | tee o
# Test the handler is not found when the variable is undefined
ansible-playbook 58841.yml "$@" --tags evaluation_time 2>&1 | tee out.txt ; cat out.txt
grep out.txt -e "ERROR! The requested handler 'handler name with myvar' was not found"
grep out.txt -e "The requested handler 'handler name with myvar' was not found"
grep out.txt -e "\[WARNING\]: Handler 'handler name with {{ test_var }}' is unusable"
[ "$(grep out.txt -ce 'handler ran')" = "0" ]
[ "$(grep out.txt -ce 'handler with var ran')" = "0" ]
# Test include_role and import_role cannot be used as handlers
ansible-playbook test_role_as_handler.yml "$@" 2>&1 | tee out.txt
grep out.txt -e "ERROR! Using 'include_role' as a handler is not supported."
grep out.txt -e "Using 'include_role' as a handler is not supported."
# Test notifying a handler from within include_tasks does not work anymore
ansible-playbook test_notify_included.yml "$@" 2>&1 | tee out.txt
[ "$(grep out.txt -ce 'I was included')" = "1" ]
grep out.txt -e "ERROR! The requested handler 'handler_from_include' was not found in either the main handlers list nor in the listening handlers list"
grep out.txt -e "The requested handler 'handler_from_include' was not found in either the main handlers list nor in the listening handlers list"
ansible-playbook test_handlers_meta.yml -i inventory.handlers -vv "$@" | tee out.txt
[ "$(grep out.txt -ce 'RUNNING HANDLER \[noop_handler\]')" = "1" ]
@ -154,7 +154,7 @@ ansible-playbook include_handlers_fail_force.yml --force-handlers -i inventory.h
[ "$(grep out.txt -ce 'included handler ran')" = "1" ]
ansible-playbook test_flush_handlers_as_handler.yml -i inventory.handlers "$@" 2>&1 | tee out.txt
grep out.txt -e "ERROR! flush_handlers cannot be used as a handler"
grep out.txt -e "flush_handlers cannot be used as a handler"
ansible-playbook test_skip_flush.yml -i inventory.handlers "$@" 2>&1 | tee out.txt
[ "$(grep out.txt -ce 'handler ran')" = "0" ]
@ -178,13 +178,13 @@ grep out.txt -e "after flush"
ansible-playbook 79776.yml -i inventory.handlers "$@"
ansible-playbook test_block_as_handler.yml "$@" 2>&1 | tee out.txt
grep out.txt -e "ERROR! Using a block as a handler is not supported."
grep out.txt -e "Using a block as a handler is not supported."
ansible-playbook test_block_as_handler-include.yml "$@" 2>&1 | tee out.txt
grep out.txt -e "ERROR! Using a block as a handler is not supported."
grep out.txt -e "Using a block as a handler is not supported."
ansible-playbook test_block_as_handler-import.yml "$@" 2>&1 | tee out.txt
grep out.txt -e "ERROR! Using a block as a handler is not supported."
grep out.txt -e "Using a block as a handler is not supported."
ansible-playbook test_include_role_handler_once.yml -i inventory.handlers "$@" 2>&1 | tee out.txt
[ "$(grep out.txt -ce 'handler ran')" = "1" ]

@ -16,11 +16,6 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
- name: debug hash behaviour result
debug:
var: "{{ lookup('env', 'ANSIBLE_HASH_BEHAVIOUR') }}"
verbosity: 2
- name: assert hash behaviour is merge or replace
assert:
that:

@ -1,4 +1,5 @@
- hosts: testhost
gather_facts: no
vars_files:
- vars/test_hash_vars.yml
vars:

@ -1,2 +1,3 @@
shippable/posix/group4
context/controller
gather_facts/no

@ -3,6 +3,7 @@
set -eux
export ANSIBLE_ROLES_PATH=./roles
export ANSIBLE_GATHERING=explicit
function gen_task_files() {
for i in $(printf "%03d " {1..39}); do
@ -80,7 +81,7 @@ ANSIBLE_STRATEGY='free' ansible-playbook undefined_var/playbook.yml -i inventor
# include_ + apply (explicit inheritance)
ANSIBLE_STRATEGY='linear' ansible-playbook apply/include_apply.yml -i inventory "$@" --tags foo
set +e
OUT=$(ANSIBLE_STRATEGY='linear' ansible-playbook apply/import_apply.yml -i inventory "$@" --tags foo 2>&1 | grep 'ERROR! Invalid options for import_tasks: apply')
OUT=$(ANSIBLE_STRATEGY='linear' ansible-playbook apply/import_apply.yml -i inventory "$@" --tags foo 2>&1 | grep 'Invalid options for import_tasks: apply')
set -e
if [[ -z "$OUT" ]]; then
echo "apply on import_tasks did not cause error"
@ -105,7 +106,7 @@ ANSIBLE_HOST_PATTERN_MISMATCH=warning ansible-playbook run_once/playbook.yml "$@
# https://github.com/ansible/ansible/issues/48936
ansible-playbook -v handler_addressing/playbook.yml 2>&1 | tee test_handler_addressing.out
test "$(grep -E -c 'include handler task|ERROR! The requested handler '"'"'do_import'"'"' was not found' test_handler_addressing.out)" = 2
test "$(grep -E -c 'include handler task|The requested handler '"'"'do_import'"'"' was not found' test_handler_addressing.out)" = 2
# https://github.com/ansible/ansible/issues/49969
ansible-playbook -v parent_templating/playbook.yml 2>&1 | tee test_parent_templating.out

@ -1,2 +1,3 @@
shippable/posix/group5
context/controller
gather_facts/no

@ -1 +1,2 @@
shippable/posix/group2
gather_facts/no

@ -15,7 +15,7 @@
that:
- "testing == 789"
- "base_dir == 'environments/development'"
- "{{ included_one_file.ansible_included_var_files | length }} == 1"
- "included_one_file.ansible_included_var_files | length == 1"
- "'vars/environments/development/all.yml' in included_one_file.ansible_included_var_files[0]"
- name: include the vars/environments/development/all.yml and save results in all
@ -51,7 +51,7 @@
assert:
that:
- webapp_version is defined
- "'file_without_extension' in '{{ include_without_file_extension.ansible_included_var_files | join(' ') }}'"
- "'file_without_extension' in include_without_file_extension.ansible_included_var_files | join(' ')"
- name: include every directory in vars
include_vars:
@ -67,7 +67,7 @@
- "testing == 456"
- "base_dir == 'services'"
- "webapp_containers == 10"
- "{{ include_every_dir.ansible_included_var_files | length }} == 7"
- "include_every_dir.ansible_included_var_files | length == 7"
- "'vars/all/all.yml' in include_every_dir.ansible_included_var_files[0]"
- "'vars/environments/development/all.yml' in include_every_dir.ansible_included_var_files[1]"
- "'vars/environments/development/services/webapp.yml' in include_every_dir.ansible_included_var_files[2]"
@ -88,9 +88,9 @@
that:
- "testing == 789"
- "base_dir == 'environments/development'"
- "{{ include_without_webapp.ansible_included_var_files | length }} == 4"
- "'webapp.yml' not in '{{ include_without_webapp.ansible_included_var_files | join(' ') }}'"
- "'file_without_extension' not in '{{ include_without_webapp.ansible_included_var_files | join(' ') }}'"
- "include_without_webapp.ansible_included_var_files | length == 4"
- "'webapp.yml' not in (include_without_webapp.ansible_included_var_files | join(' '))"
- "'file_without_extension' not in (include_without_webapp.ansible_included_var_files | join(' '))"
- name: include only files matching webapp.yml
include_vars:
@ -104,9 +104,9 @@
- "testing == 101112"
- "base_dir == 'development/services'"
- "webapp_containers == 20"
- "{{ include_match_webapp.ansible_included_var_files | length }} == 1"
- "include_match_webapp.ansible_included_var_files | length == 1"
- "'vars/environments/development/services/webapp.yml' in include_match_webapp.ansible_included_var_files[0]"
- "'all.yml' not in '{{ include_match_webapp.ansible_included_var_files | join(' ') }}'"
- "'all.yml' not in (include_match_webapp.ansible_included_var_files | join(' '))"
- name: include only files matching webapp.yml and store results in webapp
include_vars:
@ -173,10 +173,10 @@
- name: Verify the hash variable
assert:
that:
- "{{ config | length }} == 3"
- "config | length == 3"
- "config.key0 == 0"
- "config.key1 == 0"
- "{{ config.key2 | length }} == 1"
- "config.key2 | length == 1"
- "config.key2.a == 21"
- name: Include the second file to merge the hash variable
@ -187,10 +187,10 @@
- name: Verify that the hash is merged
assert:
that:
- "{{ config | length }} == 4"
- "config | length == 4"
- "config.key0 == 0"
- "config.key1 == 1"
- "{{ config.key2 | length }} == 2"
- "config.key2 | length == 2"
- "config.key2.a == 21"
- "config.key2.b == 22"
- "config.key3 == 3"
@ -202,9 +202,9 @@
- name: Verify that the properties from the first file is cleared
assert:
that:
- "{{ config | length }} == 3"
- "config | length == 3"
- "config.key1 == 1"
- "{{ config.key2 | length }} == 1"
- "config.key2 | length == 1"
- "config.key2.b == 22"
- "config.key3 == 3"
@ -216,10 +216,10 @@
- name: Verify that the hash is merged after vars files are accumulated
assert:
that:
- "{{ config | length }} == 3"
- "config | length == 3"
- "config.key0 is undefined"
- "config.key1 == 1"
- "{{ config.key2 | length }} == 1"
- "config.key2 | length == 1"
- "config.key2.b == 22"
- "config.key3 == 3"

@ -1,2 +1,3 @@
shippable/posix/group5
context/controller
gather_facts/no

@ -2,6 +2,8 @@
set -eux
export ANSIBLE_GATHERING=explicit
ansible-playbook test_includes.yml -i ../../inventory "$@"
ansible-playbook inherit_notify.yml "$@"
@ -10,7 +12,7 @@ echo "EXPECTED ERROR: Ensure we fail if using 'include' to include a playbook."
set +e
result="$(ansible-playbook -i ../../inventory include_on_playbook_should_fail.yml -v "$@" 2>&1)"
set -e
grep -q "ERROR! 'include_tasks' is not a valid attribute for a Play" <<< "$result"
grep -q "'include_tasks' is not a valid attribute for a Play" <<< "$result"
ansible-playbook includes_loop_rescue.yml --extra-vars strategy=linear "$@"
ansible-playbook includes_loop_rescue.yml --extra-vars strategy=free "$@"

@ -13,7 +13,6 @@ DOCUMENTATION = """
"""
from ansible.errors import AnsibleParserError
from ansible.module_utils.common.text.converters import to_native
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
@ -39,5 +38,5 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
# constructed groups based variable values
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars, host, strict=strict, fetch_hostvars=True)
except Exception as e:
raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)), orig_exc=e)
except Exception as ex:
raise AnsibleParserError(f"Failed to parse {path}.") from ex

@ -42,7 +42,7 @@ ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS=always ansible-playbook -i ../../inventory
ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS=never ansible-playbook -i ../../inventory "$@" strategy.yml
# test extra vars
ansible-inventory -i testhost, -i ./extra_vars_constructed.yml --list -e 'from_extras=hey ' "$@"|grep '"example": "hellohey"'
ansible-inventory -i testhost, -i ./extra_vars_constructed.yml --list -e 'from_extras=hey ' "$@"|grep ': "hellohey"'
# test host vars from previous inventory sources
ansible-inventory -i ./inv_with_host_vars.yml -i ./host_vars_constructed.yml --graph "$@" | tee out.txt
@ -107,6 +107,7 @@ fi
# ensure we don't traceback on inventory due to variables with int as key
ansible-inventory -i inv_with_int.yml --list "$@"
# test in subshell relative paths work mid play for extra vars in inventory refresh
{
cd 1/2

@ -1,4 +1,5 @@
plugin: ansible.builtin.constructed
strict: True
keyed_groups:
- key: hostvar0
- key: hostvar1

@ -1,6 +1,6 @@
#!/usr/bin/env bash
set -eux
set -eux -o pipefail
ansible-inventory -i static_inventory.yml -i constructed.yml --graph | tee out.txt

@ -1 +1,2 @@
shippable/posix/group3
gather_facts/no

@ -1,6 +1,8 @@
#!/usr/bin/env bash
set -eux -o pipefail
# handle empty/commented out group keys correctly https://github.com/ansible/ansible/issues/47254
ANSIBLE_VERBOSITY=0 diff -w <(ansible-inventory -i ./test.yml --list) success.json
ANSIBLE_VERBOSITY=0 diff --unified -w <(ansible-inventory -i ./test.yml --list) success.json
ansible-inventory -i ./test_int_hostname.yml --list 2>&1 | grep 'Host pattern 1234 must be a string'

@ -32,10 +32,10 @@
},
"all": {
"children": [
"cousins",
"ungrouped",
"kids",
"the-maid",
"ungrouped"
"cousins",
"the-maid"
]
},
"cousins": {
@ -45,12 +45,12 @@
},
"kids": {
"hosts": [
"bobby",
"marcia",
"jan",
"cindy",
"greg",
"jan",
"marcia",
"peter"
"peter",
"bobby"
]
},
"the-maid": {

@ -14,18 +14,18 @@
- assert:
that:
- int_to_str == "'2'"
- 'int_to_str|type_debug in ["str", "unicode"]'
- int_to_str | type_debug == "str"
- 'int_to_str2 == "2"'
- 'int_to_str2|type_debug in ["NativeJinjaText"]'
- 'str_to_int == 2'
- 'str_to_int|type_debug == "int"'
- 'dict_to_str|type_debug in ["str", "unicode"]'
- 'list_to_str|type_debug in ["str", "unicode"]'
- str_to_int|type_debug == "int"
- dict_to_str|type_debug == "str"
- list_to_str|type_debug == "str"
- 'int_to_bool is sameas true'
- 'int_to_bool|type_debug == "bool"'
- 'str_true_to_bool is sameas true'
- 'str_true_to_bool|type_debug == "bool"'
- 'str_false_to_bool is sameas false'
- 'str_false_to_bool|type_debug == "bool"'
- 'list_to_json_str|type_debug in ["NativeJinjaText"]'
- 'list_to_yaml_str|type_debug in ["NativeJinjaText"]'
- |
list_to_json_str == '["one", "two"]'
- 'list_to_yaml_str == "[one, two]\n"'

@ -5,7 +5,7 @@
- assert:
that:
- 'integer_sum == 3'
- 'integer_sum|type_debug == "int"'
- integer_sum|type_debug == "int"
- name: add casted string and int
set_fact:
@ -14,7 +14,6 @@
- assert:
that:
- 'integer_sum2 == 3'
- 'integer_sum2|type_debug == "int"'
- name: concatenate int and string
set_fact:
@ -23,7 +22,6 @@
- assert:
that:
- string_sum == "'12'"
- 'string_sum|type_debug in ["str", "unicode"]'
- name: add two lists
set_fact:
@ -32,7 +30,6 @@
- assert:
that:
- 'list_sum == ["one", "two", "three", "four"]'
- 'list_sum|type_debug == "list"'
- name: add two lists, multi expression
set_fact:
@ -40,7 +37,8 @@
- assert:
that:
- 'list_sum_multi|type_debug in ["str", "unicode"]'
- |
list_sum_multi == "['one', 'two'] + ['three', 'four']"
- name: add two dicts
set_fact:
@ -58,7 +56,6 @@
- assert:
that:
- 'list_for_strings == "onetwo"'
- 'list_for_strings|type_debug in ["str", "unicode"]'
- name: loop through list with int
set_fact:
@ -67,7 +64,6 @@
- assert:
that:
- 'list_for_int == 1'
- 'list_for_int|type_debug == "int"'
- name: loop through list with ints
set_fact:
@ -75,8 +71,7 @@
- assert:
that:
- 'list_for_ints == 42'
- 'list_for_ints|type_debug == "int"'
- 'list_for_ints | int == 42'
- name: loop through list to create a new list
set_fact:

@ -5,7 +5,7 @@
- assert:
that:
- 'none_var is sameas none'
- 'none_var|type_debug == "NoneType"'
- 'none_var_direct is sameas none'
- 'none_var_direct|type_debug == "NoneType"'
- none_var is sameas none
- none_var|type_debug == "NoneType"
- none_var_direct is sameas none
- none_var_direct|type_debug == "NoneType"

@ -1,2 +1,3 @@
shippable/posix/group4
context/controller
gather_facts/no

@ -53,6 +53,9 @@
- lookup('csvfile', 'Jane', file='people.csv', delimiter=',', col=0, keycol=1) == "Smith"
- lookup('csvfile', 'German von Lastname file=people.csv delimiter=, col=1') == "Demo"
- debug:
msg: output {{ lookup('csvfile', 'fruit', file='tabs.csv', delimiter='TAB', col=1) }}
- name: Check tab-separated file
assert:
that:

@ -1,6 +1,6 @@
#!/bin/sh
set -ex
set -eux
unset USR
# this should succeed and return 'nobody' as var is undefined

@ -109,8 +109,8 @@
- name: Load variables specific for OS family
assert:
that:
- "{{item|quote}} is file"
- "{{item|basename == 'itworks.yml'}}"
- item is file
- item|basename == 'itworks.yml'
with_first_found:
- files:
- "{{ansible_id}}-{{ansible_lsb.major_release}}.yml" # invalid var, should be skipped
@ -124,8 +124,8 @@
- name: Load variables specific for OS family, but now as list of dicts, same options as above
assert:
that:
- "{{item|quote}} is file"
- "{{item|basename == 'itworks.yml'}}"
- item is file
- item|basename == 'itworks.yml'
with_first_found:
- files:
- "{{ansible_id}}-{{ansible_lsb.major_release}}.yml"

@ -1 +1,2 @@
shippable/posix/group4
gather_facts/no

@ -10,7 +10,7 @@
field_with_space: "{{lookup('ini', 'field.with.space type=properties file=lookup.properties')}}"
- assert:
that: "{{item}} is defined"
that: "item is defined"
with_items: [ 'test1', 'test2', 'test_dot', 'field_with_space' ]
- name: "read ini value"

@ -76,10 +76,8 @@
- assert:
that:
- ansible_failed_task.name == "EXPECTED FAILURE - test invalid arg"
- ansible_failed_result.msg in [expected1, expected2]
vars:
expected1: "unrecognized arguments to with_sequence: ['junk']"
expected2: "unrecognized arguments to with_sequence: [u'junk']"
- '"unrecognized arguments to with_sequence: " in ansible_failed_result.msg'
- '"junk" in ansible_failed_result.msg'
- block:
- name: EXPECTED FAILURE - test bad kv value
@ -144,7 +142,7 @@
- assert:
that:
- ansible_failed_task.name == "EXPECTED FAILURE - test no count or end"
- ansible_failed_result.msg == "must specify count or end in with_sequence"
- ansible_failed_result.msg is contains "must specify count or end in with_sequence"
- block:
- name: EXPECTED FAILURE - test both count and end
@ -157,7 +155,7 @@
- assert:
that:
- ansible_failed_task.name == "EXPECTED FAILURE - test both count and end"
- ansible_failed_result.msg == "can't specify both count and end in with_sequence"
- ansible_failed_result.msg is contains "can't specify both count and end in with_sequence"
- block:
- name: EXPECTED FAILURE - test count backwards message
@ -170,7 +168,7 @@
- assert:
that:
- ansible_failed_task.name == "EXPECTED FAILURE - test count backwards message"
- ansible_failed_result.msg == "to count backwards make stride negative"
- ansible_failed_result.msg is contains "to count backwards make stride negative"
- block:
- name: EXPECTED FAILURE - test count forward message
@ -183,7 +181,7 @@
- assert:
that:
- ansible_failed_task.name == "EXPECTED FAILURE - test count forward message"
- ansible_failed_result.msg == "to count forward don't make stride negative"
- ansible_failed_result.msg is contains "to count forward don't make stride negative"
- block:
- name: EXPECTED FAILURE - test bad format string message
@ -196,7 +194,7 @@
- assert:
that:
- ansible_failed_task.name == "EXPECTED FAILURE - test bad format string message"
- ansible_failed_result.msg == expected
- ansible_failed_result.msg is contains expected
vars:
expected: "bad formatting string: d"

@ -133,7 +133,7 @@
- assert:
that:
- "'{{ item.0.name }}' != 'carol'"
- "item.0.name != 'carol'"
with_subelements:
- "{{ users }}"
- mysql.privs
@ -220,5 +220,5 @@
- assert:
that:
- "'{{ user_alice }}' == 'localhost'"
- "'{{ user_bob }}' == 'db1'"
- "user_alice == 'localhost'"
- "user_bob == 'db1'"

@ -26,4 +26,4 @@
- assert:
that:
- ansible_failed_task.name == "EXPECTED FAILURE - test empty list"
- ansible_failed_result.msg == "with_together requires at least one element in each list"
- ansible_failed_result.msg is contains "with_together requires at least one element in each list"

@ -1,6 +1,6 @@
- name: Test that retrieving a url works
set_fact:
web_data: "{{ lookup('url', 'https://' ~ httpbin_host ~ '/get?one') }}"
web_data: "{{ lookup('url', 'https://' ~ httpbin_host ~ '/get?one') | from_json }}"
- name: Assert that the url was retrieved
assert:
@ -39,7 +39,7 @@
block:
- name: Test user agent
set_fact:
web_data: "{{ lookup('url', 'https://' ~ httpbin_host ~ '/user-agent') }}"
web_data: "{{ lookup('url', 'https://' ~ httpbin_host ~ '/user-agent') | from_json }}"
- name: Assert that user agent is set
assert:
@ -51,7 +51,7 @@
block:
- name: Test force basic auth
set_fact:
web_data: "{{ lookup('url', 'https://' ~ httpbin_host ~ '/headers', username='abc') }}"
web_data: "{{ lookup('url', 'https://' ~ httpbin_host ~ '/headers', username='abc') | from_json }}"
- name: Assert that Authorization header is set
assert:

@ -10,7 +10,7 @@
- name: test Url lookup with netrc forced Basic auth
set_fact:
web_data: "{{ lookup('ansible.builtin.url', 'https://' ~ httpbin_host ~ '/basic-auth/foo/bar', headers={'Authorization':'Bearer foobar'}) }}"
web_data: "{{ lookup('ansible.builtin.url', 'https://' ~ httpbin_host ~ '/basic-auth/foo/bar', headers={'Authorization':'Bearer foobar'}) | from_json }}"
- name: assert test Url lookup with netrc forced Basic auth
assert:
@ -19,7 +19,7 @@
- name: test Url lookup with use_netrc=False
set_fact:
web_data: "{{ lookup('ansible.builtin.url', 'https://' ~ httpbin_host ~ '/bearer', headers={'Authorization':'Bearer foobar'}, use_netrc='False') }}"
web_data: "{{ lookup('ansible.builtin.url', 'https://' ~ httpbin_host ~ '/bearer', headers={'Authorization':'Bearer foobar'}, use_netrc='False') | from_json }}"
- name: assert test Url lookup with netrc=False used Bearer authentication
assert:

@ -9,13 +9,13 @@
- name: Try various regexes and make sure they work
assert:
that:
- lookup('varnames', '^qz_.+', wantlist=True) == ['qz_1', 'qz_2']
- lookup('varnames', '^qz_.+', '^qa.*', wantlist=True) == ['qz_1', 'qz_2', 'qa_1']
- "'ansible_python_interpreter' in lookup('varnames', '^ansible_.*', wantlist=True)"
- lookup('varnames', '^qz_.+', wantlist=True) | sort == ['qz_1', 'qz_2']
- lookup('varnames', '^qz_.+', '^qa.*', wantlist=True) | sort == ['qa_1', 'qz_1', 'qz_2']
- lookup('varnames', '^ansible_.*', wantlist=True) is contains "ansible_python_interpreter"
- lookup('varnames', '^doesnotexist.*', wantlist=True) == []
- lookup('varnames', '^doesnotexist.*', '.*python_inter.*', wantlist=True) == ['ansible_python_interpreter']
- lookup('varnames', '^q.*_\d', wantlist=True) == ['qz_1', 'qz_2', 'qa_1']
- lookup('varnames', '^q.*_\d') == 'qz_1,qz_2,qa_1'
- lookup('varnames', '^q.*_\d', wantlist=True) | sort == ['qa_1', 'qz_1', 'qz_2']
- lookup('varnames', '^q.*_\d') is search('(?=.*qa_1)(?=.*qz_1)(?=.*qz_2)')
- name: Make sure it fails successfully
set_fact:

@ -3,7 +3,7 @@
that:
- ansible_loop.index == ansible_loop.index0 + 1
- ansible_loop.revindex == ansible_loop.revindex0 + 1
- ansible_loop.first == {{ ansible_loop.index == 1 }}
- ansible_loop.last == {{ ansible_loop.index == ansible_loop.length }}
- ansible_loop.first == (ansible_loop.index == 1)
- ansible_loop.last == (ansible_loop.index == ansible_loop.length)
- ansible_loop.length == 3
- ansible_loop.allitems|join(',') == 'first,second,third'

@ -1,2 +1,3 @@
shippable/posix/group4
context/controller
gather_facts/no

@ -3,7 +3,7 @@
test_file: /tmp/ansible-test.module_defaults.foo
module_defaults:
debug:
msg: test default
msg: test {{ "default" }}
file:
path: '{{ test_file }}'
block:

@ -14,4 +14,4 @@
- assert:
that:
- '"location" in result'
- 'result["location"] == "{{ expected_location}}"'
- result["location"] == expected_location

@ -13,4 +13,4 @@
- assert:
that:
- '"location" in result'
- 'result["location"] == "{{ expected_location}}"'
- result["location"] == expected_location

@ -7,4 +7,4 @@
assert:
that:
- '"location" in result'
- 'result["location"] == "{{ expected_location }}"'
- result["location"] == expected_location

@ -7,4 +7,4 @@
assert:
that:
- '"location" in result'
- 'result["location"] == "{{ expected_location }}"'
- result["location"] == expected_location

@ -23,7 +23,7 @@
- set_fact:
selinux_policytype: "{{ r.stdout_lines[0] | trim }}"
when: r is success and r.stdout_lines
when: r is success and r.stdout_lines is truthy
- assert:
that:

@ -1 +1,2 @@
shippable/posix/group2
gather_facts/no

@ -21,42 +21,42 @@ echo "EXPECTED ERROR: Ensure we fail properly if a play has both user and remote
set +e
result="$(ansible-playbook -i ../../inventory remote_user_and_user.yml -v "$@" 2>&1)"
set -e
grep -q "ERROR! both 'user' and 'remote_user' are set for this play." <<< "$result"
grep -q "both 'user' and 'remote_user' are set for this play." <<< "$result"
# test that playbook errors if len(plays) == 0
echo "EXPECTED ERROR: Ensure we fail properly if a playbook is an empty list."
set +e
result="$(ansible-playbook -i ../../inventory empty.yml -v "$@" 2>&1)"
set -e
grep -q "ERROR! A playbook must contain at least one play" <<< "$result"
grep -q "A playbook must contain at least one play" <<< "$result"
# test that play errors if len(hosts) == 0
echo "EXPECTED ERROR: Ensure we fail properly if a play has 0 hosts."
set +e
result="$(ansible-playbook -i ../../inventory empty_hosts.yml -v "$@" 2>&1)"
set -e
grep -q "ERROR! Hosts list cannot be empty. Please check your playbook" <<< "$result"
grep -q "Hosts list cannot be empty. Please check your playbook" <<< "$result"
# test that play errors if tasks is malformed
echo "EXPECTED ERROR: Ensure we fail properly if tasks is malformed."
set +e
result="$(ansible-playbook -i ../../inventory malformed_tasks.yml -v "$@" 2>&1)"
set -e
grep -q "ERROR! A malformed block was encountered while loading tasks: 123 should be a list or None" <<< "$result"
grep -q "A malformed block was encountered while loading tasks: 123 should be a list or None" <<< "$result"
# test that play errors if pre_tasks is malformed
echo "EXPECTED ERROR: Ensure we fail properly if pre_tasks is malformed."
set +e
result="$(ansible-playbook -i ../../inventory malformed_pre_tasks.yml -v "$@" 2>&1)"
set -e
grep -q "ERROR! A malformed block was encountered while loading pre_tasks" <<< "$result"
grep -q "A malformed block was encountered while loading pre_tasks" <<< "$result"
# test that play errors if post_tasks is malformed
echo "EXPECTED ERROR: Ensure we fail properly if post_tasks is malformed."
set +e
result="$(ansible-playbook -i ../../inventory malformed_post_tasks.yml -v "$@" 2>&1)"
set -e
grep -q "ERROR! A malformed block was encountered while loading post_tasks" <<< "$result"
grep -q "A malformed block was encountered while loading post_tasks" <<< "$result"
# test roles: null -- it gets converted to [] internally
ansible-playbook -i ../../inventory roles_null.yml -v "$@"
@ -66,21 +66,21 @@ echo "EXPECTED ERROR: Ensure we fail properly if roles is malformed."
set +e
result="$(ansible-playbook -i ../../inventory malformed_roles.yml -v "$@" 2>&1)"
set -e
grep -q "ERROR! A malformed role declaration was encountered." <<< "$result"
grep -q "A malformed role declaration was encountered." <<< "$result"
# test roles: ["foo,bar"] -- errors about old style
echo "EXPECTED ERROR: Ensure we fail properly if old style role is given."
set +e
result="$(ansible-playbook -i ../../inventory old_style_role.yml -v "$@" 2>&1)"
set -e
grep -q "ERROR! Invalid old style role requirement: foo,bar" <<< "$result"
grep -q "Invalid old style role requirement: foo,bar" <<< "$result"
# test vars prompt that has no name
echo "EXPECTED ERROR: Ensure we fail properly if vars_prompt has no name."
set +e
result="$(ansible-playbook -i ../../inventory malformed_vars_prompt.yml -v "$@" 2>&1)"
set -e
grep -q "ERROR! Invalid vars_prompt data structure, missing 'name' key" <<< "$result"
grep -q "Invalid vars_prompt data structure, missing 'name' key" <<< "$result"
# test vars_prompt: null
ansible-playbook -i ../../inventory vars_prompt_null.yml -v "$@"

@ -1,2 +1,3 @@
shippable/posix/group5
context/controller
gather_facts/no

@ -6,6 +6,6 @@
- assert:
that:
# filter names are prefixed with a unique hash value to prevent shadowing of other plugins
- filter_name | regex_search('^ansible\.plugins\.filter\.[0-9]+_test_filter$')
- filter_name | regex_search('^ansible\.plugins\.filter\.[0-9]+_test_filter$') is truthy
- lookup_name == 'ansible.plugins.lookup.lookup_name'
- test_name_ok

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save