Merge branch 'ansible:devel' into deprecate-is-module

pull/82192/head
JustinWayland 1 year ago committed by GitHub
commit b52b1d4d14
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -79,10 +79,10 @@ stages:
targets: targets:
- name: macOS 13.2 - name: macOS 13.2
test: macos/13.2 test: macos/13.2
- name: RHEL 9.2 py39 - name: RHEL 9.3 py39
test: rhel/9.2@3.9 test: rhel/9.3@3.9
- name: RHEL 9.2 py311 - name: RHEL 9.3 py311
test: rhel/9.2@3.11 test: rhel/9.3@3.11
- name: FreeBSD 13.2 - name: FreeBSD 13.2
test: freebsd/13.2 test: freebsd/13.2
groups: groups:
@ -93,8 +93,8 @@ stages:
targets: targets:
- name: macOS 13.2 - name: macOS 13.2
test: macos/13.2 test: macos/13.2
- name: RHEL 9.2 - name: RHEL 9.3
test: rhel/9.2 test: rhel/9.3
- name: FreeBSD 13.2 - name: FreeBSD 13.2
test: freebsd/13.2 test: freebsd/13.2
groups: groups:
@ -108,8 +108,8 @@ stages:
test: alpine/3.18 test: alpine/3.18
- name: Fedora 38 - name: Fedora 38
test: fedora/38 test: fedora/38
- name: RHEL 9.2 - name: RHEL 9.3
test: rhel/9.2 test: rhel/9.3
- name: Ubuntu 22.04 - name: Ubuntu 22.04
test: ubuntu/22.04 test: ubuntu/22.04
groups: groups:

@ -0,0 +1,2 @@
bugfixes:
- flush_handlers - properly handle a handler failure in a nested block when ``force_handlers`` is set (https://github.com/ansible/ansible/issues/81532)

@ -0,0 +1,2 @@
minor_changes:
- ansible-test - Add support for RHEL 9.3 remotes.

@ -0,0 +1,4 @@
bugfixes:
- copy action now ensures that tempfiles use the same suffix as destination, to allow for ``validate`` to work with utilities that check extensions.
- copy action now also generates temporary files as hidden ('.' prefixed) to avoid accidental pickup by running services that glob by extension.
- template action will also inherit the behavior from copy (as it uses it internally).

@ -50,7 +50,7 @@ class FailedStates(IntFlag):
TASKS = 2 TASKS = 2
RESCUE = 4 RESCUE = 4
ALWAYS = 8 ALWAYS = 8
HANDLERS = 16 HANDLERS = 16 # NOTE not in use anymore
class HostState: class HostState:
@ -429,10 +429,6 @@ class PlayIterator:
state.update_handlers = False state.update_handlers = False
state.cur_handlers_task = 0 state.cur_handlers_task = 0
if state.fail_state & FailedStates.HANDLERS == FailedStates.HANDLERS:
state.update_handlers = True
state.run_state = IteratingStates.COMPLETE
else:
while True: while True:
try: try:
task = state.handlers[state.cur_handlers_task] task = state.handlers[state.cur_handlers_task]
@ -485,20 +481,16 @@ class PlayIterator:
else: else:
state.fail_state |= FailedStates.ALWAYS state.fail_state |= FailedStates.ALWAYS
state.run_state = IteratingStates.COMPLETE state.run_state = IteratingStates.COMPLETE
elif state.run_state == IteratingStates.HANDLERS:
state.fail_state |= FailedStates.HANDLERS
state.update_handlers = True
if state._blocks[state.cur_block].rescue:
state.run_state = IteratingStates.RESCUE
elif state._blocks[state.cur_block].always:
state.run_state = IteratingStates.ALWAYS
else:
state.run_state = IteratingStates.COMPLETE
return state return state
def mark_host_failed(self, host): def mark_host_failed(self, host):
s = self.get_host_state(host) s = self.get_host_state(host)
display.debug("marking host %s failed, current state: %s" % (host, s)) display.debug("marking host %s failed, current state: %s" % (host, s))
if s.run_state == IteratingStates.HANDLERS:
# we are failing `meta: flush_handlers`, so just reset the state to whatever
# it was before and let `_set_failed_state` figure out the next state
s.run_state = s.pre_flushing_run_state
s.update_handlers = True
s = self._set_failed_state(s) s = self._set_failed_state(s)
display.debug("^ failed state is now: %s" % s) display.debug("^ failed state is now: %s" % s)
self.set_state_for_host(host.name, s) self.set_state_for_host(host.name, s)
@ -514,8 +506,6 @@ class PlayIterator:
return True return True
elif state.run_state == IteratingStates.ALWAYS and self._check_failed_state(state.always_child_state): elif state.run_state == IteratingStates.ALWAYS and self._check_failed_state(state.always_child_state):
return True return True
elif state.run_state == IteratingStates.HANDLERS and state.fail_state & FailedStates.HANDLERS == FailedStates.HANDLERS:
return True
elif state.fail_state != FailedStates.NONE: elif state.fail_state != FailedStates.NONE:
if state.run_state == IteratingStates.RESCUE and state.fail_state & FailedStates.RESCUE == 0: if state.run_state == IteratingStates.RESCUE and state.fail_state & FailedStates.RESCUE == 0:
return False return False

@ -542,7 +542,7 @@ def check_type_raw(value):
def check_type_bytes(value): def check_type_bytes(value):
"""Convert a human-readable string value to bytes """Convert a human-readable string value to bytes
Raises :class:`TypeError` if unable to covert the value Raises :class:`TypeError` if unable to convert the value
""" """
try: try:
return human_to_bytes(value) return human_to_bytes(value)
@ -555,7 +555,7 @@ def check_type_bits(value):
Example: ``check_type_bits('1Mb')`` returns integer 1048576. Example: ``check_type_bits('1Mb')`` returns integer 1048576.
Raises :class:`TypeError` if unable to covert the value. Raises :class:`TypeError` if unable to convert the value.
""" """
try: try:
return human_to_bytes(value, isbits=True) return human_to_bytes(value, isbits=True)
@ -567,7 +567,7 @@ def check_type_jsonarg(value):
"""Return a jsonified string. Sometimes the controller turns a json string """Return a jsonified string. Sometimes the controller turns a json string
into a dict/list so transform it back into json here into a dict/list so transform it back into json here
Raises :class:`TypeError` if unable to covert the value Raises :class:`TypeError` if unable to convert the value
""" """
if isinstance(value, (text_type, binary_type)): if isinstance(value, (text_type, binary_type)):

@ -292,7 +292,12 @@ class ActionModule(ActionBase):
return result return result
# Define a remote directory that we will copy the file to. # Define a remote directory that we will copy the file to.
tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, 'source') tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, '.source')
# ensure we keep suffix for validate
suffix = os.path.splitext(dest_file)[1]
if suffix:
tmp_src += suffix
remote_path = None remote_path = None
@ -385,7 +390,7 @@ class ActionModule(ActionBase):
def _create_content_tempfile(self, content): def _create_content_tempfile(self, content):
''' Create a tempfile containing defined content ''' ''' Create a tempfile containing defined content '''
fd, content_tempfile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP) fd, content_tempfile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP, prefix='.')
f = os.fdopen(fd, 'wb') f = os.fdopen(fd, 'wb')
content = to_bytes(content) content = to_bytes(content)
try: try:

@ -29,7 +29,7 @@ EXAMPLES: |
# list1: [1, 2, 5, 1, 3, 4, 10] # list1: [1, 2, 5, 1, 3, 4, 10]
# list2: [1, 2, 3, 4, 5, 11, 99] # list2: [1, 2, 3, 4, 5, 11, 99]
{{ list1 | union(list2) }} {{ list1 | union(list2) }}
# => [1, 2, 5, 1, 3, 4, 10, 11, 99] # => [1, 2, 5, 3, 4, 10, 11, 99]
RETURN: RETURN:
_value: _value:
description: A unique list of all the elements from both lists. description: A unique list of all the elements from both lists.

@ -166,7 +166,7 @@ changed: [testhost] =>
mode: '0644' mode: '0644'
owner: root owner: root
size: 3 size: 3
src: .../source src: .../.source.txt
state: file state: file
uid: 0 uid: 0

@ -173,7 +173,7 @@ changed: [testhost] =>
mode: '0644' mode: '0644'
owner: root owner: root
size: 3 size: 3
src: .../source src: .../.source.txt
state: file state: file
uid: 0 uid: 0

@ -34,7 +34,7 @@ run_test() {
sed -i -e 's/^Using .*//g' "${OUTFILE}.${testname}.stdout" sed -i -e 's/^Using .*//g' "${OUTFILE}.${testname}.stdout"
sed -i -e 's/[0-9]:[0-9]\{2\}:[0-9]\{2\}\.[0-9]\{6\}/0:00:00.000000/g' "${OUTFILE}.${testname}.stdout" sed -i -e 's/[0-9]:[0-9]\{2\}:[0-9]\{2\}\.[0-9]\{6\}/0:00:00.000000/g' "${OUTFILE}.${testname}.stdout"
sed -i -e 's/[0-9]\{4\}-[0-9]\{2\}-[0-9]\{2\} [0-9]\{2\}:[0-9]\{2\}:[0-9]\{2\}\.[0-9]\{6\}/0000-00-00 00:00:00.000000/g' "${OUTFILE}.${testname}.stdout" sed -i -e 's/[0-9]\{4\}-[0-9]\{2\}-[0-9]\{2\} [0-9]\{2\}:[0-9]\{2\}:[0-9]\{2\}\.[0-9]\{6\}/0000-00-00 00:00:00.000000/g' "${OUTFILE}.${testname}.stdout"
sed -i -e 's#: .*/source$#: .../source#g' "${OUTFILE}.${testname}.stdout" sed -i -e 's#: .*/\.source\.txt$#: .../.source.txt#g' "${OUTFILE}.${testname}.stdout"
sed -i -e '/secontext:/d' "${OUTFILE}.${testname}.stdout" sed -i -e '/secontext:/d' "${OUTFILE}.${testname}.stdout"
sed -i -e 's/group: wheel/group: root/g' "${OUTFILE}.${testname}.stdout" sed -i -e 's/group: wheel/group: root/g' "${OUTFILE}.${testname}.stdout"

@ -0,0 +1,19 @@
- hosts: A,B
gather_facts: false
force_handlers: true
tasks:
- block:
- command: echo
notify: h
- meta: flush_handlers
rescue:
- debug:
msg: flush_handlers_rescued
always:
- debug:
msg: flush_handlers_always
handlers:
- name: h
fail:
when: inventory_hostname == "A"

@ -206,3 +206,7 @@ ansible-playbook force_handlers_blocks_81533-1.yml -i inventory.handlers "$@" 2>
ansible-playbook force_handlers_blocks_81533-2.yml -i inventory.handlers "$@" 2>&1 | tee out.txt ansible-playbook force_handlers_blocks_81533-2.yml -i inventory.handlers "$@" 2>&1 | tee out.txt
[ "$(grep out.txt -ce 'hosts_left')" = "1" ] [ "$(grep out.txt -ce 'hosts_left')" = "1" ]
ansible-playbook nested_flush_handlers_failure_force.yml -i inventory.handlers "$@" 2>&1 | tee out.txt
[ "$(grep out.txt -ce 'flush_handlers_rescued')" = "1" ]
[ "$(grep out.txt -ce 'flush_handlers_always')" = "2" ]

@ -7,6 +7,7 @@ freebsd python_dir=/usr/local/bin become=su_sudo provider=aws arch=x86_64
macos/13.2 python=3.11 python_dir=/usr/local/bin become=sudo provider=parallels arch=x86_64 macos/13.2 python=3.11 python_dir=/usr/local/bin become=sudo provider=parallels arch=x86_64
macos python_dir=/usr/local/bin become=sudo provider=parallels arch=x86_64 macos python_dir=/usr/local/bin become=sudo provider=parallels arch=x86_64
rhel/9.2 python=3.9,3.11 become=sudo provider=aws arch=x86_64 rhel/9.2 python=3.9,3.11 become=sudo provider=aws arch=x86_64
rhel/9.3 python=3.9,3.11 become=sudo provider=aws arch=x86_64
rhel become=sudo provider=aws arch=x86_64 rhel become=sudo provider=aws arch=x86_64
ubuntu/22.04 python=3.10 become=sudo provider=aws arch=x86_64 ubuntu/22.04 python=3.10 become=sudo provider=aws arch=x86_64
ubuntu become=sudo provider=aws arch=x86_64 ubuntu become=sudo provider=aws arch=x86_64

Loading…
Cancel
Save