diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml
index d2078a43cf0..3c73f3f2590 100644
--- a/.azure-pipelines/azure-pipelines.yml
+++ b/.azure-pipelines/azure-pipelines.yml
@@ -78,36 +78,44 @@ stages:
test: 2022/psrp/http
- name: 2022 SSH Key
test: 2022/ssh/key
+ - name: 2025 PSRP HTTP
+ test: 2025/psrp/http
+ - name: 2025 SSH Key
+ test: 2025/ssh/key
- stage: Remote
dependsOn: []
jobs:
- template: templates/matrix.yml # context/target
parameters:
targets:
- - name: macOS 14.3
- test: macos/14.3
- - name: RHEL 9.4 py39
- test: rhel/9.4@3.9
- - name: RHEL 9.4 py312
- test: rhel/9.4@3.12
- - name: FreeBSD 13.3
- test: freebsd/13.3
- - name: FreeBSD 14.1
- test: freebsd/14.1
+ - name: macOS 15.3
+ test: macos/15.3
+ - name: RHEL 9.5 py39
+ test: rhel/9.5@3.9
+ - name: RHEL 9.5 py312
+ test: rhel/9.5@3.12
+ - name: RHEL 10.0
+ test: rhel/10.0
+ - name: FreeBSD 13.5
+ test: freebsd/13.5
+ - name: FreeBSD 14.2
+ test: freebsd/14.2
groups:
- 1
- 2
- template: templates/matrix.yml # context/controller
parameters:
targets:
- - name: macOS 14.3
- test: macos/14.3
- - name: RHEL 9.4
- test: rhel/9.4
- - name: FreeBSD 13.3
- test: freebsd/13.3
- - name: FreeBSD 14.1
- test: freebsd/14.1
+ - name: macOS 15.3
+ test: macos/15.3
+ - name: RHEL 9.5
+ test: rhel/9.5
+ - name: RHEL 10.0
+ test: rhel/10.0
+ - name: FreeBSD 13.5
+ test: freebsd/13.5
+ - name: FreeBSD 14.2
+ test: freebsd/14.2
groups:
- 3
- 4
@@ -115,12 +123,14 @@ stages:
- template: templates/matrix.yml # context/controller (ansible-test container management)
parameters:
targets:
- - name: Alpine 3.20
- test: alpine/3.20
- - name: Fedora 40
- test: fedora/40
- - name: RHEL 9.4
- test: rhel/9.4
+ - name: Alpine 3.21
+ test: alpine/3.21
+ - name: Fedora 41
+ test: fedora/41
+ - name: RHEL 9.5
+ test: rhel/9.5
+ - name: RHEL 10.0
+ test: rhel/10.0
- name: Ubuntu 24.04
test: ubuntu/24.04
groups:
@@ -132,10 +142,10 @@ stages:
parameters:
testFormat: linux/{0}
targets:
- - name: Alpine 3.20
- test: alpine320
- - name: Fedora 40
- test: fedora40
+ - name: Alpine 3.21
+ test: alpine321
+ - name: Fedora 41
+ test: fedora41
- name: Ubuntu 22.04
test: ubuntu2204
- name: Ubuntu 24.04
@@ -147,16 +157,24 @@ stages:
parameters:
testFormat: linux/{0}
targets:
- - name: Alpine 3.20
- test: alpine320
- - name: Fedora 40
- test: fedora40
+ - name: Alpine 3.21
+ test: alpine321
+ - name: Fedora 41
+ test: fedora41
- name: Ubuntu 24.04
test: ubuntu2404
groups:
- 3
- 4
- 5
+ - template: templates/matrix.yml # context/target (dnf-oldest, dnf-latest)
+ parameters:
+ testFormat: linux/{0}
+ targets:
+ - name: Fedora 41
+ test: fedora41
+ groups:
+ - 7
- stage: Galaxy
dependsOn: []
jobs:
@@ -198,15 +216,10 @@ stages:
test: 2022/psrp/http
- name: 2022 SSH Key
test: 2022/ssh/key
- - stage: Incidental
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- testFormat: i/{0}/1
- targets:
- - name: IOS Python
- test: ios/csr1000v/
+ - name: 2025 PSRP HTTP
+ test: 2025/psrp/http
+ - name: 2025 SSH Key
+ test: 2025/ssh/key
- stage: Summary
condition: succeededOrFailed()
dependsOn:
@@ -218,6 +231,5 @@ stages:
- Galaxy
- Generic
- Incidental_Windows
- - Incidental
jobs:
- template: templates/coverage.yml
diff --git a/.azure-pipelines/commands/incidental/ios.sh b/.azure-pipelines/commands/incidental/ios.sh
deleted file mode 120000
index cad3e41b707..00000000000
--- a/.azure-pipelines/commands/incidental/ios.sh
+++ /dev/null
@@ -1 +0,0 @@
-network.sh
\ No newline at end of file
diff --git a/.azure-pipelines/commands/incidental/network.sh b/.azure-pipelines/commands/incidental/network.sh
deleted file mode 100755
index 1c489f9e31c..00000000000
--- a/.azure-pipelines/commands/incidental/network.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env bash
-
-set -o pipefail -eux
-
-declare -a args
-IFS='/:' read -ra args <<< "$1"
-
-platform="${args[0]}"
-version="${args[1]}"
-python_version="${args[2]}"
-
-target="shippable/${platform}/incidental/"
-
-stage="${S:-prod}"
-provider="${P:-default}"
-
-# python versions to test in order
-# all versions run full tests
-IFS=' ' read -r -a python_versions <<< \
- "$(PYTHONPATH="${PWD}/test/lib" python -c 'from ansible_test._internal import constants; print(" ".join(constants.CONTROLLER_PYTHON_VERSIONS))')"
-
-if [ "${python_version}" ]; then
- # limit tests to a single python version
- python_versions=("${python_version}")
-fi
-
-for python_version in "${python_versions[@]}"; do
- # terminate remote instances on the final python version tested
- if [ "${python_version}" = "${python_versions[-1]}" ]; then
- terminate="always"
- else
- terminate="never"
- fi
-
- # shellcheck disable=SC2086
- ansible-test network-integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
- --platform "${platform}/${version}" \
- --docker default --python "${python_version}" \
- --remote-terminate "${terminate}" --remote-stage "${stage}" --remote-provider "${provider}"
-done
diff --git a/.azure-pipelines/scripts/publish-codecov.py b/.azure-pipelines/scripts/publish-codecov.py
index 41f30af76d4..8a15822b0e8 100755
--- a/.azure-pipelines/scripts/publish-codecov.py
+++ b/.azure-pipelines/scripts/publish-codecov.py
@@ -9,11 +9,11 @@ from __future__ import annotations
import argparse
import dataclasses
import pathlib
-import shutil
+import shlex
import subprocess
import tempfile
import typing as t
-import urllib.request
+import venv
@dataclasses.dataclass(frozen=True)
@@ -43,6 +43,27 @@ def parse_args() -> Args:
return Args(**kwargs)
+def run(*args: str | pathlib.Path) -> None:
+ cmd = [str(arg) for arg in args]
+ print(f'==> {shlex.join(cmd)}', flush=True)
+ subprocess.run(cmd, check=True)
+
+
+def install_codecov(dest: pathlib.Path) -> pathlib.Path:
+ package = 'codecov-cli'
+ version = '11.0.3'
+
+ venv_dir = dest / 'venv'
+ python_bin = venv_dir / 'bin' / 'python'
+ codecov_bin = venv_dir / 'bin' / 'codecovcli'
+
+ venv.create(venv_dir, with_pip=True)
+
+ run(python_bin, '-m', 'pip', 'install', f'{package}=={version}', '--disable-pip-version-check')
+
+ return codecov_bin
+
+
def process_files(directory: pathlib.Path) -> t.Tuple[CoverageFile, ...]:
processed = []
for file in directory.joinpath('reports').glob('coverage*.xml'):
@@ -57,45 +78,43 @@ def process_files(directory: pathlib.Path) -> t.Tuple[CoverageFile, ...]:
return tuple(processed)
-def upload_files(codecov_bin: pathlib.Path, files: t.Tuple[CoverageFile, ...], dry_run: bool = False) -> None:
+def upload_files(codecov_bin: pathlib.Path, config_file: pathlib.Path, files: t.Tuple[CoverageFile, ...], dry_run: bool = False) -> None:
for file in files:
cmd = [
- str(codecov_bin),
- '--name', file.name,
- '--file', str(file.path),
+ codecov_bin,
+ '--disable-telem',
+ '--codecov-yml-path',
+ config_file,
+ 'upload-process',
+ '--disable-search',
+ '--disable-file-fixes',
+ '--plugin',
+ 'noop',
+ '--name',
+ file.name,
+ '--file',
+ file.path,
]
+
for flag in file.flags:
- cmd.extend(['--flags', flag])
+ cmd.extend(['--flag', flag])
if dry_run:
- print(f'DRY-RUN: Would run command: {cmd}')
- continue
-
- subprocess.run(cmd, check=True)
-
+ cmd.append('--dry-run')
-def download_file(url: str, dest: pathlib.Path, flags: int, dry_run: bool = False) -> None:
- if dry_run:
- print(f'DRY-RUN: Would download {url} to {dest} and set mode to {flags:o}')
- return
+ run(*cmd)
- with urllib.request.urlopen(url) as resp:
- with dest.open('w+b') as f:
- # Read data in chunks rather than all at once
- shutil.copyfileobj(resp, f, 64 * 1024)
- dest.chmod(flags)
-
-
-def main():
+def main() -> None:
args = parse_args()
- url = 'https://ci-files.testing.ansible.com/codecov/linux/codecov'
+
with tempfile.TemporaryDirectory(prefix='codecov-') as tmpdir:
- codecov_bin = pathlib.Path(tmpdir) / 'codecov'
- download_file(url, codecov_bin, 0o755, args.dry_run)
+ config_file = pathlib.Path(tmpdir) / 'config.yml'
+ config_file.write_text('')
+ codecov_bin = install_codecov(pathlib.Path(tmpdir))
files = process_files(args.path)
- upload_files(codecov_bin, files, args.dry_run)
+ upload_files(codecov_bin, config_file, files, args.dry_run)
if __name__ == '__main__':
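
The rewritten publish-codecov.py above swaps the raw binary download for a pip install of a pinned codecov-cli into a throwaway virtual environment. A minimal standalone sketch of that pattern, assuming a POSIX `bin/` layout (Windows venvs use `Scripts\` instead):

```python
# Sketch: install a pinned CLI into a temporary venv, mirroring install_codecov above.
import pathlib
import subprocess
import tempfile
import venv

with tempfile.TemporaryDirectory(prefix='codecov-') as tmpdir:
    venv_dir = pathlib.Path(tmpdir) / 'venv'
    venv.create(venv_dir, with_pip=True)  # bootstrap a venv with pip available
    python_bin = venv_dir / 'bin' / 'python'
    subprocess.run(
        [str(python_bin), '-m', 'pip', 'install', 'codecov-cli==11.0.3', '--disable-pip-version-check'],
        check=True,
    )
    codecov_bin = venv_dir / 'bin' / 'codecovcli'
    print(f'codecov-cli entry point: {codecov_bin}')
```
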
diff --git a/.azure-pipelines/scripts/report-coverage.sh b/.azure-pipelines/scripts/report-coverage.sh
index 4db905eae28..edf7580ad6d 100755
--- a/.azure-pipelines/scripts/report-coverage.sh
+++ b/.azure-pipelines/scripts/report-coverage.sh
@@ -12,6 +12,6 @@ if ! ansible-test --help >/dev/null 2>&1; then
pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
fi
-# Generate stubs using docker (if supported) otherwise fall back to using a virtual environment instead.
-# The use of docker is required when Powershell code is present, but Ansible 2.12 was the first version to support --docker with coverage.
-ansible-test coverage xml --group-by command --stub --docker --color -v || ansible-test coverage xml --group-by command --stub --venv --color -v
+# Generate stubs using docker.
+# The use of docker is mandatory when PowerShell code is present.
+ansible-test coverage xml --group-by command --stub --docker --color -v
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index 8f4944c43c0..4b8f97e4ca1 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -47,23 +47,7 @@ body:
- type: dropdown
attributes:
label: Issue Type
- description: >
- Please select the single available option in the drop-down.
-
-
-
- Why?
-
-
- We would do it by ourselves but unfortunately, the current
- edition of GitHub Issue Forms Alpha does not support this yet 🤷
-
-
- _We will make it easier in the future, once GitHub
- supports dropdown defaults. Promise!_
-
-
- # FIXME: Once GitHub allows defining the default choice, update this
+ description: This is a marker for our automatic bot. Do not change it.
options:
- Bug Report
validations:
@@ -152,7 +136,7 @@ body:
attributes:
label: Steps to Reproduce
description: |
- Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also pased any playbooks, configs and commands you used.
+ Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also provide any playbooks, configs and commands you used.
**HINT:** You can paste https://gist.github.com links for larger files.
value: |
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index 6aa4a2b7647..140f479aed0 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -16,10 +16,10 @@ contact_links:
url: https://docs.ansible.com/ansible/devel/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser
about: ❤ Be nice to other members of the community. ☮ Behave.
- name: 💬 Talk to the community
- url: https://docs.ansible.com/ansible/devel/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information
+ url: https://docs.ansible.com/ansible/devel/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#forum
about: Please ask and answer usage questions here
- name: ⚡ Working groups
- url: https://github.com/ansible/community/wiki
+ url: https://forum.ansible.com/g?utm_medium=github&utm_source=issue_template_chooser
about: Interested in improving a specific area? Become a part of a working group!
- name: 💼 For Enterprise
url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser
diff --git a/.github/ISSUE_TEMPLATE/documentation_report.yml b/.github/ISSUE_TEMPLATE/documentation_report.yml
index efe8d1c2035..b854d877f8a 100644
--- a/.github/ISSUE_TEMPLATE/documentation_report.yml
+++ b/.github/ISSUE_TEMPLATE/documentation_report.yml
@@ -84,20 +84,7 @@ body:
- type: dropdown
attributes:
label: Issue Type
- description: >
- Please select the single available option in the drop-down.
-
-
-
- Why?
-
-
-
- _We will make it easier in the future, once GitHub
- supports dropdown defaults. Promise!_
-
-
- # FIXME: Once GitHub allows defining the default choice, update this
+ description: This is a marker for our automatic bot. Do not change it.
options:
- Documentation Report
validations:
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
index 2fce680fe64..68093a77730 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.yml
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -101,23 +101,7 @@ body:
- type: dropdown
attributes:
label: Issue Type
- description: >
- Please select the single available option in the drop-down.
-
-
-
- Why?
-
-
- We would do it by ourselves but unfortunately, the current
- edition of GitHub Issue Forms Alpha does not support this yet 🤷
-
-
- _We will make it easier in the future, once GitHub
- supports dropdown defaults. Promise!_
-
-
- # FIXME: Once GitHub allows defining the default choice, update this
+ description: This is a marker for our automatic bot. Do not change it.
options:
- Feature Idea
validations:
diff --git a/.github/ISSUE_TEMPLATE/internal_issue.md b/.github/ISSUE_TEMPLATE/internal_issue.md
new file mode 100644
index 00000000000..aaf524ae6ae
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/internal_issue.md
@@ -0,0 +1,10 @@
+---
+name: Internal Issue
+about: Free-form issue creation for core maintainer use only.
+title: ''
+labels: [core-internal]
+assignees: ''
+---
+
+
+@ansibot bot_skip
diff --git a/.github/ISSUE_TEMPLATE/pre_release.yml b/.github/ISSUE_TEMPLATE/pre_release.yml
new file mode 100644
index 00000000000..80f10eb845d
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/pre_release.yml
@@ -0,0 +1,42 @@
+name: Pre-Release Bug Report
+description: File a bug report against a pre-release version
+labels:
+ - bug
+ - pre_release
+assignees:
+ - nitzmahone
+ - mattclay
+body:
+ - type: markdown
+ attributes:
+ value: |
+ ## Bug Report
+ - type: textarea
+ attributes:
+ label: Ansible Version
+ description: Paste the full output from `ansible --version` below.
+ render: console
+ placeholder: $ ansible --version
+ validations:
+ required: true
+ - type: textarea
+ attributes:
+ label: Summary
+ description: Describe the issue with any relevant steps to reproduce.
+ validations:
+ required: true
+ - type: dropdown
+ attributes:
+      label: Issue Type
+      description: This is a marker for our automatic bot. Do not change it.
+      options:
+        - Bug Report
+ validations:
+ required: true
diff --git a/.github/PULL_REQUEST_TEMPLATE/Bug fix.md b/.github/PULL_REQUEST_TEMPLATE/Bug fix.md
index b400b336dff..3ad5e0cff2b 100644
--- a/.github/PULL_REQUEST_TEMPLATE/Bug fix.md
+++ b/.github/PULL_REQUEST_TEMPLATE/Bug fix.md
@@ -2,19 +2,8 @@
-
+
##### ISSUE TYPE
- Bugfix Pull Request
-
-##### ADDITIONAL INFORMATION
-
-
-
-
-
-
-```paste below
-
-```
diff --git a/.github/PULL_REQUEST_TEMPLATE/Documentation change.md b/.github/PULL_REQUEST_TEMPLATE/Documentation change.md
index c62ff7bfc55..e0280ea016a 100644
--- a/.github/PULL_REQUEST_TEMPLATE/Documentation change.md
+++ b/.github/PULL_REQUEST_TEMPLATE/Documentation change.md
@@ -2,18 +2,8 @@
-
+
##### ISSUE TYPE
- Docs Pull Request
-
-##### ADDITIONAL INFORMATION
-
-
-
-
-
-```paste below
-
-```
diff --git a/.github/PULL_REQUEST_TEMPLATE/New feature.md b/.github/PULL_REQUEST_TEMPLATE/New feature.md
index 9e10c45d5d4..bd56e021164 100644
--- a/.github/PULL_REQUEST_TEMPLATE/New feature.md
+++ b/.github/PULL_REQUEST_TEMPLATE/New feature.md
@@ -2,18 +2,8 @@
-
+
##### ISSUE TYPE
- Feature Pull Request
-
-##### ADDITIONAL INFORMATION
-
-
-
-
-
-```paste below
-
-```
diff --git a/.github/PULL_REQUEST_TEMPLATE/Tests.md b/.github/PULL_REQUEST_TEMPLATE/Tests.md
index b059793b49a..80eb3c6ab02 100644
--- a/.github/PULL_REQUEST_TEMPLATE/Tests.md
+++ b/.github/PULL_REQUEST_TEMPLATE/Tests.md
@@ -2,19 +2,8 @@
-
+
##### ISSUE TYPE
- Test Pull Request
-
-##### ADDITIONAL INFORMATION
-
-
-
-
-
-
-```paste below
-
-```
diff --git a/.github/PULL_REQUEST_TEMPLATE/Unclear purpose or motivation.md b/.github/PULL_REQUEST_TEMPLATE/Unclear purpose or motivation.md
index 33504c1d708..baed1c6c83b 100644
--- a/.github/PULL_REQUEST_TEMPLATE/Unclear purpose or motivation.md
+++ b/.github/PULL_REQUEST_TEMPLATE/Unclear purpose or motivation.md
@@ -2,7 +2,7 @@
-
+
##### ISSUE TYPE
@@ -12,14 +12,3 @@
- Docs Pull Request
- Feature Pull Request
- Test Pull Request
-
-##### ADDITIONAL INFORMATION
-
-
-
-
-
-
-```paste below
-
-```
diff --git a/.github/RELEASE_NAMES.txt b/.github/RELEASE_NAMES.txt
index 17d96a6897e..588013b5e9d 100644
--- a/.github/RELEASE_NAMES.txt
+++ b/.github/RELEASE_NAMES.txt
@@ -1,3 +1,4 @@
+2.20.0 TBD
2.19.0 What Is and What Should Never Be
2.18.0 Fool in the Rain
2.17.0 Gallows Pole
diff --git a/.gitignore b/.gitignore
index 57019fd1ab6..6551222edd2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -97,6 +97,9 @@ Vagrantfile
# vendored lib dir
lib/ansible/_vendor/*
!lib/ansible/_vendor/__init__.py
+# PowerShell signed hashlist
+lib/ansible/config/powershell_signatures.psd1
+*.authenticode
# test stuff
/test/integration/cloud-config-*.*
!/test/integration/cloud-config-*.*.template
diff --git a/changelogs/fragments/83643-fix-sanity-ignore-for-copy.yml b/changelogs/fragments/83643-fix-sanity-ignore-for-copy.yml
deleted file mode 100644
index 07d6312cb4d..00000000000
--- a/changelogs/fragments/83643-fix-sanity-ignore-for-copy.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-minor_changes:
- - copy - parameter ``local_follow`` was incorrectly documented as having default value ``True`` (https://github.com/ansible/ansible/pull/83643).
- - copy - fix sanity test failures (https://github.com/ansible/ansible/pull/83643).
diff --git a/changelogs/fragments/83690-get_url-content-disposition-filename.yml b/changelogs/fragments/83690-get_url-content-disposition-filename.yml
deleted file mode 100644
index 47f9734c35e..00000000000
--- a/changelogs/fragments/83690-get_url-content-disposition-filename.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - get_url - fix honoring ``filename`` from the ``content-disposition`` header even when the type is ``inline`` (https://github.com/ansible/ansible/issues/83690)
diff --git a/changelogs/fragments/83965-action-groups-schema.yml b/changelogs/fragments/83965-action-groups-schema.yml
deleted file mode 100644
index cd4a439044d..00000000000
--- a/changelogs/fragments/83965-action-groups-schema.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - "runtime-metadata sanity test - improve validation of ``action_groups`` (https://github.com/ansible/ansible/pull/83965)."
diff --git a/changelogs/fragments/84008-additional-logging.yml b/changelogs/fragments/84008-additional-logging.yml
deleted file mode 100644
index 80bd3a7ddd9..00000000000
--- a/changelogs/fragments/84008-additional-logging.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-minor_changes:
- - Added a -vvvvv log message indicating when a host fails to produce output within the timeout period.
- - SSH Escalation-related -vvv log messages now include the associated host information.
diff --git a/changelogs/fragments/85217-stat-add-selinux-context.yml b/changelogs/fragments/85217-stat-add-selinux-context.yml
new file mode 100644
index 00000000000..44e32a5d90f
--- /dev/null
+++ b/changelogs/fragments/85217-stat-add-selinux-context.yml
@@ -0,0 +1,2 @@
+minor_changes:
+  - stat module - add the SELinux context as a return value, along with a new option to enable that return, which defaults to False (https://github.com/ansible/ansible/issues/85217).
diff --git a/changelogs/fragments/ansible-doc-description-verbosity.yml b/changelogs/fragments/ansible-doc-description-verbosity.yml
new file mode 100644
index 00000000000..7c000d995db
--- /dev/null
+++ b/changelogs/fragments/ansible-doc-description-verbosity.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - ansible-doc - Return a more verbose error message when the ``description`` field is missing.
diff --git a/changelogs/fragments/ansible-test-coverage-upgrade.yml b/changelogs/fragments/ansible-test-coverage-upgrade.yml
new file mode 100644
index 00000000000..bd11de61b37
--- /dev/null
+++ b/changelogs/fragments/ansible-test-coverage-upgrade.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - ansible-test - Upgrade to ``coverage`` version 7.9.1 for Python 3.9 and later.
diff --git a/changelogs/fragments/ansible-test-ios.yml b/changelogs/fragments/ansible-test-ios.yml
new file mode 100644
index 00000000000..671758596b2
--- /dev/null
+++ b/changelogs/fragments/ansible-test-ios.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - ansible-test - Removed support for automatic provisioning of obsolete instances for network-integration tests.
diff --git a/changelogs/fragments/ansible-test-nios-container.yml b/changelogs/fragments/ansible-test-nios-container.yml
deleted file mode 100644
index f4b2a99acdd..00000000000
--- a/changelogs/fragments/ansible-test-nios-container.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - ansible-test - Update ``nios-test-container`` to version 6.0.0.
diff --git a/changelogs/fragments/ansible-test-probe-error-handling.yml b/changelogs/fragments/ansible-test-probe-error-handling.yml
deleted file mode 100644
index bf4301cc48b..00000000000
--- a/changelogs/fragments/ansible-test-probe-error-handling.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-minor_changes:
- - ansible-test - Improve container runtime probe error handling.
- When unexpected probe output is encountered, an error with more useful debugging information is provided.
diff --git a/changelogs/fragments/ansible-test-pylint-fix.yml b/changelogs/fragments/ansible-test-pylint-fix.yml
deleted file mode 100644
index 877a5944967..00000000000
--- a/changelogs/fragments/ansible-test-pylint-fix.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-bugfixes:
- - ansible-test - Enable the ``sys.unraisablehook`` work-around for the ``pylint`` sanity test on Python 3.11.
- Previously the work-around was only enabled for Python 3.12 and later.
- However, the same issue has been discovered on Python 3.11.
diff --git a/changelogs/fragments/ansible-test-update.yml b/changelogs/fragments/ansible-test-update.yml
deleted file mode 100644
index 8431887dedb..00000000000
--- a/changelogs/fragments/ansible-test-update.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-minor_changes:
- - ansible-test - Update ``pylint`` sanity test to use version 3.3.1.
- - ansible-test - Default to Python 3.13 in the ``base`` and ``default`` containers.
- - ansible-test - Disable the ``deprecated-`` prefixed ``pylint`` rules as their results vary by Python version.
- - ansible-test - Update the ``base`` and ``default`` containers.
diff --git a/changelogs/fragments/apt_deb_install.yml b/changelogs/fragments/apt_deb_install.yml
new file mode 100644
index 00000000000..4f96af6c7d3
--- /dev/null
+++ b/changelogs/fragments/apt_deb_install.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - apt - mark dependencies installed as part of deb file installation as auto (https://github.com/ansible/ansible/issues/78123).
diff --git a/changelogs/fragments/cron_err.yml b/changelogs/fragments/cron_err.yml
deleted file mode 100644
index 5e65a7b68ec..00000000000
--- a/changelogs/fragments/cron_err.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-minor_changes:
- - cron - Provide additional error information while writing cron file (https://github.com/ansible/ansible/issues/83223).
diff --git a/changelogs/fragments/debconf_empty_password.yml b/changelogs/fragments/debconf_empty_password.yml
deleted file mode 100644
index 473dc53e0d5..00000000000
--- a/changelogs/fragments/debconf_empty_password.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-bugfixes:
- - debconf - set empty password values (https://github.com/ansible/ansible/issues/83214).
diff --git a/changelogs/fragments/dnf-remove-install_repoquery.yml b/changelogs/fragments/dnf-remove-install_repoquery.yml
new file mode 100644
index 00000000000..b804acc3e0d
--- /dev/null
+++ b/changelogs/fragments/dnf-remove-install_repoquery.yml
@@ -0,0 +1,2 @@
+removed_features:
+ - dnf/dnf5 - remove deprecated ``install_repoquery`` option.
diff --git a/changelogs/fragments/dnf5-plugins-compat.yml b/changelogs/fragments/dnf5-plugins-compat.yml
deleted file mode 100644
index 5d42b0f99f1..00000000000
--- a/changelogs/fragments/dnf5-plugins-compat.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - "dnf5 - fix traceback when ``enable_plugins``/``disable_plugins`` is used on ``python3-libdnf5`` versions that do not support this functionality"
diff --git a/changelogs/fragments/file_simplify.yml b/changelogs/fragments/file_simplify.yml
deleted file mode 100644
index 63e48fbdb9a..00000000000
--- a/changelogs/fragments/file_simplify.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-minor_changes:
- - file - make code more readable and simple.
diff --git a/changelogs/fragments/find-checksum.yml b/changelogs/fragments/find-checksum.yml
deleted file mode 100644
index c713beabd68..00000000000
--- a/changelogs/fragments/find-checksum.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - find - add a checksum_algorithm parameter to specify which type of checksum the module will return
diff --git a/changelogs/fragments/fix-ansible-galaxy-ignore-certs.yml b/changelogs/fragments/fix-ansible-galaxy-ignore-certs.yml
deleted file mode 100644
index aba789bdadd..00000000000
--- a/changelogs/fragments/fix-ansible-galaxy-ignore-certs.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - Fix disabling SSL verification when installing collections and roles from git repositories. If ``--ignore-certs`` isn't provided, the value for the ``GALAXY_IGNORE_CERTS`` configuration option will be used (https://github.com/ansible/ansible/issues/83326).
diff --git a/changelogs/fragments/fix-module-utils-facts-timeout.yml b/changelogs/fragments/fix-module-utils-facts-timeout.yml
deleted file mode 100644
index 3ecc95dfab3..00000000000
--- a/changelogs/fragments/fix-module-utils-facts-timeout.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - Use the requested error message in the ansible.module_utils.facts.timeout timeout function instead of hardcoding one.
diff --git a/changelogs/fragments/fix_errors.yml b/changelogs/fragments/fix_errors.yml
deleted file mode 100644
index 995cc28ffda..00000000000
--- a/changelogs/fragments/fix_errors.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - Errors now preserve stacked error messages even when YAML is involved.
diff --git a/changelogs/fragments/no-return.yml b/changelogs/fragments/no-return.yml
deleted file mode 100644
index b55db43eb2f..00000000000
--- a/changelogs/fragments/no-return.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - module_utils - Add ``NoReturn`` type annotations to functions which never return.
diff --git a/changelogs/fragments/os_family.yml b/changelogs/fragments/os_family.yml
deleted file mode 100644
index 7126a00c27b..00000000000
--- a/changelogs/fragments/os_family.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-bugfixes:
- - facts - skip if distribution file path is directory, instead of raising error (https://github.com/ansible/ansible/issues/84006).
diff --git a/changelogs/fragments/package-dnf-action-plugins-facts-fail-msg.yml b/changelogs/fragments/package-dnf-action-plugins-facts-fail-msg.yml
deleted file mode 100644
index 8dd037a4e02..00000000000
--- a/changelogs/fragments/package-dnf-action-plugins-facts-fail-msg.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - "``package``/``dnf`` action plugins - provide the reason behind the failure to gather the ``ansible_pkg_mgr`` fact to identify the package backend"
diff --git a/changelogs/fragments/paramiko-global-config-removal.yml b/changelogs/fragments/paramiko-global-config-removal.yml
new file mode 100644
index 00000000000..599e4c26805
--- /dev/null
+++ b/changelogs/fragments/paramiko-global-config-removal.yml
@@ -0,0 +1,2 @@
+removed_features:
+ - paramiko - Removed the ``PARAMIKO_HOST_KEY_AUTO_ADD`` and ``PARAMIKO_LOOK_FOR_KEYS`` configuration keys, which were previously deprecated.
diff --git a/changelogs/fragments/remove_ini_ignored_dir.yml b/changelogs/fragments/remove_ini_ignored_dir.yml
deleted file mode 100644
index 10a5a8e61ce..00000000000
--- a/changelogs/fragments/remove_ini_ignored_dir.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - INVENTORY_IGNORE_EXTS config, removed ``ini`` from the default list, inventory scripts using a corresponding .ini configuration are rare now and inventory.ini files are more common. Those that need to ignore the ini files for inventory scripts can still add it to configuration.
diff --git a/changelogs/fragments/selector_removal.yml b/changelogs/fragments/selector_removal.yml
deleted file mode 100644
index 53b263ec03d..00000000000
--- a/changelogs/fragments/selector_removal.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-bugfixes:
- - selector - remove deprecated compat.selector related files (https://github.com/ansible/ansible/pull/84155).
diff --git a/changelogs/fragments/service_facts_fbsd.yml b/changelogs/fragments/service_facts_fbsd.yml
deleted file mode 100644
index 6f06ab79f23..00000000000
--- a/changelogs/fragments/service_facts_fbsd.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - service_facts module got freebsd support added.
diff --git a/changelogs/fragments/skip-handlers-tagged-play.yml b/changelogs/fragments/skip-handlers-tagged-play.yml
deleted file mode 100644
index 755308eafbe..00000000000
--- a/changelogs/fragments/skip-handlers-tagged-play.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - "Do not run implicit ``flush_handlers`` meta tasks when the whole play is excluded from the run due to tags specified."
diff --git a/changelogs/fragments/skip-implicit-flush_handlers-no-notify.yml b/changelogs/fragments/skip-implicit-flush_handlers-no-notify.yml
deleted file mode 100644
index a4c913791d2..00000000000
--- a/changelogs/fragments/skip-implicit-flush_handlers-no-notify.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - "Improve performance on large inventories by reducing the number of implicit meta tasks."
diff --git a/changelogs/fragments/skip-role-task-iterator.yml b/changelogs/fragments/skip-role-task-iterator.yml
deleted file mode 100644
index 1cf6b4cbb84..00000000000
--- a/changelogs/fragments/skip-role-task-iterator.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - PlayIterator - do not return tasks from already executed roles so specific strategy plugins do not have to do the filtering of such tasks themselves
diff --git a/changelogs/fragments/user_action_fix.yml b/changelogs/fragments/user_action_fix.yml
deleted file mode 100644
index 64ee997d688..00000000000
--- a/changelogs/fragments/user_action_fix.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - user module now avoids changing ownership of files symlinked in provided home dir skeleton
diff --git a/changelogs/fragments/user_ssh_fix.yml b/changelogs/fragments/user_ssh_fix.yml
deleted file mode 100644
index b2c47d60e3a..00000000000
--- a/changelogs/fragments/user_ssh_fix.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-bugfixes:
- - user action will now require O(force) to overwrite the public part of an ssh key when generating ssh keys, as was already the case for the private part.
-security_fixes:
- - user action won't allow ssh-keygen, chown and chmod to run on existing ssh public key file, avoiding traversal on existing symlinks (CVE-2024-9902).
diff --git a/changelogs/fragments/v2.19.0-initial-commit.yaml b/changelogs/fragments/v2.19.0-initial-commit.yaml
deleted file mode 100644
index 0967ef424bc..00000000000
--- a/changelogs/fragments/v2.19.0-initial-commit.yaml
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/changelogs/fragments/yum_repository-remove-keepcache.yml b/changelogs/fragments/yum_repository-remove-keepcache.yml
new file mode 100644
index 00000000000..6c6222549c7
--- /dev/null
+++ b/changelogs/fragments/yum_repository-remove-keepcache.yml
@@ -0,0 +1,2 @@
+removed_features:
+ - yum_repository - remove deprecated ``keepcache`` option.
diff --git a/hacking/create-bulk-issues.py b/hacking/create-bulk-issues.py
index d2651415df1..09c79590e22 100755
--- a/hacking/create-bulk-issues.py
+++ b/hacking/create-bulk-issues.py
@@ -35,6 +35,7 @@ class Issue:
body: str
project: str
labels: list[str] | None = None
+ assignee: str | None = None
def create(self) -> str:
cmd = ['gh', 'issue', 'create', '--title', self.title, '--body', self.body, '--project', self.project]
@@ -43,8 +44,18 @@ class Issue:
for label in self.labels:
cmd.extend(('--label', label))
- process = subprocess.run(cmd, capture_output=True, check=True)
- url = process.stdout.decode().strip()
+ if self.assignee:
+ cmd.extend(('--assignee', self.assignee))
+
+ try:
+ process = subprocess.run(cmd, capture_output=True, check=True, text=True)
+ except subprocess.CalledProcessError as ex:
+ print('>>> Note')
+ print(f"You may need to run 'gh auth refresh -s project' if 'gh' reports it cannot find the project {self.project!r} when it exists.")
+ print(f'>>> Standard Output\n{ex.stdout.strip()}\n>>> Standard Error\n{ex.stderr.strip()}\n>>> Exception')
+ raise
+
+ url = process.stdout.strip()
return url
@@ -54,6 +65,7 @@ class Feature:
summary: str
component: str
labels: list[str] | None = None
+ assignee: str | None = None
@staticmethod
def from_dict(data: dict[str, t.Any]) -> Feature:
@@ -61,6 +73,7 @@ class Feature:
summary = data.get('summary')
component = data.get('component')
labels = data.get('labels')
+ assignee = data.get('assignee')
if not isinstance(title, str):
raise RuntimeError(f'`title` is not `str`: {title}')
@@ -71,6 +84,9 @@ class Feature:
if not isinstance(component, str):
raise RuntimeError(f'`component` is not `str`: {component}')
+ if not isinstance(assignee, (str, type(None))):
+ raise RuntimeError(f'`assignee` is not `str`: {assignee}')
+
if not isinstance(labels, list) or not all(isinstance(item, str) for item in labels):
raise RuntimeError(f'`labels` is not `list[str]`: {labels}')
@@ -79,6 +95,7 @@ class Feature:
summary=summary,
component=component,
labels=labels,
+ assignee=assignee,
)
def create_issue(self, project: str) -> Issue:
@@ -102,6 +119,7 @@ Feature Idea
body=body.strip(),
project=project,
labels=self.labels,
+ assignee=self.assignee,
)
@@ -297,7 +315,21 @@ def create_deprecation_parser(subparser) -> None:
def create_feature_parser(subparser) -> None:
- parser: argparse.ArgumentParser = subparser.add_parser('feature')
+ epilog = """
+Example source YAML:
+
+default:
+ component: ansible-test
+ labels:
+ - ansible-test
+ - feature
+ assignee: "@me"
+features:
+ - title: Some title goes here
+ summary: A summary goes here.
+"""
+
+ parser: argparse.ArgumentParser = subparser.add_parser('feature', epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.set_defaults(type=FeatureArgs)
parser.set_defaults(command=feature_command)
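
The new epilog documents the expected source YAML. A hedged sketch of how such a document could map onto `Feature.from_dict`; the merging of the `default` block into each feature entry is an assumption about behavior not shown in this diff, and PyYAML is assumed available:

```python
# Hedged sketch: how the epilog's YAML plausibly feeds Feature.from_dict, which
# (as shown in the diff) validates title/summary/component/labels/assignee.
import yaml  # PyYAML, assumed available

source = """
default:
  component: ansible-test
  labels:
    - ansible-test
    - feature
  assignee: "@me"
features:
  - title: Some title goes here
    summary: A summary goes here.
"""

data = yaml.safe_load(source)
default = data['default']

for entry in data['features']:
    merged = {**default, **entry}  # assumed merge semantics, not shown in the diff
    print(merged['title'], merged.get('assignee'))
```
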
diff --git a/hacking/env-setup.fish b/hacking/env-setup.fish
index ee945ec1452..fcb739bf0cd 100644
--- a/hacking/env-setup.fish
+++ b/hacking/env-setup.fish
@@ -3,9 +3,23 @@
# Description: Modifies the environment for running Ansible from a checkout
# Usage: . ./hacking/env-setup [-q]
+# Set PYTHON_BIN
+if not set -q PYTHON_BIN
+ for exe in python3 python
+ if command -v $exe > /dev/null
+ set -gx PYTHON_BIN (command -v $exe)
+ break
+ end
+ end
+ if not set -q PYTHON_BIN
+ echo "No valid Python found"
+ exit 1
+ end
+end
+
# Retrieve the path of the current directory where the script resides
set HACKING_DIR (dirname (status -f))
-set FULL_PATH (python -c "import os; print(os.path.realpath('$HACKING_DIR'))")
+set FULL_PATH ($PYTHON_BIN -c "import os; print(os.path.realpath('$HACKING_DIR'))")
set ANSIBLE_HOME (dirname $FULL_PATH)
# Set quiet flag
@@ -50,20 +64,6 @@ else if not string match -qr $PREFIX_MANPATH'($|:)' $MANPATH
set -gx MANPATH "$PREFIX_MANPATH:$MANPATH"
end
-# Set PYTHON_BIN
-if not set -q PYTHON_BIN
- for exe in python3 python
- if command -v $exe > /dev/null
- set -gx PYTHON_BIN (command -v $exe)
- break
- end
- end
- if not set -q PYTHON_BIN
- echo "No valid Python found"
- exit 1
- end
-end
-
pushd $ANSIBLE_HOME
if test -n "$QUIET"
# Remove any .pyc files found
diff --git a/hacking/test-module.py b/hacking/test-module.py
index a9df1a79b8f..a5cf7862311 100755
--- a/hacking/test-module.py
+++ b/hacking/test-module.py
@@ -43,7 +43,6 @@ from pathlib import Path
from ansible.release import __version__
import ansible.utils.vars as utils_vars
from ansible.parsing.dataloader import DataLoader
-from ansible.parsing.utils.jsonify import jsonify
from ansible.parsing.splitter import parse_kv
from ansible.plugins.loader import init_plugin_loader
from ansible.executor import module_common
@@ -89,6 +88,22 @@ def parse():
return options, args
+def jsonify(result, format=False):
+ """ format JSON output (uncompressed or uncompressed) """
+
+ if result is None:
+ return "{}"
+
+ indent = None
+ if format:
+ indent = 4
+
+ try:
+ return json.dumps(result, sort_keys=True, indent=indent, ensure_ascii=False)
+ except UnicodeDecodeError:
+ return json.dumps(result, sort_keys=True, indent=indent)
+
+
def write_argsfile(argstring, json=False):
""" Write args to a file for old-style module's use. """
argspath = Path("~/.ansible_test_module_arguments").expanduser()
@@ -152,16 +167,20 @@ def boilerplate_module(modfile, args, interpreters, check, destfile):
if check:
complex_args['_ansible_check_mode'] = True
+ modfile = os.path.abspath(modfile)
modname = os.path.basename(modfile)
modname = os.path.splitext(modname)[0]
- (module_data, module_style, shebang) = module_common.modify_module(
- modname,
- modfile,
- complex_args,
- Templar(loader=loader),
+
+ built_module = module_common.modify_module(
+ module_name=modname,
+ module_path=modfile,
+ module_args=complex_args,
+ templar=Templar(loader=loader),
task_vars=task_vars
)
+ module_data, module_style = built_module.b_module_data, built_module.module_style
+
if module_style == 'new' and '_ANSIBALLZ_WRAPPER = True' in to_native(module_data):
module_style = 'ansiballz'
@@ -198,10 +217,11 @@ def ansiballz_setup(modfile, modname, interpreters):
# All the directories in an AnsiBallZ that modules can live
core_dirs = glob.glob(os.path.join(debug_dir, 'ansible/modules'))
+ non_core_dirs = glob.glob(os.path.join(debug_dir, 'ansible/legacy'))
collection_dirs = glob.glob(os.path.join(debug_dir, 'ansible_collections/*/*/plugins/modules'))
# There's only one module in an AnsiBallZ payload so look for the first module and then exit
- for module_dir in core_dirs + collection_dirs:
+ for module_dir in core_dirs + collection_dirs + non_core_dirs:
for dirname, directories, filenames in os.walk(module_dir):
for filename in filenames:
if filename == modname + '.py':
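
The `jsonify` helper inlined above replaces the removed `ansible.parsing.utils.jsonify` import. A standalone copy with example calls, to illustrate its behavior:

```python
# Standalone copy of the inlined jsonify helper, for illustration.
import json

def jsonify(result, format=False):
    if result is None:
        return "{}"
    indent = 4 if format else None
    try:
        return json.dumps(result, sort_keys=True, indent=indent, ensure_ascii=False)
    except UnicodeDecodeError:
        return json.dumps(result, sort_keys=True, indent=indent)

print(jsonify(None))                   # -> {}
print(jsonify({'b': 1, 'a': 2}))       # -> {"a": 2, "b": 1} (keys sorted, compact)
print(jsonify({'a': 2}, format=True))  # -> pretty-printed with a 4-space indent
```
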
diff --git a/hacking/ticket_stubs/bug_internal_api.md b/hacking/ticket_stubs/bug_internal_api.md
index 89162558ca0..3bb563bc5f5 100644
--- a/hacking/ticket_stubs/bug_internal_api.md
+++ b/hacking/ticket_stubs/bug_internal_api.md
@@ -9,14 +9,14 @@ as such this is not considered a bug unless it causes an issue with Ansible comm
(`ansible`, `ansible-playbook`, `ansible-doc`, etc).
We do support the provided API for use in developing plugins (modules, dynamic inventories, callbacks, strategies, etc),
-but this does not seem to match that case.
+but this does not match that case.
-If you really need a stable API target to use Ansible, consider using ansible-runner:
+If you need a stable API target to use Ansible, consider using ansible-runner:
*
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
-If you or anyone else has any further questions, please let us know by using any of the communication methods listed in the page below:
+If you or anyone else has any further questions, please let us know by using any of the methods listed on the communication page:
*
diff --git a/hacking/ticket_stubs/collections.md b/hacking/ticket_stubs/collections.md
index 3698ea14bd9..e5358387731 100644
--- a/hacking/ticket_stubs/collections.md
+++ b/hacking/ticket_stubs/collections.md
@@ -8,9 +8,9 @@ However, we recommend looking into providing this functionality through Ansible
* .
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
-The mailing list and irc are great ways to ask questions, or post if you don't think this particular issue is resolved.
+The Ansible Forum is the best place to start a discussion about this particular issue.
-See this page for a complete and up to date list of communication channels and their purposes:
+See this page for a complete and up-to-date list of communication channels and their purposes:
*
diff --git a/hacking/ticket_stubs/guide_newbie_about_gh_and_contributing_to_ansible.md b/hacking/ticket_stubs/guide_newbie_about_gh_and_contributing_to_ansible.md
index 708eedc53d1..710d3ff643b 100644
--- a/hacking/ticket_stubs/guide_newbie_about_gh_and_contributing_to_ansible.md
+++ b/hacking/ticket_stubs/guide_newbie_about_gh_and_contributing_to_ansible.md
@@ -1,11 +1,11 @@
-@{{ paste.handle.here }} it seems to me that you are new to GitHub and
+@{{ paste.handle.here }} It seems to me that you are new to GitHub and
have created this
[PR](https://help.github.com/articles/about-pull-requests/)
accidentally. That's why I'm closing it.
But no worries! Welcome to the Ansible community :)
-Assuming that you wanted to create actual contribution, I think that
+Assuming that you wanted to create an actual contribution, I think that
you may want to learn and read through the following articles I've
gathered for you:
@@ -14,7 +14,7 @@ gathered for you:
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
-If you or anyone else has any further questions, please let us know by using any of the communication methods listed in the page below:
+If you or anyone else has any further questions, please let us know by using any of the communication methods listed on the page below:
diff --git a/hacking/ticket_stubs/no_thanks.md b/hacking/ticket_stubs/no_thanks.md
index 8c32b6bc4f9..9953ae1b468 100644
--- a/hacking/ticket_stubs/no_thanks.md
+++ b/hacking/ticket_stubs/no_thanks.md
@@ -1,19 +1,19 @@
Hi!
-Thanks very much for your submission to Ansible. It means a lot to us that you've taken time to contribute.
+Thank you very much for your submission to Ansible. It means a lot to us that you've taken the time to contribute.
-Unfortunately, we're not sure if we want this feature in the program, and I don't want this to seem confrontational.
-Our reasons for this are:
+Unfortunately, we're not sure if we want this feature in the program; our reasons are:
* (A) INSERT ITEM HERE
-However, we're absolutely always up for discussion.
+However, we're always up for discussion.
+
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
-If you or anyone else has any further questions, please let us know by using any of the communication methods listed in the page below:
+If you or anyone else has any further questions, please let us know by using any of the communication methods listed on the page below:
*
-In the future, sometimes starting a discussion on the development list prior to implementing
+In the future, sometimes starting a discussion on the Ansible Forum before implementing
a feature can make getting things included a little easier, but it's not always necessary.
Thank you once again for this and your interest in Ansible!
diff --git a/hacking/ticket_stubs/no_thanks_minor_changes.md b/hacking/ticket_stubs/no_thanks_minor_changes.md
new file mode 100644
index 00000000000..b4c36cf96fb
--- /dev/null
+++ b/hacking/ticket_stubs/no_thanks_minor_changes.md
@@ -0,0 +1,21 @@
+Hi!
+
+Thank you very much for your submission to Ansible. It means a lot to us that you've taken the time to contribute.
+
+Unfortunately, we're not currently accepting most outside contributions consisting only of minor changes such as:
+
+* Spelling, grammar or typo fixes in comments, code or tests.
+* Type annotations for existing code.
+* Code style changes.
+
+However, we're always up for discussion.
+
+Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
+If you or anyone else has any further questions, please let us know by using any of the communication methods listed on the page below:
+
+*
+
+In the future, sometimes starting a discussion on the Ansible Forum before implementing
+a feature can make getting things included a little easier, but it's not always necessary.
+
+Thank you once again for this and your interest in Ansible!
diff --git a/hacking/ticket_stubs/pr_duplicate.md b/hacking/ticket_stubs/pr_duplicate.md
index 080e4e4abf1..7bc88bc273c 100644
--- a/hacking/ticket_stubs/pr_duplicate.md
+++ b/hacking/ticket_stubs/pr_duplicate.md
@@ -1,6 +1,6 @@
Hi!
-Thanks very much for your submission to Ansible. It means a lot to us that you've taken time to contribute.
+Thanks very much for your submission to Ansible. It means a lot to us that you've taken the time to contribute.
It looks like the work from this pull request is a duplicate of the following PR(s):
@@ -8,12 +8,12 @@ It looks like the work from this pull request is a duplicate of the following PR
Based on this, we are going to close this PR in favor of the above as a consolidated location to keep track of the issue.
-However, we're absolutely always up for discussion.
-In the future, sometimes starting a discussion on the development list prior to implementing a feature
+However, we're always up for discussion.
+In the future, sometimes starting a discussion on the Ansible Forum before implementing a feature
can make getting things included a little easier, but it's not always necessary.
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
-If you or anyone else has any further questions, please let us know by using any of the communication methods listed in the page below:
+If you or anyone else has any further questions, please let us know by using any of the communication methods listed on the page below:
*
diff --git a/hacking/ticket_stubs/pr_merged.md b/hacking/ticket_stubs/pr_merged.md
index 5d354e3586f..604e34a4104 100644
--- a/hacking/ticket_stubs/pr_merged.md
+++ b/hacking/ticket_stubs/pr_merged.md
@@ -6,9 +6,9 @@ For more info on our process see
diff --git a/hacking/ticket_stubs/proposal.md b/hacking/ticket_stubs/proposal.md
index 2d8182f12be..cfbaf7ae022 100644
--- a/hacking/ticket_stubs/proposal.md
+++ b/hacking/ticket_stubs/proposal.md
@@ -6,7 +6,7 @@ If you are still interested in seeing this new feature get into Ansible, please
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
-The Forum is the best ways to ask questions, or post if you don't think this particular issue is resolved.
+The Ansible Forum is the best place to start a discussion about this particular issue.
*
diff --git a/hacking/ticket_stubs/question_not_bug.md b/hacking/ticket_stubs/question_not_bug.md
index dab0d2edba1..fa41b24ee60 100644
--- a/hacking/ticket_stubs/question_not_bug.md
+++ b/hacking/ticket_stubs/question_not_bug.md
@@ -2,16 +2,18 @@ Hi!
Thanks very much for your interest in Ansible. It means a lot to us.
-This appears to be a user question, and we'd like to direct these topic to the Ansible Forum.
+This appears to be a user question, and we'd like to direct this topic to the Ansible Forum.
* [Ansible Forum](https://forum.ansible.com)
-See this page for a complete and up to date list of communication channels and their purposes:
+See this page for a complete and up-to-date list of communication channels and their purposes:
*
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
-If don't you think this particular issue is resolved, you should still stop by there first, we'd appreciate it.
+If you don't think this particular issue is resolved, you should still stop by there first, we'd appreciate it.
This allows us to keep the issue tracker for bugs, pull requests, RFEs and the like.
-Thank you once again and we look forward to seeing you on the list or IRC. Thanks!
+Thank you once again, and we look forward to seeing you on the Ansible Forum!
+
+Thanks!
diff --git a/hacking/ticket_stubs/resolved.md b/hacking/ticket_stubs/resolved.md
index f040d6d05a4..3e8c396949d 100644
--- a/hacking/ticket_stubs/resolved.md
+++ b/hacking/ticket_stubs/resolved.md
@@ -4,10 +4,10 @@ We have ascertained that the following PR/commits should resolve this question o
<< INSERT SHA/PR LINK HERE >>
-This should be included newer releases starting with << RELEASE/the next [major] release(s) >>.
+This is included in newer releases starting with << RELEASE/the next [major] release(s) >>.
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
-The mailing list and irc are great ways to ask questions, or post if you don't think this particular issue is resolved.
+The Ansible Forum is the best place to start a discussion about this particular issue.
See this page for a complete list of communication channels and their purposes:
diff --git a/hacking/ticket_stubs/wider_discussion.md b/hacking/ticket_stubs/wider_discussion.md
index 3ab9073f443..2766e4c6723 100644
--- a/hacking/ticket_stubs/wider_discussion.md
+++ b/hacking/ticket_stubs/wider_discussion.md
@@ -1,6 +1,6 @@
Hi!
-Thanks very much for your submission to Ansible. It means a lot to us.
+Thanks very much for your submission to Ansible. It means a lot to us.
We are interested in this idea and would like to see a wider discussion on it on one of our lists.
Reasons for this include:
@@ -8,10 +8,9 @@ Reasons for this include:
* INSERT REASONS!
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
-Can you please post Ansible Forum so we can talk about this idea with the wider group?
+Please post your idea on the Ansible Forum so we can discuss it with the wider group.
* [Ansible Core on the Ansible Forum](https://forum.ansible.com/tag/ansible-core)
-* Matrix: [#devel:ansible.im](https://matrix.to/#/#devel:ansible.im)
For other alternatives, check this page for a more complete list of communication channels and their purposes:
diff --git a/lib/ansible/_internal/__init__.py b/lib/ansible/_internal/__init__.py
new file mode 100644
index 00000000000..2975a528b6a
--- /dev/null
+++ b/lib/ansible/_internal/__init__.py
@@ -0,0 +1,53 @@
+from __future__ import annotations
+
+import importlib
+import typing as t
+
+from ansible.module_utils import _internal
+from ansible.module_utils._internal._json import _profiles
+
+
+def get_controller_serialize_map() -> dict[type, t.Callable]:
+ """
+ Injected into module_utils code to augment serialization maps with controller-only types.
+ This implementation replaces the no-op version in module_utils._internal in controller contexts.
+ """
+ from ansible._internal._templating import _lazy_containers
+ from ansible.parsing.vault import EncryptedString
+
+ return {
+ _lazy_containers._AnsibleLazyTemplateDict: _profiles._JSONSerializationProfile.discard_tags,
+ _lazy_containers._AnsibleLazyTemplateList: _profiles._JSONSerializationProfile.discard_tags,
+ EncryptedString: str, # preserves tags since this is an instance of EncryptedString; if tags should be discarded from str, another entry will handle it
+ }
+
+
+def import_controller_module(module_name: str, /) -> t.Any:
+ """
+ Injected into module_utils code to import and return the specified module.
+ This implementation replaces the no-op version in module_utils._internal in controller contexts.
+ """
+ return importlib.import_module(module_name)
+
+
+_T = t.TypeVar('_T')
+
+
+def experimental(obj: _T) -> _T:
+ """
+ Decorator for experimental types and methods outside the `_internal` package which accept or expose internal types.
+ As with internal APIs, these are subject to change at any time without notice.
+ """
+ return obj
+
+
+def setup() -> None:
+ """No-op function to ensure that side-effect only imports of this module are not flagged/removed as 'unused'."""
+
+
+# DTFIX-FUTURE: this is really fragile; disordered/incorrect imports (among other things) can mess it up. Consider a hosting-env-managed context
+# with an enum with at least Controller/Target/Unknown values, and possibly using lazy-init module shims or some other mechanism to allow controller-side
+# notification/augmentation of this kind of metadata.
+_internal.get_controller_serialize_map = get_controller_serialize_map
+_internal.import_controller_module = import_controller_module
+_internal.is_controller = True
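
The module above works by rebinding no-op hooks that `ansible.module_utils._internal` ships for target-side use. A standalone sketch of that pattern, with illustrative names rather than the real ansible module layout:

```python
# Sketch of the hook-override pattern: a shared module ships no-op defaults,
# and controller-only code rebinds them at import time. Names are illustrative.
import importlib
import types
import typing as t

# Stand-in for the shared module, usable on both controller and target.
shared = types.SimpleNamespace(
    is_controller=False,
    import_controller_module=lambda module_name: None,  # no-op default on targets
)

def _controller_import(module_name: str) -> t.Any:
    """Real implementation, only bound when running on the controller."""
    return importlib.import_module(module_name)

# Controller-side import side effect, as _internal/__init__.py does above.
shared.import_controller_module = _controller_import
shared.is_controller = True

assert shared.import_controller_module('json').__name__ == 'json'
```
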
diff --git a/test/integration/targets/rpm_key/defaults/main.yaml b/lib/ansible/_internal/_ansiballz/__init__.py
similarity index 100%
rename from test/integration/targets/rpm_key/defaults/main.yaml
rename to lib/ansible/_internal/_ansiballz/__init__.py
diff --git a/lib/ansible/_internal/_ansiballz/_builder.py b/lib/ansible/_internal/_ansiballz/_builder.py
new file mode 100644
index 00000000000..eff6392904c
--- /dev/null
+++ b/lib/ansible/_internal/_ansiballz/_builder.py
@@ -0,0 +1,101 @@
+from __future__ import annotations
+
+import dataclasses
+import json
+
+import typing as t
+
+from ansible.module_utils._internal._ansiballz import _extensions
+from ansible.module_utils._internal._ansiballz._extensions import _pydevd, _coverage
+from ansible.constants import config
+
+_T = t.TypeVar('_T')
+
+
+class ExtensionManager:
+ """AnsiballZ extension manager."""
+
+ def __init__(
+ self,
+ debugger: _pydevd.Options | None = None,
+ coverage: _coverage.Options | None = None,
+ ) -> None:
+ options = dict(
+ _pydevd=debugger,
+ _coverage=coverage,
+ )
+
+ self._debugger = debugger
+ self._coverage = coverage
+ self._extension_names = tuple(name for name, option in options.items() if option)
+ self._module_names = tuple(f'{_extensions.__name__}.{name}' for name in self._extension_names)
+
+ self.source_mapping: dict[str, str] = {}
+
+ @property
+ def debugger_enabled(self) -> bool:
+ """Returns True if the debugger extension is enabled, otherwise False."""
+ return bool(self._debugger)
+
+ @property
+ def extension_names(self) -> tuple[str, ...]:
+ """Names of extensions to include in the AnsiballZ payload."""
+ return self._extension_names
+
+ @property
+ def module_names(self) -> tuple[str, ...]:
+ """Python module names of extensions to include in the AnsiballZ payload."""
+ return self._module_names
+
+ def get_extensions(self) -> dict[str, dict[str, object]]:
+ """Return the configured extensions and their options."""
+ extension_options: dict[str, t.Any] = {}
+
+ if self._debugger:
+ extension_options['_pydevd'] = dataclasses.replace(
+ self._debugger,
+ source_mapping=self._get_source_mapping(),
+ )
+
+ if self._coverage:
+ extension_options['_coverage'] = self._coverage
+
+ extensions = {extension: dataclasses.asdict(options) for extension, options in extension_options.items()}
+
+ return extensions
+
+ def _get_source_mapping(self) -> dict[str, str]:
+ """Get the source mapping, adjusting the source root as needed."""
+ if self._debugger.source_mapping:
+ source_mapping = {self._translate_path(key): value for key, value in self.source_mapping.items()}
+ else:
+ source_mapping = self.source_mapping
+
+ return source_mapping
+
+ def _translate_path(self, path: str) -> str:
+ """Translate a local path to a foreign path."""
+ for replace, match in self._debugger.source_mapping.items():
+ if path.startswith(match):
+ return replace + path[len(match) :]
+
+ return path
+
+ @classmethod
+ def create(cls, task_vars: dict[str, object]) -> t.Self:
+ """Create an instance using the provided task vars."""
+ return cls(
+ debugger=cls._get_options('_ANSIBALLZ_DEBUGGER_CONFIG', _pydevd.Options, task_vars),
+ coverage=cls._get_options('_ANSIBALLZ_COVERAGE_CONFIG', _coverage.Options, task_vars),
+ )
+
+ @classmethod
+ def _get_options(cls, name: str, config_type: type[_T], task_vars: dict[str, object]) -> _T | None:
+ """Parse configuration from the named environment variable as the specified type, or None if not configured."""
+ if (value := config.get_config_value(name, variables=task_vars)) is None:
+ return None
+
+ data = json.loads(value) if isinstance(value, str) else value
+ options = config_type(**data)
+
+ return options
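
A hedged usage sketch for `ExtensionManager`; since the `_pydevd`/`_coverage` `Options` dataclasses are not shown in this diff, only the disabled path is exercised here:

```python
# Usage sketch for the ExtensionManager class above; enabling an extension
# would pass the matching Options value instead of None.
manager = ExtensionManager(debugger=None, coverage=None)

assert manager.debugger_enabled is False
assert manager.extension_names == ()   # nothing bundled into the AnsiballZ payload
assert manager.module_names == ()
assert manager.get_extensions() == {}  # no extension options to serialize
```
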
diff --git a/lib/ansible/_internal/_ansiballz/_wrapper.py b/lib/ansible/_internal/_ansiballz/_wrapper.py
new file mode 100644
index 00000000000..4d9d9b4f4bf
--- /dev/null
+++ b/lib/ansible/_internal/_ansiballz/_wrapper.py
@@ -0,0 +1,262 @@
+# shebang placeholder
+
+from __future__ import annotations
+
+import datetime
+
+# For the test-module.py script to tell this is an ANSIBALLZ_WRAPPER
+_ANSIBALLZ_WRAPPER = True
+
+# This code is part of Ansible, but is an independent component.
+# The code in this particular templatable string, and this templatable string
+# only, is BSD licensed. Modules which end up using this snippet, which is
+# dynamically combined together by Ansible still belong to the author of the
+# module, and they may assign their own license to the complete work.
+#
+# Copyright (c), James Cammarata, 2016
+# Copyright (c), Toshio Kuratomi, 2016
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+def _ansiballz_main(
+ zip_data: str,
+ ansible_module: str,
+ module_fqn: str,
+ params: str,
+ profile: str,
+ date_time: datetime.datetime,
+ extensions: dict[str, dict[str, object]],
+ rlimit_nofile: int,
+) -> None:
+ import os
+ import os.path
+
+ # Access to the working directory is required by Python when using pipelining, as well as for the coverage module.
+ # Some platforms, such as macOS, may not allow querying the working directory when using become to drop privileges.
+ try:
+ os.getcwd()
+ except OSError:
+ try:
+ os.chdir(os.path.expanduser('~'))
+ except OSError:
+ os.chdir('/')
+
+ if rlimit_nofile:
+ import resource
+
+ existing_soft, existing_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
+
+ # adjust soft limit subject to existing hard limit
+ requested_soft = min(existing_hard, rlimit_nofile)
+
+ if requested_soft != existing_soft:
+ try:
+ resource.setrlimit(resource.RLIMIT_NOFILE, (requested_soft, existing_hard))
+ except ValueError:
+                # some platforms (e.g. macOS) lie about their hard limit
+ pass
+
+ import sys
+ import __main__
+
+ # For some distros and python versions we pick up this script in the temporary
+ # directory. This leads to problems when the ansible module masks a python
+ # library that another import needs. We have not figured out what about the
+ # specific distros and python versions causes this to behave differently.
+ #
+ # Tested distros:
+ # Fedora23 with python3.4 Works
+ # Ubuntu15.10 with python2.7 Works
+ # Ubuntu15.10 with python3.4 Fails without this
+ # Ubuntu16.04.1 with python3.5 Fails without this
+ # To test on another platform:
+ # * use the copy module (since this shadows the stdlib copy module)
+ # * Turn off pipelining
+ # * Make sure that the destination file does not exist
+ # * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m'
+ # This will traceback in shutil. Looking at the complete traceback will show
+ # that shutil is importing copy which finds the ansible module instead of the
+ # stdlib module
+ scriptdir = None
+ try:
+ scriptdir = os.path.dirname(os.path.realpath(__main__.__file__))
+ except (AttributeError, OSError):
+ # Some platforms don't set __file__ when reading from stdin
+ # OSX raises OSError if using abspath() in a directory we don't have
+ # permission to read (realpath calls abspath)
+ pass
+
+ # Strip cwd from sys.path to avoid potential permissions issues
+ excludes = {'', '.', scriptdir}
+ sys.path = [p for p in sys.path if p not in excludes]
+
+ import base64
+ import shutil
+ import tempfile
+ import zipfile
+
+ def invoke_module(modlib_path: str, json_params: bytes) -> None:
+ # When installed via setuptools (including python setup.py install),
+ # ansible may be installed with an easy-install.pth file. That file
+ # may load the system-wide install of ansible rather than the one in
+ # the module. sitecustomize is the only way to override that setting.
+ z = zipfile.ZipFile(modlib_path, mode='a')
+
+        # modlib_path is text; the sitecustomize payload must be encoded to bytes before writing
+        sitecustomize = 'import sys\nsys.path.insert(0,"%s")\n' % modlib_path
+ sitecustomize = sitecustomize.encode('utf-8')
+ # Use a ZipInfo to work around zipfile limitation on hosts with
+ # clocks set to a pre-1980 year (for instance, Raspberry Pi)
+ zinfo = zipfile.ZipInfo()
+ zinfo.filename = 'sitecustomize.py'
+ zinfo.date_time = date_time.utctimetuple()[:6]
+ z.writestr(zinfo, sitecustomize)
+ z.close()
+
+ # Put the zipped up module_utils we got from the controller first in the python path so that we
+ # can monkeypatch the right basic
+ sys.path.insert(0, modlib_path)
+
+ from ansible.module_utils._internal._ansiballz import _loader
+
+ _loader.run_module(
+ json_params=json_params,
+ profile=profile,
+ module_fqn=module_fqn,
+ modlib_path=modlib_path,
+ extensions=extensions,
+ )
+
+ def debug(command: str, modlib_path: str, json_params: bytes) -> None:
+ # The code here normally doesn't run. It's only used for debugging on the
+ # remote machine.
+ #
+ # The subcommands in this function make it easier to debug ansiballz
+ # modules. Here's the basic steps:
+ #
+ # Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
+ # to save the module file remotely::
+ # $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
+ #
+ # Part of the verbose output will tell you where on the remote machine the
+ # module was written to::
+ # [...]
+ # SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
+ # PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
+ # ControlPath=/home/badger/.ansible/cp/ansible-ssh-%h-%p-%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
+ # LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
+ # [...]
+ #
+    # Log in to the remote machine and run the module file from the previous
+ # step with the explode subcommand to extract the module payload into
+ # source files::
+ # $ ssh host1
+ # $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
+ # Module expanded into:
+ # /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible
+ #
+ # You can now edit the source files to instrument the code or experiment with
+ # different parameter values. When you're ready to run the code you've modified
+ # (instead of the code from the actual zipped module), use the execute subcommand like this::
+ # $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute
+
+ # Okay to use __file__ here because we're running from a kept file
+ basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
+ args_path = os.path.join(basedir, 'args')
+
+ if command == 'explode':
+ # transform the ZIPDATA into an exploded directory of code and then
+ # print the path to the code. This is an easy way for people to look
+ # at the code on the remote machine for debugging it in that
+ # environment
+ z = zipfile.ZipFile(modlib_path)
+ for filename in z.namelist():
+ if filename.startswith('/'):
+ raise Exception('Something wrong with this module zip file: should not contain absolute paths')
+
+ dest_filename = os.path.join(basedir, filename)
+ if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
+ os.makedirs(dest_filename)
+ else:
+ directory = os.path.dirname(dest_filename)
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+ with open(dest_filename, 'wb') as writer:
+ writer.write(z.read(filename))
+
+ # write the args file
+ with open(args_path, 'wb') as writer:
+ writer.write(json_params)
+
+ print('Module expanded into:')
+ print(basedir)
+
+ elif command == 'execute':
+ # Execute the exploded code instead of executing the module from the
+ # embedded ZIPDATA. This allows people to easily run their modified
+ # code on the remote machine to see how changes will affect it.
+
+ # Set pythonpath to the debug dir
+ sys.path.insert(0, basedir)
+
+ # read in the args file which the user may have modified
+ with open(args_path, 'rb') as reader:
+ json_params = reader.read()
+
+ from ansible.module_utils._internal._ansiballz import _loader
+
+ _loader.run_module(
+ json_params=json_params,
+ profile=profile,
+ module_fqn=module_fqn,
+ modlib_path=modlib_path,
+ extensions=extensions,
+ )
+
+ else:
+ print(f'FATAL: Unknown debug command {command!r}. Doing nothing.')
+
+ #
+    # See comments in the debug() function for information on debugging
+ #
+
+ encoded_params = params.encode()
+
+ # There's a race condition with the controller removing the
+ # remote_tmpdir and this module executing under async. So we cannot
+ # store this in remote_tmpdir (use system tempdir instead)
+ # Only need to use [ansible_module]_payload_ in the temp_path until we move to zipimport
+ # (this helps ansible-test produce coverage stats)
+ # IMPORTANT: The real path must be used here to ensure a remote debugger such as PyCharm (using pydevd) can resolve paths correctly.
+ temp_path = os.path.realpath(tempfile.mkdtemp(prefix='ansible_' + ansible_module + '_payload_'))
+
+ try:
+ zipped_mod = os.path.join(temp_path, 'ansible_' + ansible_module + '_payload.zip')
+
+ with open(zipped_mod, 'wb') as modlib:
+ modlib.write(base64.b64decode(zip_data))
+
+ if len(sys.argv) == 2:
+ debug(sys.argv[1], zipped_mod, encoded_params)
+ else:
+ invoke_module(zipped_mod, encoded_params)
+ finally:
+ shutil.rmtree(temp_path, ignore_errors=True)
diff --git a/lib/ansible/_internal/_collection_proxy.py b/lib/ansible/_internal/_collection_proxy.py
new file mode 100644
index 00000000000..b14dcf386fa
--- /dev/null
+++ b/lib/ansible/_internal/_collection_proxy.py
@@ -0,0 +1,47 @@
+from __future__ import annotations as _annotations
+
+import collections.abc as _c
+import typing as _t
+
+_T_co = _t.TypeVar('_T_co', covariant=True)
+
+
+class SequenceProxy(_c.Sequence[_T_co]):
+ """A read-only sequence proxy."""
+
+ # DTFIX5: needs unit test coverage
+
+ __slots__ = ('__value',)
+
+ def __init__(self, value: _c.Sequence[_T_co]) -> None:
+ self.__value = value
+
+ @_t.overload
+ def __getitem__(self, index: int) -> _T_co: ...
+
+ @_t.overload
+ def __getitem__(self, index: slice) -> _c.Sequence[_T_co]: ...
+
+ def __getitem__(self, index: int | slice) -> _T_co | _c.Sequence[_T_co]:
+ if isinstance(index, slice):
+ return self.__class__(self.__value[index])
+
+ return self.__value[index]
+
+ def __len__(self) -> int:
+ return len(self.__value)
+
+ def __contains__(self, item: object) -> bool:
+ return item in self.__value
+
+    def __iter__(self) -> _c.Iterator[_T_co]:
+ yield from self.__value
+
+ def __reversed__(self) -> _c.Iterator[_T_co]:
+ return reversed(self.__value)
+
+ def index(self, *args) -> int:
+ return self.__value.index(*args)
+
+ def count(self, value: object) -> int:
+ return self.__value.count(value)
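+
+
+# Usage sketch (illustrative):
+#
+#   proxy = SequenceProxy([1, 2, 3])
+#   proxy[0]         # 1
+#   proxy[1:]        # a SequenceProxy over [2, 3]; slices are proxied too
+#   2 in proxy       # True
+#   proxy.append(4)  # AttributeError; the proxy exposes no mutating methods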
diff --git a/lib/ansible/_internal/_datatag/__init__.py b/lib/ansible/_internal/_datatag/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/lib/ansible/_internal/_datatag/_tags.py b/lib/ansible/_internal/_datatag/_tags.py
new file mode 100644
index 00000000000..e8e39f28328
--- /dev/null
+++ b/lib/ansible/_internal/_datatag/_tags.py
@@ -0,0 +1,130 @@
+from __future__ import annotations
+
+import dataclasses
+import os
+import types
+import typing as t
+
+from ansible.module_utils._internal._datatag import _tag_dataclass_kwargs, AnsibleDatatagBase, AnsibleSingletonTagBase
+
+
+@dataclasses.dataclass(**_tag_dataclass_kwargs)
+class Origin(AnsibleDatatagBase):
+ """
+ A tag that stores origin metadata for a tagged value, intended for forensic/diagnostic use.
+ Origin metadata should not be used to make runtime decisions, as it is not guaranteed to be present or accurate.
+ Setting both `path` and `line_num` can result in diagnostic display of referenced file contents.
+ Either `path` or `description` must be present.
+ """
+
+ path: str | None = None
+ """The path from which the tagged content originated."""
+ description: str | None = None
+ """A description of the origin, for display to users."""
+ line_num: int | None = None
+ """An optional line number, starting at 1."""
+ col_num: int | None = None
+ """An optional column number, starting at 1."""
+
+ UNKNOWN: t.ClassVar[t.Self]
+
+ @classmethod
+ def get_or_create_tag(cls, value: t.Any, path: str | os.PathLike | None) -> Origin:
+ """Return the tag from the given value, creating a tag from the provided path if no tag was found."""
+ if not (origin := cls.get_tag(value)):
+ if path:
+ origin = Origin(path=str(path)) # convert tagged strings and path-like values to a native str
+ else:
+ origin = Origin.UNKNOWN
+
+ return origin
+
+ def replace(
+ self,
+ path: str | types.EllipsisType = ...,
+ description: str | types.EllipsisType = ...,
+ line_num: int | None | types.EllipsisType = ...,
+ col_num: int | None | types.EllipsisType = ...,
+ ) -> t.Self:
+ """Return a new origin based on an existing one, with the given fields replaced."""
+ return dataclasses.replace(
+ self,
+ **{
+ key: value
+ for key, value in dict(
+ path=path,
+ description=description,
+ line_num=line_num,
+ col_num=col_num,
+ ).items()
+ if value is not ...
+ }, # type: ignore[arg-type]
+ )
+
+ def _post_validate(self) -> None:
+ if self.path:
+ if not self.path.startswith('/'):
+                raise RuntimeError('The `path` field must be an absolute path.')
+ elif not self.description:
+            raise RuntimeError('Either the `path` or `description` field must be specified.')
+
+ def __str__(self) -> str:
+ """Renders the origin in the form of path:line_num:col_num, omitting missing/invalid elements from the right."""
+ if self.path:
+ value = self.path
+ else:
+ value = self.description
+
+ if self.line_num and self.line_num > 0:
+ value += f':{self.line_num}'
+
+ if self.col_num and self.col_num > 0:
+ value += f':{self.col_num}'
+
+ if self.path and self.description:
+ value += f' ({self.description})'
+
+ return value
+
+
+Origin.UNKNOWN = Origin(description='')
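+
+# Rendering sketch (illustrative):
+#
+#   str(Origin(path='/tmp/play.yml', line_num=3, col_num=7))  # -> '/tmp/play.yml:3:7'
+#   str(Origin(description='<inventory>', line_num=2))        # -> '<inventory>:2'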
+
+
+@dataclasses.dataclass(**_tag_dataclass_kwargs)
+class VaultedValue(AnsibleDatatagBase):
+ """Tag for vault-encrypted strings that carries the original ciphertext for round-tripping."""
+
+ ciphertext: str
+
+ def _get_tag_to_propagate(self, src: t.Any, value: object, *, value_type: t.Optional[type] = None) -> t.Self | None:
+ # Since VaultedValue stores the encrypted representation of the value on which it is tagged,
+ # it is incorrect to propagate the tag to a value which is not equal to the original.
+ # If the tag were copied to another value and subsequently serialized as the original encrypted value,
+ # the result would then differ from the value on which the tag was applied.
+
+ # Comparisons which can trigger an exception are indicative of a bug and should not be handled here.
+ # For example:
+ # * When `src` is an undecryptable `EncryptedString` -- it is not valid to apply this tag to that type.
+        #   * When `value` is a `Marker` -- this requires templating, but vaulted values do not support templating.
+
+ if src == value: # assume the tag was correctly applied to src
+ return self # same plaintext value, tag propagation with same ciphertext is safe
+
+ return self.get_tag(value) # different value, preserve the existing tag, if any
+
+
+@dataclasses.dataclass(**_tag_dataclass_kwargs)
+class TrustedAsTemplate(AnsibleSingletonTagBase):
+ """
+ Indicates the tagged string is trusted to parse and render as a template.
+ Do *NOT* apply this tag to data from untrusted sources, as this would allow code injection during templating.
+ """
+
+
+@dataclasses.dataclass(**_tag_dataclass_kwargs)
+class SourceWasEncrypted(AnsibleSingletonTagBase):
+ """
+ For internal use only.
+ Indicates the tagged value was sourced from an encrypted file.
+ Currently applied only by DataLoader.get_text_file_contents() and by extension DataLoader.load_from_file().
+ """
diff --git a/lib/ansible/_internal/_datatag/_utils.py b/lib/ansible/_internal/_datatag/_utils.py
new file mode 100644
index 00000000000..bf57ae29ac3
--- /dev/null
+++ b/lib/ansible/_internal/_datatag/_utils.py
@@ -0,0 +1,19 @@
+from __future__ import annotations
+
+from ansible.module_utils._internal._datatag import AnsibleTagHelper
+
+
+def str_problematic_strip(value: str) -> str:
+ """
+ Return a copy of `value` with leading and trailing whitespace removed.
+ Used where `str.strip` is needed, but tags must be preserved *AND* the stripping behavior likely shouldn't exist.
+ If the stripping behavior is non-problematic, use `AnsibleTagHelper.tag_copy` around `str.strip` instead.
+ """
+ if (stripped_value := value.strip()) == value:
+ return value
+
+ # FUTURE: consider deprecating some/all usages of this method; they generally imply a code smell or pattern we shouldn't be supporting
+
+ stripped_value = AnsibleTagHelper.tag_copy(value, stripped_value)
+
+ return stripped_value
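+
+
+# Behavior sketch (illustrative): tags on the input survive the strip.
+#
+#   tagged = TrustedAsTemplate().tag('  {{ item }}  ')   # assumes the _tags import
+#   str_problematic_strip(tagged)                        # '{{ item }}', still tagged trusted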
diff --git a/lib/ansible/_internal/_datatag/_wrappers.py b/lib/ansible/_internal/_datatag/_wrappers.py
new file mode 100644
index 00000000000..51cb4d54635
--- /dev/null
+++ b/lib/ansible/_internal/_datatag/_wrappers.py
@@ -0,0 +1,33 @@
+from __future__ import annotations
+
+import io
+import typing as _t
+
+from .._wrapt import ObjectProxy
+from ...module_utils._internal import _datatag
+
+
+class TaggedStreamWrapper(ObjectProxy):
+ """
+ Janky proxy around IOBase to allow streams to carry tags and support basic interrogation by the tagging API.
+ Most tagging operations will have undefined behavior for this type.
+ """
+
+ _self__ansible_tags_mapping: _datatag._AnsibleTagsMapping
+
+ def __init__(self, stream: io.IOBase, tags: _datatag.AnsibleDatatagBase | _t.Iterable[_datatag.AnsibleDatatagBase]) -> None:
+ super().__init__(stream)
+
+ tag_list: list[_datatag.AnsibleDatatagBase]
+
+ # noinspection PyProtectedMember
+ if type(tags) in _datatag._known_tag_types:
+ tag_list = [tags] # type: ignore[list-item]
+ else:
+ tag_list = list(tags) # type: ignore[arg-type]
+
+ self._self__ansible_tags_mapping = _datatag._AnsibleTagsMapping((type(tag), tag) for tag in tag_list)
+
+ @property
+ def _ansible_tags_mapping(self) -> _datatag._AnsibleTagsMapping:
+ return self._self__ansible_tags_mapping
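+
+
+# Usage sketch (illustrative; assumes an Origin tag from ansible._internal._datatag._tags):
+#
+#   stream = TaggedStreamWrapper(io.BytesIO(b'data'), Origin(description='example'))
+#   # behaves like the wrapped stream, while exposing its tags via _ansible_tags_mapping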
diff --git a/lib/ansible/_internal/_errors/__init__.py b/lib/ansible/_internal/_errors/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/lib/ansible/_internal/_errors/_alarm_timeout.py b/lib/ansible/_internal/_errors/_alarm_timeout.py
new file mode 100644
index 00000000000..bad494b60dd
--- /dev/null
+++ b/lib/ansible/_internal/_errors/_alarm_timeout.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+import contextlib
+import signal
+import types
+import typing as _t
+
+from ansible.module_utils import datatag
+
+
+class AnsibleTimeoutError(BaseException):
+ """A general purpose timeout."""
+
+ _MAX_TIMEOUT = 100_000_000
+ """
+ The maximum supported timeout value.
+    This value comes from BSD's alarm limit, a consequence of alarm being implemented with setitimer there.
+ """
+
+ def __init__(self, timeout: int) -> None:
+ self.timeout = timeout
+
+ super().__init__(f"Timed out after {timeout} second(s).")
+
+ @classmethod
+ @contextlib.contextmanager
+ def alarm_timeout(cls, timeout: int | None) -> _t.Iterator[None]:
+ """
+ Context for running code under an optional timeout.
+ Raises an instance of this class if the timeout occurs.
+
+ New usages of this timeout mechanism are discouraged.
+ """
+ if timeout is not None:
+ if not isinstance(timeout, int):
+ raise TypeError(f"Timeout requires 'int' argument, not {datatag.native_type_name(timeout)!r}.")
+
+ if timeout < 0 or timeout > cls._MAX_TIMEOUT:
+ # On BSD based systems, alarm is implemented using setitimer.
+ # If out-of-bounds values are passed to alarm, they will return -1, which would be interpreted as an existing timer being set.
+ # To avoid that, bounds checking is performed in advance.
+ raise ValueError(f'Timeout {timeout} is invalid, it must be between 0 and {cls._MAX_TIMEOUT}.')
+
+ if not timeout:
+ yield # execute the context manager's body
+ return # no timeout to deal with, exit immediately
+
+ def on_alarm(_signal: int, _frame: types.FrameType) -> None:
+ raise cls(timeout)
+
+ if signal.signal(signal.SIGALRM, on_alarm):
+ raise RuntimeError("An existing alarm handler was present.")
+
+ try:
+ try:
+ if signal.alarm(timeout):
+ raise RuntimeError("An existing alarm was set.")
+
+ yield # execute the context manager's body
+ finally:
+ # Disable the alarm.
+ # If the alarm fires inside this finally block, the alarm is still disabled.
+                # This guarantees the cleanup code in the outer finally block runs without risk of encountering the timeout error raised by the alarm.
+ signal.alarm(0)
+ finally:
+ signal.signal(signal.SIGALRM, signal.SIG_DFL)
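+
+
+# Usage sketch (illustrative):
+#
+#   try:
+#       with AnsibleTimeoutError.alarm_timeout(5):
+#           run_slow_operation()  # hypothetical
+#   except AnsibleTimeoutError as ex:
+#       print(f'gave up after {ex.timeout} second(s)')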
diff --git a/lib/ansible/_internal/_errors/_captured.py b/lib/ansible/_internal/_errors/_captured.py
new file mode 100644
index 00000000000..2435ebcd4b1
--- /dev/null
+++ b/lib/ansible/_internal/_errors/_captured.py
@@ -0,0 +1,123 @@
+from __future__ import annotations
+
+import collections.abc as _c
+import dataclasses
+import typing as t
+
+from ansible._internal._errors import _error_utils
+from ansible.errors import AnsibleRuntimeError
+from ansible.module_utils._internal import _messages
+
+
+class AnsibleCapturedError(AnsibleRuntimeError):
+ """An exception representing error detail captured in another context where the error detail must be serialized to be preserved."""
+
+ context: t.ClassVar[str]
+
+ def __init__(
+ self,
+ *,
+ obj: t.Any = None,
+ event: _messages.Event,
+ ) -> None:
+ super().__init__(
+ obj=obj,
+ )
+
+ self._event = event
+
+
+class AnsibleResultCapturedError(AnsibleCapturedError, _error_utils.ContributesToTaskResult):
+ """
+ An exception representing error detail captured in a foreign context where an action/module result dictionary is involved.
+
+ This exception provides a result dictionary via the ContributesToTaskResult mixin.
+ """
+
+ def __init__(self, event: _messages.Event, result: dict[str, t.Any]) -> None:
+ super().__init__(event=event)
+
+ self._result = result
+
+ @property
+ def result_contribution(self) -> _c.Mapping[str, object]:
+ return self._result
+
+ @classmethod
+ def maybe_raise_on_result(cls, result: dict[str, t.Any]) -> None:
+ """Normalize the result and raise an exception if the result indicated failure."""
+ if error_summary := cls.normalize_result_exception(result):
+ raise error_summary.error_type(error_summary.event, result)
+
+ @classmethod
+ def normalize_result_exception(cls, result: dict[str, t.Any]) -> CapturedErrorSummary | None:
+ """
+ Normalize the result `exception`, if any, to be a `CapturedErrorSummary` instance.
+ If a new `CapturedErrorSummary` was created, the `error_type` will be `cls`.
+ The `exception` key will be removed if falsey.
+ A `CapturedErrorSummary` instance will be returned if `failed` is truthy.
+ """
+        if cls is AnsibleResultCapturedError:
+            raise TypeError('The normalize_result_exception method cannot be called on the AnsibleResultCapturedError base type, use a derived type.')
+
+ if not isinstance(result, dict):
+ raise TypeError(f'Malformed result. Received {type(result)} instead of {dict}.')
+
+ failed = result.get('failed') # DTFIX-FUTURE: warn if failed is present and not a bool, or exception is present without failed being True
+ exception = result.pop('exception', None)
+
+ if not failed and not exception:
+ return None
+
+ if isinstance(exception, CapturedErrorSummary):
+ error_summary = exception
+ elif isinstance(exception, _messages.ErrorSummary):
+ error_summary = CapturedErrorSummary(
+ event=exception.event,
+ error_type=cls,
+ )
+ else:
+            # translate non-ErrorSummary exceptions
+ error_summary = CapturedErrorSummary(
+ event=_messages.Event(
+ msg=str(result.get('msg', 'Unknown error.')),
+ formatted_traceback=cls._normalize_traceback(exception),
+ ),
+ error_type=cls,
+ )
+
+ result.update(exception=error_summary)
+
+ return error_summary if failed else None # even though error detail was normalized, only return it if the result indicated failure
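+
+    # Flow sketch (illustrative): a result of {'failed': True, 'msg': 'boom'} with no
+    # `exception` key gets a synthesized CapturedErrorSummary stored under its `exception` key;
+    # maybe_raise_on_result() then re-raises it as cls(event, result).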
+
+ @classmethod
+ def _normalize_traceback(cls, value: object | None) -> str | None:
+ """Normalize the provided traceback value, returning None if it is falsey."""
+ if not value:
+ return None
+
+ value = str(value).rstrip()
+
+ if not value:
+ return None
+
+ return value + '\n'
+
+
+class AnsibleActionCapturedError(AnsibleResultCapturedError):
+ """An exception representing error detail sourced directly by an action in its result dictionary."""
+
+ _default_message = 'Action failed.'
+ context = 'action'
+
+
+class AnsibleModuleCapturedError(AnsibleResultCapturedError):
+ """An exception representing error detail captured in a module context and returned from an action's result dictionary."""
+
+ _default_message = 'Module failed.'
+ context = 'target'
+
+
+@dataclasses.dataclass(**_messages._dataclass_kwargs)
+class CapturedErrorSummary(_messages.ErrorSummary):
+ error_type: type[AnsibleResultCapturedError] | None = None
diff --git a/lib/ansible/_internal/_errors/_error_factory.py b/lib/ansible/_internal/_errors/_error_factory.py
new file mode 100644
index 00000000000..7e48957e525
--- /dev/null
+++ b/lib/ansible/_internal/_errors/_error_factory.py
@@ -0,0 +1,89 @@
+from __future__ import annotations as _annotations
+
+from ansible.module_utils._internal import _errors, _messages
+
+
+class ControllerEventFactory(_errors.EventFactory):
+ """Factory for creating `Event` instances from `BaseException` instances on the controller."""
+
+ def _get_msg(self, exception: BaseException) -> str | None:
+ from ansible.errors import AnsibleError
+
+ if not isinstance(exception, AnsibleError):
+ return super()._get_msg(exception)
+
+ return exception._original_message.strip()
+
+ def _get_formatted_source_context(self, exception: BaseException) -> str | None:
+ from ansible.errors import AnsibleError
+
+ if not isinstance(exception, AnsibleError):
+ return super()._get_formatted_source_context(exception)
+
+ return exception._formatted_source_context
+
+ def _get_help_text(self, exception: BaseException) -> str | None:
+ from ansible.errors import AnsibleError
+
+ if not isinstance(exception, AnsibleError):
+ return super()._get_help_text(exception)
+
+ return exception._help_text
+
+ def _get_chain(self, exception: BaseException) -> _messages.EventChain | None:
+ from ansible._internal._errors import _captured # avoid circular import due to AnsibleError import
+
+ if isinstance(exception, _captured.AnsibleCapturedError):
+ # a captured error provides its own cause event, it never has a normal __cause__
+ return _messages.EventChain(
+ msg_reason=_errors.MSG_REASON_DIRECT_CAUSE,
+ traceback_reason=f'The above {exception.context} exception was the direct cause of the following controller exception:',
+ event=exception._event,
+ )
+
+ return super()._get_chain(exception)
+
+ def _follow_cause(self, exception: BaseException) -> bool:
+ from ansible.errors import AnsibleError
+
+ return not isinstance(exception, AnsibleError) or exception._include_cause_message
+
+ def _get_cause(self, exception: BaseException) -> BaseException | None:
+ # deprecated: description='remove support for orig_exc (deprecated in 2.23)' core_version='2.27'
+
+ cause = super()._get_cause(exception)
+
+ from ansible.errors import AnsibleError
+
+ if not isinstance(exception, AnsibleError):
+ return cause
+
+ try:
+ from ansible.utils.display import _display
+ except Exception: # pylint: disable=broad-except # if config is broken, this can raise things other than ImportError
+ _display = None
+
+ if cause:
+ if exception.orig_exc and exception.orig_exc is not cause and _display:
+ _display.warning(
+ msg=f"The `orig_exc` argument to `{type(exception).__name__}` was given, but differed from the cause given by `raise ... from`.",
+ )
+
+ return cause
+
+ if exception.orig_exc:
+ if _display:
+ # encourage the use of `raise ... from` before deprecating `orig_exc`
+ _display.warning(
+ msg=f"The `orig_exc` argument to `{type(exception).__name__}` was given without using `raise ... from orig_exc`.",
+ )
+
+ return exception.orig_exc
+
+ return None
+
+ def _get_events(self, exception: BaseException) -> tuple[_messages.Event, ...] | None:
+ if isinstance(exception, BaseExceptionGroup):
+ return tuple(self._convert_exception(ex) for ex in exception.exceptions)
+
+ return None
diff --git a/lib/ansible/_internal/_errors/_error_utils.py b/lib/ansible/_internal/_errors/_error_utils.py
new file mode 100644
index 00000000000..ee8c4b2c68c
--- /dev/null
+++ b/lib/ansible/_internal/_errors/_error_utils.py
@@ -0,0 +1,240 @@
+from __future__ import annotations
+
+import abc
+import collections.abc as _c
+import dataclasses
+import itertools
+import pathlib
+import textwrap
+import typing as t
+
+from ansible._internal._datatag._tags import Origin
+from ansible._internal._errors import _error_factory
+from ansible.module_utils._internal import _ambient_context, _event_utils, _messages, _traceback
+
+
+class ContributesToTaskResult(metaclass=abc.ABCMeta):
+ """Exceptions may include this mixin to contribute task result dictionary data directly to the final result."""
+
+ @property
+ @abc.abstractmethod
+ def result_contribution(self) -> _c.Mapping[str, object]:
+ """Mapping of results to apply to the task result."""
+
+ @property
+ def omit_exception_key(self) -> bool:
+ """Non-error exceptions (e.g., `AnsibleActionSkip`) must return `True` to ensure omission of the `exception` key."""
+ return False
+
+ @property
+ def omit_failed_key(self) -> bool:
+ """Exceptions representing non-failure scenarios (e.g., `skipped`, `unreachable`) must return `True` to ensure omisson of the `failed` key."""
+ return False
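+
+    # A minimal implementing exception might look like (hypothetical):
+    #
+    #   class ExampleSkip(Exception, ContributesToTaskResult):
+    #       @property
+    #       def result_contribution(self) -> _c.Mapping[str, object]:
+    #           return dict(skipped=True)
+    #
+    #       @property
+    #       def omit_failed_key(self) -> bool:
+    #           return True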
+
+
+class RedactAnnotatedSourceContext(_ambient_context.AmbientContextBase):
+ """When active, this context will redact annotated source lines, showing only the origin."""
+
+
+@dataclasses.dataclass(kw_only=True, frozen=True)
+class SourceContext:
+    origin: Origin | None
+ annotated_source_lines: list[str]
+ target_line: str | None
+
+ def __str__(self) -> str:
+ msg_lines = [f'Origin: {self.origin}']
+
+ if self.annotated_source_lines:
+ msg_lines.append('')
+ msg_lines.extend(self.annotated_source_lines)
+
+ return '\n'.join(msg_lines)
+
+ @classmethod
+ def from_value(cls, value: t.Any) -> SourceContext | None:
+ """Attempt to retrieve source and render a contextual indicator from the value's origin (if any)."""
+ if value is None:
+ return None
+
+ if isinstance(value, Origin):
+ origin = value
+ value = None
+ else:
+ origin = Origin.get_tag(value)
+
+ if RedactAnnotatedSourceContext.current(optional=True):
+ return cls.error('content redacted')
+
+ if origin and origin.path:
+ return cls.from_origin(origin)
+
+ if value is None:
+ truncated_value = None
+ annotated_source_lines = []
+ else:
+ # DTFIX-FUTURE: cleanup/share width
+ try:
+ value = str(value)
+ except Exception as ex:
+ value = f'<< context unavailable: {ex} >>'
+
+ truncated_value = textwrap.shorten(value, width=120)
+ annotated_source_lines = [truncated_value]
+
+ return SourceContext(
+ origin=origin or Origin.UNKNOWN,
+ annotated_source_lines=annotated_source_lines,
+ target_line=truncated_value,
+ )
+
+ @staticmethod
+ def error(message: str | None, origin: Origin | None = None) -> SourceContext:
+ return SourceContext(
+ origin=origin,
+ annotated_source_lines=[f'(source not shown: {message})'] if message else [],
+ target_line=None,
+ )
+
+ @classmethod
+ def from_origin(cls, origin: Origin) -> SourceContext:
+ """Attempt to retrieve source and render a contextual indicator of an error location."""
+ from ansible.parsing.vault import is_encrypted # avoid circular import
+
+ # DTFIX-FUTURE: support referencing the column after the end of the target line, so we can indicate where a missing character (quote) needs to be added
+ # this is also useful for cases like end-of-stream reported by the YAML parser
+
+ # DTFIX-FUTURE: Implement line wrapping and match annotated line width to the terminal display width.
+
+ context_line_count: t.Final = 2
+ max_annotated_line_width: t.Final = 120
+ truncation_marker: t.Final = '...'
+
+ target_line_num = origin.line_num
+
+ if RedactAnnotatedSourceContext.current(optional=True):
+ return cls.error('content redacted', origin)
+
+ if not target_line_num or target_line_num < 1:
+            return cls.error(None, origin)  # message omitted since the missing line number is evident from the rendered origin
+
+ start_line_idx = max(0, (target_line_num - 1) - context_line_count) # if near start of file
+ target_col_num = origin.col_num
+
+ try:
+ with pathlib.Path(origin.path).open() as src:
+ first_line = src.readline()
+ lines = list(itertools.islice(itertools.chain((first_line,), src), start_line_idx, target_line_num))
+ except Exception as ex:
+ return cls.error(type(ex).__name__, origin)
+
+ if is_encrypted(first_line):
+ return cls.error('content encrypted', origin)
+
+ if len(lines) != target_line_num - start_line_idx:
+ return cls.error('file truncated', origin)
+
+ annotated_source_lines = []
+
+ line_label_width = len(str(target_line_num))
+ max_src_line_len = max_annotated_line_width - line_label_width - 1
+
+ usable_line_len = max_src_line_len
+
+ for line_num, line in enumerate(lines, start_line_idx + 1):
+ line = line.rstrip('\n') # universal newline default mode on `open` ensures we'll never see anything but \n
+ line = line.replace('\t', ' ') # mixed tab/space handling is intentionally disabled since we're both format and display config agnostic
+
+ if len(line) > max_src_line_len:
+ line = line[: max_src_line_len - len(truncation_marker)] + truncation_marker
+ usable_line_len = max_src_line_len - len(truncation_marker)
+
+ annotated_source_lines.append(f'{str(line_num).rjust(line_label_width)}{" " if line else ""}{line}')
+
+ if target_col_num and usable_line_len >= target_col_num >= 1:
+ column_marker = f'column {target_col_num}'
+
+ target_col_idx = target_col_num - 1
+
+ if target_col_idx + 2 + len(column_marker) > max_src_line_len:
+ column_marker = f'{" " * (target_col_idx - len(column_marker) - 1)}{column_marker} ^'
+ else:
+ column_marker = f'{" " * target_col_idx}^ {column_marker}'
+
+ column_marker = f'{" " * line_label_width} {column_marker}'
+
+ annotated_source_lines.append(column_marker)
+ elif target_col_num is None:
+ underline_length = len(annotated_source_lines[-1]) - line_label_width - 1
+ annotated_source_lines.append(f'{" " * line_label_width} {"^" * underline_length}')
+
+ return SourceContext(
+ origin=origin,
+ annotated_source_lines=annotated_source_lines,
+ target_line=lines[-1].rstrip('\n'), # universal newline default mode on `open` ensures we'll never see anything but \n
+ )
+
+
+def format_exception_message(exception: BaseException) -> str:
+ """Return the full chain of exception messages by concatenating the cause(s) until all are exhausted."""
+ return _event_utils.format_event_brief_message(_error_factory.ControllerEventFactory.from_exception(exception, False))
+
+
+def result_dict_from_exception(exception: BaseException, accept_result_contribution: bool = False) -> dict[str, object]:
+ """Return a failed task result dict from the given exception."""
+ event = _error_factory.ControllerEventFactory.from_exception(exception, _traceback.is_traceback_enabled(_traceback.TracebackEvent.ERROR))
+
+ result: dict[str, object] = {}
+ omit_failed_key = False
+ omit_exception_key = False
+
+ if accept_result_contribution:
+ while exception:
+ if isinstance(exception, ContributesToTaskResult):
+ result = dict(exception.result_contribution)
+ omit_failed_key = exception.omit_failed_key
+ omit_exception_key = exception.omit_exception_key
+ break
+
+ exception = exception.__cause__
+
+ if omit_failed_key:
+ result.pop('failed', None)
+ else:
+ result.update(failed=True)
+
+ if omit_exception_key:
+ result.pop('exception', None)
+ else:
+ result.update(exception=_messages.ErrorSummary(event=event))
+
+ if 'msg' not in result:
+ # if nothing contributed `msg`, generate one from the exception messages
+ result.update(msg=_event_utils.format_event_brief_message(event))
+
+ return result
+
+
+def result_dict_from_captured_errors(
+ msg: str,
+ *,
+ errors: list[_messages.ErrorSummary] | None = None,
+) -> dict[str, object]:
+ """Return a failed task result dict from the given error message and captured errors."""
+ _skip_stackwalk = True
+
+ event = _messages.Event(
+ msg=msg,
+ formatted_traceback=_traceback.maybe_capture_traceback(msg, _traceback.TracebackEvent.ERROR),
+ events=tuple(error.event for error in errors) if errors else None,
+ )
+
+ result = dict(
+ failed=True,
+ exception=_messages.ErrorSummary(
+ event=event,
+ ),
+ msg=_event_utils.format_event_brief_message(event),
+ )
+
+ return result
diff --git a/lib/ansible/_internal/_errors/_handler.py b/lib/ansible/_internal/_errors/_handler.py
new file mode 100644
index 00000000000..360b0981cf1
--- /dev/null
+++ b/lib/ansible/_internal/_errors/_handler.py
@@ -0,0 +1,91 @@
+from __future__ import annotations
+
+import contextlib
+import enum
+import typing as t
+
+from ansible.utils.display import Display
+from ansible.constants import config
+
+display = Display()
+
+# FUTURE: add sanity test to detect use of skip_on_ignore without Skippable (and vice versa)
+
+
+class ErrorAction(enum.Enum):
+ """Action to take when an error is encountered."""
+
+ IGNORE = enum.auto()
+ WARNING = enum.auto()
+ ERROR = enum.auto()
+
+ @classmethod
+ def from_config(cls, setting: str, variables: dict[str, t.Any] | None = None) -> t.Self:
+ """Return an `ErrorAction` enum from the specified Ansible config setting."""
+ return cls[config.get_config_value(setting, variables=variables).upper()]
+
+
+class _SkipException(BaseException):
+ """Internal flow control exception for skipping code blocks within a `Skippable` context manager."""
+
+ def __init__(self) -> None:
+ super().__init__('Skipping ignored action due to use of `skip_on_ignore`. It is a bug to encounter this message outside of debugging.')
+
+
+class _SkippableContextManager:
+ """Internal context manager to support flow control for skipping code blocks."""
+
+ def __enter__(self) -> None:
+ pass
+
+ def __exit__(self, exc_type, _exc_val, _exc_tb) -> bool:
+ if exc_type is None:
+ raise RuntimeError('A `Skippable` context manager was entered, but a `skip_on_ignore` handler was never invoked.')
+
+ return exc_type is _SkipException # only mask a _SkipException, allow all others to raise
+
+
+Skippable = _SkippableContextManager()
+"""Context manager singleton required to enclose `ErrorHandler.handle` invocations when `skip_on_ignore` is `True`."""
+
+
+class ErrorHandler:
+ """
+ Provides a configurable error handler context manager for a specific list of exception types.
+ Unhandled errors leaving the context manager can be ignored, treated as warnings, or allowed to raise by setting `ErrorAction`.
+ """
+
+ def __init__(self, action: ErrorAction) -> None:
+ self.action = action
+
+ @contextlib.contextmanager
+ def handle(self, *args: type[BaseException], skip_on_ignore: bool = False) -> t.Iterator[None]:
+ """
+ Handle the specified exception(s) using the defined error action.
+ If `skip_on_ignore` is `True`, the body of the context manager will be skipped for `ErrorAction.IGNORE`.
+ Use of `skip_on_ignore` requires enclosure within the `Skippable` context manager.
+ """
+ if not args:
+ raise ValueError('At least one exception type is required.')
+
+ if skip_on_ignore and self.action == ErrorAction.IGNORE:
+ raise _SkipException() # skipping ignored action
+
+ try:
+ yield
+ except args as ex:
+ match self.action:
+ case ErrorAction.WARNING:
+ display.error_as_warning(msg=None, exception=ex)
+ case ErrorAction.ERROR:
+ raise
+ case _: # ErrorAction.IGNORE
+ pass
+
+ if skip_on_ignore:
+ raise _SkipException() # completed skippable action, ensures the `Skippable` context was used
+
+ @classmethod
+ def from_config(cls, setting: str, variables: dict[str, t.Any] | None = None) -> t.Self:
+ """Return an `ErrorHandler` instance configured using the specified Ansible config setting."""
+ return cls(ErrorAction.from_config(setting, variables=variables))
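+
+
+# Usage sketch (illustrative):
+#
+#   handler = ErrorHandler(ErrorAction.WARNING)
+#
+#   with Skippable, handler.handle(ValueError, skip_on_ignore=True):
+#       risky_operation()  # hypothetical; the body is skipped entirely under ErrorAction.IGNORE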
diff --git a/lib/ansible/_internal/_errors/_task_timeout.py b/lib/ansible/_internal/_errors/_task_timeout.py
new file mode 100644
index 00000000000..a4295d0e4d2
--- /dev/null
+++ b/lib/ansible/_internal/_errors/_task_timeout.py
@@ -0,0 +1,28 @@
+from __future__ import annotations
+
+from collections import abc as _c
+
+from ansible._internal._errors._alarm_timeout import AnsibleTimeoutError
+from ansible._internal._errors._error_utils import ContributesToTaskResult
+from ansible.module_utils.datatag import deprecate_value
+
+
+class TaskTimeoutError(AnsibleTimeoutError, ContributesToTaskResult):
+ """
+ A task-specific timeout.
+
+ This exception provides a result dictionary via the ContributesToTaskResult mixin.
+ """
+
+ @property
+ def result_contribution(self) -> _c.Mapping[str, object]:
+ help_text = "Configure `DISPLAY_TRACEBACK` to see a traceback on timeout errors."
+
+ frame = deprecate_value(
+ value=help_text,
+ msg="The `timedout.frame` task result key is deprecated.",
+ help_text=help_text,
+ version="2.23",
+ )
+
+ return dict(timedout=dict(frame=frame, period=self.timeout))
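+
+
+# The contributed task result data looks like (illustrative; `frame` carries the
+# deprecation-tagged help text, `period` the configured timeout):
+#
+#   {'timedout': {'frame': '<help text>', 'period': 10}}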
diff --git a/lib/ansible/_internal/_event_formatting.py b/lib/ansible/_internal/_event_formatting.py
new file mode 100644
index 00000000000..b06af1b3f20
--- /dev/null
+++ b/lib/ansible/_internal/_event_formatting.py
@@ -0,0 +1,127 @@
+from __future__ import annotations as _annotations
+
+import collections.abc as _c
+import textwrap as _textwrap
+
+from ansible.module_utils._internal import _event_utils, _messages
+
+
+def format_event(event: _messages.Event, include_traceback: bool) -> str:
+ """Format an event into a verbose message and traceback."""
+ msg = format_event_verbose_message(event)
+
+ if include_traceback:
+ msg += '\n' + format_event_traceback(event)
+
+ msg = msg.strip()
+
+ if '\n' in msg:
+ msg += '\n\n'
+ else:
+ msg += '\n'
+
+ return msg
+
+
+def format_event_traceback(event: _messages.Event) -> str:
+ """Format an event into a traceback."""
+ segments: list[str] = []
+
+ while event:
+ segment = event.formatted_traceback or '(traceback missing)\n'
+
+ if event.events:
+ child_tracebacks = [format_event_traceback(child) for child in event.events]
+ segment += _format_event_children("Sub-Traceback", child_tracebacks)
+
+ segments.append(segment)
+
+ if event.chain:
+ segments.append(f'\n{event.chain.traceback_reason}\n\n')
+
+ event = event.chain.event
+ else:
+ event = None
+
+ return ''.join(reversed(segments))
+
+
+def format_event_verbose_message(event: _messages.Event) -> str:
+ """
+ Format an event into a verbose message.
+ Help text, contextual information and sub-events will be included.
+ """
+ segments: list[str] = []
+ original_event = event
+
+ while event:
+ messages = [event.msg]
+ chain: _messages.EventChain = event.chain
+
+ while chain and chain.follow:
+ if chain.event.events:
+ break # do not collapse a chained event with sub-events, since they would be lost
+
+ if chain.event.formatted_source_context or chain.event.help_text:
+ if chain.event.formatted_source_context != event.formatted_source_context or chain.event.help_text != event.help_text:
+ break # do not collapse a chained event with different details, since they would be lost
+
+ if chain.event.chain and chain.msg_reason != chain.event.chain.msg_reason:
+ break # do not collapse a chained event which has a chain with a different msg_reason
+
+ messages.append(chain.event.msg)
+
+ chain = chain.event.chain
+
+ msg = _event_utils.deduplicate_message_parts(messages)
+ segment = '\n'.join(_get_message_lines(msg, event.help_text, event.formatted_source_context)) + '\n'
+
+ if event.events:
+ child_msgs = [format_event_verbose_message(child) for child in event.events]
+ segment += _format_event_children("Sub-Event", child_msgs)
+
+ segments.append(segment)
+
+ if chain and chain.follow:
+ segments.append(f'\n{chain.msg_reason}\n\n')
+
+ event = chain.event
+ else:
+ event = None
+
+ if len(segments) > 1:
+ segments.insert(0, _event_utils.format_event_brief_message(original_event) + '\n\n')
+
+ return ''.join(segments)
+
+
+def _format_event_children(label: str, children: _c.Iterable[str]) -> str:
+ """Format the given list of child messages into a single string."""
+ items = list(children)
+ count = len(items)
+ lines = ['\n']
+
+ for idx, item in enumerate(items):
+ lines.append(f'+--[ {label} {idx + 1} of {count} ]---\n')
+ lines.append(_textwrap.indent(f"\n{item}\n", "| ", lambda value: True))
+
+ lines.append(f'+--[ End {label} ]---\n')
+
+ return ''.join(lines)
+
+
+def _get_message_lines(message: str, help_text: str | None, formatted_source_context: str | None) -> list[str]:
+ """Return a list of message lines constructed from the given message, help text and formatted source context."""
+ if help_text and not formatted_source_context and '\n' not in message and '\n' not in help_text:
+ return [f'{message} {help_text}'] # prefer a single-line message with help text when there is no source context
+
+ message_lines = [message]
+
+ if formatted_source_context:
+ message_lines.append(formatted_source_context)
+
+ if help_text:
+ message_lines.append('')
+ message_lines.append(help_text)
+
+ return message_lines
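+
+
+# For example (illustrative): a single-line message with single-line help text and no
+# source context collapses to one line, e.g. ['Failed to parse. Check the syntax.'].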
diff --git a/lib/ansible/_internal/_json/__init__.py b/lib/ansible/_internal/_json/__init__.py
new file mode 100644
index 00000000000..94b53fcc8fa
--- /dev/null
+++ b/lib/ansible/_internal/_json/__init__.py
@@ -0,0 +1,214 @@
+"""Internal utilities for serialization and deserialization."""
+
+# DTFIX-FUTURE: most of this isn't JSON specific, find a better home
+
+from __future__ import annotations
+
+import enum
+import json
+import typing as t
+
+from ansible.errors import AnsibleVariableTypeError
+
+from ansible.module_utils._internal._datatag import (
+ _ANSIBLE_ALLOWED_MAPPING_VAR_TYPES,
+ _ANSIBLE_ALLOWED_NON_SCALAR_COLLECTION_VAR_TYPES,
+ _ANSIBLE_ALLOWED_VAR_TYPES,
+ _AnsibleTaggedStr,
+ AnsibleTagHelper,
+)
+from ansible.module_utils._internal._json._profiles import _tagless
+from ansible.parsing.vault import EncryptedString
+from ansible._internal._datatag._tags import Origin, TrustedAsTemplate
+from ansible._internal._templating import _transform
+from ansible.module_utils import _internal
+from ansible.module_utils._internal import _datatag
+
+_T = t.TypeVar('_T')
+_sentinel = object()
+
+
+class HasCurrent(t.Protocol):
+ """Utility protocol for mixin type safety."""
+
+ _current: t.Any
+
+
+class StateTrackingMixIn(HasCurrent):
+ """Mixin for use with `AnsibleVariableVisitor` to track current visitation context."""
+
+ def __init__(self, *args, **kwargs) -> None:
+ super().__init__(*args, **kwargs)
+
+ self._stack: list[t.Any] = []
+
+ def __enter__(self) -> None:
+ self._stack.append(self._current)
+
+ def __exit__(self, *_args, **_kwargs) -> None:
+ self._stack.pop()
+
+ def _get_stack(self) -> list[t.Any]:
+ if not self._stack:
+ return []
+
+ return self._stack[1:] + [self._current]
+
+
+class EncryptedStringBehavior(enum.Enum):
+ """How `AnsibleVariableVisitor` will handle instances of `EncryptedString`."""
+
+ PRESERVE = enum.auto()
+ """Preserves the unmodified `EncryptedString` instance."""
+ DECRYPT = enum.auto()
+ """Replaces the value with its decrypted plaintext."""
+ REDACT = enum.auto()
+ """Replaces the value with a placeholder string."""
+ FAIL = enum.auto()
+ """Raises an `AnsibleVariableTypeError` error."""
+
+
+class AnsibleVariableVisitor:
+ """Utility visitor base class to recursively apply various behaviors and checks to variable object graphs."""
+
+ def __init__(
+ self,
+ *,
+ trusted_as_template: bool = False,
+ origin: Origin | None = None,
+ convert_mapping_to_dict: bool = False,
+ convert_sequence_to_list: bool = False,
+ convert_custom_scalars: bool = False,
+ convert_to_native_values: bool = False,
+ apply_transforms: bool = False,
+ visit_keys: bool = False,
+ encrypted_string_behavior: EncryptedStringBehavior = EncryptedStringBehavior.DECRYPT,
+ ):
+ super().__init__() # supports StateTrackingMixIn
+
+ self.trusted_as_template = trusted_as_template
+ self.origin = origin
+ self.convert_mapping_to_dict = convert_mapping_to_dict
+ self.convert_sequence_to_list = convert_sequence_to_list
+ self.convert_custom_scalars = convert_custom_scalars
+ self.convert_to_native_values = convert_to_native_values
+ self.apply_transforms = apply_transforms
+ self.visit_keys = visit_keys
+ self.encrypted_string_behavior = encrypted_string_behavior
+
+ if apply_transforms:
+ from ansible._internal._templating import _engine
+
+ self._template_engine = _engine.TemplateEngine()
+ else:
+ self._template_engine = None
+
+ self._current: t.Any = None # supports StateTrackingMixIn
+
+ def __enter__(self) -> t.Any:
+ """No-op context manager dispatcher (delegates to mixin behavior if present)."""
+ if func := getattr(super(), '__enter__', None):
+ func()
+
+ def __exit__(self, *args, **kwargs) -> t.Any:
+ """No-op context manager dispatcher (delegates to mixin behavior if present)."""
+ if func := getattr(super(), '__exit__', None):
+ func(*args, **kwargs)
+
+ def visit(self, value: _T) -> _T:
+ """
+        Enforces Ansible's variable type system restrictions before a var is accepted (e.g., into inventory). Also conditionally implements template
+        trust compatibility, depending on the plugin's declared understanding (or lack thereof). Inputs are always recursively copied, fully isolating
+        the stored data from what the plugin provided and preventing any later mutation.
+ """
+ return self._visit(None, value)
+
+ def _early_visit(self, value, value_type) -> t.Any:
+ """Overridable hook point to allow custom string handling in derived visitors."""
+ if value_type in (str, _AnsibleTaggedStr):
+ # apply compatibility behavior
+ if self.trusted_as_template:
+ result = TrustedAsTemplate().tag(value)
+ else:
+ result = value
+ else:
+ result = _sentinel
+
+ return result
+
+ def _visit_key(self, key: t.Any) -> t.Any:
+ """Internal implementation to recursively visit a key if visit_keys is enabled."""
+ if not self.visit_keys:
+ return key
+
+ return self._visit(None, key) # key=None prevents state tracking from seeing the key as value
+
+ def _visit(self, key: t.Any, value: _T) -> _T:
+ """Internal implementation to recursively visit a data structure's contents."""
+ self._current = key # supports StateTrackingMixIn
+
+ value_type: type = type(value)
+
+ # handle EncryptedString conversion before more generic transformation and native conversions
+ if value_type is EncryptedString: # pylint: disable=unidiomatic-typecheck
+ match self.encrypted_string_behavior:
+ case EncryptedStringBehavior.DECRYPT:
+ value = str(value) # type: ignore[assignment]
+ value_type = str
+ case EncryptedStringBehavior.REDACT:
+ value = "" # type: ignore[assignment]
+ value_type = str
+ case EncryptedStringBehavior.FAIL:
+ raise AnsibleVariableTypeError.from_value(obj=value)
+ elif self.apply_transforms and value_type in _transform._type_transform_mapping:
+ value = self._template_engine.transform(value)
+ value_type = type(value)
+
+ if self.convert_to_native_values and isinstance(value, _datatag.AnsibleTaggedObject):
+ value = value._native_copy()
+ value_type = type(value)
+
+ result: _T
+
+ # DTFIX-FUTURE: Visitor generally ignores dict/mapping keys by default except for debugging and schema-aware checking.
+ # It could be checking keys destined for variable storage to apply more strict rules about key shape and type.
+
+ if (result := self._early_visit(value, value_type)) is not _sentinel:
+ pass
+ # DTFIX7: de-duplicate and optimize; extract inline generator expressions and fallback function or mapping for native type calculation?
+ elif value_type in _ANSIBLE_ALLOWED_MAPPING_VAR_TYPES: # check mappings first, because they're also collections
+ with self: # supports StateTrackingMixIn
+ result = AnsibleTagHelper.tag_copy(value, ((self._visit_key(k), self._visit(k, v)) for k, v in value.items()), value_type=value_type)
+ elif value_type in _ANSIBLE_ALLOWED_NON_SCALAR_COLLECTION_VAR_TYPES:
+ with self: # supports StateTrackingMixIn
+ result = AnsibleTagHelper.tag_copy(value, (self._visit(k, v) for k, v in enumerate(t.cast(t.Iterable, value))), value_type=value_type)
+ elif self.convert_mapping_to_dict and _internal.is_intermediate_mapping(value):
+ with self: # supports StateTrackingMixIn
+ result = {self._visit_key(k): self._visit(k, v) for k, v in value.items()} # type: ignore[assignment]
+ elif self.convert_sequence_to_list and _internal.is_intermediate_iterable(value):
+ with self: # supports StateTrackingMixIn
+ result = [self._visit(k, v) for k, v in enumerate(t.cast(t.Iterable, value))] # type: ignore[assignment]
+ elif self.convert_custom_scalars and isinstance(value, str):
+ result = str(value) # type: ignore[assignment]
+ elif self.convert_custom_scalars and isinstance(value, float):
+ result = float(value) # type: ignore[assignment]
+ elif self.convert_custom_scalars and isinstance(value, int) and not isinstance(value, bool):
+ result = int(value) # type: ignore[assignment]
+ elif value_type in _ANSIBLE_ALLOWED_VAR_TYPES:
+ # supported scalar type that requires no special handling, just return as-is
+ result = value
+ elif self.encrypted_string_behavior is EncryptedStringBehavior.PRESERVE and isinstance(value, EncryptedString):
+ result = value # type: ignore[assignment]
+ else:
+ raise AnsibleVariableTypeError.from_value(obj=value)
+
+ if self.origin and not Origin.is_tagged_on(result):
+ # apply shared instance default origin tag
+ result = self.origin.tag(result)
+
+ return result
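+
+
+# Usage sketch (illustrative):
+#
+#   visitor = AnsibleVariableVisitor(trusted_as_template=True, origin=Origin(description='example'))
+#   data = visitor.visit({'msg': '{{ greeting }}'})  # recursive copy; eligible strings are tagged trusted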
+
+
+def json_dumps_formatted(value: object) -> str:
+ """Return a JSON dump of `value` with formatting and keys sorted."""
+ return json.dumps(value, cls=_tagless.Encoder, sort_keys=True, indent=4)
diff --git a/lib/ansible/_internal/_json/_legacy_encoder.py b/lib/ansible/_internal/_json/_legacy_encoder.py
new file mode 100644
index 00000000000..431c245a1c9
--- /dev/null
+++ b/lib/ansible/_internal/_json/_legacy_encoder.py
@@ -0,0 +1,34 @@
+from __future__ import annotations as _annotations
+
+import typing as _t
+
+from ansible.module_utils._internal._json import _profiles
+from ansible._internal._json._profiles import _legacy
+from ansible.parsing import vault as _vault
+
+
+class LegacyControllerJSONEncoder(_legacy.Encoder):
+ """Compatibility wrapper over `legacy` profile JSON encoder to support trust stripping and vault value plaintext conversion."""
+
+ def __init__(self, preprocess_unsafe: bool = False, vault_to_text: bool = False, _decode_bytes: bool = False, **kwargs) -> None:
+ self._preprocess_unsafe = preprocess_unsafe
+ self._vault_to_text = vault_to_text
+ self._decode_bytes = _decode_bytes
+
+ super().__init__(**kwargs)
+
+ def default(self, o: _t.Any) -> _t.Any:
+ """Hooked default that can conditionally bypass base encoder behavior based on this instance's config."""
+ if type(o) is _profiles._WrappedValue: # pylint: disable=unidiomatic-typecheck
+ o = o.wrapped
+
+ if not self._preprocess_unsafe and type(o) is _legacy._Untrusted: # pylint: disable=unidiomatic-typecheck
+ return o.value # if not emitting unsafe markers, bypass custom unsafe serialization and just return the raw value
+
+ if self._vault_to_text and type(o) is _vault.EncryptedString: # pylint: disable=unidiomatic-typecheck
+ return str(o) # decrypt and return the plaintext (or fail trying)
+
+ if self._decode_bytes and isinstance(o, bytes):
+ return o.decode(errors='surrogateescape') # backward compatibility with `ansible.module_utils.basic.jsonify`
+
+ return super().default(o)
diff --git a/lib/ansible/_internal/_json/_profiles/__init__.py b/lib/ansible/_internal/_json/_profiles/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/lib/ansible/_internal/_json/_profiles/_cache_persistence.py b/lib/ansible/_internal/_json/_profiles/_cache_persistence.py
new file mode 100644
index 00000000000..6b76cb56d59
--- /dev/null
+++ b/lib/ansible/_internal/_json/_profiles/_cache_persistence.py
@@ -0,0 +1,57 @@
+from __future__ import annotations
+
+import datetime as _datetime
+
+from ansible.module_utils._internal import _datatag
+from ansible.module_utils._internal._json import _profiles
+from ansible.parsing import vault as _vault
+from ansible._internal._datatag import _tags
+
+
+class _Profile(_profiles._JSONSerializationProfile):
+ """Profile for external cache persistence of inventory/fact data that preserves most tags."""
+
+ serialize_map = {}
+ schema_id = 1
+
+ @classmethod
+ def post_init(cls, **kwargs):
+ cls.allowed_ansible_serializable_types = (
+ _profiles._common_module_types
+ | _profiles._common_module_response_types
+ | {
+ _datatag._AnsibleTaggedDate,
+ _datatag._AnsibleTaggedTime,
+ _datatag._AnsibleTaggedDateTime,
+ _datatag._AnsibleTaggedStr,
+ _datatag._AnsibleTaggedInt,
+ _datatag._AnsibleTaggedFloat,
+ _datatag._AnsibleTaggedList,
+ _datatag._AnsibleTaggedSet,
+ _datatag._AnsibleTaggedTuple,
+ _datatag._AnsibleTaggedDict,
+ _tags.SourceWasEncrypted,
+ _tags.Origin,
+ _tags.TrustedAsTemplate,
+ _vault.EncryptedString,
+ _vault.VaultedValue,
+ }
+ )
+
+ cls.serialize_map = {
+ set: cls.serialize_as_list,
+ tuple: cls.serialize_as_list,
+ _datetime.date: _datatag.AnsibleSerializableDate,
+ _datetime.time: _datatag.AnsibleSerializableTime,
+ _datetime.datetime: _datatag.AnsibleSerializableDateTime,
+ }
+
+ cls.handle_key = cls._handle_key_str_fallback # legacy stdlib-compatible key behavior
+
+
+class Encoder(_profiles.AnsibleProfileJSONEncoder):
+ _profile = _Profile
+
+
+class Decoder(_profiles.AnsibleProfileJSONDecoder):
+ _profile = _Profile
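+
+
+# Round-trip sketch (illustrative only; `facts` is a hypothetical tagged value): this profile
+# is driven through the stdlib `json` entry points.
+#
+#   import json
+#
+#   wire = json.dumps(facts, cls=Encoder)      # tags listed in post_init survive serialization
+#   restored = json.loads(wire, cls=Decoder)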
diff --git a/lib/ansible/_internal/_json/_profiles/_inventory_legacy.py b/lib/ansible/_internal/_json/_profiles/_inventory_legacy.py
new file mode 100644
index 00000000000..97941957fdf
--- /dev/null
+++ b/lib/ansible/_internal/_json/_profiles/_inventory_legacy.py
@@ -0,0 +1,40 @@
+"""
+Backwards compatibility profile for serialization for persisted ansible-inventory output.
+Behavior is equivalent to pre 2.19 `AnsibleJSONEncoder` with vault_to_text=True.
+"""
+
+from __future__ import annotations
+
+from ... import _json
+from . import _legacy
+
+
+class _InventoryVariableVisitor(_legacy._LegacyVariableVisitor, _json.StateTrackingMixIn):
+ """State-tracking visitor implementation that only applies trust to `_meta.hostvars` and `vars` inventory values."""
+
+ # DTFIX5: does the variable visitor need to support conversion of sequence/mapping for inventory?
+
+ @property
+ def _allow_trust(self) -> bool:
+ stack = self._get_stack()
+
+ if len(stack) >= 4 and stack[:2] == ['_meta', 'hostvars']:
+ return True
+
+ if len(stack) >= 3 and stack[1] == 'vars':
+ return True
+
+ return False
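+
+    # Illustrative key stacks for the checks above (host/group/variable names are hypothetical):
+    #   ['_meta', 'hostvars', 'web1', 'some_var']  -> trusted
+    #   ['webservers', 'vars', 'some_var']         -> trusted
+    #   ['webservers', 'hosts', 'web1']            -> not trusted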
+
+
+class _Profile(_legacy._Profile):
+ visitor_type = _InventoryVariableVisitor
+ encode_strings_as_utf8 = True
+
+
+class Encoder(_legacy.Encoder):
+ _profile = _Profile
+
+
+class Decoder(_legacy.Decoder):
+ _profile = _Profile
diff --git a/lib/ansible/_internal/_json/_profiles/_legacy.py b/lib/ansible/_internal/_json/_profiles/_legacy.py
new file mode 100644
index 00000000000..4c2f36a2c16
--- /dev/null
+++ b/lib/ansible/_internal/_json/_profiles/_legacy.py
@@ -0,0 +1,189 @@
+"""
+Backwards compatibility profile for serialization other than inventory (which should use inventory_legacy for backward-compatible trust behavior).
+Behavior is equivalent to pre 2.19 `AnsibleJSONEncoder` with vault_to_text=True.
+"""
+
+from __future__ import annotations as _annotations
+
+import datetime as _datetime
+import typing as _t
+
+from ansible._internal import _json
+from ansible._internal._datatag import _tags
+from ansible.module_utils._internal import _datatag
+from ansible.module_utils._internal._json import _profiles
+from ansible.parsing import vault as _vault
+
+
+class _Untrusted:
+ """
+ Temporarily wraps strings which are not trusted for templating.
+ Used before serialization of strings not tagged TrustedAsTemplate when trust inversion is enabled and trust is allowed in the string's context.
+ Used during deserialization of `__ansible_unsafe` strings to indicate they should not be tagged TrustedAsTemplate.
+ """
+
+ __slots__ = ('value',)
+
+ def __init__(self, value: str) -> None:
+ self.value = value
+
+
+class _LegacyVariableVisitor(_json.AnsibleVariableVisitor):
+ """Variable visitor that supports optional trust inversion for legacy serialization."""
+
+ def __init__(
+ self,
+ *,
+ trusted_as_template: bool = False,
+ invert_trust: bool = False,
+ origin: _tags.Origin | None = None,
+ convert_mapping_to_dict: bool = False,
+ convert_sequence_to_list: bool = False,
+ convert_custom_scalars: bool = False,
+ ):
+ super().__init__(
+ trusted_as_template=trusted_as_template,
+ origin=origin,
+ convert_mapping_to_dict=convert_mapping_to_dict,
+ convert_sequence_to_list=convert_sequence_to_list,
+ convert_custom_scalars=convert_custom_scalars,
+ encrypted_string_behavior=_json.EncryptedStringBehavior.PRESERVE,
+ )
+
+ self.invert_trust = invert_trust
+
+ if trusted_as_template and invert_trust:
+ raise ValueError('trusted_as_template is mutually exclusive with invert_trust')
+
+ @property
+ def _allow_trust(self) -> bool:
+ """
+ This profile supports trust application in all contexts.
+ Derived implementations can override this behavior for application-dependent/schema-aware trust.
+ """
+ return True
+
+ def _early_visit(self, value, value_type) -> _t.Any:
+ """Similar to base implementation, but supports an intermediate wrapper for trust inversion."""
+ if value_type in (str, _datatag._AnsibleTaggedStr):
+ # apply compatibility behavior
+ if self.trusted_as_template and self._allow_trust:
+ result = _tags.TrustedAsTemplate().tag(value)
+ elif self.invert_trust and not _tags.TrustedAsTemplate.is_tagged_on(value) and self._allow_trust:
+ result = _Untrusted(value)
+ else:
+ result = value
+ elif value_type is _Untrusted:
+ result = value.value
+ else:
+ result = _json._sentinel
+
+ return result
+
+
+class _Profile(_profiles._JSONSerializationProfile["Encoder", "Decoder"]):
+ visitor_type = _LegacyVariableVisitor
+
+ @classmethod
+ def serialize_untrusted(cls, value: _Untrusted) -> dict[str, str] | str:
+ return dict(
+ __ansible_unsafe=_datatag.AnsibleTagHelper.untag(value.value),
+ )
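+
+    # On-the-wire shapes handled by this profile (ciphertext abbreviated for illustration):
+    #   untrusted string: {"__ansible_unsafe": "{{ some_value }}"}
+    #   vaulted value:    {"__ansible_vault": "$ANSIBLE_VAULT;1.1;AES256\n..."}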
+
+ @classmethod
+ def serialize_tagged_str(cls, value: _datatag.AnsibleTaggedObject) -> _t.Any:
+ if ciphertext := _vault.VaultHelper.get_ciphertext(value, with_tags=False):
+ return dict(
+ __ansible_vault=ciphertext,
+ )
+
+ return _datatag.AnsibleTagHelper.untag(value)
+
+ @classmethod
+ def deserialize_unsafe(cls, value: dict[str, _t.Any]) -> _Untrusted:
+ ansible_unsafe = value['__ansible_unsafe']
+
+ if type(ansible_unsafe) is not str: # pylint: disable=unidiomatic-typecheck
+ raise TypeError(f"__ansible_unsafe is {type(ansible_unsafe)} not {str}")
+
+ return _Untrusted(ansible_unsafe)
+
+ @classmethod
+ def deserialize_vault(cls, value: dict[str, _t.Any]) -> _vault.EncryptedString:
+ ansible_vault = value['__ansible_vault']
+
+ if type(ansible_vault) is not str: # pylint: disable=unidiomatic-typecheck
+ raise TypeError(f"__ansible_vault is {type(ansible_vault)} not {str}")
+
+ encrypted_string = _vault.EncryptedString(ciphertext=ansible_vault)
+
+ return encrypted_string
+
+ @classmethod
+ def serialize_encrypted_string(cls, value: _vault.EncryptedString) -> dict[str, str]:
+ return dict(
+ __ansible_vault=_vault.VaultHelper.get_ciphertext(value, with_tags=False),
+ )
+
+ @classmethod
+ def post_init(cls) -> None:
+ cls.serialize_map = {
+ set: cls.serialize_as_list,
+ tuple: cls.serialize_as_list,
+ _datetime.date: cls.serialize_as_isoformat, # existing devel behavior
+ _datetime.time: cls.serialize_as_isoformat, # always failed pre-2.18, so okay to include for consistency
+ _datetime.datetime: cls.serialize_as_isoformat, # existing devel behavior
+ _datatag._AnsibleTaggedDate: cls.discard_tags,
+ _datatag._AnsibleTaggedTime: cls.discard_tags,
+ _datatag._AnsibleTaggedDateTime: cls.discard_tags,
+ _vault.EncryptedString: cls.serialize_encrypted_string,
+ _datatag._AnsibleTaggedStr: cls.serialize_tagged_str, # for VaultedValue tagged str
+ _datatag._AnsibleTaggedInt: cls.discard_tags,
+ _datatag._AnsibleTaggedFloat: cls.discard_tags,
+ _datatag._AnsibleTaggedList: cls.discard_tags,
+ _datatag._AnsibleTaggedSet: cls.discard_tags,
+ _datatag._AnsibleTaggedTuple: cls.discard_tags,
+ _datatag._AnsibleTaggedDict: cls.discard_tags,
+ _Untrusted: cls.serialize_untrusted, # equivalent to AnsibleJSONEncoder(preprocess_unsafe=True) in devel
+ }
+
+ cls.deserialize_map = {
+ '__ansible_unsafe': cls.deserialize_unsafe,
+ '__ansible_vault': cls.deserialize_vault,
+ }
+
+ cls.handle_key = cls._handle_key_str_fallback # type: ignore[method-assign] # legacy stdlib-compatible key behavior
+
+ @classmethod
+ def pre_serialize(cls, encoder: Encoder, o: _t.Any) -> _t.Any:
+ # DTFIX7: these conversion args probably aren't needed
+ avv = cls.visitor_type(invert_trust=True, convert_mapping_to_dict=True, convert_sequence_to_list=True, convert_custom_scalars=True)
+
+ return avv.visit(o)
+
+ @classmethod
+ def post_deserialize(cls, decoder: Decoder, o: _t.Any) -> _t.Any:
+ avv = cls.visitor_type(trusted_as_template=decoder._trusted_as_template, origin=decoder._origin)
+
+ return avv.visit(o)
+
+
+class Encoder(_profiles.AnsibleProfileJSONEncoder):
+ _profile = _Profile
+
+
+class Decoder(_profiles.AnsibleProfileJSONDecoder):
+ _profile = _Profile
+
+ def __init__(self, **kwargs) -> None:
+ super().__init__(**kwargs)
+
+    # NB: these can only be sampled properly when loading strings, e.g. via `json.loads`; the global `json.load` function does not expose the file-like object to us
+ self._origin: _tags.Origin | None = None
+ self._trusted_as_template: bool = False
+
+ def raw_decode(self, s: str, idx: int = 0) -> tuple[_t.Any, int]:
+ self._origin = _tags.Origin.get_tag(s)
+ self._trusted_as_template = _tags.TrustedAsTemplate.is_tagged_on(s)
+
+ return super().raw_decode(s, idx)
diff --git a/lib/ansible/_internal/_locking.py b/lib/ansible/_internal/_locking.py
new file mode 100644
index 00000000000..1b04fa37c82
--- /dev/null
+++ b/lib/ansible/_internal/_locking.py
@@ -0,0 +1,21 @@
+from __future__ import annotations
+
+import contextlib
+import fcntl
+import typing as t
+
+
+@contextlib.contextmanager
+def named_mutex(path: str) -> t.Iterator[None]:
+ """
+ Lightweight context manager wrapper over `fcntl.flock` to provide IPC locking via a shared filename.
+ Entering the context manager blocks until the lock is acquired.
+ The lock file will be created automatically, but creation of the parent directory and deletion of the lockfile are the caller's responsibility.
+ """
+ with open(path, 'a') as file:
+ fcntl.flock(file, fcntl.LOCK_EX)
+
+ try:
+ yield
+ finally:
+ fcntl.flock(file, fcntl.LOCK_UN)
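+
+
+# Usage sketch (illustrative; the lock path is hypothetical and its parent directory must exist):
+#
+#   with named_mutex('/tmp/ansible-demo.lock'):
+#       ...  # only one process holding the flock on this path runs here at a time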
diff --git a/lib/ansible/_internal/_plugins/__init__.py b/lib/ansible/_internal/_plugins/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/lib/ansible/_internal/_plugins/_cache.py b/lib/ansible/_internal/_plugins/_cache.py
new file mode 100644
index 00000000000..463b0a8ed66
--- /dev/null
+++ b/lib/ansible/_internal/_plugins/_cache.py
@@ -0,0 +1,57 @@
+from __future__ import annotations
+
+import functools
+import json
+import json.encoder
+import json.decoder
+import typing as t
+
+from .._wrapt import ObjectProxy
+from .._json._profiles import _cache_persistence
+
+
+class PluginInterposer(ObjectProxy):
+ """Proxies a Cache plugin instance to implement transparent encapsulation of serialized Ansible internal data types."""
+
+ _PAYLOAD_KEY = '__payload__'
+ """The key used to store the serialized payload."""
+
+ def get(self, key: str) -> dict[str, object]:
+ return self._decode(self.__wrapped__.get(self._get_key(key)))
+
+ def set(self, key: str, value: dict[str, object]) -> None:
+ self.__wrapped__.set(self._get_key(key), self._encode(value))
+
+ def keys(self) -> t.Sequence[str]:
+ return [k for k in (self._restore_key(k) for k in self.__wrapped__.keys()) if k is not None]
+
+ def contains(self, key: t.Any) -> bool:
+ return self.__wrapped__.contains(self._get_key(key))
+
+ def delete(self, key: str) -> None:
+ self.__wrapped__.delete(self._get_key(key))
+
+ @classmethod
+ def _restore_key(cls, wrapped_key: str) -> str | None:
+ prefix = cls._get_wrapped_key_prefix()
+
+ if not wrapped_key.startswith(prefix):
+ return None
+
+ return wrapped_key[len(prefix) :]
+
+ @classmethod
+ @functools.cache
+ def _get_wrapped_key_prefix(cls) -> str:
+ return f's{_cache_persistence._Profile.schema_id}_'
+
+ @classmethod
+ def _get_key(cls, key: str) -> str:
+ """Augment the supplied key with a schema identifier to allow for side-by-side caching across incompatible schemas."""
+ return f'{cls._get_wrapped_key_prefix()}{key}'
+
+ def _encode(self, value: dict[str, object]) -> dict[str, object]:
+ return {self._PAYLOAD_KEY: json.dumps(value, cls=_cache_persistence.Encoder)}
+
+ def _decode(self, value: dict[str, t.Any]) -> dict[str, object]:
+ return json.loads(value[self._PAYLOAD_KEY], cls=_cache_persistence.Decoder)
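+
+
+# Encoding sketch (illustrative, given schema_id == 1 in the profile above):
+#
+#   PluginInterposer._get_key('web1')  ->  's1_web1'
+#   set('web1', facts) stores {'__payload__': '<JSON string from the cache_persistence Encoder>'}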
diff --git a/lib/ansible/_internal/_ssh/__init__.py b/lib/ansible/_internal/_ssh/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/lib/ansible/_internal/_ssh/_agent_launch.py b/lib/ansible/_internal/_ssh/_agent_launch.py
new file mode 100644
index 00000000000..3c2ddf59437
--- /dev/null
+++ b/lib/ansible/_internal/_ssh/_agent_launch.py
@@ -0,0 +1,91 @@
+from __future__ import annotations
+
+import atexit
+import os
+import subprocess
+
+from ansible import constants as C
+from ansible._internal._errors import _alarm_timeout
+from ansible._internal._ssh._ssh_agent import SshAgentClient
+from ansible.cli import display
+from ansible.errors import AnsibleError
+from ansible.module_utils.common.process import get_bin_path
+
+_SSH_AGENT_STDOUT_READ_TIMEOUT = 5 # seconds
+
+
+def launch_ssh_agent() -> None:
+ """If configured via `SSH_AGENT`, launch an ssh-agent for Ansible's use and/or verify access to an existing one."""
+ try:
+ _launch_ssh_agent()
+ except Exception as ex:
+ raise AnsibleError("Failed to launch ssh agent.") from ex
+
+
+def _launch_ssh_agent() -> None:
+ ssh_agent_cfg = C.config.get_config_value('SSH_AGENT')
+
+ match ssh_agent_cfg:
+ case 'none':
+ display.debug('SSH_AGENT set to none')
+ return
+ case 'auto':
+ try:
+ ssh_agent_bin = get_bin_path(C.config.get_config_value('SSH_AGENT_EXECUTABLE'))
+ except ValueError as e:
+ raise AnsibleError('SSH_AGENT set to auto, but cannot find ssh-agent binary.') from e
+
+ ssh_agent_dir = os.path.join(C.DEFAULT_LOCAL_TMP, 'ssh_agent')
+ os.mkdir(ssh_agent_dir, 0o700)
+ sock = os.path.join(ssh_agent_dir, 'agent.sock')
+ display.vvv('SSH_AGENT: starting...')
+
+ try:
+ p = subprocess.Popen(
+ [ssh_agent_bin, '-D', '-s', '-a', sock],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ text=True,
+ )
+ except OSError as e:
+ raise AnsibleError('Could not start ssh-agent.') from e
+
+ atexit.register(p.terminate)
+
+ help_text = f'The ssh-agent {ssh_agent_bin!r} might be an incompatible agent.'
+ expected_stdout = 'SSH_AUTH_SOCK'
+
+ try:
+ with _alarm_timeout.AnsibleTimeoutError.alarm_timeout(_SSH_AGENT_STDOUT_READ_TIMEOUT):
+ stdout = p.stdout.read(len(expected_stdout))
+ except _alarm_timeout.AnsibleTimeoutError as e:
+ display.error_as_warning(
+ msg=f'Timed out waiting for expected stdout {expected_stdout!r} from ssh-agent.',
+ exception=e,
+ help_text=help_text,
+ )
+ else:
+ if stdout != expected_stdout:
+ display.warning(
+ msg=f'The ssh-agent output {stdout!r} did not match expected {expected_stdout!r}.',
+ help_text=help_text,
+ )
+
+ if p.poll() is not None:
+ raise AnsibleError(
+ message='The ssh-agent terminated prematurely.',
+ help_text=f'{help_text}\n\nReturn Code: {p.returncode}\nStandard Error:\n{p.stderr.read()}',
+ )
+
+ display.vvv(f'SSH_AGENT: ssh-agent[{p.pid}] started and bound to {sock}')
+ case _:
+ sock = ssh_agent_cfg
+
+ try:
+ with SshAgentClient(sock) as client:
+ client.list()
+ except Exception as e:
+ raise AnsibleError(f'Could not communicate with ssh-agent using auth sock {sock!r}.') from e
+
+ os.environ['SSH_AUTH_SOCK'] = os.environ['ANSIBLE_SSH_AGENT'] = sock
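+
+
+# Recap of accepted SSH_AGENT values, per the match above: 'none' disables agent handling,
+# 'auto' spawns a private agent under DEFAULT_LOCAL_TMP, and any other value is treated as the
+# path to an existing auth socket; spawned and preexisting sockets alike are verified via list().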
diff --git a/lib/ansible/_internal/_ssh/_ssh_agent.py b/lib/ansible/_internal/_ssh/_ssh_agent.py
new file mode 100644
index 00000000000..345284d1b69
--- /dev/null
+++ b/lib/ansible/_internal/_ssh/_ssh_agent.py
@@ -0,0 +1,619 @@
+# Copyright: Contributors to the Ansible project
+# BSD 3 Clause License (see licenses/BSD-3-Clause.txt or https://opensource.org/license/bsd-3-clause/)
+
+from __future__ import annotations
+
+import binascii
+import copy
+import dataclasses
+import enum
+import functools
+import hashlib
+import socket
+import types
+import typing as t
+
+try:
+ from cryptography.hazmat.primitives import serialization
+ from cryptography.hazmat.primitives.asymmetric.dsa import (
+ DSAParameterNumbers,
+ DSAPrivateKey,
+ DSAPublicKey,
+ DSAPublicNumbers,
+ )
+ from cryptography.hazmat.primitives.asymmetric.ec import (
+ EllipticCurve,
+ EllipticCurvePrivateKey,
+ EllipticCurvePublicKey,
+ SECP256R1,
+ SECP384R1,
+ SECP521R1,
+ )
+ from cryptography.hazmat.primitives.asymmetric.ed25519 import (
+ Ed25519PrivateKey,
+ Ed25519PublicKey,
+ )
+ from cryptography.hazmat.primitives.asymmetric.rsa import (
+ RSAPrivateKey,
+ RSAPublicKey,
+ RSAPublicNumbers,
+ )
+except ImportError:
+ HAS_CRYPTOGRAPHY = False
+else:
+ HAS_CRYPTOGRAPHY = True
+
+ CryptoPublicKey = t.Union[
+ DSAPublicKey,
+ EllipticCurvePublicKey,
+ Ed25519PublicKey,
+ RSAPublicKey,
+ ]
+
+ CryptoPrivateKey = t.Union[
+ DSAPrivateKey,
+ EllipticCurvePrivateKey,
+ Ed25519PrivateKey,
+ RSAPrivateKey,
+ ]
+
+
+if t.TYPE_CHECKING:
+ from cryptography.hazmat.primitives.asymmetric.dsa import DSAPrivateNumbers
+ from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePrivateNumbers
+ from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateNumbers
+
+
+_SSH_AGENT_CLIENT_SOCKET_TIMEOUT = 10
+
+
+class ProtocolMsgNumbers(enum.IntEnum):
+ # Responses
+ SSH_AGENT_FAILURE = 5
+ SSH_AGENT_SUCCESS = 6
+ SSH_AGENT_IDENTITIES_ANSWER = 12
+ SSH_AGENT_SIGN_RESPONSE = 14
+ SSH_AGENT_EXTENSION_FAILURE = 28
+ SSH_AGENT_EXTENSION_RESPONSE = 29
+
+ # Constraints
+ SSH_AGENT_CONSTRAIN_LIFETIME = 1
+ SSH_AGENT_CONSTRAIN_CONFIRM = 2
+ SSH_AGENT_CONSTRAIN_EXTENSION = 255
+
+ # Requests
+ SSH_AGENTC_REQUEST_IDENTITIES = 11
+ SSH_AGENTC_SIGN_REQUEST = 13
+ SSH_AGENTC_ADD_IDENTITY = 17
+ SSH_AGENTC_REMOVE_IDENTITY = 18
+ SSH_AGENTC_REMOVE_ALL_IDENTITIES = 19
+ SSH_AGENTC_ADD_SMARTCARD_KEY = 20
+ SSH_AGENTC_REMOVE_SMARTCARD_KEY = 21
+ SSH_AGENTC_LOCK = 22
+ SSH_AGENTC_UNLOCK = 23
+ SSH_AGENTC_ADD_ID_CONSTRAINED = 25
+ SSH_AGENTC_ADD_SMARTCARD_KEY_CONSTRAINED = 26
+ SSH_AGENTC_EXTENSION = 27
+
+ def to_blob(self) -> bytes:
+ return bytes([self])
+
+
+class SshAgentFailure(RuntimeError):
+ """Server failure or unexpected response."""
+
+
+# NOTE: Classes below somewhat represent "Data Type Representations Used in the SSH Protocols"
+# as specified by RFC4251
+
+
+@t.runtime_checkable
+class SupportsToBlob(t.Protocol):
+ def to_blob(self) -> bytes: ...
+
+
+@t.runtime_checkable
+class SupportsFromBlob(t.Protocol):
+ @classmethod
+ def from_blob(cls, blob: memoryview | bytes) -> t.Self: ...
+
+ @classmethod
+ def consume_from_blob(cls, blob: memoryview | bytes) -> tuple[t.Self, memoryview | bytes]: ...
+
+
+def _split_blob(blob: memoryview | bytes, length: int) -> tuple[memoryview | bytes, memoryview | bytes]:
+ if len(blob) < length:
+ raise ValueError("_split_blob: unexpected data length")
+ return blob[:length], blob[length:]
+
+
+class VariableSized:
+ @classmethod
+ def from_blob(cls, blob: memoryview | bytes) -> t.Self:
+ raise NotImplementedError
+
+ @classmethod
+ def consume_from_blob(cls, blob: memoryview | bytes) -> tuple[t.Self, memoryview | bytes]:
+ length = uint32.from_blob(blob[:4])
+ blob = blob[4:]
+ data, rest = _split_blob(blob, length)
+ return cls.from_blob(data), rest
+
+
+class uint32(int):
+ def to_blob(self) -> bytes:
+ return self.to_bytes(length=4, byteorder='big')
+
+ @classmethod
+ def from_blob(cls, blob: memoryview | bytes) -> t.Self:
+ return cls.from_bytes(blob, byteorder='big')
+
+ @classmethod
+ def consume_from_blob(cls, blob: memoryview | bytes) -> tuple[t.Self, memoryview | bytes]:
+ length = uint32(4)
+ data, rest = _split_blob(blob, length)
+ return cls.from_blob(data), rest
+
+
+class mpint(int, VariableSized):
+ def to_blob(self) -> bytes:
+ if self < 0:
+ raise ValueError("negative mpint not allowed")
+ if not self:
+ return b""
+ nbytes = (self.bit_length() + 8) // 8
+ ret = bytearray(self.to_bytes(length=nbytes, byteorder='big'))
+ ret[:0] = uint32(len(ret)).to_blob()
+ return ret
+
+ @classmethod
+ def from_blob(cls, blob: memoryview | bytes) -> t.Self:
+ if blob and blob[0] > 127:
+ raise ValueError("Invalid data")
+ return cls.from_bytes(blob, byteorder='big')
+
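+# Wire-format examples derived from the primitives above (RFC 4251 framing):
+#
+#   uint32(1).to_blob()  == b'\x00\x00\x00\x01'
+#   mpint(127).to_blob() == b'\x00\x00\x00\x01\x7f'
+#   mpint(255).to_blob() == b'\x00\x00\x00\x02\x00\xff'  # leading zero byte keeps the value positive
+#   mpint(0).to_blob()   == b''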
+
+class constraints(bytes):
+ def to_blob(self) -> bytes:
+ return self
+
+
+class binary_string(bytes, VariableSized):
+ def to_blob(self) -> bytes:
+ if length := len(self):
+ return uint32(length).to_blob() + self
+ else:
+ return b""
+
+ @classmethod
+ def from_blob(cls, blob: memoryview | bytes) -> t.Self:
+ return cls(blob)
+
+
+class unicode_string(str, VariableSized):
+ def to_blob(self) -> bytes:
+ val = self.encode('utf-8')
+ if length := len(val):
+ return uint32(length).to_blob() + val
+ else:
+ return b""
+
+ @classmethod
+ def from_blob(cls, blob: memoryview | bytes) -> t.Self:
+ return cls(bytes(blob).decode('utf-8'))
+
+
+class KeyAlgo(str, VariableSized, enum.Enum):
+ RSA = "ssh-rsa"
+ DSA = "ssh-dss"
+ ECDSA256 = "ecdsa-sha2-nistp256"
+ SKECDSA256 = "sk-ecdsa-sha2-nistp256@openssh.com"
+ ECDSA384 = "ecdsa-sha2-nistp384"
+ ECDSA521 = "ecdsa-sha2-nistp521"
+ ED25519 = "ssh-ed25519"
+ SKED25519 = "sk-ssh-ed25519@openssh.com"
+ RSASHA256 = "rsa-sha2-256"
+ RSASHA512 = "rsa-sha2-512"
+
+ @property
+ def main_type(self) -> str:
+ match self:
+ case self.RSA:
+ return 'RSA'
+ case self.DSA:
+ return 'DSA'
+ case self.ECDSA256 | self.ECDSA384 | self.ECDSA521:
+ return 'ECDSA'
+ case self.ED25519:
+ return 'ED25519'
+ case _:
+ raise NotImplementedError(self.name)
+
+ def to_blob(self) -> bytes:
+ b_self = self.encode('utf-8')
+ return uint32(len(b_self)).to_blob() + b_self
+
+ @classmethod
+ def from_blob(cls, blob: memoryview | bytes) -> t.Self:
+ return cls(bytes(blob).decode('utf-8'))
+
+
+if HAS_CRYPTOGRAPHY:
+ _ECDSA_KEY_TYPE: dict[KeyAlgo, type[EllipticCurve]] = {
+ KeyAlgo.ECDSA256: SECP256R1,
+ KeyAlgo.ECDSA384: SECP384R1,
+ KeyAlgo.ECDSA521: SECP521R1,
+ }
+
+
+@dataclasses.dataclass
+class Msg:
+ def to_blob(self) -> bytes:
+ rv = bytearray()
+ for field in dataclasses.fields(self):
+ fv = getattr(self, field.name)
+ if isinstance(fv, SupportsToBlob):
+ rv.extend(fv.to_blob())
+ else:
+ raise NotImplementedError(field.type)
+ return rv
+
+ @classmethod
+ def from_blob(cls, blob: memoryview | bytes) -> t.Self:
+ args: list[t.Any] = []
+ for _field_name, field_type in t.get_type_hints(cls).items():
+ if isinstance(field_type, SupportsFromBlob):
+ fv, blob = field_type.consume_from_blob(blob)
+ args.append(fv)
+ else:
+ raise NotImplementedError(str(field_type))
+ return cls(*args)
+
+
+@dataclasses.dataclass
+class PrivateKeyMsg(Msg):
+ @staticmethod
+ def from_private_key(private_key: CryptoPrivateKey) -> PrivateKeyMsg:
+ match private_key:
+ case RSAPrivateKey():
+ rsa_pn: RSAPrivateNumbers = private_key.private_numbers()
+ return RSAPrivateKeyMsg(
+ KeyAlgo.RSA,
+ mpint(rsa_pn.public_numbers.n),
+ mpint(rsa_pn.public_numbers.e),
+ mpint(rsa_pn.d),
+ mpint(rsa_pn.iqmp),
+ mpint(rsa_pn.p),
+ mpint(rsa_pn.q),
+ )
+ case DSAPrivateKey():
+ dsa_pn: DSAPrivateNumbers = private_key.private_numbers()
+ return DSAPrivateKeyMsg(
+ KeyAlgo.DSA,
+ mpint(dsa_pn.public_numbers.parameter_numbers.p),
+ mpint(dsa_pn.public_numbers.parameter_numbers.q),
+ mpint(dsa_pn.public_numbers.parameter_numbers.g),
+ mpint(dsa_pn.public_numbers.y),
+ mpint(dsa_pn.x),
+ )
+ case EllipticCurvePrivateKey():
+ ecdsa_pn: EllipticCurvePrivateNumbers = private_key.private_numbers()
+ key_size = private_key.key_size
+ return EcdsaPrivateKeyMsg(
+ getattr(KeyAlgo, f'ECDSA{key_size}'),
+ unicode_string(f'nistp{key_size}'),
+ binary_string(
+ private_key.public_key().public_bytes(
+ encoding=serialization.Encoding.X962,
+ format=serialization.PublicFormat.UncompressedPoint,
+ )
+ ),
+ mpint(ecdsa_pn.private_value),
+ )
+ case Ed25519PrivateKey():
+ public_bytes = private_key.public_key().public_bytes(
+ encoding=serialization.Encoding.Raw,
+ format=serialization.PublicFormat.Raw,
+ )
+ private_bytes = private_key.private_bytes(
+ encoding=serialization.Encoding.Raw,
+ format=serialization.PrivateFormat.Raw,
+ encryption_algorithm=serialization.NoEncryption(),
+ )
+ return Ed25519PrivateKeyMsg(
+ KeyAlgo.ED25519,
+ binary_string(public_bytes),
+ binary_string(private_bytes + public_bytes),
+ )
+ case _:
+ raise NotImplementedError(private_key)
+
+
+@dataclasses.dataclass(order=True, slots=True)
+class RSAPrivateKeyMsg(PrivateKeyMsg):
+ type: KeyAlgo
+ n: mpint
+ e: mpint
+ d: mpint
+ iqmp: mpint
+ p: mpint
+ q: mpint
+ comments: unicode_string = dataclasses.field(default=unicode_string(''), compare=False)
+ constraints: constraints = dataclasses.field(default=constraints(b''))
+
+
+@dataclasses.dataclass(order=True, slots=True)
+class DSAPrivateKeyMsg(PrivateKeyMsg):
+ type: KeyAlgo
+ p: mpint
+ q: mpint
+ g: mpint
+ y: mpint
+ x: mpint
+ comments: unicode_string = dataclasses.field(default=unicode_string(''), compare=False)
+ constraints: constraints = dataclasses.field(default=constraints(b''))
+
+
+@dataclasses.dataclass(order=True, slots=True)
+class EcdsaPrivateKeyMsg(PrivateKeyMsg):
+ type: KeyAlgo
+ ecdsa_curve_name: unicode_string
+ Q: binary_string
+ d: mpint
+ comments: unicode_string = dataclasses.field(default=unicode_string(''), compare=False)
+ constraints: constraints = dataclasses.field(default=constraints(b''))
+
+
+@dataclasses.dataclass(order=True, slots=True)
+class Ed25519PrivateKeyMsg(PrivateKeyMsg):
+ type: KeyAlgo
+ enc_a: binary_string
+ k_env_a: binary_string
+ comments: unicode_string = dataclasses.field(default=unicode_string(''), compare=False)
+ constraints: constraints = dataclasses.field(default=constraints(b''))
+
+
+@dataclasses.dataclass
+class PublicKeyMsg(Msg):
+ @staticmethod
+ def get_dataclass(type: KeyAlgo) -> type[
+ t.Union[
+ RSAPublicKeyMsg,
+ EcdsaPublicKeyMsg,
+ Ed25519PublicKeyMsg,
+ DSAPublicKeyMsg,
+ ]
+ ]:
+ match type:
+ case KeyAlgo.RSA:
+ return RSAPublicKeyMsg
+ case KeyAlgo.ECDSA256 | KeyAlgo.ECDSA384 | KeyAlgo.ECDSA521:
+ return EcdsaPublicKeyMsg
+ case KeyAlgo.ED25519:
+ return Ed25519PublicKeyMsg
+ case KeyAlgo.DSA:
+ return DSAPublicKeyMsg
+ case _:
+ raise NotImplementedError(type)
+
+ @functools.cached_property
+ def public_key(self) -> CryptoPublicKey:
+ type: KeyAlgo = self.type
+ match type:
+ case KeyAlgo.RSA:
+ return RSAPublicNumbers(self.e, self.n).public_key()
+ case KeyAlgo.ECDSA256 | KeyAlgo.ECDSA384 | KeyAlgo.ECDSA521:
+ curve = _ECDSA_KEY_TYPE[KeyAlgo(type)]
+ return EllipticCurvePublicKey.from_encoded_point(curve(), self.Q)
+ case KeyAlgo.ED25519:
+ return Ed25519PublicKey.from_public_bytes(self.enc_a)
+ case KeyAlgo.DSA:
+ return DSAPublicNumbers(self.y, DSAParameterNumbers(self.p, self.q, self.g)).public_key()
+ case _:
+ raise NotImplementedError(type)
+
+ @staticmethod
+ def from_public_key(public_key: CryptoPublicKey) -> PublicKeyMsg:
+ match public_key:
+ case DSAPublicKey():
+ dsa_pn: DSAPublicNumbers = public_key.public_numbers()
+ return DSAPublicKeyMsg(
+ KeyAlgo.DSA,
+ mpint(dsa_pn.parameter_numbers.p),
+ mpint(dsa_pn.parameter_numbers.q),
+ mpint(dsa_pn.parameter_numbers.g),
+ mpint(dsa_pn.y),
+ )
+ case EllipticCurvePublicKey():
+ return EcdsaPublicKeyMsg(
+ getattr(KeyAlgo, f'ECDSA{public_key.curve.key_size}'),
+ unicode_string(f'nistp{public_key.curve.key_size}'),
+ binary_string(
+ public_key.public_bytes(
+ encoding=serialization.Encoding.X962,
+ format=serialization.PublicFormat.UncompressedPoint,
+ )
+ ),
+ )
+ case Ed25519PublicKey():
+ return Ed25519PublicKeyMsg(
+ KeyAlgo.ED25519,
+ binary_string(
+ public_key.public_bytes(
+ encoding=serialization.Encoding.Raw,
+ format=serialization.PublicFormat.Raw,
+ )
+ ),
+ )
+ case RSAPublicKey():
+ rsa_pn: RSAPublicNumbers = public_key.public_numbers()
+ return RSAPublicKeyMsg(KeyAlgo.RSA, mpint(rsa_pn.e), mpint(rsa_pn.n))
+ case _:
+ raise NotImplementedError(public_key)
+
+ @functools.cached_property
+ def fingerprint(self) -> str:
+ digest = hashlib.sha256()
+ msg = copy.copy(self)
+ msg.comments = unicode_string('')
+ k = msg.to_blob()
+ digest.update(k)
+ return binascii.b2a_base64(digest.digest(), newline=False).rstrip(b'=').decode('utf-8')
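+
+    # The value above is unpadded base64 of the SHA-256 digest over the comment-less key blob,
+    # i.e. the body of an OpenSSH-style fingerprint ('SHA256:<value>' as shown by `ssh-add -l`).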
+
+
+@dataclasses.dataclass(order=True, slots=True)
+class RSAPublicKeyMsg(PublicKeyMsg):
+ type: KeyAlgo
+ e: mpint
+ n: mpint
+ comments: unicode_string = dataclasses.field(default=unicode_string(''), compare=False)
+
+
+@dataclasses.dataclass(order=True, slots=True)
+class DSAPublicKeyMsg(PublicKeyMsg):
+ type: KeyAlgo
+ p: mpint
+ q: mpint
+ g: mpint
+ y: mpint
+ comments: unicode_string = dataclasses.field(default=unicode_string(''), compare=False)
+
+
+@dataclasses.dataclass(order=True, slots=True)
+class EcdsaPublicKeyMsg(PublicKeyMsg):
+ type: KeyAlgo
+ ecdsa_curve_name: unicode_string
+ Q: binary_string
+ comments: unicode_string = dataclasses.field(default=unicode_string(''), compare=False)
+
+
+@dataclasses.dataclass(order=True, slots=True)
+class Ed25519PublicKeyMsg(PublicKeyMsg):
+ type: KeyAlgo
+ enc_a: binary_string
+ comments: unicode_string = dataclasses.field(default=unicode_string(''), compare=False)
+
+
+@dataclasses.dataclass(order=True, slots=True)
+class KeyList(Msg):
+ nkeys: uint32
+ keys: PublicKeyMsgList
+
+ def __post_init__(self) -> None:
+ if self.nkeys != len(self.keys):
+ raise SshAgentFailure("agent: invalid number of keys received for identities list")
+
+
+@dataclasses.dataclass(order=True, slots=True)
+class PublicKeyMsgList(Msg):
+ keys: list[PublicKeyMsg]
+
+ def __iter__(self) -> t.Iterator[PublicKeyMsg]:
+ yield from self.keys
+
+ def __len__(self) -> int:
+ return len(self.keys)
+
+ @classmethod
+ def from_blob(cls, blob: memoryview | bytes) -> t.Self: ...
+
+ @classmethod
+ def consume_from_blob(cls, blob: memoryview | bytes) -> tuple[t.Self, memoryview | bytes]:
+ args: list[PublicKeyMsg] = []
+ while blob:
+ prev_blob = blob
+ key_blob, key_blob_length, comment_blob = cls._consume_field(blob)
+
+ peek_key_algo, _length, _blob = cls._consume_field(key_blob)
+ pub_key_msg_cls = PublicKeyMsg.get_dataclass(KeyAlgo(bytes(peek_key_algo).decode('utf-8')))
+
+ _fv, comment_blob_length, blob = cls._consume_field(comment_blob)
+ key_plus_comment = prev_blob[4 : (4 + key_blob_length) + (4 + comment_blob_length)]
+
+ args.append(pub_key_msg_cls.from_blob(key_plus_comment))
+ return cls(args), b""
+
+ @staticmethod
+ def _consume_field(blob: memoryview | bytes) -> tuple[memoryview | bytes, uint32, memoryview | bytes]:
+ length = uint32.from_blob(blob[:4])
+ blob = blob[4:]
+ data, rest = _split_blob(blob, length)
+ return data, length, rest
+
+
+class SshAgentClient:
+ def __init__(self, auth_sock: str) -> None:
+ self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self._sock.settimeout(_SSH_AGENT_CLIENT_SOCKET_TIMEOUT)
+ self._sock.connect(auth_sock)
+
+ def close(self) -> None:
+ self._sock.close()
+
+ def __enter__(self) -> t.Self:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: types.TracebackType | None,
+ ) -> None:
+ self.close()
+
+ def send(self, msg: bytes) -> bytes:
+ length = uint32(len(msg)).to_blob()
+ self._sock.sendall(length + msg)
+ bufsize = uint32.from_blob(self._sock.recv(4))
+ resp = self._sock.recv(bufsize)
+ if resp[0] == ProtocolMsgNumbers.SSH_AGENT_FAILURE:
+ raise SshAgentFailure('agent: failure')
+ return resp
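+
+    # Framing sketch: every message is a uint32 length prefix plus payload; e.g. a
+    # REQUEST_IDENTITIES request (code 11) goes over the socket as b'\x00\x00\x00\x01\x0b'.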
+
+ def remove_all(self) -> None:
+ self.send(ProtocolMsgNumbers.SSH_AGENTC_REMOVE_ALL_IDENTITIES.to_blob())
+
+ def remove(self, public_key: CryptoPublicKey) -> None:
+ key_blob = PublicKeyMsg.from_public_key(public_key).to_blob()
+ self.send(ProtocolMsgNumbers.SSH_AGENTC_REMOVE_IDENTITY.to_blob() + uint32(len(key_blob)).to_blob() + key_blob)
+
+ def add(
+ self,
+ private_key: CryptoPrivateKey,
+ comments: str | None = None,
+ lifetime: int | None = None,
+ confirm: bool | None = None,
+ ) -> None:
+ key_msg = PrivateKeyMsg.from_private_key(private_key)
+ key_msg.comments = unicode_string(comments or '')
+ if lifetime:
+ key_msg.constraints += constraints([ProtocolMsgNumbers.SSH_AGENT_CONSTRAIN_LIFETIME]).to_blob() + uint32(lifetime).to_blob()
+ if confirm:
+ key_msg.constraints += constraints([ProtocolMsgNumbers.SSH_AGENT_CONSTRAIN_CONFIRM]).to_blob()
+
+ if key_msg.constraints:
+ msg = ProtocolMsgNumbers.SSH_AGENTC_ADD_ID_CONSTRAINED.to_blob()
+ else:
+ msg = ProtocolMsgNumbers.SSH_AGENTC_ADD_IDENTITY.to_blob()
+ msg += key_msg.to_blob()
+ self.send(msg)
+
+ def list(self) -> KeyList:
+ req = ProtocolMsgNumbers.SSH_AGENTC_REQUEST_IDENTITIES.to_blob()
+ r = memoryview(bytearray(self.send(req)))
+ if r[0] != ProtocolMsgNumbers.SSH_AGENT_IDENTITIES_ANSWER:
+ raise SshAgentFailure('agent: non-identities answer received for identities list')
+ return KeyList.from_blob(r[1:])
+
+ def __contains__(self, public_key: CryptoPublicKey) -> bool:
+ msg = PublicKeyMsg.from_public_key(public_key)
+ return msg in self.list().keys
+
+
+@functools.cache
+def key_data_into_crypto_objects(key_data: bytes, passphrase: bytes | None) -> tuple[CryptoPrivateKey, CryptoPublicKey, str]:
+ private_key = serialization.ssh.load_ssh_private_key(key_data, passphrase)
+ public_key = private_key.public_key()
+ fingerprint = PublicKeyMsg.from_public_key(public_key).fingerprint
+
+ return private_key, public_key, fingerprint
diff --git a/lib/ansible/_internal/_task.py b/lib/ansible/_internal/_task.py
new file mode 100644
index 00000000000..6a5e8a63f8b
--- /dev/null
+++ b/lib/ansible/_internal/_task.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+import dataclasses
+import typing as t
+
+from collections import abc as c
+
+from ansible import constants
+from ansible._internal._templating import _engine
+from ansible._internal._templating._chain_templar import ChainTemplar
+from ansible.errors import AnsibleError
+from ansible.module_utils._internal._ambient_context import AmbientContextBase
+from ansible.module_utils.datatag import native_type_name
+from ansible.parsing import vault as _vault
+from ansible.utils.display import Display
+
+if t.TYPE_CHECKING:
+ from ansible.playbook.task import Task
+
+
+@dataclasses.dataclass
+class TaskContext(AmbientContextBase):
+ """Ambient context that wraps task execution on workers. It provides access to the currently executing task."""
+
+ task: Task
+
+
+TaskArgsFinalizerCallback = t.Callable[[str, t.Any, _engine.TemplateEngine, t.Any], t.Any]
+"""Type alias for the shape of the `ActionBase.finalize_task_arg` method."""
+
+
+class TaskArgsChainTemplar(ChainTemplar):
+ """
+ A ChainTemplar that carries a user-provided context object, optionally provided by `ActionBase.get_finalize_task_args_context`.
+ TaskArgsFinalizer provides the context to each `ActionBase.finalize_task_arg` call to allow for more complex/stateful customization.
+ """
+
+ def __init__(self, *sources: c.Mapping, templar: _engine.TemplateEngine, callback: TaskArgsFinalizerCallback, context: t.Any) -> None:
+ super().__init__(*sources, templar=templar)
+
+ self.callback = callback
+ self.context = context
+
+ def template(self, key: t.Any, value: t.Any) -> t.Any:
+ return self.callback(key, value, self.templar, self.context)
+
+
+class TaskArgsFinalizer:
+ """Invoked during task args finalization; allows actions to override default arg processing (e.g., templating)."""
+
+ def __init__(self, *args: c.Mapping[str, t.Any] | str | None, templar: _engine.TemplateEngine) -> None:
+ self._args_layers = [arg for arg in args if arg is not None]
+ self._templar = templar
+
+ def finalize(self, callback: TaskArgsFinalizerCallback, context: t.Any) -> dict[str, t.Any]:
+ resolved_layers: list[c.Mapping[str, t.Any]] = []
+
+ for layer in self._args_layers:
+ if isinstance(layer, (str, _vault.EncryptedString)): # EncryptedString can hide a template
+ if constants.config.get_config_value('INJECT_FACTS_AS_VARS'):
+ Display().warning(
+ "Using a template for task args is unsafe in some situations "
+ "(see https://docs.ansible.com/ansible/devel/reference_appendices/faq.html#argsplat-unsafe).",
+ obj=layer,
+ )
+
+ resolved_layer = self._templar.resolve_to_container(layer, options=_engine.TemplateOptions(value_for_omit={}))
+ else:
+ resolved_layer = layer
+
+ if not isinstance(resolved_layer, dict):
+ raise AnsibleError(f'Task args must resolve to a {native_type_name(dict)!r} not {native_type_name(resolved_layer)!r}.', obj=layer)
+
+ resolved_layers.append(resolved_layer)
+
+ ct = TaskArgsChainTemplar(*reversed(resolved_layers), templar=self._templar, callback=callback, context=context)
+
+ return ct.as_dict()
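+
+
+# Precedence sketch (illustrative; `engine` is an assumed TemplateEngine and `cb` a pass-through
+# callback): layers are reversed before chaining, so later layers win.
+#
+#   TaskArgsFinalizer({'a': 1}, {'a': 2}, templar=engine).finalize(cb, None)  # -> {'a': 2}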
diff --git a/lib/ansible/_internal/_templating/__init__.py b/lib/ansible/_internal/_templating/__init__.py
new file mode 100644
index 00000000000..0fe0a555d73
--- /dev/null
+++ b/lib/ansible/_internal/_templating/__init__.py
@@ -0,0 +1,12 @@
+from __future__ import annotations
+
+import importlib.metadata
+
+jinja2_version = importlib.metadata.version('jinja2')
+
+# DTFIX-FUTURE: sanity test to ensure this doesn't drift from requirements
+_MINIMUM_JINJA_VERSION = (3, 1)
+_CURRENT_JINJA_VERSION = tuple(map(int, jinja2_version.split('.', maxsplit=2)[:2]))
+
+if _CURRENT_JINJA_VERSION < _MINIMUM_JINJA_VERSION:
+ raise RuntimeError(f'Jinja version {".".join(map(str, _MINIMUM_JINJA_VERSION))} or higher is required (current version {jinja2_version}).')
diff --git a/lib/ansible/_internal/_templating/_access.py b/lib/ansible/_internal/_templating/_access.py
new file mode 100644
index 00000000000..d69a92df9fc
--- /dev/null
+++ b/lib/ansible/_internal/_templating/_access.py
@@ -0,0 +1,86 @@
+from __future__ import annotations
+
+import abc
+import typing as t
+
+from contextvars import ContextVar
+
+from ansible.module_utils._internal._datatag import AnsibleTagHelper
+
+
+class NotifiableAccessContextBase(metaclass=abc.ABCMeta):
+ """Base class for a context manager that, when active, receives notification of managed access for types/tags in which it has registered an interest."""
+
+ _type_interest: t.FrozenSet[type] = frozenset()
+ """Set of types (including tag types) for which this context will be notified upon access."""
+
+ _mask: t.ClassVar[bool] = False
+ """When true, only the innermost (most recently created) context of this type will be notified."""
+
+ def __enter__(self):
+ # noinspection PyProtectedMember
+ AnsibleAccessContext.current()._register_interest(self)
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb) -> None:
+ # noinspection PyProtectedMember
+ AnsibleAccessContext.current()._unregister_interest(self)
+ return None
+
+ @abc.abstractmethod
+ def _notify(self, o: t.Any) -> t.Any:
+ """Derived classes implement custom notification behavior when a registered type or tag is accessed."""
+
+
+class AnsibleAccessContext:
+ """
+ Broker object for managed access registration and notification.
+ Each thread or other logical callstack has a dedicated `AnsibleAccessContext` object with which `NotifiableAccessContext` objects can register interest.
+ When a managed access occurs on an object, each active `NotifiableAccessContext` within the current callstack that has registered interest in that
+ object's type or a tag present on it will be notified.
+ """
+
+ _contextvar: t.ClassVar[ContextVar[AnsibleAccessContext]] = ContextVar('AnsibleAccessContext')
+
+ @staticmethod
+ def current() -> AnsibleAccessContext:
+ """Creates or retrieves an `AnsibleAccessContext` for the current logical callstack."""
+ try:
+ ctx: AnsibleAccessContext = AnsibleAccessContext._contextvar.get()
+ except LookupError:
+ # didn't exist; create it
+ ctx = AnsibleAccessContext()
+ AnsibleAccessContext._contextvar.set(ctx) # we ignore the token, since this should live for the life of the thread/async ctx
+
+ return ctx
+
+ def __init__(self) -> None:
+ self._notify_contexts: list[NotifiableAccessContextBase] = []
+
+ def _register_interest(self, context: NotifiableAccessContextBase) -> None:
+ self._notify_contexts.append(context)
+
+ def _unregister_interest(self, context: NotifiableAccessContextBase) -> None:
+ ctx = self._notify_contexts.pop()
+
+ if ctx is not context:
+ raise RuntimeError(f'Out-of-order context deactivation detected. Found {ctx} instead of {context}.')
+
+ def access(self, value: t.Any) -> None:
+ """Notify all contexts which have registered interest in the given value that it is being accessed."""
+ if not self._notify_contexts:
+ return
+
+ value_types = AnsibleTagHelper.tag_types(value) | frozenset((type(value),))
+ masked: set[type] = set()
+
+ for ctx in reversed(self._notify_contexts):
+ if ctx._mask:
+ if (ctx_type := type(ctx)) in masked:
+ continue
+
+ masked.add(ctx_type)
+
+ # noinspection PyProtectedMember
+ if ctx._type_interest.intersection(value_types):
+ ctx._notify(value)
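+
+
+# Minimal consumer sketch (illustrative; the class and the `Origin` tag import are hypothetical,
+# shown only to demonstrate the registration/notification contract):
+#
+#   class _OriginAccessCounter(NotifiableAccessContextBase):
+#       _type_interest = frozenset({Origin})
+#
+#       def __init__(self) -> None:
+#           self.count = 0
+#
+#       def _notify(self, o: t.Any) -> None:
+#           self.count += 1
+#
+#   with _OriginAccessCounter() as counter:
+#       ...  # any AnsibleAccessContext.current().access(v) on Origin-tagged values bumps count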
diff --git a/lib/ansible/_internal/_templating/_chain_templar.py b/lib/ansible/_internal/_templating/_chain_templar.py
new file mode 100644
index 00000000000..896dcc053aa
--- /dev/null
+++ b/lib/ansible/_internal/_templating/_chain_templar.py
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+import collections.abc as c
+import itertools
+import typing as t
+
+from ansible.errors import AnsibleValueOmittedError, AnsibleError
+
+from ._engine import TemplateEngine
+
+
+class ChainTemplar:
+ """A basic variable layering mechanism that supports templating and obliteration of `omit` values."""
+
+ def __init__(self, *sources: c.Mapping, templar: TemplateEngine) -> None:
+ self.sources = sources
+ self.templar = templar
+
+ def template(self, key: t.Any, value: t.Any) -> t.Any:
+ """
+ Render the given value using the templar.
+ Intended to be overridden by subclasses.
+ """
+ return self.templar.template(value)
+
+ def get(self, key: t.Any) -> t.Any:
+ """Get the value for the given key, templating the result before returning it."""
+ for source in self.sources:
+ if key not in source:
+ continue
+
+ value = source[key]
+
+ try:
+ return self.template(key, value)
+ except AnsibleValueOmittedError:
+ break # omit == obliterate - matches historical behavior where dict layers were squashed before templating was applied
+ except Exception as ex:
+ raise AnsibleError(f'Error while resolving value for {key!r}.', obj=value) from ex
+
+ raise KeyError(key)
+
+ def keys(self) -> t.Iterable[t.Any]:
+ """
+ Returns a sorted iterable of all keys present in all source layers, without templating associated values.
+ Values that resolve to `omit` are thus included.
+ """
+ return sorted(set(itertools.chain.from_iterable(self.sources)))
+
+ def items(self) -> t.Iterable[t.Tuple[t.Any, t.Any]]:
+ """
+ Returns a sorted iterable of (key, templated value) tuples.
+ Any tuple where the templated value resolves to `omit` will not be included in the result.
+ """
+ for key in self.keys():
+ try:
+ yield key, self.get(key)
+ except KeyError:
+ pass
+
+ def as_dict(self) -> dict[t.Any, t.Any]:
+ """Returns a dict representing all layers, squashed and templated, with `omit` values dropped."""
+ return dict(self.items())
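+
+
+# Layering sketch (illustrative; `engine` is an assumed TemplateEngine instance):
+#
+#   ct = ChainTemplar({'a': 1}, {'a': 2, 'b': '{{ x }}'}, templar=engine)
+#   ct.get('a')   # -> 1; earlier sources win
+#   ct.as_dict()  # -> {'a': 1, 'b': <rendered '{{ x }}'>}; keys resolving to `omit` are dropped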
diff --git a/lib/ansible/_internal/_templating/_datatag.py b/lib/ansible/_internal/_templating/_datatag.py
new file mode 100644
index 00000000000..db03fd1cb31
--- /dev/null
+++ b/lib/ansible/_internal/_templating/_datatag.py
@@ -0,0 +1,95 @@
+from __future__ import annotations
+
+import contextlib as _contextlib
+import dataclasses
+import typing as t
+
+from ansible.module_utils._internal._datatag import AnsibleSingletonTagBase, _tag_dataclass_kwargs
+from ansible.module_utils._internal._datatag._tags import Deprecated
+from ansible._internal._datatag._tags import Origin
+from ansible.utils.display import Display
+
+from ._access import NotifiableAccessContextBase
+from ._utils import TemplateContext
+
+display = Display()
+
+
+@dataclasses.dataclass(**_tag_dataclass_kwargs)
+class _JinjaConstTemplate(AnsibleSingletonTagBase):
+ # deprecated: description='embedded Jinja constant string template support' core_version='2.23'
+ pass
+
+
+@dataclasses.dataclass(frozen=True, kw_only=True, slots=True)
+class _TrippedDeprecationInfo:
+ template: str
+ deprecated: Deprecated
+
+
+class DeprecatedAccessAuditContext(NotifiableAccessContextBase):
+ """When active, captures metadata about managed accesses to `Deprecated` tagged objects."""
+
+ _type_interest = frozenset([Deprecated])
+
+ @classmethod
+ def when(cls, condition: bool, /) -> t.Self | _contextlib.nullcontext:
+ """Returns a new instance if `condition` is True (usually `TemplateContext.is_top_level`), otherwise a `nullcontext` instance."""
+ if condition:
+ return cls()
+
+ return _contextlib.nullcontext()
+
+ def __init__(self) -> None:
+ self._tripped_deprecation_info: dict[int, _TrippedDeprecationInfo] = {}
+
+ def __exit__(self, exc_type, exc_val, exc_tb) -> None:
+ result = super().__exit__(exc_type, exc_val, exc_tb)
+
+ for item in self._tripped_deprecation_info.values():
+ if Origin.is_tagged_on(item.template):
+ msg = item.deprecated.msg
+ else:
+ # without an origin, we need to include what context we do have (the template)
+ msg = f'While processing {item.template!r}: {item.deprecated.msg}'
+
+ display._deprecated_with_plugin_info(
+ msg=msg,
+ help_text=item.deprecated.help_text,
+ version=item.deprecated.version,
+ date=item.deprecated.date,
+ obj=item.template,
+ deprecator=item.deprecated.deprecator,
+ formatted_traceback=item.deprecated.formatted_traceback,
+ )
+
+ return result
+
+ def _notify(self, o: t.Any) -> None:
+ deprecated = Deprecated.get_required_tag(o)
+ deprecated_key = id(deprecated)
+
+ if deprecated_key in self._tripped_deprecation_info:
+ return # record only the first access for each deprecated tag in a given context
+
+ template_ctx = TemplateContext.current(optional=True)
+ template = template_ctx.template_value if template_ctx else None
+
+ # when the current template input is a container, provide a descriptive string with origin propagated (if possible)
+ if not isinstance(template, str):
+ # DTFIX-FUTURE: ascend the template stack to try and find the nearest string source template
+ origin = Origin.get_tag(template)
+
+ # DTFIX-FUTURE: this should probably use a synthesized description value on the tag
+ # it is reachable from the data_tagging_controller test: ../playbook_output_validator/filter.py actual_stdout.txt actual_stderr.txt
+ # -[DEPRECATION WARNING]: `something_old` is deprecated, don't use it! This feature will be removed in version 1.2.3.
+ # +[DEPRECATION WARNING]: While processing '<>': `something_old` is deprecated, don't use it! This feature will be removed in ...
+ template = '<>'
+
+ if origin:
+                template = origin.tag(template)
+
+ self._tripped_deprecation_info[deprecated_key] = _TrippedDeprecationInfo(
+ template=template,
+ deprecated=deprecated,
+ )
diff --git a/lib/ansible/_internal/_templating/_engine.py b/lib/ansible/_internal/_templating/_engine.py
new file mode 100644
index 00000000000..de3d70e38d1
--- /dev/null
+++ b/lib/ansible/_internal/_templating/_engine.py
@@ -0,0 +1,592 @@
+# (c) 2012-2014, Michael DeHaan
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import annotations
+
+import copy
+import dataclasses
+import enum
+import textwrap
+import typing as t
+import collections.abc as c
+import re
+
+from collections import ChainMap
+
+from ansible.errors import (
+ AnsibleError,
+ AnsibleValueOmittedError,
+ AnsibleUndefinedVariable,
+ AnsibleTemplateSyntaxError,
+ AnsibleBrokenConditionalError,
+ AnsibleTemplateTransformLimitError,
+ TemplateTrustCheckFailedError,
+)
+
+from ansible.module_utils._internal._datatag import AnsibleTaggedObject, NotTaggableError, AnsibleTagHelper
+from ansible._internal._errors._handler import Skippable
+from ansible._internal._datatag._tags import Origin, TrustedAsTemplate
+from ansible.utils.display import Display
+from ansible.utils.vars import validate_variable_name
+from ansible.parsing.dataloader import DataLoader
+
+from ._datatag import DeprecatedAccessAuditContext
+from ._jinja_bits import (
+ AnsibleTemplate,
+ _TemplateCompileContext,
+ TemplateOverrides,
+ AnsibleEnvironment,
+ defer_template_error,
+ create_template_error,
+ is_possibly_template,
+ is_possibly_all_template,
+ AnsibleTemplateExpression,
+ _finalize_template_result,
+ FinalizeMode,
+)
+from ._jinja_common import _TemplateConfig, MarkerError, ExceptionMarker
+from ._lazy_containers import _AnsibleLazyTemplateMixin
+from ._marker_behaviors import MarkerBehavior, FAIL_ON_UNDEFINED
+from ._transform import _type_transform_mapping
+from ._utils import Omit, TemplateContext, IGNORE_SCALAR_VAR_TYPES, LazyOptions
+from ...module_utils.datatag import native_type_name
+
+_display = Display()
+
+
+_shared_empty_unmask_type_names: frozenset[str] = frozenset()
+
+TRANSFORM_CHAIN_LIMIT: int = 10
+"""Arbitrary limit for chained transforms to prevent cycles; an exception will be raised if exceeded."""
+
+
+class TemplateMode(enum.Enum):
+ # DTFIX-FUTURE: this enum ideally wouldn't exist - revisit/rename before making public
+ DEFAULT = enum.auto()
+ STOP_ON_TEMPLATE = enum.auto()
+ STOP_ON_CONTAINER = enum.auto()
+ ALWAYS_FINALIZE = enum.auto()
+
+
+@dataclasses.dataclass(kw_only=True, slots=True, frozen=True)
+class TemplateOptions:
+ DEFAULT: t.ClassVar[t.Self]
+
+ value_for_omit: object = Omit
+ escape_backslashes: bool = True
+ preserve_trailing_newlines: bool = True
+ # DTFIX-FUTURE: these aren't really overrides anymore, rename the dataclass and this field
+ # also mention in docstring this has no effect unless used to template a string
+ overrides: TemplateOverrides = TemplateOverrides.DEFAULT
+
+
+TemplateOptions.DEFAULT = TemplateOptions()
+
+
+class TemplateEncountered(Exception):
+ pass
+
+
+class TemplateEngine:
+ """
+ The main class for templating, with the main entry-point of template().
+ """
+
+ _sentinel = object()
+
+ def __init__(
+ self,
+ loader: DataLoader | None = None,
+ variables: dict[str, t.Any] | ChainMap[str, t.Any] | None = None,
+ variables_factory: t.Callable[[], dict[str, t.Any] | ChainMap[str, t.Any]] | None = None,
+ marker_behavior: MarkerBehavior | None = None,
+ ):
+ self._loader = loader
+ self._variables = variables
+ self._variables_factory = variables_factory
+ self._environment: AnsibleEnvironment | None = None
+
+ # inherit marker behavior from the active template context's templar unless otherwise specified
+ if not marker_behavior:
+ if template_ctx := TemplateContext.current(optional=True):
+ marker_behavior = template_ctx.templar.marker_behavior
+ else:
+ marker_behavior = FAIL_ON_UNDEFINED
+
+ self._marker_behavior = marker_behavior
+
+ def copy(self) -> t.Self:
+ new_engine = copy.copy(self)
+ new_engine._environment = None
+
+ return new_engine
+
+ def extend(self, marker_behavior: MarkerBehavior | None = None) -> t.Self:
+ new_templar = type(self)(
+ loader=self._loader,
+ variables=self._variables,
+ variables_factory=self._variables_factory,
+ marker_behavior=marker_behavior or self._marker_behavior,
+ )
+
+ if self._environment:
+ new_templar._environment = self._environment
+
+ return new_templar
+
+ @property
+ def marker_behavior(self) -> MarkerBehavior:
+ return self._marker_behavior
+
+ @property
+ def basedir(self) -> str:
+ """The basedir from DataLoader."""
+ return self._loader.get_basedir() if self._loader else '.'
+
+ @property
+ def environment(self) -> AnsibleEnvironment:
+ if not self._environment:
+ self._environment = AnsibleEnvironment(ansible_basedir=self.basedir)
+
+ return self._environment
+
+ def _create_overlay(self, template: str, overrides: TemplateOverrides) -> tuple[str, AnsibleEnvironment]:
+ try:
+ template, overrides = overrides._extract_template_overrides(template)
+ except Exception as ex:
+ raise AnsibleTemplateSyntaxError("Syntax error in template.", obj=template) from ex
+
+ env = self.environment
+
+ if overrides is not TemplateOverrides.DEFAULT and (overlay_kwargs := overrides.overlay_kwargs()):
+ env = t.cast(AnsibleEnvironment, env.overlay(**overlay_kwargs))
+
+ return template, env
+
+ @staticmethod
+ def _count_newlines_from_end(in_str):
+ """
+ Counts the number of newlines at the end of a string. This is used during
+ the jinja2 templating to ensure the count matches the input, since some newlines
+ may be thrown away during the templating.
+ """
+
+ i = len(in_str)
+ j = i - 1
+
+ try:
+ while in_str[j] == '\n':
+ j -= 1
+ except IndexError:
+ # Uncommon cases: zero length string and string containing only newlines
+ return i
+
+ return i - 1 - j
+
+ @property
+ def available_variables(self) -> dict[str, t.Any] | ChainMap[str, t.Any]:
+ """Available variables this instance will use when templating."""
+ # DTFIX3: ensure that we're always accessing this as a shallow container-level snapshot, and eliminate uses of anything
+ # that directly mutates this value. _new_context may resolve this for us?
+ if self._variables is None:
+ self._variables = self._variables_factory() if self._variables_factory else {}
+
+ return self._variables
+
+ @available_variables.setter
+ def available_variables(self, variables: dict[str, t.Any]) -> None:
+ self._variables = variables
+
+ def resolve_variable_expression(
+ self,
+ expression: str,
+ *,
+ local_variables: dict[str, t.Any] | None = None,
+ ) -> t.Any:
+ """
+ Resolve a potentially untrusted string variable expression consisting only of valid identifiers, integers, dots, and indexing containing these.
+ Optional local variables may be provided, which can only be referenced directly by the given expression.
+ Valid: x, x.y, x[y].z, x[1], 1, x[y.z]
+ Error: 'x', x['y'], q('env')
+ """
+ components = re.split(r'[.\[\]]', expression)
+
+ try:
+ for component in components:
+ if re.fullmatch('[0-9]*', component):
+ continue # allow empty strings and integers
+
+ validate_variable_name(component)
+ except Exception as ex:
+ raise AnsibleError(f'Invalid variable expression: {expression}', obj=expression) from ex
+
+ return self.evaluate_expression(TrustedAsTemplate().tag(expression), local_variables=local_variables)
+
+ @staticmethod
+ def variable_name_as_template(name: str) -> str:
+ """Return a trusted template string that will resolve the provided variable name. Raises an error if `name` is not a valid identifier."""
+ validate_variable_name(name)
+ return AnsibleTagHelper.tag('{{' + name + '}}', (AnsibleTagHelper.tags(name) | {TrustedAsTemplate()}))
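+
+    # e.g. variable_name_as_template('foo') -> '{{foo}}', tagged TrustedAsTemplate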
+
+ def transform(self, variable: t.Any) -> t.Any:
+ """Recursively apply transformations to the given value and return the result."""
+ return self.template(variable, mode=TemplateMode.ALWAYS_FINALIZE, lazy_options=LazyOptions.SKIP_TEMPLATES_AND_ACCESS)
+
+ def template(
+ self,
+ variable: t.Any, # DTFIX-FUTURE: once we settle the new/old API boundaries, rename this (here and in other methods)
+ *,
+ options: TemplateOptions = TemplateOptions.DEFAULT,
+ mode: TemplateMode = TemplateMode.DEFAULT,
+ lazy_options: LazyOptions = LazyOptions.DEFAULT,
+ ) -> t.Any:
+ """Templates (possibly recursively) any given data as input."""
+ original_variable = variable
+
+ for _attempt in range(TRANSFORM_CHAIN_LIMIT):
+ if variable is None or (value_type := type(variable)) in IGNORE_SCALAR_VAR_TYPES:
+                return variable  # quickly ignore supported scalar types which are not templated
+
+ value_is_str = isinstance(variable, str)
+
+ if template_ctx := TemplateContext.current(optional=True):
+ stop_on_template = template_ctx.stop_on_template
+ else:
+ stop_on_template = False
+
+ if mode is TemplateMode.STOP_ON_TEMPLATE:
+ stop_on_template = True
+
+ with (
+ TemplateContext(template_value=variable, templar=self, options=options, stop_on_template=stop_on_template) as ctx,
+ DeprecatedAccessAuditContext.when(ctx.is_top_level),
+ ):
+ try:
+ if not value_is_str:
+ # transforms are currently limited to non-str types as an optimization
+ if (transform := _type_transform_mapping.get(value_type)) and value_type.__name__ not in lazy_options.unmask_type_names:
+ variable = transform(variable)
+ continue
+
+ template_result = _AnsibleLazyTemplateMixin._try_create(variable, lazy_options)
+ elif not lazy_options.template:
+ template_result = variable
+ elif not is_possibly_template(variable, options.overrides):
+ template_result = variable
+ elif not self._trust_check(variable, skip_handler=stop_on_template):
+ template_result = variable
+ elif stop_on_template:
+ raise TemplateEncountered()
+ else:
+ compiled_template = self._compile_template(variable, options)
+
+ template_result = compiled_template(self.available_variables)
+ template_result = self._post_render_mutation(variable, template_result, options)
+ except TemplateEncountered:
+ raise
+ except Exception as ex:
+ template_result = defer_template_error(ex, variable, is_expression=False)
+
+ if ctx.is_top_level or mode is TemplateMode.ALWAYS_FINALIZE:
+ template_result = self._finalize_top_level_template_result(
+ variable, options, template_result, stop_on_container=mode is TemplateMode.STOP_ON_CONTAINER
+ )
+
+ return template_result
+
+ raise AnsibleTemplateTransformLimitError(obj=original_variable)
+
+ @staticmethod
+ def _finalize_top_level_template_result(
+ variable: t.Any,
+ options: TemplateOptions,
+ template_result: t.Any,
+ is_expression: bool = False,
+ stop_on_container: bool = False,
+ ) -> t.Any:
+ """
+ This method must be called for expressions and top-level templates to recursively finalize the result.
+ This renders any embedded templates and triggers `Marker` and omit behaviors.
+ """
+ try:
+ if template_result is Omit:
+ # When the template result is Omit, raise an AnsibleValueOmittedError if value_for_omit is Omit, otherwise return value_for_omit.
+ # Other occurrences of Omit will simply drop out of containers during _finalize_template_result.
+ if options.value_for_omit is Omit:
+ raise AnsibleValueOmittedError()
+
+ return options.value_for_omit # trust that value_for_omit is an allowed type
+
+ if stop_on_container and type(template_result) in AnsibleTaggedObject._collection_types:
+ # Use of stop_on_container implies the caller will perform necessary checks on values,
+ # most likely by passing them back into the templating system.
+ try:
+ return template_result._non_lazy_copy()
+ except AttributeError:
+ return template_result # non-lazy containers are returned as-is
+
+ return _finalize_template_result(template_result, FinalizeMode.TOP_LEVEL)
+ except TemplateEncountered:
+ raise
+ except Exception as ex:
+ raise_from: BaseException
+
+ if isinstance(ex, MarkerError):
+ exception_to_raise = ex.source._as_exception()
+
+                # MarkerError is never suitable for use as the cause of another exception; it is merely a raiseable container for the source marker
+                # used for flow control (so its stack trace is rarely useful). However, if the source derives from an ExceptionMarker, its contained
+                # exception (previously raised) should be used as the cause. Other sources do not contain exceptions, so they cannot provide a cause.
+ raise_from = exception_to_raise if isinstance(ex.source, ExceptionMarker) else None
+ else:
+ exception_to_raise = ex
+ raise_from = ex
+
+ exception_to_raise = create_template_error(exception_to_raise, variable, is_expression)
+
+ if exception_to_raise is ex:
+ raise # when the exception to raise is the active exception, just re-raise it
+
+ if exception_to_raise is raise_from:
+ raise_from = exception_to_raise.__cause__ # preserve the exception's cause, if any, otherwise no cause will be used
+
+ raise exception_to_raise from raise_from # always raise from something to avoid the currently active exception becoming __context__
+
+ def _compile_template(self, template: str, options: TemplateOptions) -> t.Callable[[c.Mapping[str, t.Any]], t.Any]:
+ # NOTE: Creating an overlay that lives only inside _compile_template means that overrides are not applied
+ # when templating nested variables, where Templar.environment is used, not the overlay. They are, however,
+ # applied to includes and imports.
+ try:
+ stripped_template, env = self._create_overlay(template, options.overrides)
+
+ with _TemplateCompileContext(escape_backslashes=options.escape_backslashes):
+ return t.cast(AnsibleTemplate, env.from_string(stripped_template))
+ except Exception as ex:
+ return self._defer_jinja_compile_error(ex, template, False)
+
+ def _compile_expression(self, expression: str, options: TemplateOptions) -> t.Callable[[c.Mapping[str, t.Any]], t.Any]:
+ """
+ Compile a Jinja expression, applying optional compile-time behavior via an environment overlay (if needed). The overlay is
+ necessary to avoid mutating settings on the Templar's shared environment, which could be visible to other code running concurrently.
+ In the specific case of escape_backslashes, the setting only applies to a top-level template at compile-time, not runtime, to
+ ensure that any nested template calls (e.g., include and import) do not inherit the (lack of) escaping behavior.
+ """
+ try:
+ with _TemplateCompileContext(escape_backslashes=options.escape_backslashes):
+ return AnsibleTemplateExpression(self.environment.compile_expression(expression, False))
+ except Exception as ex:
+ return self._defer_jinja_compile_error(ex, expression, True)
+
+ def _defer_jinja_compile_error(self, ex: Exception, variable: str, is_expression: bool) -> t.Callable[[c.Mapping[str, t.Any]], t.Any]:
+ deferred_error = defer_template_error(ex, variable, is_expression=is_expression)
+
+ def deferred_exception(_jinja_vars: c.Mapping[str, t.Any]) -> t.Any:
+ # a template/expression compile error always results in a single node representing the compile error
+ return self.marker_behavior.handle_marker(deferred_error)
+
+ return deferred_exception
+
+ def _post_render_mutation(self, template: str, result: t.Any, options: TemplateOptions) -> t.Any:
+ if options.preserve_trailing_newlines and isinstance(result, str):
+            # The low-level calls above do not preserve the newline
+            # characters at the end of the input data, so we
+            # calculate the difference in newlines and append them
+            # to the resulting output for parity.
+            #
+            # Using AnsibleEnvironment's keep_trailing_newline instead would
+            # change behavior, since trailing newlines would also be
+            # kept for included templates, for example:
+            # "Hello {% include 'world.txt' %}!" would render as
+            # "Hello world\n!\n" instead of "Hello world!\n".
+ data_newlines = self._count_newlines_from_end(template)
+ res_newlines = self._count_newlines_from_end(result)
+
+ if data_newlines > res_newlines:
+ newlines = options.overrides.newline_sequence * (data_newlines - res_newlines)
+ result = AnsibleTagHelper.tag_copy(result, result + newlines)
+
+ # If the input string template was source-tagged and the result is not, propagate the source tag to the new value.
+ # This provides further contextual information when a template-derived value/var causes an error.
+ if not Origin.is_tagged_on(result) and (origin := Origin.get_tag(template)):
+ try:
+ result = origin.tag(result)
+ except NotTaggableError:
+ pass # best effort- if we can't, oh well
+
+ return result
+
+ def is_template(self, data: t.Any, overrides: TemplateOverrides = TemplateOverrides.DEFAULT) -> bool:
+ """
+ Evaluate the input data to determine if it contains a template, even if that template is invalid. Containers will be recursively searched.
+ Objects subject to template-time transforms that do not yield a template are not considered templates by this method.
+ Gating a conditional call to `template` with this method is redundant and inefficient -- request templating unconditionally instead.
+ """
+ options = TemplateOptions(overrides=overrides) if overrides is not TemplateOverrides.DEFAULT else TemplateOptions.DEFAULT
+
+ try:
+ self.template(data, options=options, mode=TemplateMode.STOP_ON_TEMPLATE)
+ except TemplateEncountered:
+ return True
+ else:
+ return False
+
+ def resolve_to_container(self, variable: t.Any, options: TemplateOptions = TemplateOptions.DEFAULT) -> t.Any:
+ """
+ Recursively resolve scalar string template input, stopping at the first container encountered (if any).
+ Used for e.g., partial templating of task arguments, where the plugin needs to handle final resolution of some args internally.
+ """
+ return self.template(variable, options=options, mode=TemplateMode.STOP_ON_CONTAINER)
+
+ def evaluate_expression(
+ self,
+ expression: str,
+ *,
+ local_variables: dict[str, t.Any] | None = None,
+ escape_backslashes: bool = True,
+ _render_jinja_const_template: bool = False,
+ ) -> t.Any:
+ """
+ Evaluate a trusted string expression and return its result.
+ Optional local variables may be provided, which can only be referenced directly by the given expression.
+ """
+ if not isinstance(expression, str):
+ raise TypeError(f"Expressions must be {str!r}, got {type(expression)!r}.")
+
+ options = TemplateOptions(escape_backslashes=escape_backslashes, preserve_trailing_newlines=False)
+
+ with (
+ TemplateContext(template_value=expression, templar=self, options=options, _render_jinja_const_template=_render_jinja_const_template) as ctx,
+ DeprecatedAccessAuditContext.when(ctx.is_top_level),
+ ):
+ try:
+ if not TrustedAsTemplate.is_tagged_on(expression):
+ raise TemplateTrustCheckFailedError(obj=expression)
+
+ template_variables = ChainMap(local_variables, self.available_variables) if local_variables else self.available_variables
+ compiled_template = self._compile_expression(expression, options)
+
+ template_result = compiled_template(template_variables)
+ template_result = self._post_render_mutation(expression, template_result, options)
+ except Exception as ex:
+ template_result = defer_template_error(ex, expression, is_expression=True)
+
+ return self._finalize_top_level_template_result(expression, options, template_result, is_expression=True)
+
+ _BROKEN_CONDITIONAL_ALLOWED_FRAGMENT = 'Broken conditionals are currently allowed because the `ALLOW_BROKEN_CONDITIONALS` configuration option is enabled.'
+ _CONDITIONAL_AS_TEMPLATE_MSG = 'Conditionals should not be surrounded by templating delimiters such as {{ }} or {% %}.'
+
+ def _strip_conditional_handle_empty(self, conditional) -> t.Any:
+ """
+ Strips leading/trailing whitespace from the input expression.
+ If `ALLOW_BROKEN_CONDITIONALS` is enabled, None/empty is coerced to True (legacy behavior, deprecated).
+ Otherwise, None/empty results in a broken conditional error being raised.
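+        For example (illustrative): a task with `when: ""` evaluates True under the legacy behavior, after a deprecation warning.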
+ """
+ if isinstance(conditional, str):
+ # Leading/trailing whitespace on conditional expressions is not a problem, except we can't tell if the expression is empty (which *is* a problem).
+ # Always strip conditional input strings. Neither conditional expressions nor all-template conditionals have legit reasons to preserve
+ # surrounding whitespace, and they complicate detection and processing of all-template fallback cases.
+ conditional = AnsibleTagHelper.tag_copy(conditional, conditional.strip())
+
+ if conditional in (None, ''):
+ # deprecated backward-compatible behavior; None/empty input conditionals are always True
+ if _TemplateConfig.allow_broken_conditionals:
+ _display.deprecated(
+ msg='Empty conditional expression was evaluated as True.',
+ help_text=self._BROKEN_CONDITIONAL_ALLOWED_FRAGMENT,
+ obj=conditional,
+ version='2.23',
+ )
+
+ return True
+
+ raise AnsibleBrokenConditionalError("Empty conditional expressions are not allowed.", obj=conditional)
+
+ return conditional
+
+ def _normalize_and_evaluate_conditional(self, conditional: str | bool) -> t.Any:
+ """Validate and normalize a conditional input value, resolving allowed embedded template cases and evaluating the resulting expression."""
+ conditional = self._strip_conditional_handle_empty(conditional)
+
+ # this must follow `_strip_conditional_handle_empty`, since None/empty are coerced to bool (deprecated)
+ if type(conditional) is bool: # pylint: disable=unidiomatic-typecheck
+ return conditional
+
+ try:
+ if not isinstance(conditional, str):
+ if _TemplateConfig.allow_broken_conditionals:
+ # because the input isn't a string, the result will never be a bool; the broken conditional warning in the caller will apply on the result
+ return self.template(conditional, mode=TemplateMode.ALWAYS_FINALIZE)
+
+ raise AnsibleBrokenConditionalError(message="Conditional expressions must be strings.", obj=conditional)
+
+ if is_possibly_all_template(conditional):
+ # Indirection of trusted expressions is always allowed. If the expression appears to be entirely wrapped in template delimiters,
+ # we must resolve it. e.g. `when: "{{ some_var_resolving_to_a_trusted_expression_string }}"`.
+ # Some invalid meta-templating corner cases may sneak through here (e.g., `when: '{{ "foo" }} == {{ "bar" }}'`); these will
+ # result in an untrusted expression error.
+ result = self.template(conditional, mode=TemplateMode.ALWAYS_FINALIZE)
+ result = self._strip_conditional_handle_empty(result)
+
+ if not isinstance(result, str):
+ _display.deprecated(msg=self._CONDITIONAL_AS_TEMPLATE_MSG, obj=conditional, version='2.23')
+
+ return result # not an expression
+
+ # The only allowed use of templates for conditionals is for indirect usage of an expression.
+ # Any other usage should simply be an expression, not an attempt at meta templating.
+ expression = result
+ else:
+ expression = conditional
+
+ # Disable escape_backslashes when processing conditionals, to maintain backwards compatibility.
+ # This is necessary because conditionals were previously evaluated using {% %}, which was *NOT* affected by escape_backslashes.
+ # Now that conditionals use expressions, they would be affected by escape_backslashes if it was not disabled.
+ return self.evaluate_expression(expression, escape_backslashes=False, _render_jinja_const_template=True)
+
+ except AnsibleUndefinedVariable as ex:
+ # DTFIX-FUTURE: we're only augmenting the message for context here; once we have proper contextual tracking, we can dump the re-raise
+ raise AnsibleUndefinedVariable("Error while evaluating conditional.", obj=conditional) from ex
+
+ def evaluate_conditional(self, conditional: str | bool) -> bool:
+ """
+ Evaluate a trusted string expression or boolean and return its boolean result. A non-boolean result will raise `AnsibleBrokenConditionalError`.
+ The ALLOW_BROKEN_CONDITIONALS configuration option can temporarily relax this requirement, allowing truthy conditionals to succeed.
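+        For example (illustrative): evaluate_conditional("x == 1") returns True when x is 1; a non-boolean result such as "abc" raises unless broken conditionals are allowed.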
+ """
+ result = self._normalize_and_evaluate_conditional(conditional)
+
+ if isinstance(result, bool):
+ return result
+
+ bool_result = bool(result)
+
+ msg = (
+ f'Conditional result was {textwrap.shorten(str(result), width=40)!r} of type {native_type_name(result)!r}, '
+ f'which evaluates to {bool_result}. Conditionals must have a boolean result.'
+ )
+
+ if _TemplateConfig.allow_broken_conditionals:
+ _display.deprecated(
+ msg=msg,
+ obj=conditional,
+ help_text=self._BROKEN_CONDITIONAL_ALLOWED_FRAGMENT,
+ version='2.23',
+ )
+
+ return bool_result
+
+ raise AnsibleBrokenConditionalError(msg, obj=conditional)
+
+ @staticmethod
+ def _trust_check(value: str, skip_handler: bool = False) -> bool:
+ """
+ Return True if the given value is trusted for templating, otherwise return False.
+ When the value is not trusted, a warning or error may be generated, depending on configuration.
+ """
+ if TrustedAsTemplate.is_tagged_on(value):
+ return True
+
+ if not skip_handler:
+ with Skippable, _TemplateConfig.untrusted_template_handler.handle(TemplateTrustCheckFailedError, skip_on_ignore=True):
+ raise TemplateTrustCheckFailedError(obj=value)
+
+ return False
diff --git a/lib/ansible/_internal/_templating/_errors.py b/lib/ansible/_internal/_templating/_errors.py
new file mode 100644
index 00000000000..587b63f6b25
--- /dev/null
+++ b/lib/ansible/_internal/_templating/_errors.py
@@ -0,0 +1,28 @@
+from __future__ import annotations
+
+from ansible.errors import AnsibleTemplatePluginError
+
+
+class AnsibleTemplatePluginRuntimeError(AnsibleTemplatePluginError):
+ """The specified template plugin (lookup/filter/test) raised an exception during execution."""
+
+ def __init__(self, plugin_type: str, plugin_name: str) -> None:
+ super().__init__(f'The {plugin_type} plugin {plugin_name!r} failed.')
+
+
+class AnsibleTemplatePluginLoadError(AnsibleTemplatePluginError):
+ """The specified template plugin (lookup/filter/test) failed to load."""
+
+ def __init__(self, plugin_type: str, plugin_name: str) -> None:
+ super().__init__(f'The {plugin_type} plugin {plugin_name!r} failed to load.')
+
+
+class AnsibleTemplatePluginNotFoundError(AnsibleTemplatePluginError, KeyError):
+ """
+ The specified template plugin (lookup/filter/test) was not found.
+ This exception extends KeyError since Jinja filter/test resolution requires a KeyError to detect missing plugins.
+ Jinja compilation fails if a non-KeyError is raised for a missing filter/test, even if the plugin will not be invoked (inconsistent with stock Jinja).
+ """
+
+ def __init__(self, plugin_type: str, plugin_name: str) -> None:
+ super().__init__(f'The {plugin_type} plugin {plugin_name!r} was not found.')
diff --git a/lib/ansible/_internal/_templating/_jinja_bits.py b/lib/ansible/_internal/_templating/_jinja_bits.py
new file mode 100644
index 00000000000..1190bbef60f
--- /dev/null
+++ b/lib/ansible/_internal/_templating/_jinja_bits.py
@@ -0,0 +1,1106 @@
+from __future__ import annotations
+
+import ast
+import collections.abc as c
+import dataclasses
+import enum
+import pathlib
+import tempfile
+import types
+import typing as t
+
+from collections import ChainMap
+
+import jinja2.nodes
+
+from jinja2 import pass_context, defaults, TemplateSyntaxError, FileSystemLoader
+from jinja2.environment import Environment, Template, TemplateModule, TemplateExpression
+from jinja2.compiler import Frame
+from jinja2.lexer import TOKEN_VARIABLE_BEGIN, TOKEN_VARIABLE_END, TOKEN_STRING, Lexer
+from jinja2.nativetypes import NativeCodeGenerator
+from jinja2.nodes import Const, EvalContext
+from jinja2.runtime import Context, Macro
+from jinja2.sandbox import SandboxedEnvironment
+from jinja2.utils import missing, LRUCache
+
+from ansible.utils.display import Display
+from ansible.errors import AnsibleVariableTypeError, AnsibleTemplateSyntaxError, AnsibleTemplateError
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils._internal._datatag import (
+ _AnsibleTaggedDict,
+ _AnsibleTaggedList,
+ _AnsibleTaggedTuple,
+ _AnsibleTaggedStr,
+ AnsibleTagHelper,
+)
+
+from ansible._internal._errors._handler import ErrorAction
+from ansible._internal._datatag._tags import Origin, TrustedAsTemplate
+
+from ._access import AnsibleAccessContext
+from ._datatag import _JinjaConstTemplate
+from ._utils import LazyOptions
+from ._jinja_common import (
+ MarkerError,
+ Marker,
+ CapturedExceptionMarker,
+ UndefinedMarker,
+ _TemplateConfig,
+ TruncationMarker,
+ validate_arg_type,
+ JinjaCallContext,
+ _SandboxMode,
+)
+from ._jinja_plugins import JinjaPluginIntercept, _query, _lookup, _now, _wrap_plugin_output, get_first_marker_arg, _DirectCall, _jinja_const_template_warning
+from ._lazy_containers import (
+ _AnsibleLazyTemplateMixin,
+ _AnsibleLazyTemplateDict,
+ _AnsibleLazyTemplateList,
+ _AnsibleLazyAccessTuple,
+ lazify_container_args,
+ lazify_container_kwargs,
+ lazify_container,
+ register_known_types,
+)
+from ._utils import Omit, TemplateContext, PASS_THROUGH_SCALAR_VAR_TYPES
+
+from ansible.module_utils._internal._json._profiles import _json_subclassable_scalar_types
+from ansible.module_utils import _internal
+from ansible.module_utils._internal import _ambient_context, _dataclass_validation
+from ansible.plugins.loader import filter_loader, test_loader
+from ansible.vars.hostvars import HostVars, HostVarsVars
+from ...module_utils.datatag import native_type_name
+
+JINJA2_OVERRIDE = '#jinja2:'
+"""
+String values prefixed with this sequence are interpreted as templates, even without template delimiters.
+The values following this prefix up to the first newline are parsed as Jinja2 template overrides.
+To include this literal value at the start of a string, a space or other character must precede it.
+"""
+
+JINJA_KEYWORDS = frozenset(
+ {
+ # scalar singletons (see jinja2.nodes.Name.can_assign)
+ 'true',
+ 'false',
+ 'none',
+ 'True',
+ 'False',
+ 'None',
+ # other
+ 'not', # unary operator always applicable to names
+ }
+)
+"""Names which have special meaning to Jinja and cannot be resolved as variable names."""
+
+display = Display()
+
+
+@dataclasses.dataclass(kw_only=True, slots=True, frozen=True)
+class TemplateOverrides:
+ DEFAULT: t.ClassVar[t.Self]
+
+ block_start_string: str = defaults.BLOCK_START_STRING
+ block_end_string: str = defaults.BLOCK_END_STRING
+ variable_start_string: str = defaults.VARIABLE_START_STRING
+ variable_end_string: str = defaults.VARIABLE_END_STRING
+ comment_start_string: str = defaults.COMMENT_START_STRING
+ comment_end_string: str = defaults.COMMENT_END_STRING
+ line_statement_prefix: str | None = defaults.LINE_STATEMENT_PREFIX
+ line_comment_prefix: str | None = defaults.LINE_COMMENT_PREFIX
+ trim_blocks: bool = True # AnsibleEnvironment overrides this default, so don't use the Jinja default here
+ lstrip_blocks: bool = defaults.LSTRIP_BLOCKS
+ newline_sequence: t.Literal['\n', '\r\n', '\r'] = defaults.NEWLINE_SEQUENCE
+ keep_trailing_newline: bool = defaults.KEEP_TRAILING_NEWLINE
+
+ def __post_init__(self) -> None:
+ pass # overridden by _dataclass_validation._inject_post_init_validation
+
+ def _post_validate(self) -> None:
+ if not (self.block_start_string != self.variable_start_string != self.comment_start_string != self.block_start_string):
+ raise ValueError('Block, variable and comment start strings must be different.')
+
+ def overlay_kwargs(self) -> dict[str, t.Any]:
+ """
+ Return a dictionary of arguments for passing to Environment.overlay.
+ The dictionary will be empty if all fields have their default value.
+ """
+ # DTFIX-FUTURE: calculate default/non-default during __post_init__
+ fields = [(field, getattr(self, field.name)) for field in dataclasses.fields(self)]
+ kwargs = {field.name: value for field, value in fields if value != field.default}
+
+ return kwargs
+
+ def _contains_start_string(self, value: str) -> bool:
+ """Returns True if the given value contains a variable, block or comment start string."""
+ # DTFIX-FUTURE: this is inefficient, use a compiled regex instead
+
+ for marker in (self.block_start_string, self.variable_start_string, self.comment_start_string):
+ if marker in value:
+ return True
+
+ return False
+
+ def _starts_and_ends_with_jinja_delimiters(self, value: str) -> bool:
+ """Returns True if the given value starts and ends with Jinja variable, block or comment delimiters."""
+ # DTFIX-FUTURE: this is inefficient, use a compiled regex instead
+
+ for marker in (self.block_start_string, self.variable_start_string, self.comment_start_string):
+ if value.startswith(marker):
+ break
+ else:
+ return False
+
+ for marker in (self.block_end_string, self.variable_end_string, self.comment_end_string):
+ if value.endswith(marker):
+ return True
+
+ return False
+
+ def _extract_template_overrides(self, template: str) -> tuple[str, TemplateOverrides]:
+ if template.startswith(JINJA2_OVERRIDE):
+ eol = template.find('\n')
+
+ if eol == -1:
+ raise ValueError(f"Missing newline after {JINJA2_OVERRIDE!r} override.")
+
+ line = template[len(JINJA2_OVERRIDE) : eol]
+ template = template[eol + 1 :]
+ override_kwargs = {}
+
+ for pair in line.split(','):
+ if not pair.strip():
+ raise ValueError(f"Empty {JINJA2_OVERRIDE!r} override pair not allowed.")
+
+ if ':' not in pair:
+ raise ValueError(f"Missing key-value separator `:` in {JINJA2_OVERRIDE!r} override pair {pair!r}.")
+
+ key, val = pair.split(':', 1)
+ key = key.strip()
+
+ if key not in _TEMPLATE_OVERRIDE_FIELD_NAMES:
+ raise ValueError(f"Invalid {JINJA2_OVERRIDE!r} override key {key!r}.")
+
+ override_kwargs[key] = ast.literal_eval(val)
+
+ overrides = dataclasses.replace(self, **override_kwargs)
+ else:
+ overrides = self
+
+ return template, overrides
+
+ def merge(self, kwargs: dict[str, t.Any] | None, /) -> TemplateOverrides:
+ """Return a new instance based on the current instance with the given kwargs overridden."""
+ if kwargs:
+ return self.from_kwargs(dataclasses.asdict(self) | kwargs)
+
+ return self
+
+ @classmethod
+ def from_kwargs(cls, kwargs: dict[str, t.Any] | None, /) -> TemplateOverrides:
+ """TemplateOverrides instance factory; instances resolving to all default values will instead return the DEFAULT singleton for optimization."""
+ if kwargs:
+ value = cls(**kwargs)
+
+ if value.overlay_kwargs():
+ return value
+
+ return cls.DEFAULT
+
+
+_dataclass_validation.inject_post_init_validation(TemplateOverrides, allow_subclasses=True)
+
+TemplateOverrides.DEFAULT = TemplateOverrides()
+
+_TEMPLATE_OVERRIDE_FIELD_NAMES: t.Final[tuple[str, ...]] = tuple(sorted(field.name for field in dataclasses.fields(TemplateOverrides)))
+
+
+class AnsibleContext(Context):
+ """
+ A custom context which intercepts resolve_or_missing() calls and
+ runs them through AnsibleAccessContext. This allows usage of variables
+ to be tracked. If needed, values can also be modified before being returned.
+ """
+
+ environment: AnsibleEnvironment # narrow the type specified by the base
+
+ def __init__(self, *args, **kwargs):
+ super(AnsibleContext, self).__init__(*args, **kwargs)
+
+ __repr__ = object.__repr__ # prevent Jinja from dumping vars in case this gets repr'd
+
+ def get_all(self):
+ """
+ Override Jinja's default get_all to return all vars in the context as a ChainMap with a mutable layer at the bottom.
+ This provides some isolation against accidental changes to inherited variable contexts without requiring copies.
+ """
+ layers = []
+
+ if self.vars:
+ layers.append(self.vars)
+ if self.parent:
+ layers.append(self.parent)
+
+ # HACK: always include a sacrificial plain-dict on the bottom layer, since Jinja's debug and stacktrace rewrite code invokes
+ # `__setitem__` outside a call context; this will ensure that it always occurs on a plain dict instead of a lazy one.
+ return ChainMap({}, *layers)
+
+ # noinspection PyShadowingBuiltins
+ def derived(self, locals: t.Optional[t.Dict[str, t.Any]] = None) -> Context:
+ # this is a clone of Jinja's impl of derived, but using our lazy-aware _new_context
+
+ context = _new_context(
+ environment=self.environment,
+ template_name=self.name,
+ blocks={},
+ shared=True,
+ jinja_locals=locals,
+ jinja_vars=self.get_all(),
+ )
+ context.eval_ctx = self.eval_ctx
+ context.blocks.update((k, list(v)) for k, v in self.blocks.items())
+ return context
+
+ def keys(self, *args, **kwargs):
+ """Base Context delegates to `dict.keys` against `get_all`, which would fail since we return a ChainMap. No known usage."""
+ raise NotImplementedError()
+
+ def values(self, *args, **kwargs):
+ """Base Context delegates to `dict.values` against `get_all`, which would fail since we return a ChainMap. No known usage."""
+ raise NotImplementedError()
+
+ def items(self, *args, **kwargs):
+ """Base Context delegates to built-in `dict.items` against `get_all`, which would fail since we return a ChainMap. No known usage."""
+ raise NotImplementedError()
+
+
+@dataclasses.dataclass(frozen=True, kw_only=True, slots=True)
+class ArgSmuggler:
+ """
+ Utility wrapper to wrap/unwrap args passed to Jinja `Template.render` and `TemplateExpression.__call__`.
+ e.g., see https://github.com/pallets/jinja/blob/3.1.3/src/jinja2/environment.py#L1296 and
+ https://github.com/pallets/jinja/blob/3.1.3/src/jinja2/environment.py#L1566.
+ """
+
+ jinja_vars: c.Mapping[str, t.Any] | None
+
+ @classmethod
+ def package_jinja_vars(cls, jinja_vars: c.Mapping[str, t.Any]) -> dict[str, ArgSmuggler]:
+ """Wrap the supplied vars dict in an ArgSmuggler to prevent premature templating from Jinja's internal dict copy."""
+ return dict(_smuggled_vars=ArgSmuggler(jinja_vars=jinja_vars))
+
+ @classmethod
+ def extract_jinja_vars(cls, maybe_smuggled_vars: c.Mapping[str, t.Any] | None) -> c.Mapping[str, t.Any]:
+ """
+ If the supplied vars dict contains an ArgSmuggler instance with the expected key, unwrap it and return the smuggled value.
+ Otherwise, return the supplied dict as-is.
+ """
+ if maybe_smuggled_vars and ((smuggler := maybe_smuggled_vars.get('_smuggled_vars')) and isinstance(smuggler, ArgSmuggler)):
+ return smuggler.jinja_vars
+
+ return maybe_smuggled_vars
+
+
+class AnsibleTemplateExpression:
+ """
+ Wrapper around Jinja's TemplateExpression for converting MarkerError back into Marker.
+ This is needed to make expression error handling consistent with templates, since Jinja does not support a custom type for Environment.compile_expression.
+ """
+
+ def __init__(self, template_expression: TemplateExpression) -> None:
+ self._template_expression = template_expression
+
+ def __call__(self, jinja_vars: c.Mapping[str, t.Any]) -> t.Any:
+ try:
+ return self._template_expression(ArgSmuggler.package_jinja_vars(jinja_vars))
+ except MarkerError as ex:
+ return ex.source
+
+
+class AnsibleTemplate(Template):
+ """
+ A helper class, which prevents Jinja2 from running lazy containers through dict().
+ """
+
+ _python_source_temp_path: pathlib.Path | None = None
+
+ def __del__(self):
+ # DTFIX-FUTURE: this still isn't working reliably; something else must be keeping the template object alive
+ if self._python_source_temp_path:
+ self._python_source_temp_path.unlink(missing_ok=True)
+
+ def __call__(self, jinja_vars: c.Mapping[str, t.Any]) -> t.Any:
+ return self.render(ArgSmuggler.package_jinja_vars(jinja_vars))
+
+ # noinspection PyShadowingBuiltins
+ def new_context(
+ self,
+ vars: c.Mapping[str, t.Any] | None = None,
+ shared: bool = False,
+ locals: c.Mapping[str, t.Any] | None = None,
+ ) -> Context:
+ return _new_context(
+ environment=self.environment,
+ template_name=self.name,
+ blocks=self.blocks,
+ shared=shared,
+ jinja_locals=locals,
+ jinja_vars=ArgSmuggler.extract_jinja_vars(vars),
+ jinja_globals=self.globals,
+ )
+
+
+class AnsibleCodeGenerator(NativeCodeGenerator):
+ """
+    Custom code generation behavior to support deprecated Ansible features and fill in gaps in Jinja's native-types support.
+ This can be removed once the deprecated Ansible features are removed and the native fixes are upstreamed in Jinja.
+ """
+
+ def _output_const_repr(self, group: t.Iterable[t.Any]) -> str:
+ """
+ Prevent Jinja's code generation from stringifying single nodes before generating its repr.
+ This complements the behavioral change in AnsibleEnvironment.concat which returns single nodes without stringifying them.
+ """
+ # DTFIX-FUTURE: contribute this upstream as a fix to Jinja's native support
+ group_list = list(group)
+
+ if len(group_list) == 1:
+ return repr(group_list[0])
+
+ # NB: This is slightly more efficient than Jinja's _output_const_repr, which generates a throw-away list instance to pass to join.
+ # Before removing this, ensure that upstream Jinja has this change.
+ return repr("".join(map(str, group_list)))
+
+ def visit_Const(self, node: Const, frame: Frame) -> None:
+ """
+ Override Jinja's visit_Const to inject a runtime call to AnsibleEnvironment._access_const for constant strings that are possibly templates, which
+ may require special handling at runtime. See that method for details. An example that hits this path:
+ {{ lookup("file", "{{ output_dir }}/bla") }}
+ """
+ value = node.as_const(frame.eval_ctx)
+
+ if _TemplateConfig.allow_embedded_templates and type(value) is str and is_possibly_template(value): # pylint: disable=unidiomatic-typecheck
+ # deprecated: description='embedded Jinja constant string template support' core_version='2.23'
+ self.write(f'environment._access_const({value!r})')
+ else:
+ # NB: This is actually more efficient than Jinja's visit_Const, which contains obsolete (as of Py2.7/3.1) float conversion instance checks. Before
+ # removing this override entirely, ensure that upstream Jinja has removed the obsolete code.
+ # See https://docs.python.org/release/2.7/whatsnew/2.7.html#python-3-1-features for more details.
+ self.write(repr(value))
+
+
+@pass_context
+def _ansible_finalize(_ctx: AnsibleContext, value: t.Any) -> t.Any:
+ """
+ This function is called by Jinja with the result of each variable template block (e.g., {{ }}) encountered in a template.
+ The pass_context decorator prevents finalize from being called on constants at template compile time.
+ The passed in AnsibleContext is unused -- it is the result of using the pass_context decorator.
+ The important part for us is that this blocks constant folding, which ensures our custom visit_Const is used.
+ It also ensures that template results are wrapped in lazy containers.
+ """
+ return lazify_container(value)
+
+
+@dataclasses.dataclass(kw_only=True, slots=True)
+class _TemplateCompileContext(_ambient_context.AmbientContextBase):
+ """
+ This context is active during Ansible's explicit compilation of templates/expressions, but not during Jinja's runtime compilation.
+ Historically, Ansible-specific pre-processing like `escape_backslashes` was not applied to imported/included templates.
+ """
+
+ escape_backslashes: bool
+
+
+class _CompileStateSmugglingCtx(_ambient_context.AmbientContextBase):
+ template_source: str | None = None
+ python_source: str | None = None
+ python_source_temp_path: pathlib.Path | None = None
+
+
+class AnsibleLexer(Lexer):
+ """
+ Lexer override to escape backslashes in string constants within Jinja expressions; prevents Jinja from double-escaping them.
+
+ NOTE: This behavior is only applied to string constants within Jinja expressions (eg {{ "c:\newfile" }}), *not* statements ("{% set foo="c:\\newfile" %}").
+
+ This is useful when templates are sourced from YAML double-quoted strings, as it avoids having backslashes processed twice: first by the
+ YAML parser, and then again by the Jinja parser. Instead, backslashes are only processed by YAML.
+
+ Example YAML:
+
+ - debug:
+ msg: "Test Case 1\\3; {{ test1_name | regex_replace('^(.*)_name$', '\\1')}}"
+
+ Since the outermost YAML string is double-quoted, the YAML parser converts the double backslashes to single backslashes. Without escaping, Jinja
+ would see only a single backslash ('\1') while processing the embedded template expression, interpret it as an escape sequence, and convert it
+ to '\x01' (ASCII "SOH"). This is clearly not the intended `\1` backreference argument to the `regex_replace` filter (which would require the
+ double-escaped string '\\\\1' to yield the intended result).
+
+ Since the "\\3" in the input YAML was not part of a template expression, the YAML-parsed "\3" remains after Jinja rendering. This would be
+ confusing for playbook authors, as different escaping rules would be needed inside and outside the template expression.
+
+ When templates are not sourced from YAML, escaping backslashes will prevent use of backslash escape sequences such as "\n" and "\t".
+
+ See relevant Jinja lexer impl at e.g.: https://github.com/pallets/jinja/blob/3.1.2/src/jinja2/lexer.py#L646-L653.
+ """
+
+ def tokeniter(self, *args, **kwargs) -> t.Iterator[t.Tuple[int, str, str]]:
+ """Pre-escape backslashes in expression ({{ }}) raw string constants before Jinja's Lexer.wrap() can interpret them as ASCII escape sequences."""
+ token_stream = super().tokeniter(*args, **kwargs)
+
+ # if we have no context, Jinja's doing a nested compile at runtime (eg, import/include); historically, no backslash escaping is performed
+ if not (tcc := _TemplateCompileContext.current(optional=True)) or not tcc.escape_backslashes:
+ yield from token_stream
+ return
+
+ in_variable = False
+
+ for token in token_stream:
+ token_type = token[1]
+
+ if token_type == TOKEN_VARIABLE_BEGIN:
+ in_variable = True
+ elif token_type == TOKEN_VARIABLE_END:
+ in_variable = False
+ elif in_variable and token_type == TOKEN_STRING:
+ token = token[0], token_type, token[2].replace('\\', '\\\\')
+
+ yield token
+
+
+def defer_template_error(ex: Exception, variable: t.Any, *, is_expression: bool) -> Marker:
+ if not ex.__traceback__:
+ raise AssertionError('ex must be a previously raised exception')
+
+ if isinstance(ex, MarkerError):
+ return ex.source
+
+ exception_to_raise = create_template_error(ex, variable, is_expression)
+
+ if exception_to_raise is ex:
+ return CapturedExceptionMarker(ex) # capture the previously raised exception
+
+ try:
+ raise exception_to_raise from ex # raise the newly synthesized exception before capturing it
+ except Exception as captured_ex:
+ return CapturedExceptionMarker(captured_ex)
+
+
+def create_template_error(ex: Exception, variable: t.Any, is_expression: bool) -> AnsibleTemplateError:
+ if isinstance(ex, AnsibleTemplateError):
+ exception_to_raise = ex
+ else:
+ kind = "expression" if is_expression else "template"
+ ex_type = AnsibleTemplateError # always raise an AnsibleTemplateError/subclass
+
+ if isinstance(ex, RecursionError):
+ msg = f"Recursive loop detected in {kind}."
+ elif isinstance(ex, TemplateSyntaxError):
+ msg = f"Syntax error in {kind}."
+
+ if is_expression and is_possibly_template(variable):
+ msg += " Template delimiters are not supported in expressions."
+
+ ex_type = AnsibleTemplateSyntaxError
+ else:
+ msg = f"Error rendering {kind}."
+
+ exception_to_raise = ex_type(msg, obj=variable)
+
+ if exception_to_raise.obj is None:
+ exception_to_raise.obj = TemplateContext.current().template_value
+
+ # DTFIX-FUTURE: Look through the TemplateContext hierarchy to find the most recent non-template
+ # caller and use that for origin when no origin is available on obj. This could be useful for situations where the template
+ # was embedded in a plugin, or a plugin is otherwise responsible for losing the origin and/or trust. We can't just use the first
+ # non-template caller as that will lead to false positives for re-entrant calls (e.g. template plugins that call into templar).
+
+ return exception_to_raise
+
+
+_BUILTIN_FILTER_ALIASES: dict[str, str] = {}
+_BUILTIN_TEST_ALIASES: dict[str, str] = {
+ '!=': 'ne',
+ '<': 'lt',
+ '<=': 'le',
+ '==': 'eq',
+ '>': 'gt',
+ '>=': 'ge',
+}
+
+_BUILTIN_FILTERS = filter_loader._wrap_funcs(defaults.DEFAULT_FILTERS, _BUILTIN_FILTER_ALIASES)
+_BUILTIN_TESTS = test_loader._wrap_funcs(t.cast(dict[str, t.Callable], defaults.DEFAULT_TESTS), _BUILTIN_TEST_ALIASES)
+
+
+class AnsibleEnvironment(SandboxedEnvironment):
+ """
+    Our custom environment, which overrides the class-level Template, Context, and code generator types used by
+    Jinja2 internally, and layers on Ansible-specific sandboxing, plugin interception, and lazy templating behavior.
+ """
+
+ context_class = AnsibleContext
+ template_class = AnsibleTemplate
+ code_generator_class = AnsibleCodeGenerator
+ intercepted_binops = frozenset(('eq',))
+
+ _allowed_unsafe_attributes: dict[str, type | tuple[type, ...]] = dict(
+ # Allow bitwise operations on int until bitwise filters are available.
+ # see: https://github.com/ansible/ansible/issues/85204
+ __and__=int,
+ __lshift__=int,
+ __or__=int,
+ __rshift__=int,
+ __xor__=int,
+ )
+ """
+ Attributes which are considered unsafe by `is_safe_attribute`, which should be allowed when used on specific types.
+ The attributes allowed here are intended only for backward compatibility with existing use cases.
+ They should be exposed as filters in a future release and eventually deprecated.
+ """
+
+ _lexer_cache = LRUCache(50)
+
+ # DTFIX-FUTURE: bikeshed a name/mechanism to control template debugging
+ _debuggable_template_source = False
+ _debuggable_template_source_path: pathlib.Path = pathlib.Path(__file__).parent.parent.parent.parent / '.template_debug_source'
+
+ def __init__(self, *args, ansible_basedir: str | None = None, **kwargs) -> None:
+ if ansible_basedir:
+ kwargs.update(loader=FileSystemLoader(ansible_basedir))
+
+ super().__init__(*args, extensions=_TemplateConfig.jinja_extensions, **kwargs)
+
+ self.filters = JinjaPluginIntercept(_BUILTIN_FILTERS, filter_loader) # type: ignore[assignment]
+ self.tests = JinjaPluginIntercept(_BUILTIN_TESTS, test_loader) # type: ignore[assignment,arg-type]
+
+ # future Jinja releases may default-enable autoescape; force-disable to prevent the problems it could cause
+ # see https://github.com/pallets/jinja/blob/3.1.2/docs/api.rst?plain=1#L69
+ self.autoescape = False
+
+ self.trim_blocks = True
+
+ self.undefined = UndefinedMarker
+ self.finalize = _ansible_finalize
+
+ self.globals.update(
+ range=range, # the sandboxed environment limits range in ways that may cause us problems; use the real Python one
+ now=_now,
+ undef=_undef,
+ omit=Omit,
+ lookup=_lookup,
+ query=_query,
+ q=_query,
+ )
+
+        # Disabling the optimizer prevents compile-time constant expression folding, which would otherwise defeat our
+        # visit_Const recursive inline template expansion tricks in many cases, since Jinja (unaware of our embedded
+        # templates) would fold them away as fully-constant expressions,
+ # eg {{ "{{'hi'}}" == "hi" }}. As of Jinja ~3.1, this specifically avoids cases where the @optimizeconst
+ # visitor decorator performs constant folding, which bypasses our visit_Const impl and causes embedded
+ # templates to be lost.
+ # See also optimizeconst impl: https://github.com/pallets/jinja/blob/3.1.0/src/jinja2/compiler.py#L48-L49
+ self.optimized = False
+
+ def get_template(
+ self,
+ name: str | Template,
+ parent: str | None = None,
+ globals: c.MutableMapping[str, t.Any] | None = None,
+ ) -> Template:
+ """Ensures that templates built via `get_template` are also source debuggable."""
+ with _CompileStateSmugglingCtx.when(self._debuggable_template_source) as ctx:
+ template_obj = t.cast(AnsibleTemplate, super().get_template(name, parent, globals))
+
+ if isinstance(ctx, _CompileStateSmugglingCtx): # only present if debugging is enabled
+ template_obj._python_source_temp_path = ctx.python_source_temp_path # facilitate deletion of the temp file when template_obj is deleted
+
+ return template_obj
+
+ def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
+ # deprecated: description="remove relaxed template sandbox mode support" core_version="2.23"
+ if _TemplateConfig.sandbox_mode == _SandboxMode.ALLOW_UNSAFE_ATTRIBUTES:
+ return True
+
+ if (type_or_tuple := self._allowed_unsafe_attributes.get(attr)) and isinstance(obj, type_or_tuple):
+ return True
+
+ return super().is_safe_attribute(obj, attr, value)
+
+ @property
+ def lexer(self) -> AnsibleLexer:
+ """Return/cache an AnsibleLexer with settings from the current AnsibleEnvironment"""
+ # DTFIX-FUTURE: optimization - we should pre-generate the default cached lexer before forking, not leave it to chance (e.g. simple playbooks)
+ key = tuple(getattr(self, name) for name in _TEMPLATE_OVERRIDE_FIELD_NAMES)
+
+ lex = self._lexer_cache.get(key)
+
+ if lex is None:
+ self._lexer_cache[key] = lex = AnsibleLexer(self)
+
+ return lex
+
+ def call_filter(
+ self,
+ name: str,
+ value: t.Any,
+ args: c.Sequence[t.Any] | None = None,
+ kwargs: c.Mapping[str, t.Any] | None = None,
+ context: Context | None = None,
+ eval_ctx: EvalContext | None = None,
+ ) -> t.Any:
+ """
+ Ensure that filters directly invoked by plugins will see non-templating lazy containers.
+ Without this, `_wrap_filter` will wrap `args` and `kwargs` in templating lazy containers.
+ This provides consistency with plugin output handling by preventing auto-templating of trusted templates passed in native containers.
+ """
+ # DTFIX-FUTURE: need better logic to handle non-list/non-dict inputs for args/kwargs
+ args = _AnsibleLazyTemplateMixin._try_create(list(args or []), LazyOptions.SKIP_TEMPLATES)
+ kwargs = _AnsibleLazyTemplateMixin._try_create(kwargs, LazyOptions.SKIP_TEMPLATES)
+
+ return super().call_filter(name, value, args, kwargs, context, eval_ctx)
+
+ def call_test(
+ self,
+ name: str,
+ value: t.Any,
+ args: c.Sequence[t.Any] | None = None,
+ kwargs: c.Mapping[str, t.Any] | None = None,
+ context: Context | None = None,
+ eval_ctx: EvalContext | None = None,
+ ) -> t.Any:
+ """
+ Ensure that tests directly invoked by plugins will see non-templating lazy containers.
+ Without this, `_wrap_test` will wrap `args` and `kwargs` in templating lazy containers.
+ This provides consistency with plugin output handling by preventing auto-templating of trusted templates passed in native containers.
+ """
+ # DTFIX-FUTURE: need better logic to handle non-list/non-dict inputs for args/kwargs
+ args = _AnsibleLazyTemplateMixin._try_create(list(args or []), LazyOptions.SKIP_TEMPLATES)
+ kwargs = _AnsibleLazyTemplateMixin._try_create(kwargs, LazyOptions.SKIP_TEMPLATES)
+
+ return super().call_test(name, value, args, kwargs, context, eval_ctx)
+
+ def compile_expression(self, source: str, *args, **kwargs) -> TemplateExpression:
+ # compile_expression parses and passes the tree to from_string; for debug support, activate the context here to capture the intermediate results
+ with _CompileStateSmugglingCtx.when(self._debuggable_template_source) as ctx:
+ if isinstance(ctx, _CompileStateSmugglingCtx): # only present if debugging is enabled
+ ctx.template_source = source
+
+ return super().compile_expression(source, *args, **kwargs)
+
+ def from_string(self, source: str | jinja2.nodes.Template, *args, **kwargs) -> AnsibleTemplate:
+ # if debugging is enabled, use existing context when present (e.g., from compile_expression)
+ current_ctx = _CompileStateSmugglingCtx.current(optional=True) if self._debuggable_template_source else None
+
+ with _CompileStateSmugglingCtx.when(self._debuggable_template_source and not current_ctx) as new_ctx:
+ template_obj = t.cast(AnsibleTemplate, super().from_string(source, *args, **kwargs))
+
+ if isinstance(ctx := current_ctx or new_ctx, _CompileStateSmugglingCtx): # only present if debugging is enabled
+ template_obj._python_source_temp_path = ctx.python_source_temp_path # facilitate deletion of the temp file when template_obj is deleted
+
+ return template_obj
+
+ def _parse(self, source: str, *args, **kwargs) -> jinja2.nodes.Template:
+ if csc := _CompileStateSmugglingCtx.current(optional=True):
+ csc.template_source = source
+
+ return super()._parse(source, *args, **kwargs)
+
+ def _compile(self, source: str, filename: str) -> types.CodeType:
+ if csc := _CompileStateSmugglingCtx.current(optional=True):
+ origin = Origin.get_tag(csc.template_source) or Origin.UNKNOWN
+
+ source = '\n'.join(
+ (
+ "import sys; breakpoint() if type(sys.breakpointhook) is not type(breakpoint) else None",
+ f"# original template source from {str(origin)!r}: ",
+ '\n'.join(f'# {line}' for line in (csc.template_source or '').splitlines()),
+ source,
+ )
+ )
+
+ source_temp_dir = self._debuggable_template_source_path
+ source_temp_dir.mkdir(parents=True, exist_ok=True)
+
+ with tempfile.NamedTemporaryFile(dir=source_temp_dir, mode='w', suffix='.py', prefix='j2_src_', delete=False) as source_file:
+ filename = source_file.name
+
+ source_file.write(source)
+ source_file.flush()
+
+ csc.python_source = source
+ csc.python_source_temp_path = pathlib.Path(filename)
+
+ res = super()._compile(source, filename)
+
+ return res
+
+ @staticmethod
+ def concat(nodes: t.Iterable[t.Any]) -> t.Any: # type: ignore[override]
+ node_list = list(_flatten_nodes(nodes))
+
+ if not node_list:
+ return None
+
+ # this code is complemented by our tweaked CodeGenerator _output_const_repr that ensures that literal constants
+ # in templates aren't double-repr'd in the generated code
+ if len(node_list) == 1:
+ return node_list[0]
+
+ # In order to ensure that all markers are tripped, do a recursive finalize before we repr (otherwise we can end up
+ # repr'ing a Marker). This requires two passes, but avoids the need for a parallel reimplementation of all repr methods.
+ try:
+ node_list = _finalize_template_result(node_list, FinalizeMode.CONCAT)
+ except MarkerError as ex:
+ return ex.source # return the first Marker encountered
+
+ return ''.join([to_text(v) for v in node_list])
+
+ @staticmethod
+ def _access_const(const_template: t.LiteralString) -> t.Any:
+ """
+ Called during template rendering on template-looking string constants embedded in the template.
+ It provides the following functionality:
+ * Propagates origin from the containing template.
+ * For backward compatibility when embedded templates are enabled:
+ * Conditionals - Renders embedded template constants and accesses the result. Warns on each constant immediately.
+ * Non-conditionals - Tags constants for deferred rendering of templates in lookup terms. Warns on each constant during lookup invocation.
+ """
+ ctx = TemplateContext.current()
+
+ if (tv := ctx.template_value) and (origin := Origin.get_tag(tv)):
+ const_template = origin.tag(const_template)
+
+ if ctx._render_jinja_const_template:
+ _jinja_const_template_warning(const_template, is_conditional=True)
+
+ result = ctx.templar.template(TrustedAsTemplate().tag(const_template))
+ AnsibleAccessContext.current().access(result)
+ else:
+ # warnings will be issued when lookup terms processing occurs, to avoid false positives
+ result = _JinjaConstTemplate().tag(const_template)
+
+ return result
+
+ def getitem(self, obj: t.Any, argument: t.Any) -> t.Any:
+ value = super().getitem(obj, argument)
+
+ AnsibleAccessContext.current().access(value)
+
+ return value
+
+ def getattr(self, obj: t.Any, attribute: str) -> t.Any:
+ """
+ Get `attribute` from the attributes of `obj`, falling back to items in `obj`.
+ If no item was found, return a sandbox-specific `UndefinedMarker` if `attribute` is protected by the sandbox,
+ otherwise return a normal `UndefinedMarker` instance.
+ This differs from the built-in Jinja behavior which will not fall back to items if `attribute` is protected by the sandbox.
+ """
+ # example template that uses this: "{{ some.thing }}" -- obj is the "some" dict, attribute is "thing"
+
+ is_safe = True
+
+ try:
+ value = getattr(obj, attribute)
+ except AttributeError:
+ value = _sentinel
+ else:
+ if not (is_safe := self.is_safe_attribute(obj, attribute, value)):
+ value = _sentinel
+
+ if value is _sentinel:
+ try:
+ value = obj[attribute]
+ except (TypeError, LookupError):
+ return self.undefined(obj=obj, name=attribute) if is_safe else self.unsafe_undefined(obj, attribute)
+
+ AnsibleAccessContext.current().access(value)
+
+ return value
+
+ def call(
+ self,
+ __context: Context,
+ __obj: t.Any,
+ *args: t.Any,
+ **kwargs: t.Any,
+ ) -> t.Any:
+ try:
+ if _DirectCall.is_marked(__obj):
+ # Both `_lookup` and `_query` handle arg proxying and `Marker` args internally.
+ # Performing either before calling them will interfere with that processing.
+ return super().call(__context, __obj, *args, **kwargs)
+
+ # Jinja's generated macro code handles Markers, so preemptive raise on Marker args and lazy retrieval should be disabled for the macro invocation.
+ is_macro = isinstance(__obj, Macro)
+
+ if not is_macro and (first_marker := get_first_marker_arg(args, kwargs)) is not None:
+ return first_marker
+
+ with JinjaCallContext(accept_lazy_markers=is_macro):
+ call_res = super().call(__context, __obj, *lazify_container_args(args), **lazify_container_kwargs(kwargs))
+
+ if __obj is range:
+ # Preserve the ability to do `range(1000000000) | random` by not converting range objects to lists.
+ # Historically, range objects were only converted on Jinja finalize and filter outputs, so they've always been floating around in templating
+ # code and visible to user plugins.
+ return call_res
+
+ return _wrap_plugin_output(call_res)
+
+ except MarkerError as ex:
+ return ex.source
+ except Exception as ex:
+ return CapturedExceptionMarker(ex)
+
+
+AnsibleTemplate.environment_class = AnsibleEnvironment
+
+_DEFAULT_UNDEF = UndefinedMarker("Mandatory variable has not been overridden", _no_template_source=True)
+
+_sentinel: t.Final[object] = object()
+
+
+@_DirectCall.mark
+def _undef(hint: str | None = None) -> UndefinedMarker:
+ """Jinja2 global function (undef) for creating getting a `UndefinedMarker` instance, optionally with a custom hint."""
+ validate_arg_type('hint', hint, (str, type(None)))
+
+ if not hint:
+ return _DEFAULT_UNDEF
+
+ return UndefinedMarker(hint)
+
+
+def _flatten_nodes(nodes: t.Iterable[t.Any]) -> t.Iterable[t.Any]:
+ """
+ Yield nodes from a potentially recursive iterable of nodes.
+ The recursion is required to expand template imports (TemplateModule).
+ Any exception raised while consuming a template node will be yielded as a Marker for that node.
+ """
+ iterator = iter(nodes)
+
+ while True:
+ try:
+ node = next(iterator)
+ except StopIteration:
+ break
+ except Exception as ex:
+ yield defer_template_error(ex, TemplateContext.current().template_value, is_expression=False)
+ # DTFIX-FUTURE: We should be able to determine if truncation occurred by having the code generator smuggle out the number of expected nodes.
+ yield TruncationMarker()
+ else:
+ if type(node) is TemplateModule: # pylint: disable=unidiomatic-typecheck
+ yield from _flatten_nodes(node._body_stream)
+ else:
+ yield node
+
+
+def _flatten_and_lazify_vars(mapping: c.Mapping) -> t.Iterable[c.Mapping]:
+ """Prevent deeply-nested Jinja vars ChainMaps from being created by nested contexts and ensure that all top-level containers support lazy templating."""
+ mapping_type = type(mapping)
+ if mapping_type is ChainMap:
+ # noinspection PyUnresolvedReferences
+ for m in mapping.maps:
+ yield from _flatten_and_lazify_vars(m)
+ elif mapping_type is _AnsibleLazyTemplateDict:
+ yield mapping
+ elif mapping_type in (dict, _AnsibleTaggedDict):
+ # don't propagate empty dictionary layers
+ if mapping:
+ yield _AnsibleLazyTemplateMixin._try_create(mapping)
+ else:
+ raise NotImplementedError(f"unsupported mapping type in Jinja vars: {mapping_type}")
+
+
+def _new_context(
+ *,
+ environment: Environment,
+ template_name: str | None,
+ blocks: dict[str, t.Callable[[Context], c.Iterator[str]]],
+ shared: bool = False,
+ jinja_locals: c.Mapping[str, t.Any] | None = None,
+ jinja_vars: c.Mapping[str, t.Any] | None = None,
+ jinja_globals: c.MutableMapping[str, t.Any] | None = None,
+) -> Context:
+ """Override Jinja's context vars setup to use ChainMaps and containers that support lazy templating."""
+ layers = []
+
+ if jinja_locals:
+ # Omit values set to Jinja's internal `missing` sentinel; they are locals that have not yet been
+ # initialized in the current context, and should not be exposed to child contexts. e.g.: {% import 'a' as b with context %}.
+ # The `b` local will be `missing` in the `a` context and should not be propagated as a local to the child context we're creating.
+ layers.append(_AnsibleLazyTemplateMixin._try_create({k: v for k, v in jinja_locals.items() if v is not missing}))
+
+ if jinja_vars:
+ layers.extend(_flatten_and_lazify_vars(jinja_vars))
+
+ if jinja_globals and not shared:
+ # Even though we don't currently support templating globals, it's easier to ensure that everything is template-able rather than trying to
+ # pick apart the ChainMaps to enforce non-template-able globals, or to risk things that *should* be template-able not being lazified.
+ layers.extend(_flatten_and_lazify_vars(jinja_globals))
+
+ if not layers:
+ # ensure we have at least one layer (which should be lazy), since _flatten_and_lazify_vars eliminates most empty layers
+ layers.append(_AnsibleLazyTemplateMixin._try_create({}))
+
+    # only create a ChainMap when combining multiple layers; a single layer is used directly
+ parent = layers[0] if len(layers) == 1 else ChainMap(*layers)
+
+ # the `parent` cast is only to satisfy Jinja's overly-strict type hint
+ return environment.context_class(environment, t.cast(dict, parent), template_name, blocks, globals=jinja_globals)
+
+
+def is_possibly_template(value: str, overrides: TemplateOverrides = TemplateOverrides.DEFAULT) -> bool:
+ """
+ A lightweight check to determine if the given string looks like it contains a template, even if that template is invalid.
+ Returns `True` if the given string starts with a Jinja overrides header or if it contains template start strings.
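+    For example (illustrative): '{{ x }}' and 'a {% if b %}c{% endif %}' return True; 'plain text' returns False.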
+ """
+ return value.startswith(JINJA2_OVERRIDE) or overrides._contains_start_string(value)
+
+
+def is_possibly_all_template(value: str, overrides: TemplateOverrides = TemplateOverrides.DEFAULT) -> bool:
+ """
+ A lightweight check to determine if the given string looks like it contains *only* a template, even if that template is invalid.
+ Returns `True` if the given string starts with a Jinja overrides header or if it starts and ends with Jinja template delimiters.
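+    For example (illustrative): '{{ x }}' returns True; 'a {{ x }}' returns False because it does not start with a delimiter.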
+ """
+ return value.startswith(JINJA2_OVERRIDE) or overrides._starts_and_ends_with_jinja_delimiters(value)
+
+
+class FinalizeMode(enum.Enum):
+ TOP_LEVEL = enum.auto()
+ CONCAT = enum.auto()
+
+
+_FINALIZE_FAST_PATH_EXACT_MAPPING_TYPES = frozenset(
+ (
+ dict,
+ _AnsibleTaggedDict,
+ _AnsibleLazyTemplateDict,
+ HostVars,
+ HostVarsVars,
+ )
+)
+"""Fast-path exact mapping types for finalization. These types bypass diagnostic warnings for type conversion."""
+
+_FINALIZE_FAST_PATH_EXACT_ITERABLE_TYPES = frozenset(
+ (
+ list,
+ _AnsibleTaggedList,
+ _AnsibleLazyTemplateList,
+ tuple,
+ _AnsibleTaggedTuple,
+ _AnsibleLazyAccessTuple,
+ )
+)
+"""Fast-path exact iterable types for finalization. These types bypass diagnostic warnings for type conversion."""
+
+_FINALIZE_DISALLOWED_EXACT_TYPES = frozenset((range,))
+"""Exact types that cannot be finalized."""
+
+# Jinja passes these into filters/tests via @pass_environment
+register_known_types(
+ AnsibleContext,
+ AnsibleEnvironment,
+ EvalContext,
+)
+
+
+def _finalize_dict(o: t.Any, mode: FinalizeMode) -> t.Iterator[tuple[t.Any, t.Any]]:
+ for k, v in o.items():
+ if v is not Omit:
+ yield _finalize_template_result(k, mode), _finalize_template_result(v, mode)
+
+
+def _finalize_list(o: t.Any, mode: FinalizeMode) -> t.Iterator[t.Any]:
+ for v in o:
+ if v is not Omit:
+ yield _finalize_template_result(v, mode)
+
+
+def _maybe_finalize_scalar(o: t.Any) -> t.Any:
+ # DTFIX5: this should check all supported scalar subclasses, not just JSON ones (also, does the JSON serializer handle these cases?)
+ for target_type in _json_subclassable_scalar_types:
+ if not isinstance(o, target_type):
+ continue
+
+ match _TemplateConfig.unknown_type_conversion_handler.action:
+ # we don't want to show the object value, and it can't be Origin-tagged; send the current template value for best effort
+ case ErrorAction.WARNING:
+ display.warning(
+ msg=f'Type {native_type_name(o)!r} is unsupported in variable storage, converting to {native_type_name(target_type)!r}.',
+ obj=TemplateContext.current(optional=True).template_value,
+ )
+ case ErrorAction.ERROR:
+ raise AnsibleVariableTypeError.from_value(obj=TemplateContext.current(optional=True).template_value)
+
+ return target_type(o)
+
+ return None
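+
+# Illustrative behavior (sketch): a hypothetical `class Port(int)` instance reaching this
+# function would trigger the configured warning/error and be converted via `int(...)`;
+# values matching no supported scalar base type fall through and return `None`.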
+
+
+def _finalize_fallback_collection(
+ o: t.Any,
+ mode: FinalizeMode,
+ finalizer: t.Callable[[t.Any, FinalizeMode], t.Iterator],
+ target_type: type[list | dict],
+) -> t.Collection[t.Any]:
+ match _TemplateConfig.unknown_type_conversion_handler.action:
+ # we don't want to show the object value, and it can't be Origin-tagged; send the current template value for best effort
+ case ErrorAction.WARNING:
+ display.warning(
+ msg=f'Type {native_type_name(o)!r} is unsupported in variable storage, converting to {native_type_name(target_type)!r}.',
+ obj=TemplateContext.current(optional=True).template_value,
+ )
+ case ErrorAction.ERROR:
+ raise AnsibleVariableTypeError.from_value(obj=TemplateContext.current(optional=True).template_value)
+
+ return _finalize_collection(o, mode, finalizer, target_type)
+
+
+def _finalize_collection(
+ o: t.Any,
+ mode: FinalizeMode,
+ finalizer: t.Callable[[t.Any, FinalizeMode], t.Iterator],
+ target_type: type[list | dict],
+) -> t.Collection[t.Any]:
+ return AnsibleTagHelper.tag(finalizer(o, mode), AnsibleTagHelper.tags(o), value_type=target_type)
+
+
+def _finalize_template_result(o: t.Any, mode: FinalizeMode) -> t.Any:
+ """Recurse the template result, rendering any encountered templates, converting containers to non-lazy versions."""
+ # DTFIX5: add tests to ensure this method doesn't drift from allowed types
+ o_type = type(o)
+
+ # DTFIX-FUTURE: provide an optional way to check for trusted templates leaking out of templating (injected, but not passed through templar.template)
+
+ if o_type is _AnsibleTaggedStr:
+ return _JinjaConstTemplate.untag(o) # prevent _JinjaConstTemplate from leaking into finalized results
+
+ if o_type in PASS_THROUGH_SCALAR_VAR_TYPES:
+ return o
+
+ if o_type in _FINALIZE_FAST_PATH_EXACT_MAPPING_TYPES: # silently convert known mapping types to dict
+ return _finalize_collection(o, mode, _finalize_dict, dict)
+
+ if o_type in _FINALIZE_FAST_PATH_EXACT_ITERABLE_TYPES: # silently convert known sequence types to list
+ return _finalize_collection(o, mode, _finalize_list, list)
+
+ if o_type in Marker._concrete_subclasses: # this early return assumes handle_marker follows our variable type rules
+ return TemplateContext.current().templar.marker_behavior.handle_marker(o)
+
+ if mode is not FinalizeMode.TOP_LEVEL: # unsupported type (do not raise)
+ return o
+
+ if o_type in _FINALIZE_DISALLOWED_EXACT_TYPES: # early abort for disallowed types that would otherwise be handled below
+ raise AnsibleVariableTypeError.from_value(obj=o)
+
+ if _internal.is_intermediate_mapping(o): # since isinstance checks are slower, this is separate from the exact type check above
+ return _finalize_fallback_collection(o, mode, _finalize_dict, dict)
+
+ if _internal.is_intermediate_iterable(o): # since isinstance checks are slower, this is separate from the exact type check above
+ return _finalize_fallback_collection(o, mode, _finalize_list, list)
+
+ if (result := _maybe_finalize_scalar(o)) is not None:
+ return result
+
+ raise AnsibleVariableTypeError.from_value(obj=o)
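+
+
+# Illustrative finalization outcomes (sketch, assuming default configuration):
+#   _finalize_template_result({'a': [1, 2]}, FinalizeMode.TOP_LEVEL) -> plain dict containing a plain list
+#   _finalize_template_result(range(3), FinalizeMode.TOP_LEVEL)      -> raises AnsibleVariableTypeError
+#   _finalize_template_result(range(3), FinalizeMode.CONCAT)         -> returned as-is (unsupported type, no raise)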
diff --git a/lib/ansible/_internal/_templating/_jinja_common.py b/lib/ansible/_internal/_templating/_jinja_common.py
new file mode 100644
index 00000000000..9f7047dcb6a
--- /dev/null
+++ b/lib/ansible/_internal/_templating/_jinja_common.py
@@ -0,0 +1,323 @@
+from __future__ import annotations
+
+import abc
+import collections.abc as c
+import enum
+import inspect
+import itertools
+import typing as t
+
+from jinja2 import UndefinedError, StrictUndefined, TemplateRuntimeError
+from jinja2.utils import missing
+
+from ...module_utils._internal import _messages
+from ansible.constants import config
+from ansible.errors import AnsibleUndefinedVariable, AnsibleTypeError
+from ansible._internal._errors._handler import ErrorHandler
+from ansible.module_utils._internal._datatag import Tripwire, _untaggable_types
+
+from ._access import NotifiableAccessContextBase
+from ._jinja_patches import _patch_jinja
+from ._utils import TemplateContext
+from .._errors import _captured
+from ...module_utils.datatag import native_type_name
+
+_patch_jinja() # apply Jinja2 patches before types are declared that are dependent on the changes
+
+
+class _SandboxMode(enum.Enum):
+ DEFAULT = enum.auto()
+ ALLOW_UNSAFE_ATTRIBUTES = enum.auto()
+
+
+class _TemplateConfig:
+ allow_embedded_templates: bool = config.get_config_value("ALLOW_EMBEDDED_TEMPLATES")
+ allow_broken_conditionals: bool = config.get_config_value('ALLOW_BROKEN_CONDITIONALS')
+ jinja_extensions: list[str] = config.get_config_value('DEFAULT_JINJA2_EXTENSIONS')
+ sandbox_mode: _SandboxMode = _SandboxMode.__members__[config.get_config_value('_TEMPLAR_SANDBOX_MODE').upper()]
+
+ unknown_type_encountered_handler = ErrorHandler.from_config('_TEMPLAR_UNKNOWN_TYPE_ENCOUNTERED')
+ unknown_type_conversion_handler = ErrorHandler.from_config('_TEMPLAR_UNKNOWN_TYPE_CONVERSION')
+ untrusted_template_handler = ErrorHandler.from_config('_TEMPLAR_UNTRUSTED_TEMPLATE_BEHAVIOR')
+
+
+class MarkerError(UndefinedError):
+ """
+    An Ansible-specific subclass of Jinja's UndefinedError, used to preserve and later restore the original Marker instance that raised the error.
+ This error is only raised by Marker and should never escape the templating system.
+ """
+
+ def __init__(self, message: str, source: Marker) -> None:
+ super().__init__(message)
+
+ self.source = source
+
+
+class Marker(StrictUndefined, Tripwire):
+ """
+ Extends Jinja's `StrictUndefined`, allowing any kind of error occurring during recursive templating operations to be captured and deferred.
+ Direct or managed access to most `Marker` attributes will raise a `MarkerError`, which usually ends the current innermost templating
+ operation and converts the `MarkerError` back to the origin Marker instance (subject to the `MarkerBehavior` in effect at the time).
+ """
+
+ __slots__ = ('_marker_template_source',)
+
+ _concrete_subclasses: t.ClassVar[set[type[Marker]]] = set()
+
+ def __init__(
+ self,
+ hint: t.Optional[str] = None,
+ obj: t.Any = missing,
+ name: t.Optional[str] = None,
+ exc: t.Type[TemplateRuntimeError] = UndefinedError, # Ansible doesn't set this argument or consume the attribute it is stored under.
+ *args,
+ _no_template_source=False,
+ **kwargs,
+ ) -> None:
+ if not hint and name and obj is not missing:
+ hint = f"object of type {native_type_name(obj)!r} has no attribute {name!r}"
+
+ kwargs.update(
+ hint=hint,
+ obj=obj,
+ name=name,
+ exc=exc,
+ )
+
+ super().__init__(*args, **kwargs)
+
+ if _no_template_source:
+ self._marker_template_source = None
+ else:
+ self._marker_template_source = TemplateContext.current().template_value
+
+ def _as_exception(self) -> Exception:
+ """Return the exception instance to raise in a top-level templating context."""
+ return AnsibleUndefinedVariable(self._undefined_message, obj=self._marker_template_source)
+
+ def _as_message(self) -> str:
+ """Return the error message to show when this marker must be represented as a string, such as for substitutions or warnings."""
+ return self._undefined_message
+
+ def _fail_with_undefined_error(self, *args: t.Any, **kwargs: t.Any) -> t.NoReturn:
+ """Ansible-specific replacement for Jinja's _fail_with_undefined_error tripwire on dunder methods."""
+ self.trip()
+
+ def trip(self) -> t.NoReturn:
+ """Raise an internal exception which can be converted back to this instance."""
+ raise MarkerError(self._undefined_message, self)
+
+ def __setattr__(self, name: str, value: t.Any) -> None:
+ """
+ Any attempt to set an unknown attribute on a `Marker` should invoke the trip method to propagate the original context.
+ This does not protect against mutation of known attributes, but the implementation is fairly simple.
+ """
+ try:
+ super().__setattr__(name, value)
+ except AttributeError:
+ pass
+ else:
+ return
+
+ self.trip()
+
+ def __getattr__(self, name: str) -> t.Any:
+ """Raises AttributeError for dunder-looking accesses, self-propagates otherwise."""
+ if name.startswith('__') and name.endswith('__'):
+ raise AttributeError(name)
+
+ return self
+
+ def __getitem__(self, key):
+ """Self-propagates on all item accesses."""
+ return self
+
+ @classmethod
+ def __init_subclass__(cls, **kwargs) -> None:
+ if not inspect.isabstract(cls):
+ _untaggable_types.add(cls)
+ cls._concrete_subclasses.add(cls)
+
+ @classmethod
+ def _init_class(cls):
+ _untaggable_types.add(cls)
+
+ # These are the methods StrictUndefined already intercepts.
+ jinja_method_names = (
+ '__add__',
+ '__bool__',
+ '__call__',
+ '__complex__',
+ '__contains__',
+ '__div__',
+ '__eq__',
+ '__float__',
+ '__floordiv__',
+ '__ge__',
+ # '__getitem__', # using a custom implementation that propagates self instead
+ '__gt__',
+ '__hash__',
+ '__int__',
+ '__iter__',
+ '__le__',
+ '__len__',
+ '__lt__',
+ '__mod__',
+ '__mul__',
+ '__ne__',
+ '__neg__',
+ '__pos__',
+ '__pow__',
+ '__radd__',
+ '__rdiv__',
+ '__rfloordiv__',
+ '__rmod__',
+ '__rmul__',
+ '__rpow__',
+ '__rsub__',
+ '__rtruediv__',
+ '__str__',
+ '__sub__',
+ '__truediv__',
+ )
+
+ # These additional methods should be intercepted, even though they are not intercepted by StrictUndefined.
+ additional_method_names = (
+ '__aiter__',
+ '__delattr__',
+ '__format__',
+ '__repr__',
+ '__setitem__',
+ )
+
+ for name in jinja_method_names + additional_method_names:
+ setattr(cls, name, cls._fail_with_undefined_error)
+
+
+Marker._init_class()
+
+
+class TruncationMarker(Marker):
+ """
+    A `Marker` value was previously encountered and reported.
+ A subsequent `Marker` value (this instance) indicates the template may have been truncated as a result.
+ It will only be visible if the previous `Marker` was ignored/replaced instead of being tripped, which would raise an exception.
+ """
+
+ __slots__ = ()
+
+ def __init__(self) -> None:
+ super().__init__(hint='template potentially truncated')
+
+
+class UndefinedMarker(Marker):
+ """A `Marker` value that represents an undefined value encountered during templating."""
+
+ __slots__ = ()
+
+
+class ExceptionMarker(Marker, metaclass=abc.ABCMeta):
+ """Base `Marker` class that represents exceptions encountered and deferred during templating."""
+
+ __slots__ = ()
+
+ @abc.abstractmethod
+ def _as_exception(self) -> Exception:
+ pass
+
+ def _as_message(self) -> str:
+ return str(self._as_exception())
+
+ def trip(self) -> t.NoReturn:
+ """Raise an internal exception which can be converted back to this instance while maintaining the cause for callers that follow them."""
+ raise MarkerError(self._undefined_message, self) from self._as_exception()
+
+
+class CapturedExceptionMarker(ExceptionMarker):
+ """A `Marker` value that represents an exception raised during templating."""
+
+ __slots__ = ('_marker_captured_exception',)
+
+ def __init__(self, exception: Exception) -> None:
+ super().__init__(hint=f'A captured exception marker was tripped: {exception}')
+
+ self._marker_captured_exception = exception
+
+ def _as_exception(self) -> Exception:
+ return self._marker_captured_exception
+
+
+class UndecryptableVaultError(_captured.AnsibleCapturedError):
+ """Template-external error raised by VaultExceptionMarker when an undecryptable variable is accessed."""
+
+ context = 'vault'
+ _default_message = "Attempt to use undecryptable variable."
+
+
+class VaultExceptionMarker(ExceptionMarker):
+ """A `Marker` value that represents an error accessing a vaulted value during templating."""
+
+ __slots__ = ('_marker_undecryptable_ciphertext', '_marker_event')
+
+ def __init__(self, ciphertext: str, event: _messages.Event) -> None:
+ super().__init__(hint='A vault exception marker was tripped.')
+
+ self._marker_undecryptable_ciphertext = ciphertext
+ self._marker_event = event
+
+ def _as_exception(self) -> Exception:
+ return UndecryptableVaultError(
+ obj=self._marker_undecryptable_ciphertext,
+ event=self._marker_event,
+ )
+
+ def _disarm(self) -> str:
+ return self._marker_undecryptable_ciphertext
+
+
+def get_first_marker_arg(args: c.Sequence, kwargs: dict[str, t.Any]) -> Marker | None:
+ """Utility method to inspect plugin args and return the first `Marker` encountered, otherwise `None`."""
+ # CAUTION: This function is exposed in public API as ansible.template.get_first_marker_arg.
+ return next(iter_marker_args(args, kwargs), None)
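+
+
+# Illustrative example (sketch): given a `Marker` instance `m`,
+#   get_first_marker_arg((1, m), {'k': 2}) -> m
+#   get_first_marker_arg((1, 2), {'k': 3}) -> None
+# Positional args are scanned before keyword values.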
+
+
+def iter_marker_args(args: c.Sequence, kwargs: dict[str, t.Any]) -> t.Generator[Marker]:
+ """Utility method to iterate plugin args and yield any `Marker` encountered."""
+ for arg in itertools.chain(args, kwargs.values()):
+ if isinstance(arg, Marker):
+ yield arg
+
+
+class JinjaCallContext(NotifiableAccessContextBase):
+ """
+ An audit context that wraps all Jinja (template/filter/test/lookup/method/function) calls.
+ While active, calls `trip()` on managed access of `Marker` objects unless the callee declares an understanding of markers.
+ """
+
+ _mask = True
+
+ def __init__(self, accept_lazy_markers: bool) -> None:
+ self._type_interest = frozenset() if accept_lazy_markers else frozenset(Marker._concrete_subclasses)
+
+ def _notify(self, o: Marker) -> t.NoReturn:
+ o.trip()
+
+
+def validate_arg_type(name: str, value: t.Any, allowed_type_or_types: type | tuple[type, ...], /) -> None:
+ """Validate the type of the given argument while preserving context for Marker values."""
+    # DTFIX-FUTURE: find a home for this as a general-purpose utility method and expose it after some API review
+ if isinstance(value, allowed_type_or_types):
+ return
+
+ if isinstance(allowed_type_or_types, type):
+ arg_type_description = repr(native_type_name(allowed_type_or_types))
+ else:
+ arg_type_description = ' or '.join(repr(native_type_name(item)) for item in allowed_type_or_types)
+
+ if isinstance(value, Marker):
+ try:
+ value.trip()
+ except Exception as ex:
+ raise AnsibleTypeError(f"The {name!r} argument must be of type {arg_type_description}.", obj=value) from ex
+
+ raise AnsibleTypeError(f"The {name!r} argument must be of type {arg_type_description}, not {native_type_name(value)!r}.", obj=value)
diff --git a/lib/ansible/_internal/_templating/_jinja_patches.py b/lib/ansible/_internal/_templating/_jinja_patches.py
new file mode 100644
index 00000000000..55966793e47
--- /dev/null
+++ b/lib/ansible/_internal/_templating/_jinja_patches.py
@@ -0,0 +1,44 @@
+"""Runtime patches for Jinja bugs affecting Ansible."""
+
+from __future__ import annotations
+
+import jinja2
+import jinja2.utils
+
+
+def _patch_jinja_undefined_slots() -> None:
+ """
+ Fix the broken __slots__ on Jinja's Undefined and StrictUndefined if they're missing in the current version.
+ This will no longer be necessary once the fix is included in the minimum supported Jinja version.
+ See: https://github.com/pallets/jinja/issues/2025
+ """
+ if not hasattr(jinja2.Undefined, '__slots__'):
+ jinja2.Undefined.__slots__ = (
+ "_undefined_hint",
+ "_undefined_obj",
+ "_undefined_name",
+ "_undefined_exception",
+ )
+
+ if not hasattr(jinja2.StrictUndefined, '__slots__'):
+ jinja2.StrictUndefined.__slots__ = ()
+
+
+def _patch_jinja_missing_type() -> None:
+ """
+ Fix the `jinja2.utils.missing` type to support pickling while remaining a singleton.
+ This will no longer be necessary once the fix is included in the minimum supported Jinja version.
+ See: https://github.com/pallets/jinja/issues/2027
+ """
+ if getattr(jinja2.utils.missing, '__reduce__')() != 'missing':
+
+ def __reduce__(*_args):
+ return 'missing'
+
+ type(jinja2.utils.missing).__reduce__ = __reduce__
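+
+        # Illustrative effect (sketch): `__reduce__` returning the global name 'missing'
+        # makes `pickle.loads(pickle.dumps(jinja2.utils.missing)) is jinja2.utils.missing`
+        # hold, preserving the singleton across a pickle round trip.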
+
+
+def _patch_jinja() -> None:
+ """Apply Jinja2 patches."""
+ _patch_jinja_undefined_slots()
+ _patch_jinja_missing_type()
diff --git a/lib/ansible/_internal/_templating/_jinja_plugins.py b/lib/ansible/_internal/_templating/_jinja_plugins.py
new file mode 100644
index 00000000000..482dabfbb01
--- /dev/null
+++ b/lib/ansible/_internal/_templating/_jinja_plugins.py
@@ -0,0 +1,375 @@
+"""Jinja template plugins (filters, tests, lookups) and custom global functions."""
+
+from __future__ import annotations
+
+import collections.abc as c
+import dataclasses
+import datetime
+import functools
+import inspect
+import re
+import typing as t
+
+from jinja2 import defaults
+
+from ansible.module_utils._internal._ambient_context import AmbientContextBase
+from ansible.module_utils.common.collections import is_sequence
+from ansible.module_utils._internal._datatag import AnsibleTagHelper
+from ansible._internal._datatag._tags import TrustedAsTemplate
+from ansible.plugins import AnsibleJinja2Plugin
+from ansible.plugins.loader import lookup_loader, Jinja2Loader
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display
+
+from ._datatag import _JinjaConstTemplate
+from ._errors import AnsibleTemplatePluginRuntimeError, AnsibleTemplatePluginLoadError, AnsibleTemplatePluginNotFoundError
+from ._jinja_common import MarkerError, _TemplateConfig, get_first_marker_arg, Marker, JinjaCallContext, CapturedExceptionMarker
+from ._lazy_containers import lazify_container_kwargs, lazify_container_args, lazify_container, _AnsibleLazyTemplateMixin
+from ._utils import LazyOptions, TemplateContext
+
+_display = Display()
+
+_TCallable = t.TypeVar("_TCallable", bound=t.Callable)
+_ITERATOR_TYPES: t.Final = (c.Iterator, c.ItemsView, c.KeysView, c.ValuesView, range)
+
+
+class JinjaPluginIntercept(c.MutableMapping):
+ """
+    Simulated dict class that loads Jinja2 plugins on request;
+    otherwise, all plugins would need to be loaded a priori.
+
+    NOTE: plugin_loader still loads all 'builtin/legacy' plugins at
+    startup, so only collection plugins are truly loaded on request.
+ """
+
+ def __init__(self, jinja_builtins: c.Mapping[str, AnsibleJinja2Plugin], plugin_loader: Jinja2Loader):
+        super().__init__()
+
+ self._plugin_loader = plugin_loader
+ self._jinja_builtins = jinja_builtins
+ self._wrapped_funcs: dict[str, t.Callable] = {}
+
+ def _wrap_and_set_func(self, instance: AnsibleJinja2Plugin) -> t.Callable:
+ if self._plugin_loader.type == 'filter':
+ plugin_func = self._wrap_filter(instance)
+ else:
+ plugin_func = self._wrap_test(instance)
+
+ self._wrapped_funcs[instance._load_name] = plugin_func
+
+ return plugin_func
+
+ def __getitem__(self, key: str) -> t.Callable:
+ instance: AnsibleJinja2Plugin | None = None
+ plugin_func: t.Callable[..., t.Any] | None
+
+ if plugin_func := self._wrapped_funcs.get(key):
+ return plugin_func
+
+ try:
+ instance = self._plugin_loader.get(key)
+ except KeyError:
+ # The plugin name was invalid or no plugin was found by that name.
+ pass
+ except Exception as ex:
+ # An unexpected exception occurred.
+ raise AnsibleTemplatePluginLoadError(self._plugin_loader.type, key) from ex
+
+ if not instance:
+ try:
+ instance = self._jinja_builtins[key]
+ except KeyError:
+ raise AnsibleTemplatePluginNotFoundError(self._plugin_loader.type, key) from None
+
+ plugin_func = self._wrap_and_set_func(instance)
+
+ return plugin_func
+
+ def __setitem__(self, key: str, value: t.Callable) -> None:
+ self._wrap_and_set_func(self._plugin_loader._wrap_func(key, key, value))
+
+ def __delitem__(self, key):
+ raise NotImplementedError()
+
+ def __contains__(self, item: t.Any) -> bool:
+ try:
+ self.__getitem__(item)
+ except AnsibleTemplatePluginLoadError:
+ return True
+ except AnsibleTemplatePluginNotFoundError:
+ return False
+
+ return True
+
+ def __iter__(self):
+ raise NotImplementedError() # dynamic container
+
+ def __len__(self):
+ raise NotImplementedError() # dynamic container
+
+ @staticmethod
+ def _invoke_plugin(instance: AnsibleJinja2Plugin, *args, **kwargs) -> t.Any:
+ if not instance.accept_args_markers:
+ if (first_marker := get_first_marker_arg(args, kwargs)) is not None:
+ return first_marker
+
+ try:
+ with JinjaCallContext(accept_lazy_markers=instance.accept_lazy_markers):
+ return instance.j2_function(*lazify_container_args(args), **lazify_container_kwargs(kwargs))
+ except MarkerError as ex:
+ return ex.source
+ except Exception as ex:
+ try:
+ raise AnsibleTemplatePluginRuntimeError(instance.plugin_type, instance.ansible_name) from ex # DTFIX-FUTURE: which name to use? PluginInfo?
+ except AnsibleTemplatePluginRuntimeError as captured:
+ return CapturedExceptionMarker(captured)
+
+ def _wrap_test(self, instance: AnsibleJinja2Plugin) -> t.Callable:
+ """Intercept point for all test plugins to ensure that args are properly templated/lazified."""
+
+ @functools.wraps(instance.j2_function)
+ def wrapper(*args, **kwargs) -> bool | Marker:
+ result = self._invoke_plugin(instance, *args, **kwargs)
+
+ if isinstance(result, Marker):
+ return result
+
+ if not isinstance(result, bool):
+ template = TemplateContext.current().template_value
+
+ _display.deprecated(
+ msg=f"The test plugin {instance.ansible_name!r} returned a non-boolean result of type {type(result)!r}. "
+ "Test plugins must have a boolean result.",
+ obj=template,
+ version="2.23",
+ )
+
+ result = bool(result)
+
+ return result
+
+ return wrapper
+
+ def _wrap_filter(self, instance: AnsibleJinja2Plugin) -> t.Callable:
+ """Intercept point for all filter plugins to ensure that args are properly templated/lazified."""
+
+ @functools.wraps(instance.j2_function)
+ def wrapper(*args, **kwargs) -> t.Any:
+ result = self._invoke_plugin(instance, *args, **kwargs)
+ result = _wrap_plugin_output(result)
+
+ return result
+
+ return wrapper
+
+
+class _DirectCall:
+ """Functions/methods marked `_DirectCall` bypass Jinja Environment checks for `Marker`."""
+
+ _marker_attr: t.Final[str] = "_directcall"
+
+ @classmethod
+ def mark(cls, src: _TCallable) -> _TCallable:
+ setattr(src, cls._marker_attr, True)
+ return src
+
+ @classmethod
+ def is_marked(cls, value: t.Callable) -> bool:
+ return callable(value) and getattr(value, cls._marker_attr, False)
+
+
+@_DirectCall.mark
+def _query(plugin_name: str, /, *args, **kwargs) -> t.Any:
+ """wrapper for lookup, force wantlist true"""
+ kwargs['wantlist'] = True
+ return _invoke_lookup(plugin_name=plugin_name, lookup_terms=list(args), lookup_kwargs=kwargs)
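+
+# Illustrative template usage (sketch): `{{ query('env', 'HOME') }}` always yields a list,
+# behaving like `{{ lookup('env', 'HOME', wantlist=True) }}`.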
+
+
+@_DirectCall.mark
+def _lookup(plugin_name: str, /, *args, **kwargs) -> t.Any:
+ # convert the args tuple to a list, since some plugins make a poor assumption that `run.args` is a list
+ return _invoke_lookup(plugin_name=plugin_name, lookup_terms=list(args), lookup_kwargs=kwargs)
+
+
+@dataclasses.dataclass
+class _LookupContext(AmbientContextBase):
+ """Ambient context that wraps lookup execution, providing information about how it was invoked."""
+
+ invoked_as_with: bool
+
+
+@_DirectCall.mark
+def _invoke_lookup(*, plugin_name: str, lookup_terms: list, lookup_kwargs: dict[str, t.Any], invoked_as_with: bool = False) -> t.Any:
+ templar = TemplateContext.current().templar
+
+ from ansible import template as _template
+
+ try:
+ instance: LookupBase | None = lookup_loader.get(plugin_name, loader=templar._loader, templar=_template.Templar._from_template_engine(templar))
+ except Exception as ex:
+ raise AnsibleTemplatePluginLoadError('lookup', plugin_name) from ex
+
+ if instance is None:
+ raise AnsibleTemplatePluginNotFoundError('lookup', plugin_name)
+
+ # if the lookup doesn't understand `Marker` and there's at least one in the top level, short-circuit by returning the first one we found
+ if not instance.accept_args_markers and (first_marker := get_first_marker_arg(lookup_terms, lookup_kwargs)) is not None:
+ return first_marker
+
+ # don't pass these through to the lookup
+ wantlist = lookup_kwargs.pop('wantlist', False)
+ errors = lookup_kwargs.pop('errors', 'strict')
+
+ with JinjaCallContext(accept_lazy_markers=instance.accept_lazy_markers):
+ try:
+ if _TemplateConfig.allow_embedded_templates:
+ # for backwards compat, only trust constant templates in lookup terms
+ with JinjaCallContext(accept_lazy_markers=True):
+ # Force lazy marker support on for this call; the plugin's understanding is irrelevant, as is any existing context, since this backward
+ # compat code always understands markers.
+ lookup_terms = [templar.template(value) for value in _trust_jinja_constants(lookup_terms)]
+
+ # since embedded template support is enabled, repeat the check for `Marker` on lookup_terms, since a template may render as a `Marker`
+ if not instance.accept_args_markers and (first_marker := get_first_marker_arg(lookup_terms, {})) is not None:
+ return first_marker
+ else:
+ lookup_terms = AnsibleTagHelper.tag_copy(lookup_terms, (lazify_container(value) for value in lookup_terms), value_type=list)
+
+ with _LookupContext(invoked_as_with=invoked_as_with):
+ # The lookup context currently only supports the internal use-case where `first_found` requires extra info when invoked via `with_first_found`.
+ # The context may be public API in the future, but for now, other plugins should not implement this kind of dynamic behavior,
+ # though we're stuck with it for backward compatibility on `first_found`.
+ lookup_res = instance.run(lookup_terms, variables=templar.available_variables, **lazify_container_kwargs(lookup_kwargs))
+
+ # DTFIX-FUTURE: Consider allowing/requiring lookup plugins to declare how their result should be handled.
+ # Currently, there are multiple behaviors that are less than ideal and poorly documented (or not at all):
+ # * When `errors=warn` or `errors=ignore` the result is `None` unless `wantlist=True`, in which case the result is `[]`.
+ # * The user must specify `wantlist=True` to receive the plugin return value unmodified.
+ # A plugin can achieve similar results by wrapping its result in a list -- unless of course the user specifies `wantlist=True`.
+ # * When `wantlist=True` is specified, the result is not guaranteed to be a list as the option implies (except on plugin error).
+ # * Sequences are munged unless the user specifies `wantlist=True`:
+ # * len() == 0 - Return an empty sequence.
+ # * len() == 1 - Return the only element in the sequence.
+        #     * len() >= 2 when all elements are `str` - Return all the values joined into a single comma-separated string.
+ # * len() >= 2 when at least one element is not `str` - Return the sequence as-is.
+
+ if not is_sequence(lookup_res):
+ # DTFIX-FUTURE: deprecate return types which are not a list
+ # previously non-Sequence return types were deprecated and then became an error in 2.18
+ # however, the deprecation message (and this error) mention `list` specifically rather than `Sequence`
+ # letting non-list values through will trigger variable type checking warnings/errors
+ raise TypeError(f'returned {type(lookup_res)} instead of {list}')
+
+ except MarkerError as ex:
+ return ex.source
+ except Exception as ex:
+ # DTFIX-FUTURE: convert this to the new error/warn/ignore context manager
+ if errors == 'warn':
+ _display.error_as_warning(
+ msg=f'An error occurred while running the lookup plugin {plugin_name!r}.',
+ exception=ex,
+ )
+ elif errors == 'ignore':
+ _display.display(f'An error of type {type(ex)} occurred while running the lookup plugin {plugin_name!r}: {ex}', log_only=True)
+ else:
+ raise AnsibleTemplatePluginRuntimeError('lookup', plugin_name) from ex
+
+ return [] if wantlist else None
+
+ if not wantlist and lookup_res:
+        # when wantlist=False the lookup result is either partially de-lazified (single element) or fully de-lazified (multiple elements)
+
+ if len(lookup_res) == 1:
+ lookup_res = lookup_res[0]
+ else:
+ try:
+ lookup_res = ",".join(lookup_res) # for backwards compatibility, attempt to join `ran` into single string
+ except TypeError:
+                pass  # for backwards compatibility, return the result as-is when the sequence contains non-string values
+
+ return _wrap_plugin_output(lookup_res)
+
+
+def _now(utc=False, fmt=None):
+ """Jinja2 global function (now) to return current datetime, potentially formatted via strftime."""
+ if utc:
+ now = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
+ else:
+ now = datetime.datetime.now()
+
+ if fmt:
+ return now.strftime(fmt)
+
+ return now
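+
+
+# Illustrative template usage (sketch):
+#   {{ now() }}                          -> naive local datetime
+#   {{ now(utc=true, fmt='%Y-%m-%d') }}  -> formatted UTC date string, e.g. '2025-01-01'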
+
+
+def _jinja_const_template_warning(value: object, is_conditional: bool) -> None:
+ """Issue a warning regarding embedded template usage."""
+ help_text = "Use inline expressions, for example: "
+
+ if is_conditional:
+ help_text += """`when: "{{ a_var }}" == 42` becomes `when: a_var == 42`"""
+ else:
+ help_text += """`msg: "{{ lookup('env', '{{ a_var }}') }}"` becomes `msg: "{{ lookup('env', a_var) }}"`"""
+
+ # deprecated: description='disable embedded templates by default and deprecate the feature' core_version='2.23'
+ _display.warning(
+ msg="Jinja constant strings should not contain embedded templates. This feature will be disabled by default in ansible-core 2.23.",
+ obj=value,
+ help_text=help_text,
+ )
+
+
+def _trust_jinja_constants(o: t.Any) -> t.Any:
+ """
+ Recursively apply TrustedAsTemplate to values tagged with _JinjaConstTemplate and remove the tag.
+ Only container types emitted by the Jinja compiler are checked, since others do not contain constants.
+ This is used to provide backwards compatibility with historical lookup behavior for positional arguments.
+ """
+ if _JinjaConstTemplate.is_tagged_on(o):
+ _jinja_const_template_warning(o, is_conditional=False)
+
+ return TrustedAsTemplate().tag(_JinjaConstTemplate.untag(o))
+
+ o_type = type(o)
+
+ if o_type is dict:
+ return {k: _trust_jinja_constants(v) for k, v in o.items()}
+
+ if o_type in (list, tuple):
+ return o_type(_trust_jinja_constants(v) for v in o)
+
+ return o
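+
+
+# Illustrative example (sketch): a constant lookup term such as lookup('env', '{{ a_var }}')
+# arrives with '{{ a_var }}' tagged _JinjaConstTemplate; this pass swaps that tag for
+# TrustedAsTemplate (warning once) so the embedded template still renders for backward compatibility.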
+
+
+def _wrap_plugin_output(o: t.Any) -> t.Any:
+ """Utility method to ensure that iterators/generators returned from a plugins are consumed."""
+ if isinstance(o, _ITERATOR_TYPES):
+ o = list(o)
+
+ return _AnsibleLazyTemplateMixin._try_create(o, LazyOptions.SKIP_TEMPLATES)
+
+
+_PLUGIN_SOURCES = dict(
+ filter=defaults.DEFAULT_FILTERS,
+ test=defaults.DEFAULT_TESTS,
+)
+
+
+def _get_builtin_short_description(plugin: object) -> str:
+ """
+ Make a reasonable effort to break a function docstring down to a single sentence.
+ We can't use the full docstring due to embedded formatting, particularly RST.
+ This isn't intended to be perfect, just good enough until we can write our own docs for these.
+ """
+    value = re.split(r'(\.|!|\s\(|:\s)', inspect.getdoc(plugin), maxsplit=1)[0].replace('\n', ' ')
+
+ if value:
+ value += '.'
+
+ return value
+
+
+def get_jinja_builtin_plugin_descriptions(plugin_type: str) -> dict[str, str]:
+ """Returns a dictionary of Jinja builtin plugin names and their short descriptions."""
+ return {f'ansible.builtin.{name}': _get_builtin_short_description(plugin) for name, plugin in _PLUGIN_SOURCES[plugin_type].items() if name.isidentifier()}
diff --git a/lib/ansible/_internal/_templating/_lazy_containers.py b/lib/ansible/_internal/_templating/_lazy_containers.py
new file mode 100644
index 00000000000..1d19e88c645
--- /dev/null
+++ b/lib/ansible/_internal/_templating/_lazy_containers.py
@@ -0,0 +1,633 @@
+from __future__ import annotations
+
+import copy
+import dataclasses
+import functools
+import types
+import typing as t
+
+from jinja2.environment import TemplateModule
+
+from ansible.module_utils._internal._datatag import (
+ AnsibleTagHelper,
+ AnsibleTaggedObject,
+ _AnsibleTaggedDict,
+ _AnsibleTaggedList,
+ _AnsibleTaggedTuple,
+ _NO_INSTANCE_STORAGE,
+ _try_get_internal_tags_mapping,
+)
+
+from ansible.utils.sentinel import Sentinel
+from ansible.errors import AnsibleVariableTypeError
+from ansible._internal._errors._handler import Skippable
+from ansible.vars.hostvars import HostVarsVars, HostVars
+
+from ._access import AnsibleAccessContext
+from ._jinja_common import Marker, _TemplateConfig
+from ._utils import TemplateContext, PASS_THROUGH_SCALAR_VAR_TYPES, LazyOptions
+
+if t.TYPE_CHECKING:
+ from ._engine import TemplateEngine
+
+_KNOWN_TYPES: t.Final[set[type]] = (
+ {
+ HostVars, # example: hostvars
+ HostVarsVars, # example: hostvars.localhost | select
+ type, # example: range(20) | list # triggered on retrieval of `range` type from globals
+ range, # example: range(20) | list # triggered when returning a `range` instance from a call
+ types.FunctionType, # example: undef() | default("blah")
+ types.MethodType, # example: ansible_facts.get | type_debug
+ functools.partial,
+ type(''.startswith), # example: inventory_hostname.upper | type_debug # using `startswith` to resolve `builtin_function_or_method`
+ TemplateModule, # example: '{% import "importme.j2" as im %}{{ im | type_debug }}'
+ }
+ | set(PASS_THROUGH_SCALAR_VAR_TYPES)
+ | set(Marker._concrete_subclasses)
+)
+"""
+These types are known to the templating system.
+In addition to the statically defined types, additional types will be added at runtime.
+When enabled in config, this set will be used to determine if an encountered type should trigger a warning or error.
+"""
+
+
+def register_known_types(*args: type) -> None:
+ """Register a type with the template engine so it will not trigger warnings or errors when encountered."""
+ _KNOWN_TYPES.update(args)
+
+
+class UnsupportedConstructionMethodError(RuntimeError):
+ """Error raised when attempting to construct a lazy container with unsupported arguments."""
+
+ def __init__(self):
+ super().__init__("Direct construction of lazy containers is not supported.")
+
+
+@t.final
+@dataclasses.dataclass(frozen=True, slots=True)
+class _LazyValue:
+ """Wrapper around values to indicate lazy behavior has not yet been applied."""
+
+ value: t.Any
+
+
+@t.final
+@dataclasses.dataclass(frozen=True, kw_only=True, slots=True)
+class _LazyValueSource:
+ """Intermediate value source for lazy-eligible collection copy operations."""
+
+ source: t.Iterable
+ templar: TemplateEngine
+ lazy_options: LazyOptions
+
+
+@t.final
+class _NoKeySentinel(Sentinel):
+ """Sentinel used to indicate a requested key was not found."""
+
+
+# There are several operations performed by lazy containers, with some variation between types.
+#
+# Columns: D=dict, L=list, T=tuple
+# Cells: l=lazy (upon access), n=non-lazy (__init__/__new__)
+#
+# D L T Feature Description
+# - - - ----------- ---------------------------------------------------------------
+# l l n propagation when container items which are containers become lazy instances
+# l l n transform when transforms are applied to container items
+# l l n templating when templating is performed on container items
+# l l l access when access calls are performed on container items
+
+
+class _AnsibleLazyTemplateMixin:
+ __slots__ = _NO_INSTANCE_STORAGE
+
+ _dispatch_types: t.ClassVar[dict[type, type[_AnsibleLazyTemplateMixin]]] = {} # populated by __init_subclass__
+ _container_types: t.ClassVar[set[type]] = set() # populated by __init_subclass__
+
+ _native_type: t.ClassVar[type] # from AnsibleTaggedObject
+
+ _SLOTS: t.Final = (
+ '_templar',
+ '_lazy_options',
+ )
+
+ _templar: TemplateEngine
+ _lazy_options: LazyOptions
+
+ def __init_subclass__(cls, **kwargs) -> None:
+ tagged_type = cls.__mro__[1]
+ native_type = tagged_type.__mro__[1]
+
+ for check_type in (tagged_type, native_type):
+ if conflicting_type := cls._dispatch_types.get(check_type):
+ raise TypeError(f"Lazy mixin {cls.__name__!r} type {check_type.__name__!r} conflicts with {conflicting_type.__name__!r}.")
+
+ cls._dispatch_types[native_type] = cls
+ cls._dispatch_types[tagged_type] = cls
+ cls._container_types.add(native_type)
+ cls._empty_tags_as_native = False # never revert to the native type when no tags remain
+
+ register_known_types(cls)
+
+ def __init__(self, contents: t.Iterable | _LazyValueSource) -> None:
+ if isinstance(contents, _LazyValueSource):
+ self._templar = contents.templar
+ self._lazy_options = contents.lazy_options
+ elif isinstance(contents, _AnsibleLazyTemplateMixin):
+ self._templar = contents._templar
+ self._lazy_options = contents._lazy_options
+ else:
+ raise UnsupportedConstructionMethodError()
+
+ def __reduce_ex__(self, protocol):
+ raise NotImplementedError("Pickling of Ansible lazy objects is not permitted.")
+
+ @staticmethod
+ def _try_create(item: t.Any, lazy_options: LazyOptions = LazyOptions.DEFAULT) -> t.Any:
+ """
+ If `item` is a container type which supports lazy access and/or templating, return a lazy wrapped version -- otherwise return it as-is.
+ When returning as-is, a warning or error may be generated for unknown types.
+ The `lazy_options.skip_templates` argument should be set to `True` when `item` is sourced from a plugin instead of Ansible variable storage.
+ This provides backwards compatibility and reduces lazy overhead, as plugins do not normally introduce templates.
+ If a plugin needs to introduce templates, the plugin is responsible for invoking the templar and returning the result.
+ """
+ item_type = type(item)
+
+ # Try to use exact type match first to determine which wrapper (if any) to apply; isinstance checks
+ # are extremely expensive, so try to avoid them for our commonly-supported types.
+ if (dispatcher := _AnsibleLazyTemplateMixin._dispatch_types.get(item_type)) is not None:
+ # Create a generator that yields the elements of `item` wrapped in a `_LazyValue` wrapper.
+ # The wrapper is used to signal to the lazy container that the value must be processed before being returned.
+ # Values added to the lazy container later through other means will be returned as-is, without any special processing.
+ lazy_values = dispatcher._lazy_values(item, lazy_options)
+ tags_mapping = _try_get_internal_tags_mapping(item)
+ value = t.cast(AnsibleTaggedObject, dispatcher)._instance_factory(lazy_values, tags_mapping)
+
+ return value
+
+ with Skippable, _TemplateConfig.unknown_type_encountered_handler.handle(AnsibleVariableTypeError, skip_on_ignore=True):
+ if item_type not in _KNOWN_TYPES:
+ raise AnsibleVariableTypeError(
+ message=f"Encountered unknown type {item_type.__name__!r} during template operation.",
+ help_text="Use supported types to avoid unexpected behavior.",
+ obj=TemplateContext.current().template_value,
+ )
+
+ return item
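+
+    # Illustrative behavior (sketch): within an active template operation,
+    # `_try_create({'a': '{{ x }}'})` returns an _AnsibleLazyTemplateDict whose values are
+    # templated on first access; `_try_create('plain')` is returned as-is after the known-type check.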
+
+ def _is_not_lazy_combine_candidate(self, other: object) -> bool:
+ """Returns `True` if `other` cannot be lazily combined with the current instance due to differing templar/options, otherwise returns `False`."""
+ return isinstance(other, _AnsibleLazyTemplateMixin) and (self._templar is not other._templar or self._lazy_options != other._lazy_options)
+
+ def _non_lazy_copy(self) -> t.Collection:
+ """
+ Return a non-lazy copy of this collection.
+ Any remaining lazy wrapped values will be unwrapped without further processing.
+ Tags on this instance will be preserved on the returned copy.
+ """
+ raise NotImplementedError() # pragma: nocover
+
+ @staticmethod
+ def _lazy_values(values: t.Any, lazy_options: LazyOptions) -> _LazyValueSource:
+ """
+ Return an iterable that wraps each of the given elements in a lazy wrapper.
+ Only elements wrapped this way will receive lazy processing when retrieved from the collection.
+ """
+ # DTFIX-FUTURE: check relative performance of method-local vs stored generator expressions on implementations of this method
+ raise NotImplementedError() # pragma: nocover
+
+ def _proxy_or_render_lazy_value(self, key: t.Any, value: t.Any) -> t.Any:
+ """
+ Ensure that the value is lazy-proxied or rendered, and if a key is provided, replace the original value with the result.
+ """
+ if type(value) is not _LazyValue: # pylint: disable=unidiomatic-typecheck
+ if self._lazy_options.access:
+ AnsibleAccessContext.current().access(value)
+
+ return value
+
+ original_value = value.value
+
+ if self._lazy_options.access:
+ AnsibleAccessContext.current().access(original_value)
+
+ new_value = self._templar.template(original_value, lazy_options=self._lazy_options)
+
+ if new_value is not original_value and self._lazy_options.access:
+ AnsibleAccessContext.current().access(new_value)
+
+ if key is not _NoKeySentinel:
+ self._native_type.__setitem__(self, key, new_value) # type: ignore # pylint: disable=unnecessary-dunder-call
+
+ return new_value
+
+
+@t.final # consumers of lazy collections rely heavily on the concrete types being final
+class _AnsibleLazyTemplateDict(_AnsibleTaggedDict, _AnsibleLazyTemplateMixin):
+ __slots__ = _AnsibleLazyTemplateMixin._SLOTS
+
+ def __init__(self, contents: t.Iterable | _LazyValueSource, /, **kwargs) -> None:
+ _AnsibleLazyTemplateMixin.__init__(self, contents)
+
+ if isinstance(contents, _AnsibleLazyTemplateDict):
+ super().__init__(dict.items(contents), **kwargs)
+ elif isinstance(contents, _LazyValueSource):
+ super().__init__(contents.source, **kwargs)
+ else:
+ raise UnsupportedConstructionMethodError()
+
+ def get(self, key: t.Any, default: t.Any = None) -> t.Any:
+ if (value := super().get(key, _NoKeySentinel)) is _NoKeySentinel:
+ return default
+
+ return self._proxy_or_render_lazy_value(key, value)
+
+ def __getitem__(self, key: t.Any, /) -> t.Any:
+ return self._proxy_or_render_lazy_value(key, super().__getitem__(key))
+
+ def __str__(self):
+ return str(self.copy()._native_copy()) # inefficient, but avoids mutating the current instance (to make debugging practical)
+
+ def __repr__(self):
+ return repr(self.copy()._native_copy()) # inefficient, but avoids mutating the current instance (to make debugging practical)
+
+ def __iter__(self):
+ # We're using the base implementation, but must override `__iter__` to skip `dict` fast-path copy, which would bypass lazy behavior.
+ # See: https://github.com/python/cpython/blob/ffcc450a9b8b6927549b501eff7ac14abc238448/Objects/dictobject.c#L3861-L3864
+ return super().__iter__()
+
+ def setdefault(self, key, default=None, /) -> t.Any:
+ if (value := self.get(key, _NoKeySentinel)) is not _NoKeySentinel:
+ return value
+
+ super().__setitem__(key, default)
+
+ return default
+
+ def items(self):
+ for key, value in super().items():
+ yield key, self._proxy_or_render_lazy_value(key, value)
+
+ def values(self):
+ for _key, value in self.items():
+ yield value
+
+ def pop(self, key, default=_NoKeySentinel, /) -> t.Any:
+ if (value := super().get(key, _NoKeySentinel)) is _NoKeySentinel:
+ if default is _NoKeySentinel:
+ raise KeyError(key)
+
+ return default
+
+ value = self._proxy_or_render_lazy_value(_NoKeySentinel, value)
+
+ del self[key]
+
+ return value
+
+ def popitem(self) -> t.Any:
+ try:
+ key = next(reversed(self))
+ except StopIteration:
+ raise KeyError("popitem(): dictionary is empty")
+
+ value = self._proxy_or_render_lazy_value(_NoKeySentinel, self[key])
+
+ del self[key]
+
+ return key, value
+
+ def _native_copy(self) -> dict:
+ return dict(self.items())
+
+ @staticmethod
+ def _item_source(value: dict) -> dict | _LazyValueSource:
+ if isinstance(value, _AnsibleLazyTemplateDict):
+ return _LazyValueSource(source=dict.items(value), templar=value._templar, lazy_options=value._lazy_options)
+
+ return value
+
+ def _yield_non_lazy_dict_items(self) -> t.Iterator[tuple[str, t.Any]]:
+ """
+ Delegate to the base collection items iterator to yield the raw contents.
+ As of Python 3.13, generator functions are significantly faster than inline generator expressions.
+ """
+ for k, v in dict.items(self):
+ yield k, v.value if type(v) is _LazyValue else v # pylint: disable=unidiomatic-typecheck
+
+ def _non_lazy_copy(self) -> dict:
+ return AnsibleTagHelper.tag_copy(self, self._yield_non_lazy_dict_items(), value_type=dict)
+
+ @staticmethod
+ def _lazy_values(values: dict, lazy_options: LazyOptions) -> _LazyValueSource:
+ return _LazyValueSource(source=((k, _LazyValue(v)) for k, v in values.items()), templar=TemplateContext.current().templar, lazy_options=lazy_options)
+
+ @staticmethod
+ def _proxy_or_render_other(other: t.Any | None) -> None:
+ """Call `_proxy_or_render_lazy_values` if `other` is a lazy dict. Used internally by comparison methods."""
+ if type(other) is _AnsibleLazyTemplateDict: # pylint: disable=unidiomatic-typecheck
+ other._proxy_or_render_lazy_values()
+
+ def _proxy_or_render_lazy_values(self) -> None:
+ """Ensure all `_LazyValue` wrapped values have been processed."""
+ for _unused in self.values():
+ pass
+
+ def __eq__(self, other):
+ self._proxy_or_render_lazy_values()
+ self._proxy_or_render_other(other)
+ return super().__eq__(other)
+
+ def __ne__(self, other):
+ self._proxy_or_render_lazy_values()
+ self._proxy_or_render_other(other)
+ return super().__ne__(other)
+
+ def __or__(self, other):
+ # DTFIX-FUTURE: support preservation of laziness when possible like we do for list
+ # Both sides end up going through _proxy_or_render_lazy_value, so there's no Templar preservation needed.
+ # In the future this could be made more lazy when both Templar instances are the same, or if per-value Templar tracking was used.
+ return super().__or__(other)
+
+ def __ror__(self, other):
+ # DTFIX-FUTURE: support preservation of laziness when possible like we do for list
+ # Both sides end up going through _proxy_or_render_lazy_value, so there's no Templar preservation needed.
+ # In the future this could be made more lazy when both Templar instances are the same, or if per-value Templar tracking was used.
+ return super().__ror__(other)
+
+ def __deepcopy__(self, memo):
+ return _AnsibleLazyTemplateDict(
+ _LazyValueSource(
+ source=((copy.deepcopy(k), copy.deepcopy(v)) for k, v in super().items()),
+ templar=copy.deepcopy(self._templar),
+ lazy_options=copy.deepcopy(self._lazy_options),
+ )
+ )
+
+
+@t.final # consumers of lazy collections rely heavily on the concrete types being final
+class _AnsibleLazyTemplateList(_AnsibleTaggedList, _AnsibleLazyTemplateMixin):
+ __slots__ = _AnsibleLazyTemplateMixin._SLOTS
+
+ def __init__(self, contents: t.Iterable | _LazyValueSource, /) -> None:
+ _AnsibleLazyTemplateMixin.__init__(self, contents)
+
+ if isinstance(contents, _AnsibleLazyTemplateList):
+ super().__init__(list.__iter__(contents))
+ elif isinstance(contents, _LazyValueSource):
+ super().__init__(contents.source)
+ else:
+ raise UnsupportedConstructionMethodError()
+
+ def __getitem__(self, key: t.SupportsIndex | slice, /) -> t.Any:
+ if type(key) is slice: # pylint: disable=unidiomatic-typecheck
+ return _AnsibleLazyTemplateList(_LazyValueSource(source=super().__getitem__(key), templar=self._templar, lazy_options=self._lazy_options))
+
+ return self._proxy_or_render_lazy_value(key, super().__getitem__(key))
+
+ def __iter__(self):
+ for key, value in enumerate(super().__iter__()):
+ yield self._proxy_or_render_lazy_value(key, value)
+
+ def pop(self, idx: t.SupportsIndex = -1, /) -> t.Any:
+ if not self:
+ raise IndexError('pop from empty list')
+
+ try:
+ value = self[idx]
+ except IndexError:
+ raise IndexError('pop index out of range')
+
+ value = self._proxy_or_render_lazy_value(_NoKeySentinel, value)
+
+ del self[idx]
+
+ return value
+
+ def __str__(self):
+ return str(self.copy()._native_copy()) # inefficient, but avoids mutating the current instance (to make debugging practical)
+
+ def __repr__(self):
+ return repr(self.copy()._native_copy()) # inefficient, but avoids mutating the current instance (to make debugging practical)
+
+ @staticmethod
+ def _item_source(value: list) -> list | _LazyValueSource:
+ if isinstance(value, _AnsibleLazyTemplateList):
+ return _LazyValueSource(source=list.__iter__(value), templar=value._templar, lazy_options=value._lazy_options)
+
+ return value
+
+ def _yield_non_lazy_list_items(self):
+ """
+ Delegate to the base collection iterator to yield the raw contents.
+ As of Python 3.13, generator functions are significantly faster than inline generator expressions.
+ """
+ for v in list.__iter__(self):
+ yield v.value if type(v) is _LazyValue else v # pylint: disable=unidiomatic-typecheck
+
+ def _non_lazy_copy(self) -> list:
+ return AnsibleTagHelper.tag_copy(self, self._yield_non_lazy_list_items(), value_type=list)
+
+ @staticmethod
+ def _lazy_values(values: list, lazy_options: LazyOptions) -> _LazyValueSource:
+ return _LazyValueSource(source=(_LazyValue(v) for v in values), templar=TemplateContext.current().templar, lazy_options=lazy_options)
+
+ @staticmethod
+ def _proxy_or_render_other(other: t.Any | None) -> None:
+ """Call `_proxy_or_render_lazy_values` if `other` is a lazy list. Used internally by comparison methods."""
+ if type(other) is _AnsibleLazyTemplateList: # pylint: disable=unidiomatic-typecheck
+ other._proxy_or_render_lazy_values()
+
+ def _proxy_or_render_lazy_values(self) -> None:
+ """Ensure all `_LazyValue` wrapped values have been processed."""
+ for _unused in self:
+ pass
+
+ def __eq__(self, other):
+ self._proxy_or_render_lazy_values()
+ self._proxy_or_render_other(other)
+ return super().__eq__(other)
+
+ def __ne__(self, other):
+ self._proxy_or_render_lazy_values()
+ self._proxy_or_render_other(other)
+ return super().__ne__(other)
+
+ def __gt__(self, other):
+ self._proxy_or_render_lazy_values()
+ self._proxy_or_render_other(other)
+ return super().__gt__(other)
+
+ def __ge__(self, other):
+ self._proxy_or_render_lazy_values()
+ self._proxy_or_render_other(other)
+ return super().__ge__(other)
+
+ def __lt__(self, other):
+ self._proxy_or_render_lazy_values()
+ self._proxy_or_render_other(other)
+ return super().__lt__(other)
+
+ def __le__(self, other):
+ self._proxy_or_render_lazy_values()
+ self._proxy_or_render_other(other)
+ return super().__le__(other)
+
+ def __contains__(self, item):
+ self._proxy_or_render_lazy_values()
+ return super().__contains__(item)
+
+ def __reversed__(self):
+ for idx in range(self.__len__() - 1, -1, -1):
+ yield self[idx]
+
+ def __add__(self, other):
+ if self._is_not_lazy_combine_candidate(other):
+ # When other is lazy with a different templar/options, it cannot be lazily combined with self and a plain list must be returned.
+ # If other is a list, de-lazify both, otherwise just let the operation fail.
+
+ if isinstance(other, _AnsibleLazyTemplateList):
+ self._proxy_or_render_lazy_values()
+ other._proxy_or_render_lazy_values()
+
+ return super().__add__(other)
+
+ # For all other cases, the new list inherits our templar and all values stay lazy.
+ # We use list.__add__ to avoid implementing all its error behavior.
+ return _AnsibleLazyTemplateList(_LazyValueSource(source=super().__add__(other), templar=self._templar, lazy_options=self._lazy_options))
+
+ def __radd__(self, other):
+ if not (other_add := getattr(other, '__add__', None)):
+ raise TypeError(f'unsupported operand type(s) for +: {type(other).__name__!r} and {type(self).__name__!r}') from None
+
+ return _AnsibleLazyTemplateList(_LazyValueSource(source=other_add(self), templar=self._templar, lazy_options=self._lazy_options))
+
+ def __mul__(self, other):
+ return _AnsibleLazyTemplateList(_LazyValueSource(source=super().__mul__(other), templar=self._templar, lazy_options=self._lazy_options))
+
+ def __rmul__(self, other):
+ return _AnsibleLazyTemplateList(_LazyValueSource(source=super().__rmul__(other), templar=self._templar, lazy_options=self._lazy_options))
+
+ def index(self, *args, **kwargs) -> int:
+ self._proxy_or_render_lazy_values()
+ return super().index(*args, **kwargs)
+
+ def remove(self, *args, **kwargs) -> None:
+ self._proxy_or_render_lazy_values()
+ super().remove(*args, **kwargs)
+
+ def sort(self, *args, **kwargs) -> None:
+ self._proxy_or_render_lazy_values()
+ super().sort(*args, **kwargs)
+
+ def __deepcopy__(self, memo):
+ return _AnsibleLazyTemplateList(
+ _LazyValueSource(
+ source=(copy.deepcopy(v) for v in super().__iter__()),
+ templar=copy.deepcopy(self._templar),
+ lazy_options=copy.deepcopy(self._lazy_options),
+ )
+ )
+
+
+@t.final # consumers of lazy collections rely heavily on the concrete types being final
+class _AnsibleLazyAccessTuple(_AnsibleTaggedTuple, _AnsibleLazyTemplateMixin):
+ """
+ A tagged tuple subclass that provides only managed access for existing lazy values.
+
+ Since tuples are immutable, they cannot support lazy templating (which would change the tuple's value as templates were resolved).
+ When this type is created, each value in the source tuple is lazified:
+
+ * template strings are templated immediately (possibly resulting in lazy containers)
+ * non-tuple containers are lazy-wrapped
+ * tuples are immediately recursively lazy-wrapped
+ * transformations are applied immediately
+
+ The resulting object provides only managed access to its values (e.g., deprecation warnings, tripwires), and propagates to new lazy containers
+    created as a result of managed access.
+ """
+
+ # DTFIX5: ensure we have tests that explicitly verify this behavior
+
+ # nonempty __slots__ not supported for subtype of 'tuple'
+
+ def __new__(cls, contents: t.Iterable | _LazyValueSource, /) -> t.Self:
+ if isinstance(contents, _AnsibleLazyAccessTuple):
+ return super().__new__(cls, tuple.__iter__(contents))
+
+ if isinstance(contents, _LazyValueSource):
+ return super().__new__(cls, contents.source)
+
+ raise UnsupportedConstructionMethodError()
+
+ def __init__(self, contents: t.Iterable | _LazyValueSource, /) -> None:
+ _AnsibleLazyTemplateMixin.__init__(self, contents)
+
+ def __getitem__(self, key: t.SupportsIndex | slice, /) -> t.Any:
+ if type(key) is slice: # pylint: disable=unidiomatic-typecheck
+ return _AnsibleLazyAccessTuple(super().__getitem__(key))
+
+ value = super().__getitem__(key)
+
+ if self._lazy_options.access:
+ AnsibleAccessContext.current().access(value)
+
+ return value
+
+ @staticmethod
+ def _item_source(value: tuple) -> tuple | _LazyValueSource:
+ if isinstance(value, _AnsibleLazyAccessTuple):
+ return _LazyValueSource(source=tuple.__iter__(value), templar=value._templar, lazy_options=value._lazy_options)
+
+ return value
+
+ @staticmethod
+ def _lazy_values(values: t.Any, lazy_options: LazyOptions) -> _LazyValueSource:
+ templar = TemplateContext.current().templar
+
+ return _LazyValueSource(source=(templar.template(value, lazy_options=lazy_options) for value in values), templar=templar, lazy_options=lazy_options)
+
+ def _non_lazy_copy(self) -> tuple:
+ return AnsibleTagHelper.tag_copy(self, self, value_type=tuple)
+
+ def __deepcopy__(self, memo):
+ return _AnsibleLazyAccessTuple(
+ _LazyValueSource(
+ source=(copy.deepcopy(v) for v in super().__iter__()),
+ templar=copy.deepcopy(self._templar),
+ lazy_options=copy.deepcopy(self._lazy_options),
+ )
+ )
+
+
+def lazify_container(value: t.Any) -> t.Any:
+ """
+ If the given value is a supported container type, return its lazy version, otherwise return the value as-is.
+ This is used to ensure that managed access and templating occur on args and kwargs to a callable, even if they were sourced from Jinja constants.
+
+ Since both variable access and plugin output are already lazified, this mostly affects Jinja constant containers.
+ However, plugins that directly invoke other plugins (e.g., `Environment.call_filter`) are another potential source of non-lazy containers.
+ In these cases, templating will occur for trusted templates automatically upon access.
+
+ Sets, tuples, and dictionary keys cannot be lazy, since their correct operation requires hashability and equality.
+ These properties are mutually exclusive with the following lazy features:
+
+ - managed access on encrypted strings - may raise errors on both operations when decryption fails
+ - managed access on markers - must raise errors on both operations
+ - templating - mutates values
+
+ That leaves non-raising managed access as the only remaining feature, which is insufficient to warrant lazy support.
+ """
+ return _AnsibleLazyTemplateMixin._try_create(value)
+
+
+def lazify_container_args(item: tuple) -> tuple:
+ """Return the given args with values converted to lazy containers as needed."""
+ return tuple(lazify_container(value) for value in item)
+
+
+def lazify_container_kwargs(item: dict[str, t.Any]) -> dict[str, t.Any]:
+ """Return the given kwargs with values converted to lazy containers as needed."""
+ return {key: lazify_container(value) for key, value in item.items()}
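+
+
+# Illustrative usage sketch (hypothetical callable and containers):
+#
+#   args = lazify_container_args((some_list, some_dict))
+#   kwargs = lazify_container_kwargs({'option': other_dict})
+#   result = plugin_callable(*args, **kwargs)  # the containers now provide managed access and templating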
diff --git a/lib/ansible/_internal/_templating/_marker_behaviors.py b/lib/ansible/_internal/_templating/_marker_behaviors.py
new file mode 100644
index 00000000000..71df1a6e1f4
--- /dev/null
+++ b/lib/ansible/_internal/_templating/_marker_behaviors.py
@@ -0,0 +1,103 @@
+"""Handling of `Marker` values."""
+
+from __future__ import annotations
+
+import abc
+import contextlib
+import dataclasses
+import itertools
+import typing as t
+
+from ansible.utils.display import Display
+
+from ._jinja_common import Marker
+
+
+class MarkerBehavior(metaclass=abc.ABCMeta):
+ """Base class to support custom handling of `Marker` values encountered during concatenation or finalization."""
+
+ @abc.abstractmethod
+ def handle_marker(self, value: Marker) -> t.Any:
+ """Handle the given `Marker` value."""
+
+
+class FailingMarkerBehavior(MarkerBehavior):
+ """
+ The default behavior when encountering a `Marker` value during concatenation or finalization.
+ This always raises the template-internal `MarkerError` exception.
+ """
+
+ def handle_marker(self, value: Marker) -> t.Any:
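+        # `value.trip()` always raises, so this method never returns a value.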
+ value.trip()
+
+
+# DTFIX-FUTURE: consider renaming (e.g. FAIL_ON_MARKER_BEHAVIOR) or making this an actual singleton; it is the default, so nothing should need to reference it directly.
+FAIL_ON_UNDEFINED: t.Final = FailingMarkerBehavior()  # shared stateless instance; no sense in making many
+
+
+@dataclasses.dataclass(kw_only=True, slots=True, frozen=True)
+class _MarkerTracker:
+ """A numbered occurrence of a `Marker` value for later conversion to a warning."""
+
+ number: int
+ value: Marker
+
+
+class ReplacingMarkerBehavior(MarkerBehavior):
+ """All `Marker` values are replaced with a numbered string placeholder and the message from the value."""
+
+ def __init__(self) -> None:
+ self._trackers: list[_MarkerTracker] = []
+
+ def record_marker(self, value: Marker) -> t.Any:
+ """Assign a sequence number to the given value and record it for later generation of warnings."""
+ number = len(self._trackers) + 1
+
+ self._trackers.append(_MarkerTracker(number=number, value=value))
+
+ return number
+
+ def emit_warnings(self) -> None:
+ """Emit warning messages caused by Marker values, aggregated by unique template."""
+
+ display = Display()
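+        # note: `groupby` merges only consecutive trackers, so non-adjacent occurrences of the same template yield separate warnings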
+ grouped_templates = itertools.groupby(self._trackers, key=lambda tracker: tracker.value._marker_template_source)
+
+ for template, items in grouped_templates:
+ item_list = list(items)
+
+ msg = f'Encountered {len(item_list)} template error{"s" if len(item_list) > 1 else ""}.'
+
+ for item in item_list:
+ msg += f'\nerror {item.number} - {item.value._as_message()}'
+
+ display.warning(msg=msg, obj=template)
+
+ @classmethod
+ @contextlib.contextmanager
+ def warning_context(cls) -> t.Generator[t.Self, None, None]:
+ """Collect warnings for `Marker` values and emit warnings when the context exits."""
+ instance = cls()
+
+ try:
+ yield instance
+ finally:
+ instance.emit_warnings()
+
+ def handle_marker(self, value: Marker) -> t.Any:
+ number = self.record_marker(value)
+
+ return f"<< error {number} - {value._as_message()} >>"
+
+
+class RoutingMarkerBehavior(MarkerBehavior):
+ """Routes instances of Marker (by type reference) to another MarkerBehavior, defaulting to FailingMarkerBehavior."""
+
+ def __init__(self, dispatch_table: dict[type[Marker], MarkerBehavior]) -> None:
+ self._dispatch_table = dispatch_table
+
+ def handle_marker(self, value: Marker) -> t.Any:
+ behavior = self._dispatch_table.get(type(value), FAIL_ON_UNDEFINED)
+
+ return behavior.handle_marker(value)
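+
+
+# Illustrative dispatch sketch (hypothetical `SomeMarker` subclass):
+#
+#   behavior = RoutingMarkerBehavior({SomeMarker: ReplacingMarkerBehavior()})
+#   behavior.handle_marker(marker)  # routed by type; unlisted types fall back to FAIL_ON_UNDEFINED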
diff --git a/lib/ansible/_internal/_templating/_template_vars.py b/lib/ansible/_internal/_templating/_template_vars.py
new file mode 100644
index 00000000000..b5b1e4c0e9a
--- /dev/null
+++ b/lib/ansible/_internal/_templating/_template_vars.py
@@ -0,0 +1,72 @@
+from __future__ import annotations as _annotations
+
+import datetime as _datetime
+import os as _os
+import pwd as _pwd
+import time as _time
+
+from ansible import constants as _constants
+from ansible.module_utils._internal import _datatag
+
+
+def generate_ansible_template_vars(
+ path: str,
+ fullpath: str | None = None,
+ dest_path: str | None = None,
+ include_ansible_managed: bool = True,
+) -> dict[str, object]:
+ """
+ Generate and return a dictionary with variable metadata about the template specified by `fullpath`.
+    If `fullpath` is `None`, the absolute path of `path` will be used instead.
+ """
+ # deprecated description="update the ansible.windows collection to inline this logic instead of calling this internal function" core_version="2.23"
+ if fullpath is None:
+ fullpath = _os.path.abspath(path)
+
+ template_path = fullpath
+ template_stat = _os.stat(template_path)
+
+ template_uid: int | str
+
+ try:
+ template_uid = _pwd.getpwuid(template_stat.st_uid).pw_name
+ except KeyError:
+ template_uid = template_stat.st_uid
+
+ temp_vars = dict(
+ template_host=_os.uname()[1],
+ template_path=path,
+ template_mtime=_datetime.datetime.fromtimestamp(template_stat.st_mtime),
+ template_uid=template_uid,
+ template_run_date=_datetime.datetime.now(),
+ template_destpath=dest_path,
+ template_fullpath=fullpath,
+ )
+
+ if include_ansible_managed: # only inject the config default value if the variable wasn't set
+ temp_vars['ansible_managed'] = _generate_ansible_managed(template_stat)
+
+ return temp_vars
+
+
+def _generate_ansible_managed(template_stat: _os.stat_result) -> str:
+ """Generate and return the `ansible_managed` variable."""
+ # deprecated description="remove the `_generate_ansible_managed` function and use a constant instead" core_version="2.23"
+
+ from ansible.template import trust_as_template
+
+ managed_default = _constants.config.get_config_value('DEFAULT_MANAGED_STR')
+
+ managed_str = managed_default.format(
+ # IMPORTANT: These values must be constant strings to avoid template injection.
+ # Use Jinja template expressions where variables are needed.
+ host="{{ template_host }}",
+ uid="{{ template_uid }}",
+ file="{{ template_path }}",
+ )
+
+ ansible_managed = _time.strftime(managed_str, _time.localtime(template_stat.st_mtime))
+ ansible_managed = _datatag.AnsibleTagHelper.tag_copy(managed_default, ansible_managed)
+ ansible_managed = trust_as_template(ansible_managed)
+
+ return ansible_managed
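+
+
+# Illustrative sketch (hypothetical config value): with DEFAULT_MANAGED_STR set to
+# 'Ansible managed: {file} on {host}', the generated value is the trusted template
+# 'Ansible managed: {{ template_path }} on {{ template_host }}', rendered later with the template vars above.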
diff --git a/lib/ansible/_internal/_templating/_transform.py b/lib/ansible/_internal/_templating/_transform.py
new file mode 100644
index 00000000000..c812b43da5f
--- /dev/null
+++ b/lib/ansible/_internal/_templating/_transform.py
@@ -0,0 +1,70 @@
+"""Runtime projections to provide template/var-visible views of objects that are not natively allowed in Ansible's type system."""
+
+from __future__ import annotations
+
+import dataclasses
+import typing as t
+
+from ansible.module_utils._internal import _traceback, _event_utils, _messages
+from ansible.parsing.vault import EncryptedString, VaultHelper
+from ansible.utils.display import Display
+
+from ._jinja_common import VaultExceptionMarker
+from .._errors import _captured, _error_factory
+from .. import _event_formatting
+
+display = Display()
+
+
+def plugin_info(value: _messages.PluginInfo) -> dict[str, str]:
+ """Render PluginInfo as a dictionary."""
+ return dataclasses.asdict(value)
+
+
+def plugin_type(value: _messages.PluginType) -> str:
+ """Render PluginType as a string."""
+ return value.value
+
+
+def error_summary(value: _messages.ErrorSummary) -> str:
+ """Render ErrorSummary as a formatted traceback for backward-compatibility with pre-2.19 TaskResult.exception."""
+ if _traceback._is_traceback_enabled(_traceback.TracebackEvent.ERROR):
+ return _event_formatting.format_event_traceback(value.event)
+
+ return '(traceback unavailable)'
+
+
+def warning_summary(value: _messages.WarningSummary) -> str:
+ """Render WarningSummary as a simple message string for backward-compatibility with pre-2.19 TaskResult.warnings."""
+ return _event_utils.format_event_brief_message(value.event)
+
+
+def deprecation_summary(value: _messages.DeprecationSummary) -> dict[str, t.Any]:
+ """Render DeprecationSummary as dict values for backward-compatibility with pre-2.19 TaskResult.deprecations."""
+ transformed = _event_utils.deprecation_as_dict(value)
+ transformed.update(deprecator=value.deprecator)
+
+ return transformed
+
+
+def encrypted_string(value: EncryptedString) -> str | VaultExceptionMarker:
+ """Decrypt an encrypted string and return its value, or a VaultExceptionMarker if decryption fails."""
+ try:
+ return value._decrypt()
+ except Exception as ex:
+ return VaultExceptionMarker(
+ ciphertext=VaultHelper.get_ciphertext(value, with_tags=True),
+ event=_error_factory.ControllerEventFactory.from_exception(ex, _traceback.is_traceback_enabled(_traceback.TracebackEvent.ERROR)),
+ )
+
+
+_type_transform_mapping: dict[type, t.Callable[[t.Any], t.Any]] = {
+ _captured.CapturedErrorSummary: error_summary,
+ _messages.PluginInfo: plugin_info,
+ _messages.PluginType: plugin_type,
+ _messages.ErrorSummary: error_summary,
+ _messages.WarningSummary: warning_summary,
+ _messages.DeprecationSummary: deprecation_summary,
+ EncryptedString: encrypted_string,
+}
+"""This mapping is consulted by `Templar.template` to provide custom views of some objects."""
diff --git a/lib/ansible/_internal/_templating/_utils.py b/lib/ansible/_internal/_templating/_utils.py
new file mode 100644
index 00000000000..97d0f71648c
--- /dev/null
+++ b/lib/ansible/_internal/_templating/_utils.py
@@ -0,0 +1,108 @@
+from __future__ import annotations
+
+import dataclasses
+import typing as t
+
+from ansible.module_utils._internal import _ambient_context, _datatag
+
+if t.TYPE_CHECKING:
+ from ._engine import TemplateEngine, TemplateOptions
+
+
+@dataclasses.dataclass(kw_only=True, slots=True, frozen=True)
+class LazyOptions:
+ """Templating options that apply to lazy containers, which are inherited by descendent lazy containers."""
+
+ DEFAULT: t.ClassVar[t.Self]
+ """A shared instance with the default options to minimize instance creation for arg defaults."""
+ SKIP_TEMPLATES: t.ClassVar[t.Self]
+ """A shared instance with only `template=False` set to minimize instance creation for arg defaults."""
+ SKIP_TEMPLATES_AND_ACCESS: t.ClassVar[t.Self]
+ """A shared instance with both `template=False` and `access=False` set to minimize instance creation for arg defaults."""
+
+ template: bool = True
+ """Enable/disable templating."""
+
+ access: bool = True
+ """Enable/disables access calls."""
+
+ unmask_type_names: frozenset[str] = frozenset()
+ """Disables template transformations for the provided type names."""
+
+
+LazyOptions.DEFAULT = LazyOptions()
+LazyOptions.SKIP_TEMPLATES = LazyOptions(template=False)
+LazyOptions.SKIP_TEMPLATES_AND_ACCESS = LazyOptions(template=False, access=False)
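+
+# Illustrative usage sketch (hypothetical call site):
+#
+#   templar.template(value, lazy_options=LazyOptions.SKIP_TEMPLATES)  # lazy-wrap without templating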
+
+
+class TemplateContext(_ambient_context.AmbientContextBase):
+ def __init__(
+ self,
+ *,
+ template_value: t.Any,
+ templar: TemplateEngine,
+ options: TemplateOptions,
+ stop_on_template: bool = False,
+ _render_jinja_const_template: bool = False,
+ ):
+ self._template_value = template_value
+ self._templar = templar
+ self._options = options
+ self._stop_on_template = stop_on_template
+ self._parent_ctx = TemplateContext.current(optional=True)
+ self._render_jinja_const_template = _render_jinja_const_template
+
+ @property
+ def is_top_level(self) -> bool:
+ return not self._parent_ctx
+
+ @property
+ def template_value(self) -> t.Any:
+ return self._template_value
+
+ @property
+ def templar(self) -> TemplateEngine:
+ return self._templar
+
+ @property
+ def options(self) -> TemplateOptions:
+ return self._options
+
+ @property
+ def stop_on_template(self) -> bool:
+ return self._stop_on_template
+
+
+class _OmitType:
+ """
+ A placeholder singleton used to dynamically omit items from a dict/list/tuple/set when the value is `Omit`.
+
+ The `Omit` singleton is accessible from all Ansible templating contexts via the Jinja global name `omit`.
+ The `Omit` placeholder value will be visible to Jinja plugins during templating.
+ Jinja plugins requiring omit behavior are responsible for handling encountered `Omit` values.
+ `Omit` values remaining in template results will be automatically dropped during template finalization.
+ When a finalized template renders to a scalar `Omit`, `AnsibleValueOmittedError` will be raised.
+ Passing a value other than `Omit` for `value_for_omit` to the `template` call allows that value to be substituted instead of raising.
+ """
+
+ __slots__ = ()
+
+ def __new__(cls):
+ return Omit
+
+ def __repr__(self):
+ return "<>"
+
+
+Omit = object.__new__(_OmitType)
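+# Note: instantiating `_OmitType` always returns the existing singleton, i.e. `_OmitType() is Omit` holds.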
+
+_datatag._untaggable_types.add(_OmitType)
+
+
+IGNORE_SCALAR_VAR_TYPES = {value for value in _datatag._ANSIBLE_ALLOWED_SCALAR_VAR_TYPES if not issubclass(value, str)}
+"""Scalar variable types that short-circuit bypass templating."""
+
+PASS_THROUGH_SCALAR_VAR_TYPES = _datatag._ANSIBLE_ALLOWED_SCALAR_VAR_TYPES | {
+ _OmitType, # allow pass through of omit for later handling after top-level finalize completes
+}
+"""Scalar variable types which are allowed to appear in finalized template results."""
diff --git a/lib/ansible/_internal/_testing.py b/lib/ansible/_internal/_testing.py
new file mode 100644
index 00000000000..edc77dc76d0
--- /dev/null
+++ b/lib/ansible/_internal/_testing.py
@@ -0,0 +1,26 @@
+"""
+Testing utilities for use in integration tests, not unit tests or non-test code.
+Provides better error behavior than Python's `assert` statement.
+"""
+
+from __future__ import annotations
+
+import contextlib
+import typing as t
+
+
+class _Checker:
+ @staticmethod
+ def check(value: object, msg: str | None = 'Value is not truthy.') -> None:
+ """Raise an `AssertionError` if the given `value` is not truthy."""
+ if not value:
+ raise AssertionError(msg)
+
+
+@contextlib.contextmanager
+def hard_fail_context(msg: str) -> t.Generator[_Checker]:
+ """Enter a context which converts all exceptions to `BaseException` and provides a `Checker` instance for making assertions."""
+ try:
+ yield _Checker()
+ except BaseException as ex:
+ raise BaseException(f"Hard failure: {msg}") from ex
diff --git a/lib/ansible/_internal/_wrapt.py b/lib/ansible/_internal/_wrapt.py
new file mode 100644
index 00000000000..d493baaa717
--- /dev/null
+++ b/lib/ansible/_internal/_wrapt.py
@@ -0,0 +1,1052 @@
+# Copyright (c) 2013-2023, Graham Dumpleton
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# copied from https://github.com/GrahamDumpleton/wrapt/blob/1.15.0/src/wrapt/wrappers.py
+
+# LOCAL PATCHES:
+# - disabled optional relative import of the _wrappers C extension; we shouldn't need it
+
+from __future__ import annotations
+
+# The following makes it easier for us to script updates of the bundled code
+_BUNDLED_METADATA = {"pypi_name": "wrapt", "version": "1.15.0"}
+
+import os
+import sys
+import functools
+import operator
+import weakref
+import inspect
+
+PY2 = sys.version_info[0] == 2
+
+if PY2:
+ string_types = basestring,
+else:
+ string_types = str,
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ return meta("NewBase", bases, {})
+
+class _ObjectProxyMethods(object):
+
+ # We use properties to override the values of __module__ and
+ # __doc__. If we add these in ObjectProxy, the derived class
+ # __dict__ will still be setup to have string variants of these
+ # attributes and the rules of descriptors means that they appear to
+ # take precedence over the properties in the base class. To avoid
+ # that, we copy the properties into the derived class type itself
+ # via a meta class. In that way the properties will always take
+ # precedence.
+
+ @property
+ def __module__(self):
+ return self.__wrapped__.__module__
+
+ @__module__.setter
+ def __module__(self, value):
+ self.__wrapped__.__module__ = value
+
+ @property
+ def __doc__(self):
+ return self.__wrapped__.__doc__
+
+ @__doc__.setter
+ def __doc__(self, value):
+ self.__wrapped__.__doc__ = value
+
+    # We similarly use a property for __dict__. We need __dict__ to be
+ # explicit to ensure that vars() works as expected.
+
+ @property
+ def __dict__(self):
+ return self.__wrapped__.__dict__
+
+    # We also need to propagate the special __weakref__ attribute for the
+    # case where we are decorating classes which will define this. If we
+    # do not define it and a function like inspect.getmembers() is used on
+    # a decorator class, it will fail. This can't be in the derived classes.
+
+ @property
+ def __weakref__(self):
+ return self.__wrapped__.__weakref__
+
+class _ObjectProxyMetaType(type):
+ def __new__(cls, name, bases, dictionary):
+ # Copy our special properties into the class so that they
+ # always take precedence over attributes of the same name added
+ # during construction of a derived class. This is to save
+ # duplicating the implementation for them in all derived classes.
+
+ dictionary.update(vars(_ObjectProxyMethods))
+
+ return type.__new__(cls, name, bases, dictionary)
+
+class ObjectProxy(with_metaclass(_ObjectProxyMetaType)):
+
+ __slots__ = '__wrapped__'
+
+ def __init__(self, wrapped):
+ object.__setattr__(self, '__wrapped__', wrapped)
+
+    # Python 3.2+ has the __qualname__ attribute, but it does not
+    # allow it to be overridden using a property; it must instead
+    # be an actual string object.
+
+ try:
+ object.__setattr__(self, '__qualname__', wrapped.__qualname__)
+ except AttributeError:
+ pass
+
+    # Python 3.10 onwards also does not allow __annotations__ to be
+    # overridden using a property; it must instead be set explicitly.
+
+ try:
+ object.__setattr__(self, '__annotations__', wrapped.__annotations__)
+ except AttributeError:
+ pass
+
+ @property
+ def __name__(self):
+ return self.__wrapped__.__name__
+
+ @__name__.setter
+ def __name__(self, value):
+ self.__wrapped__.__name__ = value
+
+ @property
+ def __class__(self):
+ return self.__wrapped__.__class__
+
+ @__class__.setter
+ def __class__(self, value):
+ self.__wrapped__.__class__ = value
+
+ def __dir__(self):
+ return dir(self.__wrapped__)
+
+ def __str__(self):
+ return str(self.__wrapped__)
+
+ if not PY2:
+ def __bytes__(self):
+ return bytes(self.__wrapped__)
+
+ def __repr__(self):
+ return '<{} at 0x{:x} for {} at 0x{:x}>'.format(
+ type(self).__name__, id(self),
+ type(self.__wrapped__).__name__,
+ id(self.__wrapped__))
+
+ def __reversed__(self):
+ return reversed(self.__wrapped__)
+
+ if not PY2:
+ def __round__(self):
+ return round(self.__wrapped__)
+
+ if sys.hexversion >= 0x03070000:
+ def __mro_entries__(self, bases):
+ return (self.__wrapped__,)
+
+ def __lt__(self, other):
+ return self.__wrapped__ < other
+
+ def __le__(self, other):
+ return self.__wrapped__ <= other
+
+ def __eq__(self, other):
+ return self.__wrapped__ == other
+
+ def __ne__(self, other):
+ return self.__wrapped__ != other
+
+ def __gt__(self, other):
+ return self.__wrapped__ > other
+
+ def __ge__(self, other):
+ return self.__wrapped__ >= other
+
+ def __hash__(self):
+ return hash(self.__wrapped__)
+
+ def __nonzero__(self):
+ return bool(self.__wrapped__)
+
+ def __bool__(self):
+ return bool(self.__wrapped__)
+
+ def __setattr__(self, name, value):
+ if name.startswith('_self_'):
+ object.__setattr__(self, name, value)
+
+ elif name == '__wrapped__':
+ object.__setattr__(self, name, value)
+ try:
+ object.__delattr__(self, '__qualname__')
+ except AttributeError:
+ pass
+ try:
+ object.__setattr__(self, '__qualname__', value.__qualname__)
+ except AttributeError:
+ pass
+ try:
+ object.__delattr__(self, '__annotations__')
+ except AttributeError:
+ pass
+ try:
+ object.__setattr__(self, '__annotations__', value.__annotations__)
+ except AttributeError:
+ pass
+
+ elif name == '__qualname__':
+ setattr(self.__wrapped__, name, value)
+ object.__setattr__(self, name, value)
+
+ elif name == '__annotations__':
+ setattr(self.__wrapped__, name, value)
+ object.__setattr__(self, name, value)
+
+ elif hasattr(type(self), name):
+ object.__setattr__(self, name, value)
+
+ else:
+ setattr(self.__wrapped__, name, value)
+
+ def __getattr__(self, name):
+        # If we are being asked to look up '__wrapped__' then the
+ # '__init__()' method cannot have been called.
+
+ if name == '__wrapped__':
+ raise ValueError('wrapper has not been initialised')
+
+ return getattr(self.__wrapped__, name)
+
+ def __delattr__(self, name):
+ if name.startswith('_self_'):
+ object.__delattr__(self, name)
+
+ elif name == '__wrapped__':
+ raise TypeError('__wrapped__ must be an object')
+
+ elif name == '__qualname__':
+ object.__delattr__(self, name)
+ delattr(self.__wrapped__, name)
+
+ elif hasattr(type(self), name):
+ object.__delattr__(self, name)
+
+ else:
+ delattr(self.__wrapped__, name)
+
+ def __add__(self, other):
+ return self.__wrapped__ + other
+
+ def __sub__(self, other):
+ return self.__wrapped__ - other
+
+ def __mul__(self, other):
+ return self.__wrapped__ * other
+
+ def __div__(self, other):
+ return operator.div(self.__wrapped__, other)
+
+ def __truediv__(self, other):
+ return operator.truediv(self.__wrapped__, other)
+
+ def __floordiv__(self, other):
+ return self.__wrapped__ // other
+
+ def __mod__(self, other):
+ return self.__wrapped__ % other
+
+ def __divmod__(self, other):
+ return divmod(self.__wrapped__, other)
+
+ def __pow__(self, other, *args):
+ return pow(self.__wrapped__, other, *args)
+
+ def __lshift__(self, other):
+ return self.__wrapped__ << other
+
+ def __rshift__(self, other):
+ return self.__wrapped__ >> other
+
+ def __and__(self, other):
+ return self.__wrapped__ & other
+
+ def __xor__(self, other):
+ return self.__wrapped__ ^ other
+
+ def __or__(self, other):
+ return self.__wrapped__ | other
+
+ def __radd__(self, other):
+ return other + self.__wrapped__
+
+ def __rsub__(self, other):
+ return other - self.__wrapped__
+
+ def __rmul__(self, other):
+ return other * self.__wrapped__
+
+ def __rdiv__(self, other):
+ return operator.div(other, self.__wrapped__)
+
+ def __rtruediv__(self, other):
+ return operator.truediv(other, self.__wrapped__)
+
+ def __rfloordiv__(self, other):
+ return other // self.__wrapped__
+
+ def __rmod__(self, other):
+ return other % self.__wrapped__
+
+ def __rdivmod__(self, other):
+ return divmod(other, self.__wrapped__)
+
+ def __rpow__(self, other, *args):
+ return pow(other, self.__wrapped__, *args)
+
+ def __rlshift__(self, other):
+ return other << self.__wrapped__
+
+ def __rrshift__(self, other):
+ return other >> self.__wrapped__
+
+ def __rand__(self, other):
+ return other & self.__wrapped__
+
+ def __rxor__(self, other):
+ return other ^ self.__wrapped__
+
+ def __ror__(self, other):
+ return other | self.__wrapped__
+
+ def __iadd__(self, other):
+ self.__wrapped__ += other
+ return self
+
+ def __isub__(self, other):
+ self.__wrapped__ -= other
+ return self
+
+ def __imul__(self, other):
+ self.__wrapped__ *= other
+ return self
+
+ def __idiv__(self, other):
+ self.__wrapped__ = operator.idiv(self.__wrapped__, other)
+ return self
+
+ def __itruediv__(self, other):
+ self.__wrapped__ = operator.itruediv(self.__wrapped__, other)
+ return self
+
+ def __ifloordiv__(self, other):
+ self.__wrapped__ //= other
+ return self
+
+ def __imod__(self, other):
+ self.__wrapped__ %= other
+ return self
+
+ def __ipow__(self, other):
+ self.__wrapped__ **= other
+ return self
+
+ def __ilshift__(self, other):
+ self.__wrapped__ <<= other
+ return self
+
+ def __irshift__(self, other):
+ self.__wrapped__ >>= other
+ return self
+
+ def __iand__(self, other):
+ self.__wrapped__ &= other
+ return self
+
+ def __ixor__(self, other):
+ self.__wrapped__ ^= other
+ return self
+
+ def __ior__(self, other):
+ self.__wrapped__ |= other
+ return self
+
+ def __neg__(self):
+ return -self.__wrapped__
+
+ def __pos__(self):
+ return +self.__wrapped__
+
+ def __abs__(self):
+ return abs(self.__wrapped__)
+
+ def __invert__(self):
+ return ~self.__wrapped__
+
+ def __int__(self):
+ return int(self.__wrapped__)
+
+ def __long__(self):
+ return long(self.__wrapped__)
+
+ def __float__(self):
+ return float(self.__wrapped__)
+
+ def __complex__(self):
+ return complex(self.__wrapped__)
+
+ def __oct__(self):
+ return oct(self.__wrapped__)
+
+ def __hex__(self):
+ return hex(self.__wrapped__)
+
+ def __index__(self):
+ return operator.index(self.__wrapped__)
+
+ def __len__(self):
+ return len(self.__wrapped__)
+
+ def __contains__(self, value):
+ return value in self.__wrapped__
+
+ def __getitem__(self, key):
+ return self.__wrapped__[key]
+
+ def __setitem__(self, key, value):
+ self.__wrapped__[key] = value
+
+ def __delitem__(self, key):
+ del self.__wrapped__[key]
+
+ def __getslice__(self, i, j):
+ return self.__wrapped__[i:j]
+
+ def __setslice__(self, i, j, value):
+ self.__wrapped__[i:j] = value
+
+ def __delslice__(self, i, j):
+ del self.__wrapped__[i:j]
+
+ def __enter__(self):
+ return self.__wrapped__.__enter__()
+
+ def __exit__(self, *args, **kwargs):
+ return self.__wrapped__.__exit__(*args, **kwargs)
+
+ def __iter__(self):
+ return iter(self.__wrapped__)
+
+ def __copy__(self):
+ raise NotImplementedError('object proxy must define __copy__()')
+
+ def __deepcopy__(self, memo):
+ raise NotImplementedError('object proxy must define __deepcopy__()')
+
+ def __reduce__(self):
+ raise NotImplementedError(
+ 'object proxy must define __reduce_ex__()')
+
+ def __reduce_ex__(self, protocol):
+ raise NotImplementedError(
+ 'object proxy must define __reduce_ex__()')
+
+class CallableObjectProxy(ObjectProxy):
+
+ def __call__(*args, **kwargs):
+ def _unpack_self(self, *args):
+ return self, args
+
+ self, args = _unpack_self(*args)
+
+ return self.__wrapped__(*args, **kwargs)
+
+class PartialCallableObjectProxy(ObjectProxy):
+
+ def __init__(*args, **kwargs):
+ def _unpack_self(self, *args):
+ return self, args
+
+ self, args = _unpack_self(*args)
+
+ if len(args) < 1:
+ raise TypeError('partial type takes at least one argument')
+
+ wrapped, args = args[0], args[1:]
+
+ if not callable(wrapped):
+ raise TypeError('the first argument must be callable')
+
+ super(PartialCallableObjectProxy, self).__init__(wrapped)
+
+ self._self_args = args
+ self._self_kwargs = kwargs
+
+ def __call__(*args, **kwargs):
+ def _unpack_self(self, *args):
+ return self, args
+
+ self, args = _unpack_self(*args)
+
+ _args = self._self_args + args
+
+ _kwargs = dict(self._self_kwargs)
+ _kwargs.update(kwargs)
+
+ return self.__wrapped__(*_args, **_kwargs)
+
+class _FunctionWrapperBase(ObjectProxy):
+
+ __slots__ = ('_self_instance', '_self_wrapper', '_self_enabled',
+ '_self_binding', '_self_parent')
+
+ def __init__(self, wrapped, instance, wrapper, enabled=None,
+ binding='function', parent=None):
+
+ super(_FunctionWrapperBase, self).__init__(wrapped)
+
+ object.__setattr__(self, '_self_instance', instance)
+ object.__setattr__(self, '_self_wrapper', wrapper)
+ object.__setattr__(self, '_self_enabled', enabled)
+ object.__setattr__(self, '_self_binding', binding)
+ object.__setattr__(self, '_self_parent', parent)
+
+ def __get__(self, instance, owner):
+ # This method is actually doing double duty for both unbound and
+ # bound derived wrapper classes. It should possibly be broken up
+ # and the distinct functionality moved into the derived classes.
+ # Can't do that straight away due to some legacy code which is
+ # relying on it being here in this base class.
+ #
+ # The distinguishing attribute which determines whether we are
+ # being called in an unbound or bound wrapper is the parent
+ # attribute. If binding has never occurred, then the parent will
+ # be None.
+ #
+        # The first case, therefore, is where we are called via an unbound
+        # wrapper. In this case we perform the binding.
+ #
+ # We have one special case to worry about here. This is where we
+ # are decorating a nested class. In this case the wrapped class
+ # would not have a __get__() method to call. In that case we
+ # simply return self.
+ #
+ # Note that we otherwise still do binding even if instance is
+ # None and accessing an unbound instance method from a class.
+ # This is because we need to be able to later detect that
+ # specific case as we will need to extract the instance from the
+ # first argument of those passed in.
+
+ if self._self_parent is None:
+ if not inspect.isclass(self.__wrapped__):
+ descriptor = self.__wrapped__.__get__(instance, owner)
+
+ return self.__bound_function_wrapper__(descriptor, instance,
+ self._self_wrapper, self._self_enabled,
+ self._self_binding, self)
+
+ return self
+
+ # Now we have the case of binding occurring a second time on what
+ # was already a bound function. In this case we would usually
+ # return ourselves again. This mirrors what Python does.
+ #
+ # The special case this time is where we were originally bound
+ # with an instance of None and we were likely an instance
+ # method. In that case we rebind against the original wrapped
+ # function from the parent again.
+
+ if self._self_instance is None and self._self_binding == 'function':
+ descriptor = self._self_parent.__wrapped__.__get__(
+ instance, owner)
+
+ return self._self_parent.__bound_function_wrapper__(
+ descriptor, instance, self._self_wrapper,
+ self._self_enabled, self._self_binding,
+ self._self_parent)
+
+ return self
+
+ def __call__(*args, **kwargs):
+ def _unpack_self(self, *args):
+ return self, args
+
+ self, args = _unpack_self(*args)
+
+ # If enabled has been specified, then evaluate it at this point
+ # and if the wrapper is not to be executed, then simply return
+ # the bound function rather than a bound wrapper for the bound
+ # function. When evaluating enabled, if it is callable we call
+ # it, otherwise we evaluate it as a boolean.
+
+ if self._self_enabled is not None:
+ if callable(self._self_enabled):
+ if not self._self_enabled():
+ return self.__wrapped__(*args, **kwargs)
+ elif not self._self_enabled:
+ return self.__wrapped__(*args, **kwargs)
+
+ # This can occur where initial function wrapper was applied to
+ # a function that was already bound to an instance. In that case
+ # we want to extract the instance from the function and use it.
+
+ if self._self_binding in ('function', 'classmethod'):
+ if self._self_instance is None:
+ instance = getattr(self.__wrapped__, '__self__', None)
+ if instance is not None:
+ return self._self_wrapper(self.__wrapped__, instance,
+ args, kwargs)
+
+ # This is generally invoked when the wrapped function is being
+ # called as a normal function and is not bound to a class as an
+ # instance method. This is also invoked in the case where the
+ # wrapped function was a method, but this wrapper was in turn
+ # wrapped using the staticmethod decorator.
+
+ return self._self_wrapper(self.__wrapped__, self._self_instance,
+ args, kwargs)
+
+ def __set_name__(self, owner, name):
+ # This is a special method use to supply information to
+ # descriptors about what the name of variable in a class
+ # definition is. Not wanting to add this to ObjectProxy as not
+ # sure of broader implications of doing that. Thus restrict to
+ # FunctionWrapper used by decorators.
+
+ if hasattr(self.__wrapped__, "__set_name__"):
+ self.__wrapped__.__set_name__(owner, name)
+
+ def __instancecheck__(self, instance):
+        # This is a special method used by isinstance() to check whether
+        # an object is an instance of `__wrapped__`.
+ return isinstance(instance, self.__wrapped__)
+
+ def __subclasscheck__(self, subclass):
+ # This is a special method used by issubclass() to make checks
+        # about inheritance of classes. We need to unwrap any object
+ # proxy. Not wanting to add this to ObjectProxy as not sure of
+ # broader implications of doing that. Thus restrict to
+ # FunctionWrapper used by decorators.
+
+ if hasattr(subclass, "__wrapped__"):
+ return issubclass(subclass.__wrapped__, self.__wrapped__)
+ else:
+ return issubclass(subclass, self.__wrapped__)
+
+class BoundFunctionWrapper(_FunctionWrapperBase):
+
+ def __call__(*args, **kwargs):
+ def _unpack_self(self, *args):
+ return self, args
+
+ self, args = _unpack_self(*args)
+
+ # If enabled has been specified, then evaluate it at this point
+ # and if the wrapper is not to be executed, then simply return
+ # the bound function rather than a bound wrapper for the bound
+ # function. When evaluating enabled, if it is callable we call
+ # it, otherwise we evaluate it as a boolean.
+
+ if self._self_enabled is not None:
+ if callable(self._self_enabled):
+ if not self._self_enabled():
+ return self.__wrapped__(*args, **kwargs)
+ elif not self._self_enabled:
+ return self.__wrapped__(*args, **kwargs)
+
+ # We need to do things different depending on whether we are
+ # likely wrapping an instance method vs a static method or class
+ # method.
+
+ if self._self_binding == 'function':
+ if self._self_instance is None:
+ # This situation can occur where someone is calling the
+ # instancemethod via the class type and passing the instance
+ # as the first argument. We need to shift the args before
+ # making the call to the wrapper and effectively bind the
+ # instance to the wrapped function using a partial so the
+ # wrapper doesn't see anything as being different.
+
+ if not args:
+ raise TypeError('missing 1 required positional argument')
+
+ instance, args = args[0], args[1:]
+ wrapped = PartialCallableObjectProxy(self.__wrapped__, instance)
+ return self._self_wrapper(wrapped, instance, args, kwargs)
+
+ return self._self_wrapper(self.__wrapped__, self._self_instance,
+ args, kwargs)
+
+ else:
+ # As in this case we would be dealing with a classmethod or
+ # staticmethod, then _self_instance will only tell us whether
+ # when calling the classmethod or staticmethod they did it via an
+ # instance of the class it is bound to and not the case where
+ # done by the class type itself. We thus ignore _self_instance
+ # and use the __self__ attribute of the bound function instead.
+ # For a classmethod, this means instance will be the class type
+ # and for a staticmethod it will be None. This is probably the
+            # more useful thing we can pass through even though we lose
+            # knowledge of whether they were called on the instance vs the
+            # class type, as it reflects what they have available in the
+            # decorated function.
+
+ instance = getattr(self.__wrapped__, '__self__', None)
+
+ return self._self_wrapper(self.__wrapped__, instance, args,
+ kwargs)
+
+class FunctionWrapper(_FunctionWrapperBase):
+
+ __bound_function_wrapper__ = BoundFunctionWrapper
+
+ def __init__(self, wrapped, wrapper, enabled=None):
+ # What it is we are wrapping here could be anything. We need to
+ # try and detect specific cases though. In particular, we need
+ # to detect when we are given something that is a method of a
+ # class. Further, we need to know when it is likely an instance
+ # method, as opposed to a class or static method. This can
+        # become problematic though as there isn't strictly a foolproof
+ # method of knowing.
+ #
+ # The situations we could encounter when wrapping a method are:
+ #
+ # 1. The wrapper is being applied as part of a decorator which
+ # is a part of the class definition. In this case what we are
+ # given is the raw unbound function, classmethod or staticmethod
+ # wrapper objects.
+ #
+ # The problem here is that we will not know we are being applied
+ # in the context of the class being set up. This becomes
+ # important later for the case of an instance method, because in
+ # that case we just see it as a raw function and can't
+ # distinguish it from wrapping a normal function outside of
+ # a class context.
+ #
+ # 2. The wrapper is being applied when performing monkey
+ # patching of the class type afterwards and the method to be
+ # wrapped was retrieved direct from the __dict__ of the class
+ # type. This is effectively the same as (1) above.
+ #
+ # 3. The wrapper is being applied when performing monkey
+ # patching of the class type afterwards and the method to be
+ # wrapped was retrieved from the class type. In this case
+ # binding will have been performed where the instance against
+ # which the method is bound will be None at that point.
+ #
+ # This case is a problem because we can no longer tell if the
+ # method was a static method, plus if using Python3, we cannot
+ # tell if it was an instance method as the concept of an
+        # unbound method no longer exists.
+ #
+ # 4. The wrapper is being applied when performing monkey
+ # patching of an instance of a class. In this case binding will
+        # have been performed where the instance was not None.
+ #
+ # This case is a problem because we can no longer tell if the
+ # method was a static method.
+ #
+ # Overall, the best we can do is look at the original type of the
+ # object which was wrapped prior to any binding being done and
+ # see if it is an instance of classmethod or staticmethod. In
+ # the case where other decorators are between us and them, if
+ # they do not propagate the __class__ attribute so that the
+        # isinstance() check works, then likely this will do the wrong
+ # thing where classmethod and staticmethod are used.
+ #
+ # Since it is likely to be very rare that anyone even puts
+ # decorators around classmethod and staticmethod, likelihood of
+ # that being an issue is very small, so we accept it and suggest
+ # that those other decorators be fixed. It is also only an issue
+ # if a decorator wants to actually do things with the arguments.
+ #
+ # As to not being able to identify static methods properly, we
+ # just hope that that isn't something people are going to want
+ # to wrap, or if they do suggest they do it the correct way by
+ # ensuring that it is decorated in the class definition itself,
+ # or patch it in the __dict__ of the class type.
+ #
+ # So to get the best outcome we can, whenever we aren't sure what
+ # it is, we label it as a 'function'. If it was already bound and
+ # that is rebound later, we assume that it will be an instance
+    # method and try to cope with the possibility that the 'self'
+    # argument is being passed as an explicit argument and shuffle
+ # the arguments around to extract 'self' for use as the instance.
+
+ if isinstance(wrapped, classmethod):
+ binding = 'classmethod'
+
+ elif isinstance(wrapped, staticmethod):
+ binding = 'staticmethod'
+
+ elif hasattr(wrapped, '__self__'):
+ if inspect.isclass(wrapped.__self__):
+ binding = 'classmethod'
+ else:
+ binding = 'function'
+
+ else:
+ binding = 'function'
+
+ super(FunctionWrapper, self).__init__(wrapped, None, wrapper,
+ enabled, binding)
+
+# disabled support for native extension; we likely don't need it
+# try:
+# if not os.environ.get('WRAPT_DISABLE_EXTENSIONS'):
+# from ._wrappers import (ObjectProxy, CallableObjectProxy,
+# PartialCallableObjectProxy, FunctionWrapper,
+# BoundFunctionWrapper, _FunctionWrapperBase)
+# except ImportError:
+# pass
+
+# Helper functions for applying wrappers to existing functions.
+
+def resolve_path(module, name):
+ if isinstance(module, string_types):
+ __import__(module)
+ module = sys.modules[module]
+
+ parent = module
+
+ path = name.split('.')
+ attribute = path[0]
+
+ # We can't just always use getattr() because in doing
+ # that on a class it will cause binding to occur which
+ # will complicate things later and cause some things not
+ # to work. For the case of a class we therefore access
+ # the __dict__ directly. To cope though with the wrong
+ # class being given to us, or a method being moved into
+ # a base class, we need to walk the class hierarchy to
+ # work out exactly which __dict__ the method was defined
+ # in, as accessing it from __dict__ will fail if it was
+ # not actually on the class given. Fallback to using
+ # getattr() if we can't find it. If it truly doesn't
+ # exist, then that will fail.
+
+ def lookup_attribute(parent, attribute):
+ if inspect.isclass(parent):
+ for cls in inspect.getmro(parent):
+ if attribute in vars(cls):
+ return vars(cls)[attribute]
+ else:
+ return getattr(parent, attribute)
+ else:
+ return getattr(parent, attribute)
+
+ original = lookup_attribute(parent, attribute)
+
+ for attribute in path[1:]:
+ parent = original
+ original = lookup_attribute(parent, attribute)
+
+ return (parent, attribute, original)
+
+def apply_patch(parent, attribute, replacement):
+ setattr(parent, attribute, replacement)
+
+def wrap_object(module, name, factory, args=(), kwargs={}):
+ (parent, attribute, original) = resolve_path(module, name)
+ wrapper = factory(original, *args, **kwargs)
+ apply_patch(parent, attribute, wrapper)
+ return wrapper
+
+# Function for applying a proxy object to an attribute of a class
+# instance. The wrapper works by defining an attribute of the same name
+# on the class which is a descriptor and which intercepts access to the
+# instance attribute. Note that this cannot be used on attributes which
+# are themselves defined by a property object.
+
+class AttributeWrapper(object):
+
+ def __init__(self, attribute, factory, args, kwargs):
+ self.attribute = attribute
+ self.factory = factory
+ self.args = args
+ self.kwargs = kwargs
+
+ def __get__(self, instance, owner):
+ value = instance.__dict__[self.attribute]
+ return self.factory(value, *self.args, **self.kwargs)
+
+ def __set__(self, instance, value):
+ instance.__dict__[self.attribute] = value
+
+ def __delete__(self, instance):
+ del instance.__dict__[self.attribute]
+
+def wrap_object_attribute(module, name, factory, args=(), kwargs={}):
+ path, attribute = name.rsplit('.', 1)
+ parent = resolve_path(module, path)[2]
+ wrapper = AttributeWrapper(attribute, factory, args, kwargs)
+ apply_patch(parent, attribute, wrapper)
+ return wrapper
+
+# Functions for creating a simple decorator using a FunctionWrapper,
+# plus short cut functions for applying wrappers to functions. These are
+# for use when doing monkey patching. For a more featured way of
+# creating decorators see the decorator decorator instead.
+
+def function_wrapper(wrapper):
+ def _wrapper(wrapped, instance, args, kwargs):
+ target_wrapped = args[0]
+ if instance is None:
+ target_wrapper = wrapper
+ elif inspect.isclass(instance):
+ target_wrapper = wrapper.__get__(None, instance)
+ else:
+ target_wrapper = wrapper.__get__(instance, type(instance))
+ return FunctionWrapper(target_wrapped, target_wrapper)
+ return FunctionWrapper(wrapper, _wrapper)
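+
+# Illustrative usage sketch (the standard wrapt wrapper signature):
+#
+#   @function_wrapper
+#   def pass_through(wrapped, instance, args, kwargs):
+#       return wrapped(*args, **kwargs)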
+
+def wrap_function_wrapper(module, name, wrapper):
+ return wrap_object(module, name, FunctionWrapper, (wrapper,))
+
+def patch_function_wrapper(module, name):
+ def _wrapper(wrapper):
+ return wrap_object(module, name, FunctionWrapper, (wrapper,))
+ return _wrapper
+
+def transient_function_wrapper(module, name):
+ def _decorator(wrapper):
+ def _wrapper(wrapped, instance, args, kwargs):
+ target_wrapped = args[0]
+ if instance is None:
+ target_wrapper = wrapper
+ elif inspect.isclass(instance):
+ target_wrapper = wrapper.__get__(None, instance)
+ else:
+ target_wrapper = wrapper.__get__(instance, type(instance))
+ def _execute(wrapped, instance, args, kwargs):
+ (parent, attribute, original) = resolve_path(module, name)
+ replacement = FunctionWrapper(original, target_wrapper)
+ setattr(parent, attribute, replacement)
+ try:
+ return wrapped(*args, **kwargs)
+ finally:
+ setattr(parent, attribute, original)
+ return FunctionWrapper(target_wrapped, _execute)
+ return FunctionWrapper(wrapper, _wrapper)
+ return _decorator
+
+# A weak function proxy. This will work on instance methods, class
+# methods, static methods and regular functions. Special treatment is
+# needed for the method types because the bound method is effectively a
+# transient object and applying a weak reference to one will immediately
+# result in it being destroyed and the weakref callback called. The weak
+# reference is therefore applied to the instance the method is bound to
+# and the original function. The function is then rebound at the point
+# of a call via the weak function proxy.
+
+def _weak_function_proxy_callback(ref, proxy, callback):
+ if proxy._self_expired:
+ return
+
+ proxy._self_expired = True
+
+ # This could raise an exception. We let it propagate back and let
+ # the weakref.proxy() deal with it, at which point it generally
+ # prints out a short error message direct to stderr and keeps going.
+
+ if callback is not None:
+ callback(proxy)
+
+class WeakFunctionProxy(ObjectProxy):
+
+ __slots__ = ('_self_expired', '_self_instance')
+
+ def __init__(self, wrapped, callback=None):
+ # We need to determine if the wrapped function is actually a
+ # bound method. In the case of a bound method, we need to keep a
+ # reference to the original unbound function and the instance.
+ # This is necessary because if we hold a reference to the bound
+ # function, it will be the only reference and given it is a
+ # temporary object, it will almost immediately expire and
+ # the weakref callback triggered. So what is done is that we
+ # hold a reference to the instance and unbound function and
+ # when called bind the function to the instance once again and
+ # then call it. Note that we avoid using a nested function for
+ # the callback here so as not to cause any odd reference cycles.
+
+ _callback = callback and functools.partial(
+ _weak_function_proxy_callback, proxy=self,
+ callback=callback)
+
+ self._self_expired = False
+
+ if isinstance(wrapped, _FunctionWrapperBase):
+ self._self_instance = weakref.ref(wrapped._self_instance,
+ _callback)
+
+ if wrapped._self_parent is not None:
+ super(WeakFunctionProxy, self).__init__(
+ weakref.proxy(wrapped._self_parent, _callback))
+
+ else:
+ super(WeakFunctionProxy, self).__init__(
+ weakref.proxy(wrapped, _callback))
+
+ return
+
+ try:
+ self._self_instance = weakref.ref(wrapped.__self__, _callback)
+
+ super(WeakFunctionProxy, self).__init__(
+ weakref.proxy(wrapped.__func__, _callback))
+
+ except AttributeError:
+ self._self_instance = None
+
+ super(WeakFunctionProxy, self).__init__(
+ weakref.proxy(wrapped, _callback))
+
+ def __call__(*args, **kwargs):
+ def _unpack_self(self, *args):
+ return self, args
+
+ self, args = _unpack_self(*args)
+
+ # We perform a boolean check here on the instance and wrapped
+ # function as that will trigger the reference error prior to
+ # calling if the reference had expired.
+
+ instance = self._self_instance and self._self_instance()
+ function = self.__wrapped__ and self.__wrapped__
+
+ # If the wrapped function was originally a bound function, for
+ # which we retained a reference to the instance and the unbound
+        # function, we need to rebind the function and then call it. If
+        # not, just call the wrapped function.
+
+ if instance is None:
+ return self.__wrapped__(*args, **kwargs)
+
+ return function.__get__(instance, type(instance))(*args, **kwargs)
\ No newline at end of file
diff --git a/lib/ansible/_internal/_yaml/__init__.py b/lib/ansible/_internal/_yaml/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/lib/ansible/_internal/_yaml/_constructor.py b/lib/ansible/_internal/_yaml/_constructor.py
new file mode 100644
index 00000000000..ad6f7ba23d3
--- /dev/null
+++ b/lib/ansible/_internal/_yaml/_constructor.py
@@ -0,0 +1,240 @@
+from __future__ import annotations
+
+import abc
+import copy
+import typing as t
+
+from yaml import Node, ScalarNode
+from yaml.constructor import SafeConstructor
+from yaml.resolver import BaseResolver
+
+from ansible import constants as C
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils._internal._datatag import AnsibleTagHelper, AnsibleDatatagBase
+from ansible._internal._datatag._tags import Origin, TrustedAsTemplate
+from ansible.parsing.vault import EncryptedString
+from ansible.utils.display import Display
+
+from ._errors import AnsibleConstructorError
+
+display = Display()
+
+_TRUSTED_AS_TEMPLATE: t.Final[TrustedAsTemplate] = TrustedAsTemplate()
+
+
+class _BaseConstructor(SafeConstructor, metaclass=abc.ABCMeta):
+ """Base class for Ansible YAML constructors."""
+
+ @classmethod
+ @abc.abstractmethod
+ def _register_constructors(cls) -> None:
+ """Method used to register constructors to derived types during class initialization."""
+
+ def __init_subclass__(cls, **kwargs) -> None:
+ """Initialization for derived types."""
+ cls._register_constructors()
+
+
+class AnsibleInstrumentedConstructor(_BaseConstructor):
+ """Ansible constructor which supports Ansible custom behavior such as `Origin` tagging, but no Ansible-specific YAML tags."""
+
+ name: t.Any # provided by the YAML parser, which retrieves it from the stream
+
+ def __init__(self, origin: Origin, trusted_as_template: bool) -> None:
+ if not origin.line_num:
+ origin = origin.replace(line_num=1)
+
+ self._origin = origin
+ self._trusted_as_template = trusted_as_template
+ self._duplicate_key_mode = C.config.get_config_value('DUPLICATE_YAML_DICT_KEY')
+
+ super().__init__()
+
+ @property
+ def trusted_as_template(self) -> bool:
+ return self._trusted_as_template
+
+ def construct_yaml_map(self, node):
+ data = self._node_position_info(node).tag({}) # always an ordered dictionary on py3.7+
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_mapping(self, node, deep=False):
+ # Delegate to built-in implementation to construct the mapping.
+ # This is done before checking for duplicates to leverage existing error checking on the input node.
+ mapping = super().construct_mapping(node, deep)
+ keys = set()
+
+ # Now that the node is known to be a valid mapping, handle any duplicate keys.
+ for key_node, _value_node in node.value:
+ if (key := self.construct_object(key_node, deep=deep)) in keys:
+ msg = f'Found duplicate mapping key {key!r}.'
+
+ if self._duplicate_key_mode == 'error':
+ raise AnsibleConstructorError(problem=msg, problem_mark=key_node.start_mark)
+
+ if self._duplicate_key_mode == 'warn':
+ display.warning(msg=msg, obj=key, help_text='Using last defined value only.')
+
+ keys.add(key)
+
+ return mapping
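+
+    # Illustrative sketch: with DUPLICATE_YAML_DICT_KEY=warn, parsing '{a: 1, a: 2}' warns and the
+    # resulting mapping keeps only the last value, i.e. {'a': 2}.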
+
+ def construct_yaml_int(self, node):
+ value = super().construct_yaml_int(node)
+ return self._node_position_info(node).tag(value)
+
+ def construct_yaml_float(self, node):
+ value = super().construct_yaml_float(node)
+ return self._node_position_info(node).tag(value)
+
+ def construct_yaml_timestamp(self, node):
+ value = super().construct_yaml_timestamp(node)
+ return self._node_position_info(node).tag(value)
+
+ def construct_yaml_omap(self, node):
+ origin = self._node_position_info(node)
+ display.deprecated(
+ msg='Use of the YAML `!!omap` tag is deprecated.',
+ version='2.23',
+ obj=origin,
+ help_text='Use a standard mapping instead, as key order is always preserved.',
+ )
+ items = list(super().construct_yaml_omap(node))[0]
+ items = [origin.tag(item) for item in items]
+ yield origin.tag(items)
+
+ def construct_yaml_pairs(self, node):
+ origin = self._node_position_info(node)
+ display.deprecated(
+ msg='Use of the YAML `!!pairs` tag is deprecated.',
+ version='2.23',
+ obj=origin,
+ help_text='Use a standard mapping instead.',
+ )
+ items = list(super().construct_yaml_pairs(node))[0]
+ items = [origin.tag(item) for item in items]
+ yield origin.tag(items)
+
+ def construct_yaml_str(self, node: ScalarNode) -> str:
+ # Override the default string handling function
+ # to always return unicode objects
+ # DTFIX-FUTURE: is this to_text conversion still necessary under Py3?
+ value = to_text(self.construct_scalar(node))
+
+ tags: list[AnsibleDatatagBase] = [self._node_position_info(node)]
+
+ if self.trusted_as_template:
+            # NB: since we're not context aware, this will happily add trust to dictionary keys; this is actually necessary for
+            # certain backward compat scenarios, though it might be accomplished in other ways if we wanted to avoid trusting keys
+            # in the general scenario
+ tags.append(_TRUSTED_AS_TEMPLATE)
+
+ return AnsibleTagHelper.tag(value, tags)
+
+ def construct_yaml_binary(self, node):
+ value = super().construct_yaml_binary(node)
+
+ return AnsibleTagHelper.tag(value, self._node_position_info(node))
+
+ def construct_yaml_set(self, node):
+ data = AnsibleTagHelper.tag(set(), self._node_position_info(node))
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_seq(self, node):
+ data = self._node_position_info(node).tag([])
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def _resolve_and_construct_object(self, node):
+ # use a copied node to avoid mutating existing node and tripping the recursion check in construct_object
+ copied_node = copy.copy(node)
+ # repeat implicit resolution process to determine the proper tag for the value in the unsafe node
+ copied_node.tag = t.cast(BaseResolver, self).resolve(type(node), node.value, (True, False))
+
+ # re-entrant call using the correct tag
+ # non-deferred construction of hierarchical nodes so the result is a fully realized object, and so our stateful unsafe propagation behavior works
+ return self.construct_object(copied_node, deep=True)
+
+ def _node_position_info(self, node) -> Origin:
+        # node.start_mark values are zero-based: the line is where the previous token ended (plus any empty lines),
+        # so offset it by the origin's starting line number; add one to the column so the first column is column 1
+ return self._origin.replace(line_num=node.start_mark.line + self._origin.line_num, col_num=node.start_mark.column + 1)
+
+ @classmethod
+ def _register_constructors(cls) -> None:
+ constructors: dict[str, t.Callable] = {
+ 'tag:yaml.org,2002:binary': cls.construct_yaml_binary,
+ 'tag:yaml.org,2002:float': cls.construct_yaml_float,
+ 'tag:yaml.org,2002:int': cls.construct_yaml_int,
+ 'tag:yaml.org,2002:map': cls.construct_yaml_map,
+ 'tag:yaml.org,2002:omap': cls.construct_yaml_omap,
+ 'tag:yaml.org,2002:pairs': cls.construct_yaml_pairs,
+ 'tag:yaml.org,2002:python/dict': cls.construct_yaml_map,
+ 'tag:yaml.org,2002:python/unicode': cls.construct_yaml_str,
+ 'tag:yaml.org,2002:seq': cls.construct_yaml_seq,
+ 'tag:yaml.org,2002:set': cls.construct_yaml_set,
+ 'tag:yaml.org,2002:str': cls.construct_yaml_str,
+ 'tag:yaml.org,2002:timestamp': cls.construct_yaml_timestamp,
+ }
+
+ for tag, constructor in constructors.items():
+ cls.add_constructor(tag, constructor)
+
+
+class AnsibleConstructor(AnsibleInstrumentedConstructor):
+ """Ansible constructor which supports Ansible custom behavior such as `Origin` tagging, as well as Ansible-specific YAML tags."""
+
+ def __init__(self, origin: Origin, trusted_as_template: bool) -> None:
+ self._unsafe_depth = 0 # volatile state var used during recursive construction of a value tagged unsafe
+
+ super().__init__(origin=origin, trusted_as_template=trusted_as_template)
+
+ @property
+ def trusted_as_template(self) -> bool:
+ return self._trusted_as_template and not self._unsafe_depth
+
+ def construct_yaml_unsafe(self, node):
+ self._unsafe_depth += 1
+
+ try:
+ return self._resolve_and_construct_object(node)
+ finally:
+ self._unsafe_depth -= 1
+
+ def construct_yaml_vault(self, node: Node) -> EncryptedString:
+ ciphertext = self._resolve_and_construct_object(node)
+
+ if not isinstance(ciphertext, str):
+ raise AnsibleConstructorError(problem=f"the {node.tag!r} tag requires a string value", problem_mark=node.start_mark)
+
+ encrypted_string = AnsibleTagHelper.tag_copy(ciphertext, EncryptedString(ciphertext=AnsibleTagHelper.untag(ciphertext)))
+
+ return encrypted_string
+
+ def construct_yaml_vault_encrypted(self, node: Node) -> EncryptedString:
+ origin = self._node_position_info(node)
+ display.deprecated(
+ msg='Use of the YAML `!vault-encrypted` tag is deprecated.',
+ version='2.23',
+ obj=origin,
+ help_text='Use the `!vault` tag instead.',
+ )
+
+ return self.construct_yaml_vault(node)
+
+ @classmethod
+ def _register_constructors(cls) -> None:
+ super()._register_constructors()
+
+ constructors: dict[str, t.Callable] = {
+ '!unsafe': cls.construct_yaml_unsafe,
+ '!vault': cls.construct_yaml_vault,
+ '!vault-encrypted': cls.construct_yaml_vault_encrypted,
+ }
+
+ for tag, constructor in constructors.items():
+ cls.add_constructor(tag, constructor)
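
The constructor above attaches `Origin` position tags to every value it builds, keyed off each node's `start_mark`. Below is a minimal standalone sketch of the same technique using plain PyYAML; the `PositionTrackingLoader` name and the `positions` attribute are illustrative, not part of this change:

```python
import yaml


class PositionTrackingLoader(yaml.SafeLoader):
    """SafeLoader that records the 1-based (line, col) of every string scalar it constructs."""

    def __init__(self, stream):
        super().__init__(stream)
        self.positions: dict[str, tuple[int, int]] = {}

    def construct_yaml_str(self, node):
        value = super().construct_yaml_str(node)
        # start_mark is 0-based, hence the + 1 adjustments, as in _node_position_info above
        self.positions[value] = (node.start_mark.line + 1, node.start_mark.column + 1)
        return value


PositionTrackingLoader.add_constructor('tag:yaml.org,2002:str', PositionTrackingLoader.construct_yaml_str)

loader = PositionTrackingLoader("key: value\nother: thing\n")
try:
    data = loader.get_single_data()
finally:
    loader.dispose()

print(data)              # {'key': 'value', 'other': 'thing'}
print(loader.positions)  # {'key': (1, 1), 'value': (1, 6), 'other': (2, 1), 'thing': (2, 8)}
```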
diff --git a/lib/ansible/_internal/_yaml/_dumper.py b/lib/ansible/_internal/_yaml/_dumper.py
new file mode 100644
index 00000000000..44708f6f524
--- /dev/null
+++ b/lib/ansible/_internal/_yaml/_dumper.py
@@ -0,0 +1,70 @@
+from __future__ import annotations
+
+import abc
+import collections.abc as c
+import typing as t
+
+from yaml.nodes import ScalarNode, Node
+
+from ansible._internal._templating import _jinja_common
+from ansible.module_utils import _internal
+from ansible.module_utils._internal._datatag import AnsibleTaggedObject, Tripwire, AnsibleTagHelper
+from ansible.parsing.vault import VaultHelper
+from ansible.module_utils.common.yaml import HAS_LIBYAML
+
+if HAS_LIBYAML:
+ from yaml.cyaml import CSafeDumper as SafeDumper
+else:
+ from yaml import SafeDumper # type: ignore[assignment]
+
+
+class _BaseDumper(SafeDumper, metaclass=abc.ABCMeta):
+ """Base class for Ansible YAML dumpers."""
+
+ @classmethod
+ @abc.abstractmethod
+ def _register_representers(cls) -> None:
+ """Method used to register representers to derived types during class initialization."""
+
+ def __init_subclass__(cls, **kwargs) -> None:
+ """Initialization for derived types."""
+ cls._register_representers()
+
+
+class AnsibleDumper(_BaseDumper):
+ """A simple stub class that allows us to add representers for our custom types."""
+
+ @classmethod
+ def _register_representers(cls) -> None:
+ cls.add_multi_representer(AnsibleTaggedObject, cls.represent_ansible_tagged_object)
+ cls.add_multi_representer(Tripwire, cls.represent_tripwire)
+ cls.add_multi_representer(c.Mapping, cls.represent_dict)
+ cls.add_multi_representer(c.Collection, cls.represent_list)
+ cls.add_multi_representer(_jinja_common.VaultExceptionMarker, cls.represent_vault_exception_marker)
+
+ def get_node_from_ciphertext(self, data: object) -> ScalarNode | None:
+ if ciphertext := VaultHelper.get_ciphertext(data, with_tags=False):
+ return self.represent_scalar('!vault', ciphertext, style='|')
+
+ return None
+
+ def represent_vault_exception_marker(self, data: _jinja_common.VaultExceptionMarker) -> ScalarNode:
+ if node := self.get_node_from_ciphertext(data):
+ return node
+
+ data.trip()
+
+ def represent_ansible_tagged_object(self, data: AnsibleTaggedObject) -> Node:
+ if _internal.is_intermediate_mapping(data):
+ return self.represent_dict(data)
+
+ if _internal.is_intermediate_iterable(data):
+ return self.represent_list(data)
+
+ if node := self.get_node_from_ciphertext(data):
+ return node
+
+ return self.represent_data(AnsibleTagHelper.as_native_type(data)) # automatically decrypts encrypted strings
+
+ def represent_tripwire(self, data: Tripwire) -> t.NoReturn:
+ data.trip()
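
`AnsibleDumper` leans on PyYAML's multi-representer mechanism, which dispatches on a base class so that a single registration covers every subclass; that is how one `add_multi_representer(AnsibleTaggedObject, ...)` call handles all tagged types. A self-contained sketch of that mechanism, with an invented `Money` type standing in for the custom types above:

```python
import yaml


class Money:
    """Example custom type, invented for this sketch."""

    def __init__(self, amount: int, currency: str) -> None:
        self.amount = amount
        self.currency = currency


class DemoDumper(yaml.SafeDumper):
    pass


def represent_money(dumper: yaml.SafeDumper, data: Money) -> yaml.ScalarNode:
    # a multi-representer receives the dumper instance plus the value; it matches
    # Money and any subclass, mirroring how AnsibleDumper catches every
    # AnsibleTaggedObject subtype with a single registration
    return dumper.represent_scalar('!money', f'{data.amount} {data.currency}')


DemoDumper.add_multi_representer(Money, represent_money)

print(yaml.dump({'price': Money(5, 'EUR')}, Dumper=DemoDumper))
# price: !money 5 EUR
```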
diff --git a/lib/ansible/_internal/_yaml/_errors.py b/lib/ansible/_internal/_yaml/_errors.py
new file mode 100644
index 00000000000..0464a928d7b
--- /dev/null
+++ b/lib/ansible/_internal/_yaml/_errors.py
@@ -0,0 +1,166 @@
+from __future__ import annotations
+
+import re
+
+import typing as t
+
+from yaml import MarkedYAMLError
+from yaml.constructor import ConstructorError
+
+from ansible._internal._errors import _error_utils
+from ansible.errors import AnsibleParserError
+from ansible._internal._datatag._tags import Origin
+
+
+class AnsibleConstructorError(ConstructorError):
+ """Ansible-specific ConstructorError used to bypass exception analysis during wrapping in AnsibleYAMLParserError."""
+
+
+class AnsibleYAMLParserError(AnsibleParserError):
+ """YAML-specific parsing failure wrapping an exception raised by the YAML parser."""
+
+ _default_message = 'YAML parsing failed.'
+
+ _include_cause_message = False # hide the underlying cause message, it's included by `handle_exception` as needed
+
+ _formatted_source_context_value: str | None = None
+
+ @property
+ def _formatted_source_context(self) -> str | None:
+ return self._formatted_source_context_value
+
+ @classmethod
+ def handle_exception(cls, exception: Exception, origin: Origin) -> t.NoReturn:
+ if isinstance(exception, MarkedYAMLError):
+ origin = origin.replace(line_num=exception.problem_mark.line + 1, col_num=exception.problem_mark.column + 1)
+
+ source_context = _error_utils.SourceContext.from_origin(origin)
+
+ target_line = source_context.target_line or '' # for these cases, we don't need to distinguish between None and empty string
+
+ message: str | None = None
+ help_text = None
+
+ # FIXME: Do all this by walking the parsed YAML doc stream. Using regexes is a dead-end; YAML's just too flexible to not have a
+ # raft of false-positives and corner cases. If we directly consume either the YAML parse stream or override the YAML composer, we can
+ # better catch these things without worrying about duplicating YAML's scalar parsing logic around quoting/escaping. At first, we can
+ # replace the regex logic below with tiny special-purpose parse consumers to catch specific issues, but ideally, we could do a lot of this
+ # inline with the actual doc parse, since our rules are a lot more strict than YAML's (eg, no support for non-scalar keys), and a lot of the
+ # problem cases where that comes into play are around expression quoting and Jinja {{ syntax looking like weird YAML values we don't support.
+ # Some common examples, where -> is "what YAML actually sees":
+ # foo: {{ bar }} -> {"foo": {{"bar": None}: None}} - a mapping with a mapping as its key (legal YAML, but not legal Python/Ansible)
+ #
+ # - copy: src=foo.txt # kv syntax (kv could be on following line(s), too- implicit multi-line block scalar)
+ # dest: bar.txt # orphaned mapping, since the value of `copy` is the scalar "src=foo.txt"
+ #
+ # - msg == "Error: 'dude' was not found" # unquoted scalar has a : in it -> {'msg == "Error"': 'dude'} [ was not found" ] is garbage orphan scalar
+
+ # noinspection PyUnboundLocalVariable
+ if not isinstance(exception, MarkedYAMLError):
+ pass # unexpected exception, don't use special analysis of exception
+
+ elif isinstance(exception, AnsibleConstructorError):
+ pass # raised internally by ansible code, don't use special analysis of exception
+
+ # Check for tabs.
+ # There may be cases where there is a valid tab in a line that has other errors.
+ # That's OK, users should "fix" their tab usage anyway -- at which point later error handling logic will hopefully find the real issue.
+ elif (tab_idx := target_line.find('\t')) >= 0:
+ source_context = _error_utils.SourceContext.from_origin(origin.replace(col_num=tab_idx + 1))
+ message = "Tabs are usually invalid in YAML."
+
+ # Check for unquoted templates.
+ elif match := re.search(r'^\s*(?:-\s+)*(?:[\w\s]+:\s+)?(?P<value>\{\{.*}})', target_line):
+ source_context = _error_utils.SourceContext.from_origin(origin.replace(col_num=match.start('value') + 1))
+ message = 'This may be an issue with missing quotes around a template block.'
+ # FIXME: Use the captured value to show the actual fix required.
+ help_text = """
+For example:
+
+ raw: {{ some_var }}
+
+Should be:
+
+ raw: "{{ some_var }}"
+"""
+
+ # Check for common unquoted colon mistakes.
+ elif (
+ # ignore lines starting with only whitespace and a colon
+ not target_line.lstrip().startswith(':')
+ # find the value after list/dict preamble
+ and (value_match := re.search(r'^\s*(?:-\s+)*(?:[\w\s\[\]{}]+:\s+)?(?P<value>.*)$', target_line))
+ # ignore properly quoted values
+ and (target_fragment := _replace_quoted_value(value_match.group('value')))
+ # look for an unquoted colon in the value
+ and (colon_match := re.search(r':($| )', target_fragment))
+ ):
+ source_context = _error_utils.SourceContext.from_origin(origin.replace(col_num=value_match.start('value') + colon_match.start() + 1))
+ message = 'Colons in unquoted values must be followed by a non-space character.'
+ # FIXME: Use the captured value to show the actual fix required.
+ help_text = """
+For example:
+
+ raw: echo 'name: ansible'
+
+Should be:
+
+ raw: "echo 'name: ansible'"
+"""
+
+ # Check for common quoting mistakes.
+ elif match := re.search(r'^\s*(?:-\s+)*(?:[\w\s]+:\s+)?(?P<value>[\"\'].*?\s*)$', target_line):
+ suspected_value = match.group('value')
+ first, last = suspected_value[0], suspected_value[-1]
+
+ if first != last: # "foo" in bar
+ source_context = _error_utils.SourceContext.from_origin(origin.replace(col_num=match.start('value') + 1))
+ message = 'Values starting with a quote must end with the same quote.'
+ # FIXME: Use the captured value to show the actual fix required, and use that same logic to improve the origin further.
+ help_text = """
+For example:
+
+ raw: "foo" in bar
+
+Should be:
+
+ raw: '"foo" in bar'
+"""
+ elif first == last and target_line.count(first) > 2: # "foo" and "bar"
+ source_context = _error_utils.SourceContext.from_origin(origin.replace(col_num=match.start('value') + 1))
+ message = 'Values starting with a quote must end with the same quote, and not contain that quote.'
+ # FIXME: Use the captured value to show the actual fix required, and use that same logic to improve the origin further.
+ help_text = """
+For example:
+
+ raw: "foo" in "bar"
+
+Should be:
+
+ raw: '"foo" in "bar"'
+"""
+
+ if not message:
+ if isinstance(exception, MarkedYAMLError):
+ # marked YAML error, pull out the useful messages while omitting the noise
+ message = ' '.join(filter(None, (exception.context, exception.problem, exception.note)))
+ message = message.strip()
+ message = f'{message[0].upper()}{message[1:]}'
+
+ if not message.endswith('.'):
+ message += '.'
+ else:
+ # unexpected error, use the exception message (normally hidden by overriding include_cause_message)
+ message = str(exception)
+
+ message = re.sub(r'\s+', ' ', message).strip()
+
+ error = cls(message, obj=source_context.origin)
+ error._formatted_source_context_value = str(source_context)
+ error._help_text = help_text
+
+ raise error from exception
+
+
+def _replace_quoted_value(value: str, replacement='.') -> str:
+ return re.sub(r"""^\s*('[^']*'|"[^"]*")\s*$""", lambda match: replacement * len(match.group(0)), value)
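
`handle_exception` re-anchors the error origin using the 0-based `problem_mark` carried by `MarkedYAMLError`. A quick standalone demonstration of what that mark provides (exact message text varies by PyYAML version):

```python
import yaml

try:
    yaml.safe_load('key: [1, 2')  # unterminated flow sequence
except yaml.MarkedYAMLError as ex:
    # problem_mark is 0-based, matching the + 1 adjustments in handle_exception above
    print(ex.problem)                  # e.g. "expected ',' or ']', but got ..."
    print(ex.problem_mark.line + 1)    # 1-based line of the failure
    print(ex.problem_mark.column + 1)  # 1-based column of the failure
```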
diff --git a/lib/ansible/_internal/_yaml/_loader.py b/lib/ansible/_internal/_yaml/_loader.py
new file mode 100644
index 00000000000..fa14006c0f8
--- /dev/null
+++ b/lib/ansible/_internal/_yaml/_loader.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+import io as _io
+
+from yaml.resolver import Resolver
+
+from ansible.module_utils._internal._datatag import AnsibleTagHelper
+from ansible.module_utils.common.yaml import HAS_LIBYAML
+from ansible._internal._datatag import _tags
+
+from ._constructor import AnsibleConstructor, AnsibleInstrumentedConstructor
+
+if HAS_LIBYAML:
+ from yaml.cyaml import CParser
+
+ class _YamlParser(CParser):
+ def __init__(self, stream: str | bytes | _io.IOBase) -> None:
+ if isinstance(stream, (str, bytes)):
+ stream = AnsibleTagHelper.untag(stream) # PyYAML + libyaml barfs on str/bytes subclasses
+
+ CParser.__init__(self, stream)
+
+ self.name = getattr(stream, 'name', None) # provide feature parity with the Python implementation (yaml.reader.Reader provides name)
+
+else:
+ from yaml.composer import Composer
+ from yaml.reader import Reader
+ from yaml.scanner import Scanner
+ from yaml.parser import Parser
+
+ class _YamlParser(Reader, Scanner, Parser, Composer): # type: ignore[no-redef]
+ def __init__(self, stream: str | bytes | _io.IOBase) -> None:
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+
+
+class AnsibleInstrumentedLoader(_YamlParser, AnsibleInstrumentedConstructor, Resolver):
+ """Ansible YAML loader which supports Ansible custom behavior such as `Origin` tagging, but no Ansible-specific YAML tags."""
+
+ def __init__(self, stream: str | bytes | _io.IOBase) -> None:
+ _YamlParser.__init__(self, stream)
+
+ AnsibleInstrumentedConstructor.__init__(
+ self,
+ origin=_tags.Origin.get_or_create_tag(stream, self.name),
+ trusted_as_template=_tags.TrustedAsTemplate.is_tagged_on(stream),
+ )
+
+ Resolver.__init__(self)
+
+
+class AnsibleLoader(_YamlParser, AnsibleConstructor, Resolver):
+ """Ansible loader which supports Ansible custom behavior such as `Origin` tagging, as well as Ansible-specific YAML tags."""
+
+ def __init__(self, stream: str | bytes | _io.IOBase) -> None:
+ _YamlParser.__init__(self, stream)
+
+ AnsibleConstructor.__init__(
+ self,
+ origin=_tags.Origin.get_or_create_tag(stream, self.name),
+ trusted_as_template=_tags.TrustedAsTemplate.is_tagged_on(stream),
+ )
+
+ Resolver.__init__(self)
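
A usage sketch for the loader above, following the standard PyYAML custom-Loader pattern. Everything imported here is internal ansible-core API introduced by this change and subject to change without notice; the stream content and the `description` text are illustrative:

```python
import yaml

from ansible._internal._datatag._tags import Origin, TrustedAsTemplate
from ansible._internal._yaml._loader import AnsibleLoader

text = "greeting: '{{ salutation }}'\n"
text = Origin(description='example document').tag(text)  # description text is illustrative
text = TrustedAsTemplate().tag(text)  # mark the stream trusted so loaded strings are too

data = yaml.load(text, Loader=AnsibleLoader)  # values come back Origin-tagged

print(data['greeting'])
```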
diff --git a/lib/ansible/_internal/ansible_collections/ansible/_protomatter/README.md b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/README.md
new file mode 100644
index 00000000000..9ec03246d23
--- /dev/null
+++ b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/README.md
@@ -0,0 +1,11 @@
+"Protomatter - an unstable substance which every ethical scientist in the galaxy has denounced as dangerously unpredictable."
+
+"But it was the only way to solve certain problems..."
+
+This Ansible Collection is embedded within ansible-core.
+It contains plugins useful for ansible-core's own integration tests.
+They have been made available, completely unsupported,
+in case they prove useful for debugging and troubleshooting purposes.
+
+> CAUTION: This collection is not supported, and may be changed or removed in any version without prior notice.
+> Use of these plugins outside ansible-core is highly discouraged.
diff --git a/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/action/debug.py b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/action/debug.py
new file mode 100644
index 00000000000..60d7c64ec9c
--- /dev/null
+++ b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/action/debug.py
@@ -0,0 +1,36 @@
+from __future__ import annotations
+
+import typing as t
+
+from ansible.module_utils.common.validation import _check_type_str_no_conversion, _check_type_list_strict
+from ansible.plugins.action import ActionBase
+from ansible._internal._templating._engine import TemplateEngine
+from ansible._internal._templating._marker_behaviors import ReplacingMarkerBehavior
+
+
+class ActionModule(ActionBase):
+ TRANSFERS_FILES = False
+ _requires_connection = False
+
+ @classmethod
+ def finalize_task_arg(cls, name: str, value: t.Any, templar: TemplateEngine, context: t.Any) -> t.Any:
+ if name == 'expression':
+ return value
+
+ return super().finalize_task_arg(name, value, templar, context)
+
+ def run(self, tmp=None, task_vars=None):
+ # accepts a list of literal expressions (no templating), evaluates with no failure on undefined, returns all results
+ _vr, args = self.validate_argument_spec(
+ argument_spec=dict(
+ expression=dict(type=_check_type_list_strict, elements=_check_type_str_no_conversion, required=True),
+ ),
+ )
+
+ with ReplacingMarkerBehavior.warning_context() as replacing_behavior:
+ templar = self._templar._engine.extend(marker_behavior=replacing_behavior)
+
+ return dict(
+ _ansible_verbose_always=True,
+ expression_result=[templar.evaluate_expression(expression) for expression in args['expression']],
+ )
diff --git a/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/apply_trust.py b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/apply_trust.py
new file mode 100644
index 00000000000..22f8aa43c94
--- /dev/null
+++ b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/apply_trust.py
@@ -0,0 +1,19 @@
+from __future__ import annotations
+
+import typing as t
+
+from ansible._internal._datatag._tags import TrustedAsTemplate
+
+
+def apply_trust(value: object) -> object:
+ """
+ Filter that returns a tagged copy of the input string with TrustedAsTemplate.
+ Containers and other non-string values are returned unmodified.
+ """
+ return TrustedAsTemplate().tag(value) if isinstance(value, str) else value
+
+
+class FilterModule:
+ @staticmethod
+ def filters() -> dict[str, t.Callable]:
+ return dict(apply_trust=apply_trust)
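
A quick demonstration of the filter's contract, with the one-liner reimplemented inline so the snippet runs standalone: only `str` values gain the tag, everything else passes through untouched:

```python
from ansible._internal._datatag._tags import TrustedAsTemplate


def apply_trust(value: object) -> object:
    # same one-liner as above, repeated here so this snippet is self-contained
    return TrustedAsTemplate().tag(value) if isinstance(value, str) else value


print(TrustedAsTemplate.is_tagged_on(apply_trust('{{ foo }}')))  # True: strings get the tag
print(TrustedAsTemplate.is_tagged_on(apply_trust(42)))           # False: non-strings pass through
```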
diff --git a/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/dump_object.py b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/dump_object.py
new file mode 100644
index 00000000000..fc210a559df
--- /dev/null
+++ b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/dump_object.py
@@ -0,0 +1,27 @@
+from __future__ import annotations
+
+import dataclasses
+import typing as t
+
+from ansible.template import accept_args_markers
+from ansible._internal._templating._jinja_common import ExceptionMarker
+
+
+@accept_args_markers
+def dump_object(value: t.Any) -> object:
+ """Internal filter to convert objects not supported by JSON to types which are."""
+ if dataclasses.is_dataclass(value):
+ return dataclasses.asdict(value) # type: ignore[arg-type]
+
+ if isinstance(value, ExceptionMarker):
+ return dict(
+ exception=value._as_exception(),
+ )
+
+ return value
+
+
+class FilterModule(object):
+ @staticmethod
+ def filters() -> dict[str, t.Callable]:
+ return dict(dump_object=dump_object)
diff --git a/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/finalize.py b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/finalize.py
new file mode 100644
index 00000000000..88f847fb9c8
--- /dev/null
+++ b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/finalize.py
@@ -0,0 +1,16 @@
+from __future__ import annotations
+
+import typing as t
+
+from ansible._internal._templating._engine import _finalize_template_result, FinalizeMode
+
+
+def finalize(value: t.Any) -> t.Any:
+ """Perform an explicit top-level template finalize operation on the supplied value."""
+ return _finalize_template_result(value, mode=FinalizeMode.TOP_LEVEL)
+
+
+class FilterModule:
+ @staticmethod
+ def filters() -> dict[str, t.Callable]:
+ return dict(finalize=finalize)
diff --git a/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/origin.py b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/origin.py
new file mode 100644
index 00000000000..528bb96c626
--- /dev/null
+++ b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/origin.py
@@ -0,0 +1,18 @@
+from __future__ import annotations
+
+import typing as t
+
+from ansible._internal._datatag._tags import Origin
+
+
+def origin(value: object) -> str | None:
+ """Return the origin of the value, if any, otherwise `None`."""
+ origin_tag = Origin.get_tag(value)
+
+ return str(origin_tag) if origin_tag else None
+
+
+class FilterModule:
+ @staticmethod
+ def filters() -> dict[str, t.Callable]:
+ return dict(origin=origin)
diff --git a/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/python_literal_eval.py b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/python_literal_eval.py
new file mode 100644
index 00000000000..416c391e75c
--- /dev/null
+++ b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/python_literal_eval.py
@@ -0,0 +1,24 @@
+from __future__ import annotations
+
+import ast
+
+from ansible.errors import AnsibleTypeError
+
+
+def python_literal_eval(value: object, ignore_errors=False) -> object:
+ try:
+ if isinstance(value, str):
+ return ast.literal_eval(value)
+
+ raise AnsibleTypeError("The `value` to eval must be a string.", obj=value)
+ except Exception:
+ if ignore_errors:
+ return value
+
+ raise
+
+
+class FilterModule(object):
+ @staticmethod
+ def filters():
+ return dict(python_literal_eval=python_literal_eval)
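
Since the filter delegates directly to `ast.literal_eval`, its accepted inputs are exactly Python literal syntax. A brief standalone illustration:

```python
import ast

print(ast.literal_eval("[1, 2]"))           # [1, 2]: the string becomes a real list
print(ast.literal_eval("{'a': (1, 2.5)}"))  # dicts, tuples, numbers, strings, booleans and None are accepted

try:
    ast.literal_eval("1 + foo")  # names and calls are rejected, unlike eval()
except ValueError as ex:
    print(ex)  # malformed node or string ...
```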
diff --git a/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/python_literal_eval.yml b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/python_literal_eval.yml
new file mode 100644
index 00000000000..8d20b835c43
--- /dev/null
+++ b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/python_literal_eval.yml
@@ -0,0 +1,33 @@
+DOCUMENTATION:
+ name: python_literal_eval
+ version_added: "2.19"
+ short_description: evaluate a Python literal expression string
+ description:
+ - Evaluates the input string as a Python literal expression, returning the resulting data structure.
+ - Previous versions of Ansible applied this behavior to all template results in non-native Jinja mode.
+ - This filter provides a way to emulate the previous behavior.
+ notes:
+ - Directly calls Python's C(ast.literal_eval).
+ positional: _input
+ options:
+ _input:
+ description: Python literal string expression.
+ type: str
+ required: true
+ ignore_errors:
+ description: Whether to ignore all errors resulting from the literal_eval operation. If true, the input is silently returned unmodified when an error occurs.
+ type: bool
+ default: false
+
+EXAMPLES: |
+ - name: evaluate an expression comprised only of Python literals
+ assert:
+ that: (another_var | ansible._protomatter.python_literal_eval)[1] == 2 # in 2.19 and later, the explicit python_literal_eval emulates the old templating behavior
+ vars:
+ another_var: "{{ some_var }}" # in 2.18 and earlier, indirection through templating caused implicit literal_eval, converting the value to a list
+ some_var: "[1, 2]" # a value that looks like a Python list literal embedded in a string
+
+RETURN:
+ _value:
+ description: Resulting data structure.
+ type: raw
diff --git a/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/tag_names.py b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/tag_names.py
new file mode 100644
index 00000000000..92525c8d332
--- /dev/null
+++ b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/tag_names.py
@@ -0,0 +1,16 @@
+from __future__ import annotations
+
+import typing as t
+
+from ansible.module_utils._internal._datatag import AnsibleTagHelper
+
+
+def tag_names(value: object) -> list[str]:
+ """Return a list of tag type names (if any) present on the given object."""
+ return sorted(tag_type.__name__ for tag_type in AnsibleTagHelper.tag_types(value))
+
+
+class FilterModule:
+ @staticmethod
+ def filters() -> dict[str, t.Callable]:
+ return dict(tag_names=tag_names)
diff --git a/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/true_type.py b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/true_type.py
new file mode 100644
index 00000000000..32cf3370dfa
--- /dev/null
+++ b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/true_type.py
@@ -0,0 +1,17 @@
+from __future__ import annotations
+
+import typing as t
+
+from ansible.template import accept_args_markers
+
+
+@accept_args_markers
+def true_type(obj: object) -> str:
+ """Internal filter to show the true type name of the given object, not just the base type name like the `debug` filter."""
+ return obj.__class__.__name__
+
+
+class FilterModule(object):
+ @staticmethod
+ def filters() -> dict[str, t.Callable]:
+ return dict(true_type=true_type)
diff --git a/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/unmask.py b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/unmask.py
new file mode 100644
index 00000000000..076a6ff7499
--- /dev/null
+++ b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/filter/unmask.py
@@ -0,0 +1,49 @@
+from __future__ import annotations
+
+import copy
+import dataclasses
+import typing as t
+
+from ansible._internal._templating._jinja_common import validate_arg_type
+from ansible._internal._templating._lazy_containers import _AnsibleLazyTemplateMixin
+from ansible._internal._templating._transform import _type_transform_mapping
+from ansible.errors import AnsibleError
+
+
+def unmask(value: object, type_names: str | list[str]) -> object:
+ """
+ Internal filter to suppress automatic type transformation in Jinja (e.g., WarningSummary, DeprecationSummary, ErrorSummary).
+ Because lazy collections cache values, the first access to a value in a given lazy container must occur with unmasking
+ in place; otherwise the transformed value will already be cached.
+ """
+ validate_arg_type("type_names", type_names, (str, list))
+
+ if isinstance(type_names, str):
+ check_type_names = [type_names]
+ else:
+ check_type_names = type_names
+
+ valid_type_names = {key.__name__ for key in _type_transform_mapping}
+ invalid_type_names = [type_name for type_name in check_type_names if type_name not in valid_type_names]
+
+ if invalid_type_names:
+ raise AnsibleError(f'Unknown type name(s): {", ".join(invalid_type_names)}', obj=type_names)
+
+ result: object
+
+ if isinstance(value, _AnsibleLazyTemplateMixin):
+ result = copy.copy(value)
+ result._lazy_options = dataclasses.replace(
+ result._lazy_options,
+ unmask_type_names=result._lazy_options.unmask_type_names | frozenset(check_type_names),
+ )
+ else:
+ result = value
+
+ return result
+
+
+class FilterModule(object):
+ @staticmethod
+ def filters() -> dict[str, t.Callable]:
+ return dict(unmask=unmask)
diff --git a/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/lookup/config.py b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/lookup/config.py
new file mode 100644
index 00000000000..c4229320963
--- /dev/null
+++ b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/lookup/config.py
@@ -0,0 +1,21 @@
+from __future__ import annotations
+
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ """Specialized config lookup that applies data transformations on values that config cannot."""
+
+ def run(self, terms, variables=None, **kwargs):
+ if not terms or not (config_name := terms[0]):
+ raise ValueError("config name is required")
+
+ match config_name:
+ case 'DISPLAY_TRACEBACK':
+ # since config can't expand this yet, we need the post-processed version
+ from ansible.module_utils._internal._traceback import traceback_for
+
+ return traceback_for()
+ # DTFIX-FUTURE: plumb through normal config fallback
+ case _:
+ raise ValueError(f"Unknown config name {config_name!r}.")
diff --git a/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/lookup/config.yml b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/lookup/config.yml
new file mode 100644
index 00000000000..5aa954617d2
--- /dev/null
+++ b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/lookup/config.yml
@@ -0,0 +1,2 @@
+DOCUMENTATION:
+ name: config
diff --git a/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/test/tagged.py b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/test/tagged.py
new file mode 100644
index 00000000000..a13b90d4c86
--- /dev/null
+++ b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/test/tagged.py
@@ -0,0 +1,15 @@
+from __future__ import annotations
+
+import typing as t
+
+from ansible.module_utils._internal import _datatag
+
+
+def tagged(value: t.Any) -> bool:
+ return bool(_datatag.AnsibleTagHelper.tag_types(value))
+
+
+class TestModule:
+ @staticmethod
+ def tests() -> dict[str, t.Callable]:
+ return dict(tagged=tagged)
diff --git a/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/test/tagged.yml b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/test/tagged.yml
new file mode 100644
index 00000000000..921c03a1513
--- /dev/null
+++ b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/test/tagged.yml
@@ -0,0 +1,19 @@
+DOCUMENTATION:
+ name: tagged
+ author: Ansible Core
+ version_added: "2.19"
+ short_description: does the value have a data tag
+ description:
+ - Check if the provided value has a data tag.
+ options:
+ _input:
+ description: A value.
+ type: raw
+
+EXAMPLES: |
+ is_data_tagged: "{{ my_variable is ansible._protomatter.tagged }}"
+
+RETURN:
+ _value:
+ description: Returns C(True) if the value has one or more data tags, otherwise C(False).
+ type: boolean
diff --git a/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/test/tagged_with.py b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/test/tagged_with.py
new file mode 100644
index 00000000000..ef59edcab7e
--- /dev/null
+++ b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/test/tagged_with.py
@@ -0,0 +1,18 @@
+from __future__ import annotations
+
+import typing as t
+
+from ansible.module_utils._internal import _datatag
+
+
+def tagged_with(value: t.Any, tag_name: str) -> bool:
+ if tag_type := _datatag._known_tag_type_map.get(tag_name):
+ return tag_type.is_tagged_on(value)
+
+ raise ValueError(f"Unknown tag name {tag_name!r}.")
+
+
+class TestModule:
+ @staticmethod
+ def tests() -> dict[str, t.Callable]:
+ return dict(tagged_with=tagged_with)
diff --git a/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/test/tagged_with.yml b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/test/tagged_with.yml
new file mode 100644
index 00000000000..f455ae919a9
--- /dev/null
+++ b/lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/test/tagged_with.yml
@@ -0,0 +1,19 @@
+DOCUMENTATION:
+ name: tagged_with
+ author: Ansible Core
+ version_added: "2.19"
+ short_description: does the value have the specified data tag
+ description:
+ - Check if the provided value has the specified data tag.
+ options:
+ _input:
+ description: A value.
+ type: raw
+
+EXAMPLES: |
+ is_data_tagged: "{{ my_variable is ansible._protomatter.tagged_with('Origin') }}"
+
+RETURN:
+ _value:
+ description: Returns C(True) if the value has the specified data tag, otherwise C(False).
+ type: boolean
diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py
index 3e66b88f0d4..0adc00c9bc9 100644
--- a/lib/ansible/cli/__init__.py
+++ b/lib/ansible/cli/__init__.py
@@ -9,6 +9,18 @@ import locale
import os
import sys
+# We overload the ``ansible`` adhoc command to provide the functionality for
+# ``SSH_ASKPASS``. This code lives here, rather than in ``adhoc.py``, to avoid
+# loading unnecessary code. The program provided to ``SSH_ASKPASS`` can only be
+# invoked as a single command; ``python -m`` doesn't work for that use case, and
+# we aren't adding a new entrypoint at this time. Assume we are in
+# ``SSH_ASKPASS`` mode when argv contains only the executable plus at most one
+# argument and the env var is set.
+if 1 <= len(sys.argv) <= 2 and os.path.basename(sys.argv[0]) == "ansible" and os.getenv('_ANSIBLE_SSH_ASKPASS_SHM'):
+ from ansible.cli import _ssh_askpass
+ _ssh_askpass.main()
+
+
# Used for determining if the system is running a new enough python version
# and should only restrict on our documented minimum versions
if sys.version_info < (3, 11):
@@ -63,50 +75,51 @@ def initialize_locale():
initialize_locale()
-from importlib.metadata import version
-from ansible.module_utils.compat.version import LooseVersion
-
-# Used for determining if the system is running a new enough Jinja2 version
-# and should only restrict on our documented minimum versions
-jinja2_version = version('jinja2')
-if jinja2_version < LooseVersion('3.0'):
- raise SystemExit(
- 'ERROR: Ansible requires Jinja2 3.0 or newer on the controller. '
- 'Current version: %s' % jinja2_version
- )
-
-import errno
import getpass
import subprocess
import traceback
from abc import ABC, abstractmethod
from pathlib import Path
+from ansible import _internal # do not remove or defer; ensures controller-specific state is set early
+
+_internal.setup()
+
+from ansible.errors import AnsibleError, ExitCode
+
try:
from ansible import constants as C
from ansible.utils.display import Display
display = Display()
-except Exception as e:
- print('ERROR: %s' % e, file=sys.stderr)
+except Exception as ex:
+ if isinstance(ex, AnsibleError):
+ ex_msg = ' '.join((ex.message, ex._help_text or '')).strip()
+ else:
+ ex_msg = str(ex)
+
+ print(f'ERROR: {ex_msg}\n\n{"".join(traceback.format_exception(ex))}', file=sys.stderr)
sys.exit(5)
+
from ansible import context
+from ansible.utils import display as _display
from ansible.cli.arguments import option_helpers as opt_help
-from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.six import string_types
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.common.file import is_executable
from ansible.parsing.dataloader import DataLoader
-from ansible.parsing.vault import PromptVaultSecret, get_file_vault_secret
+from ansible.parsing.vault import PromptVaultSecret, get_file_vault_secret, VaultSecretsContext
from ansible.plugins.loader import add_all_plugin_dirs, init_plugin_loader
from ansible.release import __version__
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path
from ansible.utils.path import unfrackpath
-from ansible.utils.unsafe_proxy import to_unsafe_text
from ansible.vars.manager import VariableManager
+from ansible.module_utils._internal import _deprecator
+from ansible._internal._ssh import _agent_launch
+
try:
import argcomplete
@@ -124,6 +137,7 @@ class CLI(ABC):
# -S (chop long lines) -X (disable termcap init and de-init)
LESS_OPTS = 'FRSX'
SKIP_INVENTORY_DEFAULTS = False
+ USES_CONNECTION = False
def __init__(self, args, callback=None):
"""
@@ -137,6 +151,9 @@ class CLI(ABC):
self.parser = None
self.callback = callback
+ self.show_devel_warning()
+
+ def show_devel_warning(self) -> None:
if C.DEVEL_WARNING and __version__.endswith('dev0'):
display.warning(
'You are running the development version of Ansible. You should only run Ansible from "devel" if '
@@ -167,7 +184,7 @@ class CLI(ABC):
else:
display.v(u"No config file found; using defaults")
- C.handle_config_noise(display)
+ _display._report_config_warnings(_deprecator.ANSIBLE_CORE_DEPRECATOR)
@staticmethod
def split_vault_id(vault_id):
@@ -195,9 +212,9 @@ class CLI(ABC):
# used by --vault-id and --vault-password-file
vault_ids.append(id_slug)
- # if an action needs an encrypt password (create_new_password=True) and we dont
+ # if an action needs an encrypt password (create_new_password=True) and we don't
# have other secrets setup, then automatically add a password prompt as well.
- # prompts cant/shouldnt work without a tty, so dont add prompt secrets
+ # prompts can't/shouldn't work without a tty, so don't add prompt secrets
if ask_vault_pass or (not vault_ids and auto_prompt):
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, u'prompt_ask_vault_pass')
@@ -208,7 +225,7 @@ class CLI(ABC):
@staticmethod
def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=False,
- auto_prompt=True):
+ auto_prompt=True, initialize_context=True):
# list of tuples
vault_secrets = []
@@ -305,15 +322,14 @@ class CLI(ABC):
if last_exception and not found_vault_secret:
raise last_exception
+ if initialize_context:
+ VaultSecretsContext.initialize(VaultSecretsContext(vault_secrets))
+
return vault_secrets
@staticmethod
- def _get_secret(prompt):
-
- secret = getpass.getpass(prompt=prompt)
- if secret:
- secret = to_unsafe_text(secret)
- return secret
+ def _get_secret(prompt: str) -> str:
+ return getpass.getpass(prompt=prompt)
@staticmethod
def ask_passwords():
@@ -322,7 +338,6 @@ class CLI(ABC):
op = context.CLIARGS
sshpass = None
becomepass = None
- become_prompt = ''
become_prompt_method = "BECOME" if C.AGNOSTIC_BECOME_PROMPT else op['become_method'].upper()
@@ -344,7 +359,7 @@ class CLI(ABC):
except EOFError:
pass
- return (sshpass, becomepass)
+ return sshpass, becomepass
def validate_conflicts(self, op, runas_opts=False, fork_opts=False):
""" check for conflicting options """
@@ -510,13 +525,10 @@ class CLI(ABC):
try:
cmd = subprocess.Popen(CLI.PAGER, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
cmd.communicate(input=to_bytes(text))
- except IOError:
- pass
- except KeyboardInterrupt:
+ except (OSError, KeyboardInterrupt):
pass
- @staticmethod
- def _play_prereqs():
+ def _play_prereqs(self):
# TODO: evaluate moving all of the code that touches ``AnsibleCollectionConfig``
# into ``init_plugin_loader`` so that we can specifically remove
# ``AnsibleCollectionConfig.playbook_paths`` to make it immutable after instantiation
@@ -547,6 +559,9 @@ class CLI(ABC):
auto_prompt=False)
loader.set_vault_secrets(vault_secrets)
+ if self.USES_CONNECTION:
+ _agent_launch.launch_ssh_agent()
+
# create the inventory, and filter it based on the subset specified (if any)
inventory = InventoryManager(loader=loader, sources=options['inventory'], cache=(not options.get('flush_cache')))
@@ -554,8 +569,19 @@ class CLI(ABC):
# the code, ensuring a consistent view of global variables
variable_manager = VariableManager(loader=loader, inventory=inventory, version_info=CLI.version_info(gitinfo=False))
+ # flush fact cache if requested
+ if options['flush_cache']:
+ CLI._flush_cache(inventory, variable_manager)
+
return loader, inventory, variable_manager
+ @staticmethod
+ def _flush_cache(inventory, variable_manager):
+ variable_manager.clear_facts('localhost')
+ for host in inventory.list_hosts():
+ hostname = host.get_name()
+ variable_manager.clear_facts(hostname)
+
@staticmethod
def get_host_list(inventory, subset, pattern='all'):
@@ -575,10 +601,9 @@ class CLI(ABC):
return hosts
@staticmethod
- def get_password_from_file(pwd_file):
-
+ def get_password_from_file(pwd_file: str) -> str:
b_pwd_file = to_bytes(pwd_file)
- secret = None
+
if b_pwd_file == b'-':
# ensure its read as bytes
secret = sys.stdin.buffer.read()
@@ -598,22 +623,22 @@ class CLI(ABC):
stdout, stderr = p.communicate()
if p.returncode != 0:
- raise AnsibleError("The password script %s returned an error (rc=%s): %s" % (pwd_file, p.returncode, stderr))
+ raise AnsibleError("The password script %s returned an error (rc=%s): %s" % (pwd_file, p.returncode, to_text(stderr)))
secret = stdout
else:
try:
- with open(b_pwd_file, "rb") as f:
- secret = f.read().strip()
- except (OSError, IOError) as e:
- raise AnsibleError("Could not read password file %s: %s" % (pwd_file, e))
+ with open(b_pwd_file, "rb") as password_file:
+ secret = password_file.read().strip()
+ except OSError as ex:
+ raise AnsibleError(f"Could not read password file {pwd_file!r}.") from ex
secret = secret.strip(b'\r\n')
if not secret:
raise AnsibleError('Empty password was provided from file (%s)' % pwd_file)
- return to_unsafe_text(secret)
+ return to_text(secret)
@classmethod
def cli_executor(cls, args=None):
@@ -625,63 +650,28 @@ class CLI(ABC):
ansible_dir = Path(C.ANSIBLE_HOME).expanduser()
try:
- ansible_dir.mkdir(mode=0o700)
- except OSError as exc:
- if exc.errno != errno.EEXIST:
- display.warning(
- "Failed to create the directory '%s': %s" % (ansible_dir, to_text(exc, errors='surrogate_or_replace'))
- )
+ ansible_dir.mkdir(mode=0o700, exist_ok=True)
+ except OSError as ex:
+ display.error_as_warning(f"Failed to create the directory {ansible_dir!r}.", ex)
else:
display.debug("Created the '%s' directory" % ansible_dir)
- try:
- args = [to_text(a, errors='surrogate_or_strict') for a in args]
- except UnicodeError:
- display.error('Command line args are not in utf-8, unable to continue. Ansible currently only understands utf-8')
- display.display(u"The full traceback was:\n\n%s" % to_text(traceback.format_exc()))
- exit_code = 6
- else:
- cli = cls(args)
- exit_code = cli.run()
-
- except AnsibleOptionsError as e:
- cli.parser.print_help()
- display.error(to_text(e), wrap_text=False)
- exit_code = 5
- except AnsibleParserError as e:
- display.error(to_text(e), wrap_text=False)
- exit_code = 4
- # TQM takes care of these, but leaving comment to reserve the exit codes
- # except AnsibleHostUnreachable as e:
- # display.error(str(e))
- # exit_code = 3
- # except AnsibleHostFailed as e:
- # display.error(str(e))
- # exit_code = 2
- except AnsibleError as e:
- display.error(to_text(e), wrap_text=False)
- exit_code = 1
+ cli = cls(args)
+ exit_code = cli.run()
+ except AnsibleError as ex:
+ display.error(ex)
+ exit_code = ex._exit_code
except KeyboardInterrupt:
display.error("User interrupted execution")
- exit_code = 99
- except Exception as e:
- if C.DEFAULT_DEBUG:
- # Show raw stacktraces in debug mode, It also allow pdb to
- # enter post mortem mode.
- raise
- have_cli_options = bool(context.CLIARGS)
- display.error("Unexpected Exception, this is probably a bug: %s" % to_text(e), wrap_text=False)
- if not have_cli_options or have_cli_options and context.CLIARGS['verbosity'] > 2:
- log_only = False
- if hasattr(e, 'orig_exc'):
- display.vvv('\nexception type: %s' % to_text(type(e.orig_exc)))
- why = to_text(e.orig_exc)
- if to_text(e) != why:
- display.vvv('\noriginal msg: %s' % why)
- else:
- display.display("to see the full traceback, use -vvv")
- log_only = True
- display.display(u"the full traceback was:\n\n%s" % to_text(traceback.format_exc()), log_only=log_only)
- exit_code = 250
+ exit_code = ExitCode.KEYBOARD_INTERRUPT
+ except Exception as ex:
+ try:
+ raise AnsibleError("Unexpected Exception, this is probably a bug.") from ex
+ except AnsibleError as ex2:
+ # DTFIX-FUTURE: clean this up so we're not hacking the internals- re-wrap in an AnsibleCLIUnhandledError that always shows TB, or?
+ from ansible.module_utils._internal import _traceback
+ _traceback._is_traceback_enabled = lambda *_args, **_kwargs: True
+ display.error(ex2)
+ exit_code = ExitCode.UNKNOWN_ERROR
sys.exit(exit_code)
diff --git a/lib/ansible/cli/_ssh_askpass.py b/lib/ansible/cli/_ssh_askpass.py
new file mode 100644
index 00000000000..47cb1299780
--- /dev/null
+++ b/lib/ansible/cli/_ssh_askpass.py
@@ -0,0 +1,54 @@
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import annotations
+
+import json
+import multiprocessing.resource_tracker
+import os
+import re
+import sys
+import typing as t
+
+from multiprocessing.shared_memory import SharedMemory
+
+
+def main() -> t.Never:
+ if len(sys.argv) > 1:
+ exit_code = 0 if handle_prompt(sys.argv[1]) else 1
+ else:
+ exit_code = 1
+
+ sys.exit(exit_code)
+
+
+def handle_prompt(prompt: str) -> bool:
+ if re.search(r'(The authenticity of host |differs from the key for the IP address)', prompt):
+ sys.stdout.write('no')
+ sys.stdout.flush()
+ return True
+
+ # deprecated: description='Python 3.13 and later support track' python_version='3.12'
+ can_track = sys.version_info[:2] >= (3, 13)
+ kwargs = dict(track=False) if can_track else {}
+
+ # This SharedMemory instance is intentionally not closed or unlinked.
+ # Closing will occur naturally in the SharedMemory finalizer.
+ # Unlinking is the responsibility of the process which created it.
+ shm = SharedMemory(name=os.environ['_ANSIBLE_SSH_ASKPASS_SHM'], **kwargs)
+
+ if not can_track:
+ # When track=False is not available, we must unregister explicitly, since it otherwise only occurs during unlink.
+ # This avoids resource tracker noise on stderr during process exit.
+ multiprocessing.resource_tracker.unregister(shm._name, 'shared_memory')
+
+ cfg = json.loads(shm.buf.tobytes().rstrip(b'\x00'))
+
+ if cfg['prompt'] not in prompt:
+ return False
+
+ # Report the password provided by the SharedMemory instance.
+ # The contents are left untouched after consumption to allow subsequent attempts to succeed.
+ # This can occur when multiple password prompting methods are enabled, such as password and keyboard-interactive, which is the default on macOS.
+ sys.stdout.write(cfg['password'])
+ sys.stdout.flush()
+ return True
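
The helper above is the consumer side: it expects a JSON payload with `prompt` and `password` keys in a named shared memory segment. A hedged sketch of what a producer could look like; the segment sizing, example values, and the placeholder launch step are illustrative (the actual producer lives elsewhere in ansible-core):

```python
import json
import os

from multiprocessing.shared_memory import SharedMemory

payload = json.dumps({'prompt': 'password:', 'password': 'hunter2'}).encode()

shm = SharedMemory(create=True, size=len(payload))
try:
    shm.buf[:len(payload)] = payload  # any remainder stays zero-padded, matching the rstrip(b'\x00') above
    os.environ['_ANSIBLE_SSH_ASKPASS_SHM'] = shm.name

    # ... launch ssh here with SSH_ASKPASS pointing at the `ansible` entry point ...
finally:
    shm.close()
    shm.unlink()  # unlinking is the creating process's responsibility, per the comment above
```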
diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py
index 830e5823cfd..352b4f1a64a 100755
--- a/lib/ansible/cli/adhoc.py
+++ b/lib/ansible/cli/adhoc.py
@@ -6,6 +6,8 @@
from __future__ import annotations
+import json
+
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
from ansible import constants as C
@@ -15,10 +17,11 @@ from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.module_utils.common.text.converters import to_text
from ansible.parsing.splitter import parse_kv
-from ansible.parsing.utils.yaml import from_yaml
from ansible.playbook import Playbook
from ansible.playbook.play import Play
+from ansible._internal._datatag._tags import Origin
from ansible.utils.display import Display
+from ansible._internal._json._profiles import _legacy
display = Display()
@@ -30,6 +33,8 @@ class AdHocCLI(CLI):
name = 'ansible'
+ USES_CONNECTION = True
+
def init_parser(self):
""" create an options parser for bin/ansible """
super(AdHocCLI, self).init_parser(usage='%prog [options]',
@@ -76,15 +81,20 @@ class AdHocCLI(CLI):
module_args = None
if module_args_raw and module_args_raw.startswith('{') and module_args_raw.endswith('}'):
try:
- module_args = from_yaml(module_args_raw.strip(), json_only=True)
+ module_args = json.loads(module_args_raw, cls=_legacy.Decoder)
except AnsibleParserError:
pass
if not module_args:
module_args = parse_kv(module_args_raw, check_raw=check_raw)
- mytask = {'action': {'module': context.CLIARGS['module_name'], 'args': module_args},
- 'timeout': context.CLIARGS['task_timeout']}
+ mytask = dict(
+ action=context.CLIARGS['module_name'],
+ args=module_args,
+ timeout=context.CLIARGS['task_timeout'],
+ )
+
+ mytask = Origin(description='<adhoc task>').tag(mytask)
# avoid adding to tasks that don't support it, unless set, then give user an error
if context.CLIARGS['module_name'] not in C._ACTION_ALL_INCLUDE_ROLE_TASKS and any(frozenset((async_val, poll))):
@@ -177,7 +187,7 @@ class AdHocCLI(CLI):
variable_manager=variable_manager,
loader=loader,
passwords=passwords,
- stdout_callback=cb,
+ stdout_callback_name=cb,
run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
run_tree=run_tree,
forks=context.CLIARGS['forks'],
diff --git a/lib/ansible/cli/arguments/option_helpers.py b/lib/ansible/cli/arguments/option_helpers.py
index daa7a9a9b2f..034a7c26afc 100644
--- a/lib/ansible/cli/arguments/option_helpers.py
+++ b/lib/ansible/cli/arguments/option_helpers.py
@@ -4,22 +4,28 @@
from __future__ import annotations
import copy
+import dataclasses
+import inspect
import operator
import argparse
import os
import os.path
import sys
import time
+import typing as t
-from jinja2 import __version__ as j2_version
+import yaml
import ansible
from ansible import constants as C
+from ansible._internal import _templating
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.yaml import HAS_LIBYAML, yaml_load
from ansible.release import __version__
from ansible.utils.path import unfrackpath
+from ansible._internal._datatag._tags import TrustedAsTemplate, Origin
+
#
# Special purpose OptionParsers
@@ -30,13 +36,118 @@ class SortingHelpFormatter(argparse.HelpFormatter):
super(SortingHelpFormatter, self).add_arguments(actions)
+@dataclasses.dataclass(frozen=True, kw_only=True)
+class DeprecatedArgument:
+ version: str
+ """The Ansible version that will remove the deprecated argument."""
+
+ option: str | None = None
+ """The specific option string that is deprecated; None applies to all options for this argument."""
+
+ def is_deprecated(self, option: str) -> bool:
+ """Return True if the given option is deprecated, otherwise False."""
+ return self.option is None or option == self.option
+
+ def check(self, option: str) -> None:
+ """Display a deprecation warning if the given option is deprecated."""
+ if not self.is_deprecated(option):
+ return
+
+ from ansible.utils.display import Display
+
+ Display().deprecated( # pylint: disable=ansible-invalid-deprecated-version
+ msg=f'The {option!r} argument is deprecated.',
+ version=self.version,
+ )
+
+
class ArgumentParser(argparse.ArgumentParser):
- def add_argument(self, *args, **kwargs):
+ def __init__(self, *args, **kwargs) -> None:
+ self.__actions: dict[str | None, type[argparse.Action]] = {}
+
+ super().__init__(*args, **kwargs)
+
+ def register(self, registry_name, value, object):
+ """Track registration of actions so that they can be resolved later by name, without depending on the internals of ArgumentParser."""
+ if registry_name == 'action':
+ self.__actions[value] = object
+
+ super().register(registry_name, value, object)
+
+ def _patch_argument(self, args: tuple[str, ...], kwargs: dict[str, t.Any]) -> None:
+ """
+ Patch `kwargs` for an `add_argument` call using the given `args` and `kwargs`.
+ This is used to apply tags to entire categories of CLI arguments.
+ """
+ name = args[0]
+ action = kwargs.get('action')
+ resolved_action = self.__actions.get(action, action) # get the action by name, or use as-is (assume it's a subclass of argparse.Action)
+ action_signature = inspect.signature(resolved_action.__init__)
+
+ if action_signature.parameters.get('type'):
+ arg_type = kwargs.get('type', str)
+
+ if not callable(arg_type):
+ raise ValueError(f'Argument {name!r} requires a callable for the {"type"!r} parameter, not {arg_type!r}.')
+
+ wrapped_arg_type = _tagged_type_factory(name, arg_type)
+
+ kwargs.update(type=wrapped_arg_type)
+
+ def _patch_parser(self, parser):
+ """Patch and return the given parser to intercept the `add_argument` method for further patching."""
+ parser_add_argument = parser.add_argument
+
+ def add_argument(*ag_args, **ag_kwargs):
+ self._patch_argument(ag_args, ag_kwargs)
+
+ parser_add_argument(*ag_args, **ag_kwargs)
+
+ parser.add_argument = add_argument
+
+ return parser
+
+ def add_subparsers(self, *args, **kwargs):
+ sub = super().add_subparsers(*args, **kwargs)
+ sub_add_parser = sub.add_parser
+
+ def add_parser(*sub_args, **sub_kwargs):
+ return self._patch_parser(sub_add_parser(*sub_args, **sub_kwargs))
+
+ sub.add_parser = add_parser
+
+ return sub
+
+ def add_argument_group(self, *args, **kwargs):
+ return self._patch_parser(super().add_argument_group(*args, **kwargs))
+
+ def add_mutually_exclusive_group(self, *args, **kwargs):
+ return self._patch_parser(super().add_mutually_exclusive_group(*args, **kwargs))
+
+ def add_argument(self, *args, **kwargs) -> argparse.Action:
action = kwargs.get('action')
help = kwargs.get('help')
if help and action in {'append', 'append_const', 'count', 'extend', PrependListAction}:
help = f'{help.rstrip(".")}. This argument may be specified multiple times.'
kwargs['help'] = help
+
+ self._patch_argument(args, kwargs)
+
+ deprecated: DeprecatedArgument | None
+
+ if deprecated := kwargs.pop('deprecated', None):
+ action_type = self.__actions.get(action, action)
+
+ class DeprecatedAction(action_type): # type: ignore[misc, valid-type]
+ """A wrapper around an action which handles deprecation warnings."""
+
+ def __call__(self, parser, namespace, values, option_string=None) -> t.Any:
+ deprecated.check(option_string)
+
+ return super().__call__(parser, namespace, values, option_string)
+
+ kwargs['action'] = DeprecatedAction
+
return super().add_argument(*args, **kwargs)
@@ -132,7 +243,7 @@ def _git_repo_info(repo_path):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path[:-4], gitdir)
- except (IOError, AttributeError):
+ except (OSError, AttributeError):
return ''
with open(os.path.join(repo_path, "HEAD")) as f:
line = f.readline().rstrip("\n")
@@ -182,13 +293,28 @@ def version(prog=None):
cpath = "Default w/o overrides"
else:
cpath = C.DEFAULT_MODULE_PATH
+
+ if HAS_LIBYAML:
+ libyaml_fragment = "with libyaml"
+
+ # noinspection PyBroadException
+ try:
+ from yaml._yaml import get_version_string
+
+ libyaml_fragment += f" v{get_version_string()}"
+ except Exception: # pylint: disable=broad-except
+ libyaml_fragment += ", version unknown"
+ else:
+ libyaml_fragment = "without libyaml"
+
result.append(" configured module search path = %s" % cpath)
result.append(" ansible python module location = %s" % ':'.join(ansible.__path__))
result.append(" ansible collection location = %s" % ':'.join(C.COLLECTIONS_PATHS))
result.append(" executable location = %s" % sys.argv[0])
result.append(" python version = %s (%s)" % (''.join(sys.version.splitlines()), to_native(sys.executable)))
- result.append(" jinja version = %s" % j2_version)
- result.append(" libyaml = %s" % HAS_LIBYAML)
+ result.append(f" jinja version = {_templating.jinja2_version}")
+ result.append(f" pyyaml version = {yaml.__version__} ({libyaml_fragment})")
+
return "\n".join(result)
@@ -292,19 +418,20 @@ def add_fork_options(parser):
def add_inventory_options(parser):
"""Add options for commands that utilize inventory"""
parser.add_argument('-i', '--inventory', '--inventory-file', dest='inventory', action="append",
- help="specify inventory host path or comma separated host list. --inventory-file is deprecated")
+ help="specify inventory host path or comma separated host list",
+ deprecated=DeprecatedArgument(version='2.23', option='--inventory-file'))
parser.add_argument('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_argument('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
+ parser.add_argument('--flush-cache', dest='flush_cache', action='store_true',
+ help="clear the fact cache for every host in inventory")
def add_meta_options(parser):
"""Add options for commands which can launch meta tasks from the command line"""
parser.add_argument('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true',
help="run handlers even if a task fails")
- parser.add_argument('--flush-cache', dest='flush_cache', action='store_true',
- help="clear the fact cache for every host in inventory")
def add_module_options(parser):
@@ -318,9 +445,9 @@ def add_module_options(parser):
def add_output_options(parser):
"""Add options for commands which can change their output"""
parser.add_argument('-o', '--one-line', dest='one_line', action='store_true',
- help='condense output')
+ help='condense output', deprecated=DeprecatedArgument(version='2.23'))
parser.add_argument('-t', '--tree', dest='tree', default=None,
- help='log output to this directory')
+ help='log output to this directory', deprecated=DeprecatedArgument(version='2.23'))
def add_runas_options(parser):
@@ -396,3 +523,29 @@ def add_vault_options(parser):
help='ask for vault password')
base_group.add_argument('--vault-password-file', '--vault-pass-file', default=[], dest='vault_password_files',
help="vault password file", type=unfrack_path(follow=False), action='append')
+
+
+def _tagged_type_factory(name: str, func: t.Callable[[str], object], /) -> t.Callable[[str], object]:
+ """
+ Return a callable that wraps the given function.
+ The result of the wrapped function will be tagged with Origin.
+ It will also be tagged with TrustedAsTemplate if it is equal to the original input string.
+ """
+ def tag_value(value: str) -> object:
+ result = func(value)
+
+ if result is value or func is str:
+ # Values which are not mutated are automatically trusted for templating.
+ # The `is` reference equality is critically important, as other types may only alter the tags, so object equality is
+ # not sufficient to prevent them being tagged as trusted when they should not.
+ # Explicitly include all usages using the `str` type factory since it strips tags.
+ result = TrustedAsTemplate().tag(result)
+
+ if not (origin := Origin.get_tag(value)):
+ origin = Origin(description=f'<CLI argument {name!r}>')
+
+ return origin.tag(result)
+
+ tag_value._name = name # simplify debugging by attaching the argument name to the function
+
+ return tag_value
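
`_tagged_type_factory` works by interposing on argparse `type=` callables. A standalone sketch of the interposition pattern with plain argparse, where a `print` stands in for the Origin/TrustedAsTemplate tagging performed above:

```python
import argparse


def wrap_type(name: str, func):
    def convert(value: str):
        result = func(value)
        # post-process every converted CLI value in one place; the real factory
        # applies Origin (and sometimes TrustedAsTemplate) tags here instead
        print(f'{name} produced {result!r} from {value!r}')
        return result

    convert._name = name  # aid debugging, as the real factory does
    return convert


parser = argparse.ArgumentParser()
parser.add_argument('--forks', type=wrap_type('--forks', int))

args = parser.parse_args(['--forks', '5'])
print(args.forks)  # 5
```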
diff --git a/lib/ansible/cli/config.py b/lib/ansible/cli/config.py
index cd801212fca..ed42545df47 100755
--- a/lib/ansible/cli/config.py
+++ b/lib/ansible/cli/config.py
@@ -10,7 +10,6 @@ from ansible.cli import CLI
import os
import shlex
-import subprocess
import sys
import yaml
@@ -21,10 +20,10 @@ import ansible.plugins.loader as plugin_loader
from ansible import constants as C
from ansible.cli.arguments import option_helpers as opt_help
-from ansible.config.manager import ConfigManager, Setting
+from ansible.config.manager import ConfigManager
from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleRequiredOptionError
from ansible.module_utils.common.text.converters import to_native, to_text, to_bytes
-from ansible.module_utils.common.json import json_dump
+from ansible._internal import _json
from ansible.module_utils.six import string_types
from ansible.parsing.quoting import is_quoted
from ansible.parsing.yaml.dumper import AnsibleDumper
@@ -178,8 +177,6 @@ class ConfigCLI(CLI):
except Exception:
if context.CLIARGS['action'] in ['view']:
raise
- elif context.CLIARGS['action'] in ['edit', 'update']:
- display.warning("File does not exist, used empty file: %s" % self.config_file)
elif context.CLIARGS['action'] == 'view':
raise AnsibleError('Invalid or no config file was supplied')
@@ -187,30 +184,6 @@ class ConfigCLI(CLI):
# run the requested action
context.CLIARGS['func']()
- def execute_update(self):
- """
- Updates a single setting in the specified ansible.cfg
- """
- raise AnsibleError("Option not implemented yet")
-
- # pylint: disable=unreachable
- if context.CLIARGS['setting'] is None:
- raise AnsibleOptionsError("update option requires a setting to update")
-
- (entry, value) = context.CLIARGS['setting'].split('=')
- if '.' in entry:
- (section, option) = entry.split('.')
- else:
- section = 'defaults'
- option = entry
- subprocess.call([
- 'ansible',
- '-m', 'ini_file',
- 'localhost',
- '-c', 'local',
- '-a', '"dest=%s section=%s option=%s value=%s backup=yes"' % (self.config_file, section, option, value)
- ])
-
def execute_view(self):
"""
Displays the current config file
@@ -221,20 +194,6 @@ class ConfigCLI(CLI):
except Exception as e:
raise AnsibleError("Failed to open config file: %s" % to_native(e))
- def execute_edit(self):
- """
- Opens ansible.cfg in the default EDITOR
- """
- raise AnsibleError("Option not implemented yet")
-
- # pylint: disable=unreachable
- try:
- editor = shlex.split(C.config.get_config_value('EDITOR'))
- editor.append(self.config_file)
- subprocess.call(editor)
- except Exception as e:
- raise AnsibleError("Failed to open editor: %s" % to_native(e))
-
def _list_plugin_settings(self, ptype, plugins=None):
entries = {}
loader = getattr(plugin_loader, '%s_loader' % ptype)
@@ -302,7 +261,7 @@ class ConfigCLI(CLI):
if context.CLIARGS['format'] == 'yaml':
output = yaml_dump(config_entries)
elif context.CLIARGS['format'] == 'json':
- output = json_dump(config_entries)
+ output = _json.json_dumps_formatted(config_entries)
self.pager(to_text(output, errors='surrogate_or_strict'))
@@ -458,21 +417,21 @@ class ConfigCLI(CLI):
entries = []
for setting in sorted(config):
- changed = (config[setting].origin not in ('default', 'REQUIRED') and setting not in _IGNORE_CHANGED)
+ changed = (config[setting]['origin'] not in ('default', 'REQUIRED') and setting not in _IGNORE_CHANGED)
if context.CLIARGS['format'] == 'display':
- if isinstance(config[setting], Setting):
+ if isinstance(config[setting], dict):
# proceed normally
- value = config[setting].value
- if config[setting].origin == 'default' or setting in _IGNORE_CHANGED:
+ value = config[setting]['value']
+ if config[setting]['origin'] == 'default' or setting in _IGNORE_CHANGED:
color = 'green'
value = self.config.template_default(value, get_constants())
- elif config[setting].origin == 'REQUIRED':
+ elif config[setting]['origin'] == 'REQUIRED':
# should include '_terms', '_input', etc
color = 'red'
else:
color = 'yellow'
- msg = "%s(%s) = %s" % (setting, config[setting].origin, value)
+ msg = "%s(%s) = %s" % (setting, config[setting]['origin'], value)
else:
color = 'green'
msg = "%s(%s) = %s" % (setting, 'default', config[setting].get('default'))
@@ -480,10 +439,10 @@ class ConfigCLI(CLI):
entry = stringc(msg, color)
else:
entry = {}
- for key in config[setting]._fields:
+ for key in config[setting].keys():
if key == 'type':
continue
- entry[key] = getattr(config[setting], key)
+ entry[key] = config[setting][key]
if not context.CLIARGS['only_changed'] or changed:
entries.append(entry)
@@ -495,11 +454,17 @@ class ConfigCLI(CLI):
# Add base
config = self.config.get_configuration_definitions(ignore_private=True)
# convert to settings
+ settings = {}
for setting in config.keys():
v, o = C.config.get_config_value_and_origin(setting, cfile=self.config_file, variables=get_constants())
- config[setting] = Setting(setting, v, o, None)
+ settings[setting] = {
+ 'name': setting,
+ 'value': v,
+ 'origin': o,
+ 'type': None
+ }
- return self._render_settings(config)
+ return self._render_settings(settings)
def _get_plugin_configs(self, ptype, plugins):
@@ -554,7 +519,12 @@ class ConfigCLI(CLI):
# not all cases will be error
o = 'REQUIRED'
- config_entries[finalname][setting] = Setting(setting, v, o, None)
+ config_entries[finalname][setting] = {
+ 'name': setting,
+ 'value': v,
+ 'origin': o,
+ 'type': None
+ }
# pretty please!
results = self._render_settings(config_entries[finalname])
@@ -587,7 +557,12 @@ class ConfigCLI(CLI):
if v is None and o is None:
# not all cases will be error
o = 'REQUIRED'
- server_config[setting] = Setting(setting, v, o, None)
+ server_config[setting] = {
+ 'name': setting,
+ 'value': v,
+ 'origin': o,
+ 'type': None
+ }
if context.CLIARGS['format'] == 'display':
if not context.CLIARGS['only_changed'] or server_config:
equals = '=' * len(server)
@@ -617,7 +592,7 @@ class ConfigCLI(CLI):
for server_config in server_config_list:
server = list(server_config.keys())[0]
server_reduced_config = server_config.pop(server)
- configs[server] = server_reduced_config
+ configs[server] = list(server_reduced_config.values())
output.append({'GALAXY_SERVERS': configs})
if context.CLIARGS['type'] == 'all':
@@ -644,7 +619,7 @@ class ConfigCLI(CLI):
if context.CLIARGS['format'] == 'yaml':
text = yaml_dump(output)
elif context.CLIARGS['format'] == 'json':
- text = json_dump(output)
+ text = _json.json_dumps_formatted(output)
self.pager(to_text(text, errors='surrogate_or_strict'))
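
[review note] With the Setting namedtuple gone, each entry is a plain dict, so it serializes directly with any stdlib dumper. A standalone sketch of the shape used above (example values are hypothetical):

    import json

    setting = {'name': 'DEFAULT_FORKS', 'value': 5, 'origin': 'default', 'type': None}
    # plain dicts need no _fields/getattr indirection or custom encoder
    print(json.dumps(setting, indent=4, sort_keys=True))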
diff --git a/lib/ansible/cli/console.py b/lib/ansible/cli/console.py
index 6f355938aa5..5a6f941664f 100755
--- a/lib/ansible/cli/console.py
+++ b/lib/ansible/cli/console.py
@@ -29,6 +29,7 @@ from ansible.plugins.list import list_plugins
from ansible.plugins.loader import module_loader, fragment_loader
from ansible.utils import plugin_docs
from ansible.utils.color import stringc
+from ansible._internal._datatag._tags import TrustedAsTemplate
from ansible.utils.display import Display
display = Display()
@@ -72,6 +73,8 @@ class ConsoleCLI(CLI, cmd.Cmd):
# use specific to console, but fallback to highlight for backwards compatibility
NORMAL_PROMPT = C.COLOR_CONSOLE_PROMPT or C.COLOR_HIGHLIGHT
+ USES_CONNECTION = True
+
def __init__(self, args):
super(ConsoleCLI, self).__init__(args)
@@ -179,6 +182,8 @@ class ConsoleCLI(CLI, cmd.Cmd):
else:
module_args = ''
+ module_args = TrustedAsTemplate().tag(module_args)
+
if self.callback:
cb = self.callback
elif C.DEFAULT_LOAD_CALLBACK_PLUGINS and C.DEFAULT_STDOUT_CALLBACK != 'default':
@@ -189,7 +194,7 @@ class ConsoleCLI(CLI, cmd.Cmd):
result = None
try:
check_raw = module in C._ACTION_ALLOWS_RAW_ARGS
- task = dict(action=dict(module=module, args=parse_kv(module_args, check_raw=check_raw)), timeout=self.task_timeout)
+ task = dict(action=module, args=parse_kv(module_args, check_raw=check_raw), timeout=self.task_timeout)
play_ds = dict(
name="Ansible Shell",
hosts=self.cwd,
@@ -217,7 +222,7 @@ class ConsoleCLI(CLI, cmd.Cmd):
variable_manager=self.variable_manager,
loader=self.loader,
passwords=self.passwords,
- stdout_callback=cb,
+ stdout_callback_name=cb,
run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
run_tree=False,
forks=self.forks,
@@ -237,11 +242,8 @@ class ConsoleCLI(CLI, cmd.Cmd):
except KeyboardInterrupt:
display.error('User interrupted execution')
return False
- except Exception as e:
- if self.verbosity >= 3:
- import traceback
- display.v(traceback.format_exc())
- display.error(to_text(e))
+ except Exception as ex:
+ display.error(ex)
return False
def emptyline(self):
@@ -571,7 +573,7 @@ class ConsoleCLI(CLI, cmd.Cmd):
histfile = os.path.join(os.path.expanduser("~"), ".ansible-console_history")
try:
readline.read_history_file(histfile)
- except IOError:
+ except OSError:
pass
atexit.register(readline.write_history_file, histfile)
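
[review note] The IOError -> OSError change is purely cosmetic; the two names are the same class on every supported Python:

    # since Python 3.3, IOError is an alias of OSError, so the except
    # clause catches exactly the same exceptions as before
    assert IOError is OSError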
diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py
index af137829907..6b3c27e3408 100755
--- a/lib/ansible/cli/doc.py
+++ b/lib/ansible/cli/doc.py
@@ -9,13 +9,16 @@ from __future__ import annotations
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
+import collections.abc
import importlib
import pkgutil
import os
import os.path
import re
import textwrap
-import traceback
+import typing as t
+
+import yaml
import ansible.plugins.loader as plugin_loader
@@ -28,19 +31,22 @@ from ansible.collections.list import list_collection_dirs
from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError, AnsiblePluginNotFound
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.common.collections import is_sequence
-from ansible.module_utils.common.json import json_dump
from ansible.module_utils.common.yaml import yaml_dump
from ansible.module_utils.six import string_types
from ansible.parsing.plugin_docs import read_docstub
-from ansible.parsing.utils.yaml import from_yaml
from ansible.parsing.yaml.dumper import AnsibleDumper
-from ansible.plugins.list import list_plugins
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible._internal._yaml._loader import AnsibleInstrumentedLoader
+from ansible.plugins.list import _list_plugins_with_info, _PluginDocMetadata
from ansible.plugins.loader import action_loader, fragment_loader
from ansible.utils.collection_loader import AnsibleCollectionConfig, AnsibleCollectionRef
from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path
from ansible.utils.color import stringc
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_plugin_docs, get_docstring, get_versioned_doclink
+from ansible.template import trust_as_template
+from ansible._internal import _json
+from ansible._internal._templating import _jinja_plugins
display = Display()
@@ -83,10 +89,9 @@ ref_style = {
def jdump(text):
try:
- display.display(json_dump(text))
- except TypeError as e:
- display.vvv(traceback.format_exc())
- raise AnsibleError('We could not convert all the documentation into JSON as there was a conversion issue: %s' % to_native(e))
+ display.display(_json.json_dumps_formatted(text))
+ except TypeError as ex:
+ raise AnsibleError('We could not convert all the documentation into JSON as there was a conversion issue.') from ex
class RoleMixin(object):
@@ -104,7 +109,7 @@ class RoleMixin(object):
""" Load and process the YAML for the first found of a set of role files
:param str root: The root path to get the files from
- :param str files: List of candidate file names in order of precedence
+ :param list files: List of candidate file names in order of precedence
:param str role_name: The name of the role for which we want the argspec data.
:param str collection: collection name or None in case of stand alone roles
@@ -117,6 +122,7 @@ class RoleMixin(object):
meta_path = os.path.join(root, 'meta')
# Check all potential spec files
+ path = None
for specfile in files:
full_path = os.path.join(meta_path, specfile)
if os.path.exists(full_path):
@@ -128,11 +134,11 @@ class RoleMixin(object):
try:
with open(path, 'r') as f:
- data = from_yaml(f.read(), file_name=path)
+ data = yaml.load(trust_as_template(f), Loader=AnsibleLoader)
if data is None:
data = {}
- except (IOError, OSError) as e:
- raise AnsibleParserError("Could not read the role '%s' (at %s)" % (role_name, path), orig_exc=e)
+ except OSError as ex:
+ raise AnsibleParserError(f"Could not read the role {role_name!r} at {path!r}.") from ex
return data
@@ -696,16 +702,16 @@ class DocCLI(CLI, RoleMixin):
display.warning("Skipping role '%s' due to: %s" % (role, role_json[role]['error']), True)
continue
text += self.get_role_man_text(role, role_json[role])
- except AnsibleParserError as e:
+ except AnsibleError as ex:
# TODO: warn and skip role?
- raise AnsibleParserError("Role '%s" % (role), orig_exc=e)
+ raise AnsibleParserError(f"Error extracting role docs from {role!r}.") from ex
# display results
DocCLI.pager("\n".join(text))
@staticmethod
def _list_keywords():
- return from_yaml(pkgutil.get_data('ansible', 'keyword_desc.yml'))
+ return yaml.load(pkgutil.get_data('ansible', 'keyword_desc.yml'), Loader=AnsibleInstrumentedLoader)
@staticmethod
def _get_keywords_docs(keys):
@@ -768,10 +774,8 @@ class DocCLI(CLI, RoleMixin):
data[key] = kdata
- except (AttributeError, KeyError) as e:
- display.warning("Skipping Invalid keyword '%s' specified: %s" % (key, to_text(e)))
- if display.verbosity >= 3:
- display.verbose(traceback.format_exc())
+ except (AttributeError, KeyError) as ex:
+ display.error_as_warning(f'Skipping invalid keyword {key!r}.', ex)
return data
@@ -787,48 +791,63 @@ class DocCLI(CLI, RoleMixin):
return coll_filter
def _list_plugins(self, plugin_type, content):
-
- results = {}
- self.plugins = {}
- loader = DocCLI._prep_loader(plugin_type)
+ DocCLI._prep_loader(plugin_type)
coll_filter = self._get_collection_filter()
- self.plugins.update(list_plugins(plugin_type, coll_filter))
+ plugins = _list_plugins_with_info(plugin_type, coll_filter)
+
+ # Remove the internal ansible._protomatter plugins if getting all plugins
+ if not coll_filter:
+ plugins = {k: v for k, v in plugins.items() if not k.startswith('ansible._protomatter.')}
# get appropriate content depending on option
if content == 'dir':
- results = self._get_plugin_list_descriptions(loader)
+ results = self._get_plugin_list_descriptions(plugins)
elif content == 'files':
- results = {k: self.plugins[k][0] for k in self.plugins.keys()}
+ results = {k: v.path for k, v in plugins.items()}
else:
- results = {k: {} for k in self.plugins.keys()}
+ results = {k: {} for k in plugins.keys()}
self.plugin_list = set() # reset for next iteration
return results
- def _get_plugins_docs(self, plugin_type, names, fail_ok=False, fail_on_errors=True):
-
+ def _get_plugins_docs(self, plugin_type: str, names: collections.abc.Iterable[str], fail_ok: bool = False, fail_on_errors: bool = True) -> dict[str, dict]:
loader = DocCLI._prep_loader(plugin_type)
+ if plugin_type in ('filter', 'test'):
+ jinja2_builtins = _jinja_plugins.get_jinja_builtin_plugin_descriptions(plugin_type)
+ jinja2_builtins.update({name.split('.')[-1]: value for name, value in jinja2_builtins.items()}) # add short-named versions for lookup
+ else:
+ jinja2_builtins = {}
+
# get the docs for plugins in the command line list
plugin_docs = {}
for plugin in names:
- doc = {}
+ doc: dict[str, t.Any] = {}
try:
- doc, plainexamples, returndocs, metadata = get_plugin_docs(plugin, plugin_type, loader, fragment_loader, (context.CLIARGS['verbosity'] > 0))
+ doc, plainexamples, returndocs, metadata = self._get_plugin_docs_with_jinja2_builtins(
+ plugin,
+ plugin_type,
+ loader,
+ fragment_loader,
+ jinja2_builtins,
+ )
except AnsiblePluginNotFound as e:
display.warning(to_native(e))
continue
- except Exception as e:
+ except Exception as ex:
+ msg = "Missing documentation (or could not parse documentation)"
+
if not fail_on_errors:
- plugin_docs[plugin] = {'error': 'Missing documentation or could not parse documentation: %s' % to_native(e)}
+ plugin_docs[plugin] = {'error': f'{msg}: {ex}.'}
continue
- display.vvv(traceback.format_exc())
- msg = "%s %s missing documentation (or could not parse documentation): %s\n" % (plugin_type, plugin, to_native(e))
+
+ msg = f"{plugin_type} {plugin} {msg}"
+
if fail_ok:
- display.warning(msg)
+ display.warning(f'{msg}: {ex}')
else:
- raise AnsibleError(msg)
+ raise AnsibleError(f'{msg}.') from ex
if not doc:
# The doc section existed but was empty
@@ -840,15 +859,48 @@ class DocCLI(CLI, RoleMixin):
if not fail_on_errors:
# Check whether JSON serialization would break
try:
- json_dump(docs)
- except Exception as e: # pylint:disable=broad-except
- plugin_docs[plugin] = {'error': 'Cannot serialize documentation as JSON: %s' % to_native(e)}
+ _json.json_dumps_formatted(docs)
+ except Exception as ex: # pylint:disable=broad-except
+ plugin_docs[plugin] = {'error': f'Cannot serialize documentation as JSON: {ex}'}
continue
plugin_docs[plugin] = docs
return plugin_docs
+ def _get_plugin_docs_with_jinja2_builtins(
+ self,
+ plugin_name: str,
+ plugin_type: str,
+ loader: t.Any,
+ fragment_loader: t.Any,
+ jinja_builtins: dict[str, str],
+ ) -> tuple[dict, str | None, dict | None, dict | None]:
+ try:
+ return get_plugin_docs(plugin_name, plugin_type, loader, fragment_loader, (context.CLIARGS['verbosity'] > 0))
+ except Exception:
+ if (desc := jinja_builtins.get(plugin_name, ...)) is not ...:
+ short_name = plugin_name.split('.')[-1]
+ long_name = f'ansible.builtin.{short_name}'
+ # Dynamically build a doc stub for any Jinja2 builtin plugin we haven't
+ # explicitly documented.
+ doc = dict(
+ collection='ansible.builtin',
+ plugin_name=long_name,
+ filename='',
+ short_description=desc,
+ description=[
+ desc,
+ '',
+ f"This is the Jinja builtin {plugin_type} plugin {short_name!r}.",
+ f"See: U(https://jinja.palletsprojects.com/en/stable/templates/#jinja-{plugin_type}s.{short_name})",
+ ],
+ )
+
+ return doc, None, None, None
+
+ raise
+
def _get_roles_path(self):
"""
Add any 'roles' subdir in playbook dir to the roles search path.
@@ -997,10 +1049,10 @@ class DocCLI(CLI, RoleMixin):
def get_all_plugins_of_type(plugin_type):
loader = getattr(plugin_loader, '%s_loader' % plugin_type)
paths = loader._get_paths_with_context()
- plugins = {}
+ plugins = []
for path_context in paths:
- plugins.update(list_plugins(plugin_type))
- return sorted(plugins.keys())
+ plugins += _list_plugins_with_info(plugin_type).keys()
+ return sorted(plugins)
@staticmethod
def get_plugin_metadata(plugin_type, plugin_name):
@@ -1015,9 +1067,8 @@ class DocCLI(CLI, RoleMixin):
try:
doc, __, __, __ = get_docstring(filename, fragment_loader, verbose=(context.CLIARGS['verbosity'] > 0),
collection_name=collection_name, plugin_type=plugin_type)
- except Exception:
- display.vvv(traceback.format_exc())
- raise AnsibleError("%s %s at %s has a documentation formatting error or is missing documentation." % (plugin_type, plugin_name, filename))
+ except Exception as ex:
+ raise AnsibleError(f"{plugin_type} {plugin_name} at {filename!r} has a documentation formatting error or is missing documentation.") from ex
if doc is None:
# Removed plugins don't have any documentation
@@ -1093,24 +1144,25 @@ class DocCLI(CLI, RoleMixin):
try:
text = DocCLI.get_man_text(doc, collection_name, plugin_type)
- except Exception as e:
- display.vvv(traceback.format_exc())
- raise AnsibleError("Unable to retrieve documentation from '%s'" % (plugin), orig_exc=e)
+ except Exception as ex:
+ raise AnsibleError(f"Unable to retrieve documentation from {plugin!r}.") from ex
return text
- def _get_plugin_list_descriptions(self, loader):
+ def _get_plugin_list_descriptions(self, plugins: dict[str, _PluginDocMetadata]) -> dict[str, str]:
descs = {}
- for plugin in self.plugins.keys():
+ for plugin, plugin_info in plugins.items():
# TODO: move to plugin itself i.e: plugin.get_desc()
doc = None
- filename = Path(to_native(self.plugins[plugin][0]))
+
docerror = None
- try:
- doc = read_docstub(filename)
- except Exception as e:
- docerror = e
+ if plugin_info.path:
+ filename = Path(to_native(plugin_info.path))
+ try:
+ doc = read_docstub(filename)
+ except Exception as e:
+ docerror = e
# plugin file was empty or had error, lets try other options
if doc is None:
@@ -1125,9 +1177,15 @@ class DocCLI(CLI, RoleMixin):
except Exception as e:
docerror = e
- if docerror:
- display.warning("%s has a documentation formatting error: %s" % (plugin, docerror))
- continue
+ # Do a final fallback to see if the plugin is a shadowed Jinja2 plugin
+ # without any explicit documentation.
+ if doc is None and plugin_info.jinja_builtin_short_description:
+ descs[plugin] = plugin_info.jinja_builtin_short_description
+ continue
+
+ if docerror:
+ display.error_as_warning(f"{plugin} has a documentation formatting error.", exception=docerror)
+ continue
if not doc or not isinstance(doc, dict):
desc = 'UNDOCUMENTED'
@@ -1170,12 +1228,16 @@ class DocCLI(CLI, RoleMixin):
return 'version %s' % (version_added, )
@staticmethod
- def warp_fill(text, limit, initial_indent='', subsequent_indent='', **kwargs):
+ def warp_fill(text, limit, initial_indent='', subsequent_indent='', initial_extra=0, **kwargs):
result = []
for paragraph in text.split('\n\n'):
- result.append(textwrap.fill(paragraph, limit, initial_indent=initial_indent, subsequent_indent=subsequent_indent,
- break_on_hyphens=False, break_long_words=False, drop_whitespace=True, **kwargs))
+ wrapped = textwrap.fill(paragraph, limit, initial_indent=initial_indent + ' ' * initial_extra, subsequent_indent=subsequent_indent,
+ break_on_hyphens=False, break_long_words=False, drop_whitespace=True, **kwargs)
+ if initial_extra and wrapped.startswith(' ' * initial_extra):
+ wrapped = wrapped[initial_extra:]
+ result.append(wrapped)
initial_indent = subsequent_indent
+ initial_extra = 0
return '\n'.join(result)
@staticmethod
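
[review note] A standalone sketch of the initial_extra trick in warp_fill: wrap as if the first line carried extra padding, then strip that padding so the caller's prefix (the option key) fits within the width limit. The prefix below is hypothetical:

    import textwrap

    prefix = 'o option_name  '  # hypothetical key the caller prepends
    extra = len(prefix)
    wrapped = textwrap.fill('a long description ' * 6, 40,
                            initial_indent=' ' * extra, subsequent_indent=' ' * extra,
                            break_on_hyphens=False, break_long_words=False)
    if wrapped.startswith(' ' * extra):
        wrapped = wrapped[extra:]  # leave room for the prefix on line one
    print(prefix + wrapped)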
@@ -1203,24 +1265,27 @@ class DocCLI(CLI, RoleMixin):
# description is specifically formatted and can either be string or list of strings
if 'description' not in opt:
- raise AnsibleError("All (sub-)options and return values must have a 'description' field")
+ raise AnsibleError("All (sub-)options and return values must have a 'description' field", obj=o)
text.append('')
# TODO: push this to top of for and sort by size, create indent on largest key?
- inline_indent = base_indent + ' ' * max((len(opt_indent) - len(o)) - len(base_indent), 2)
- sub_indent = inline_indent + ' ' * (len(o) + 3)
+ inline_indent = ' ' * max((len(opt_indent) - len(o)) - len(base_indent), 2)
+ extra_indent = base_indent + ' ' * (len(o) + 3)
+ sub_indent = inline_indent + extra_indent
if is_sequence(opt['description']):
for entry_idx, entry in enumerate(opt['description'], 1):
if not isinstance(entry, string_types):
raise AnsibleError("Expected string in description of %s at index %s, got %s" % (o, entry_idx, type(entry)))
if entry_idx == 1:
- text.append(key + DocCLI.warp_fill(DocCLI.tty_ify(entry), limit, initial_indent=inline_indent, subsequent_indent=sub_indent))
+ text.append(key + DocCLI.warp_fill(DocCLI.tty_ify(entry), limit,
+ initial_indent=inline_indent, subsequent_indent=sub_indent, initial_extra=len(extra_indent)))
else:
text.append(DocCLI.warp_fill(DocCLI.tty_ify(entry), limit, initial_indent=sub_indent, subsequent_indent=sub_indent))
else:
if not isinstance(opt['description'], string_types):
raise AnsibleError("Expected string in description of %s, got %s" % (o, type(opt['description'])))
- text.append(key + DocCLI.warp_fill(DocCLI.tty_ify(opt['description']), limit, initial_indent=inline_indent, subsequent_indent=sub_indent))
+ text.append(key + DocCLI.warp_fill(DocCLI.tty_ify(opt['description']), limit,
+ initial_indent=inline_indent, subsequent_indent=sub_indent, initial_extra=len(extra_indent)))
del opt['description']
suboptions = []
@@ -1244,7 +1309,7 @@ class DocCLI(CLI, RoleMixin):
if ignore in item:
del item[ignore]
- # reformat cli optoins
+ # reformat cli options
if 'cli' in opt and opt['cli']:
conf['cli'] = []
for cli in opt['cli']:
@@ -1326,7 +1391,6 @@ class DocCLI(CLI, RoleMixin):
'This was unintentionally allowed when plugin attributes were added, '
'but the feature does not map well to role argument specs.',
version='2.20',
- collection_name='ansible.builtin',
)
text.append("")
text.append(_format("ATTRIBUTES:", 'bold'))
@@ -1360,7 +1424,7 @@ class DocCLI(CLI, RoleMixin):
try:
text.append(yaml_dump(doc.pop('examples'), indent=2, default_flow_style=False))
except Exception as e:
- raise AnsibleParserError("Unable to parse examples section", orig_exc=e)
+ raise AnsibleParserError("Unable to parse examples section.") from e
return text
@@ -1376,7 +1440,7 @@ class DocCLI(CLI, RoleMixin):
pad = display.columns * 0.20
limit = max(display.columns - int(pad), 70)
- text.append("> %s %s (%s)" % (plugin_type.upper(), _format(doc.pop('plugin_name'), 'bold'), doc.pop('filename')))
+ text.append("> %s %s (%s)" % (plugin_type.upper(), _format(doc.pop('plugin_name'), 'bold'), doc.pop('filename') or 'Jinja2'))
if isinstance(doc['description'], list):
descs = doc.pop('description')
@@ -1396,9 +1460,9 @@ class DocCLI(CLI, RoleMixin):
if 'removed_at_date' not in doc['deprecated'] and 'version' in doc['deprecated'] and 'removed_in' not in doc['deprecated']:
doc['deprecated']['removed_in'] = doc['deprecated']['version']
try:
- text.append('\t' + C.config.get_deprecated_msg_from_config(doc['deprecated'], True))
+ text.append('\t' + C.config.get_deprecated_msg_from_config(doc['deprecated'], True, collection_name=collection_name))
except KeyError as e:
- raise AnsibleError("Invalid deprecation documentation structure", orig_exc=e)
+ raise AnsibleError("Invalid deprecation documentation structure.") from e
else:
text.append("%s" % doc['deprecated'])
del doc['deprecated']
@@ -1507,8 +1571,8 @@ class DocCLI(CLI, RoleMixin):
else:
try:
text.append(yaml_dump(doc.pop('plainexamples'), indent=2, default_flow_style=False))
- except Exception as e:
- raise AnsibleParserError("Unable to parse examples section", orig_exc=e)
+ except Exception as ex:
+ raise AnsibleParserError("Unable to parse examples section.") from ex
if doc.get('returndocs', False):
text.append('')
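
[review note] The Jinja2-builtin fallback in _get_plugin_docs_with_jinja2_builtins reduces to the pattern below; real_doc_lookup stands in for get_plugin_docs and the builtins map is hypothetical:

    def real_doc_lookup(name: str) -> dict:
        raise LookupError(name)  # simulate a builtin with no explicit docs

    def docs_with_fallback(name: str, builtins: dict[str, str]) -> dict:
        try:
            return real_doc_lookup(name)
        except Exception:
            if (desc := builtins.get(name, ...)) is not ...:  # Ellipsis sentinel, as above
                short = name.split('.')[-1]
                return {'plugin_name': f'ansible.builtin.{short}', 'short_description': desc}
            raise  # unknown plugin: propagate the original error

    docs_with_fallback('upper', {'upper': 'Convert a value to uppercase.'})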
diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py
index 5e2bef6f151..6c8c749f9b4 100755
--- a/lib/ansible/cli/galaxy.py
+++ b/lib/ansible/cli/galaxy.py
@@ -53,10 +53,12 @@ from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.common.collections import is_iterable
from ansible.module_utils.common.yaml import yaml_dump, yaml_load
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible._internal._datatag._tags import TrustedAsTemplate
from ansible.module_utils import six
from ansible.parsing.dataloader import DataLoader
from ansible.playbook.role.requirement import RoleRequirement
-from ansible.template import Templar
+from ansible._internal._templating._engine import TemplateEngine
+from ansible.template import trust_as_template
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_versioned_doclink
@@ -639,6 +641,7 @@ class GalaxyCLI(CLI):
# it doesn't need to be passed as kwarg to GalaxyApi, same for others we pop here
auth_url = server_options.pop('auth_url')
client_id = server_options.pop('client_id')
+ client_secret = server_options.pop('client_secret')
token_val = server_options['token'] or NoTokenSentinel
username = server_options['username']
api_version = server_options.pop('api_version')
@@ -664,15 +667,17 @@ class GalaxyCLI(CLI):
if username:
server_options['token'] = BasicAuthToken(username, server_options['password'])
else:
- if token_val:
- if auth_url:
- server_options['token'] = KeycloakToken(access_token=token_val,
- auth_url=auth_url,
- validate_certs=validate_certs,
- client_id=client_id)
- else:
- # The galaxy v1 / github / django / 'Token'
- server_options['token'] = GalaxyToken(token=token_val)
+ if auth_url:
+ server_options['token'] = KeycloakToken(
+ access_token=token_val,
+ auth_url=auth_url,
+ validate_certs=validate_certs,
+ client_id=client_id,
+ client_secret=client_secret,
+ )
+ elif token_val:
+ # The galaxy v1 / github / django / 'Token'
+ server_options['token'] = GalaxyToken(token=token_val)
server_options.update(galaxy_options)
config_servers.append(GalaxyAPI(
@@ -912,8 +917,8 @@ class GalaxyCLI(CLI):
@staticmethod
def _get_skeleton_galaxy_yml(template_path, inject_data):
- with open(to_bytes(template_path, errors='surrogate_or_strict'), 'rb') as template_obj:
- meta_template = to_text(template_obj.read(), errors='surrogate_or_strict')
+ with open(to_bytes(template_path, errors='surrogate_or_strict'), 'r') as template_obj:
+ meta_template = TrustedAsTemplate().tag(to_text(template_obj.read(), errors='surrogate_or_strict'))
galaxy_meta = get_collections_galaxy_meta_info()
@@ -949,7 +954,7 @@ class GalaxyCLI(CLI):
return textwrap.fill(v, width=117, initial_indent="# ", subsequent_indent="# ", break_on_hyphens=False)
loader = DataLoader()
- templar = Templar(loader, variables={'required_config': required_config, 'optional_config': optional_config})
+ templar = TemplateEngine(loader, variables={'required_config': required_config, 'optional_config': optional_config})
templar.environment.filters['comment_ify'] = comment_ify
meta_value = templar.template(meta_template)
@@ -1151,7 +1156,7 @@ class GalaxyCLI(CLI):
loader = DataLoader()
inject_data.update(load_extra_vars(loader))
- templar = Templar(loader, variables=inject_data)
+ templar = TemplateEngine(loader, variables=inject_data)
# create role directory
if not os.path.exists(b_obj_path):
@@ -1193,7 +1198,7 @@ class GalaxyCLI(CLI):
elif ext == ".j2" and not in_templates_dir:
src_template = os.path.join(root, f)
dest_file = os.path.join(obj_path, rel_root, filename)
- template_data = to_text(loader._get_file_contents(src_template)[0], errors='surrogate_or_strict')
+ template_data = trust_as_template(loader.get_text_file_contents(src_template))
try:
b_rendered = to_bytes(templar.template(template_data), errors='surrogate_or_strict')
except AnsibleError as e:
@@ -1761,6 +1766,8 @@ class GalaxyCLI(CLI):
return 0
+ _task_check_delay_sec = 10 # allows unit test override
+
def execute_import(self):
""" used to import a role into Ansible Galaxy """
@@ -1814,7 +1821,7 @@ class GalaxyCLI(CLI):
rc = ['SUCCESS', 'FAILED'].index(state)
finished = True
else:
- time.sleep(10)
+ time.sleep(self._task_check_delay_sec)
return rc
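
[review note] _task_check_delay_sec exists so the import-status polling loop can run fast under test; a hypothetical pytest-style sketch:

    def test_execute_import_polls_quickly(monkeypatch):  # illustrative only
        # shrink the polling delay instead of monkeypatching time.sleep
        monkeypatch.setattr(GalaxyCLI, '_task_check_delay_sec', 0)
        ...  # drive execute_import() against a stubbed Galaxy API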
diff --git a/lib/ansible/cli/inventory.py b/lib/ansible/cli/inventory.py
index 5d99d24ed68..1856d005088 100755
--- a/lib/ansible/cli/inventory.py
+++ b/lib/ansible/cli/inventory.py
@@ -9,15 +9,18 @@ from __future__ import annotations
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
+import json
import sys
+import typing as t
import argparse
from ansible import constants as C
from ansible import context
from ansible.cli.arguments import option_helpers as opt_help
-from ansible.errors import AnsibleError, AnsibleOptionsError
-from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleRuntimeError
+from ansible.module_utils.common.text.converters import to_bytes, to_text
+from ansible._internal._json._profiles import _inventory_legacy
from ansible.utils.vars import combine_vars
from ansible.utils.display import Display
from ansible.vars.plugins import get_vars_from_inventory_sources, get_vars_from_path
@@ -148,42 +151,24 @@ class InventoryCLI(CLI):
try:
with open(to_bytes(outfile), 'wb') as f:
f.write(to_bytes(results))
- except (OSError, IOError) as e:
- raise AnsibleError('Unable to write to destination file (%s): %s' % (to_native(outfile), to_native(e)))
+ except OSError as ex:
+ raise AnsibleError(f'Unable to write to destination file {outfile!r}.') from ex
sys.exit(0)
sys.exit(1)
@staticmethod
def dump(stuff):
-
if context.CLIARGS['yaml']:
import yaml
+
from ansible.parsing.yaml.dumper import AnsibleDumper
+
results = to_text(yaml.dump(stuff, Dumper=AnsibleDumper, default_flow_style=False, allow_unicode=True))
elif context.CLIARGS['toml']:
- from ansible.plugins.inventory.toml import toml_dumps
- try:
- results = toml_dumps(stuff)
- except TypeError as e:
- raise AnsibleError(
- 'The source inventory contains a value that cannot be represented in TOML: %s' % e
- )
- except KeyError as e:
- raise AnsibleError(
- 'The source inventory contains a non-string key (%s) which cannot be represented in TOML. '
- 'The specified key will need to be converted to a string. Be aware that if your playbooks '
- 'expect this key to be non-string, your playbooks will need to be modified to support this '
- 'change.' % e.args[0]
- )
+ results = toml_dumps(stuff)
else:
- import json
- from ansible.parsing.ajson import AnsibleJSONEncoder
- try:
- results = json.dumps(stuff, cls=AnsibleJSONEncoder, sort_keys=True, indent=4, preprocess_unsafe=True, ensure_ascii=False)
- except TypeError as e:
- results = json.dumps(stuff, cls=AnsibleJSONEncoder, sort_keys=False, indent=4, preprocess_unsafe=True, ensure_ascii=False)
- display.warning("Could not sort JSON output due to issues while sorting keys: %s" % to_native(e))
+ results = json.dumps(stuff, cls=_inventory_legacy.Encoder, sort_keys=True, indent=4)
return results
@@ -306,7 +291,11 @@ class InventoryCLI(CLI):
results = format_group(top, frozenset(h.name for h in hosts))
# populate meta
- results['_meta'] = {'hostvars': {}}
+ results['_meta'] = {
+ 'hostvars': {},
+ 'profile': _inventory_legacy.Encoder.profile_name,
+ }
+
for host in hosts:
hvars = self._get_host_variables(host)
if hvars:
@@ -409,6 +398,17 @@ class InventoryCLI(CLI):
return results
+def toml_dumps(data: t.Any) -> str:
+ try:
+ from tomli_w import dumps as _tomli_w_dumps
+ except ImportError:
+ pass
+ else:
+ return _tomli_w_dumps(data)
+
+ raise AnsibleRuntimeError('The Python library "tomli-w" is required when using the TOML output format.')
+
+
def main(args=None):
InventoryCLI.cli_executor(args)
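
[review note] Usage sketch for the new module-level toml_dumps; tomli-w is a real optional dependency, but its presence at runtime is an assumption:

    try:
        print(toml_dumps({'all': {'hosts': {'web1': {}}}}))  # hypothetical inventory data
    except AnsibleRuntimeError as ex:
        print(ex)  # tomli-w is not installed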
diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py
index 9e0e19d3c59..22fb13c274d 100755
--- a/lib/ansible/cli/playbook.py
+++ b/lib/ansible/cli/playbook.py
@@ -34,6 +34,8 @@ class PlaybookCLI(CLI):
name = 'ansible-playbook'
+ USES_CONNECTION = True
+
def init_parser(self):
# create parser for CLI options
@@ -143,10 +145,6 @@ class PlaybookCLI(CLI):
# Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
CLI.get_host_list(inventory, context.CLIARGS['subset'])
- # flush fact cache if requested
- if context.CLIARGS['flush_cache']:
- self._flush_cache(inventory, variable_manager)
-
# create the playbook executor, which manages running the plays via a task queue manager
pbex = PlaybookExecutor(playbooks=context.CLIARGS['args'], inventory=inventory,
variable_manager=variable_manager, loader=loader,
@@ -228,12 +226,6 @@ class PlaybookCLI(CLI):
else:
return results
- @staticmethod
- def _flush_cache(inventory, variable_manager):
- for host in inventory.list_hosts():
- hostname = host.get_name()
- variable_manager.clear_facts(hostname)
-
def main(args=None):
PlaybookCLI.cli_executor(args)
diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py
index 212d63872eb..8dded6226bb 100755
--- a/lib/ansible/cli/pull.py
+++ b/lib/ansible/cli/pull.py
@@ -31,6 +31,34 @@ from ansible.utils.display import Display
display = Display()
+SAFE_OUTPUT_ENV = {
+ 'ANSIBLE_CALLBACK_RESULT_FORMAT': 'json',
+ 'ANSIBLE_LOAD_CALLBACK_PLUGINS': '0',
+}
+
+
+def safe_output_env(f):
+
+ def wrapper(*args, **kwargs):
+
+ orig = {}
+
+ for k, v in SAFE_OUTPUT_ENV.items():
+ orig[k] = os.environ.get(k, None)
+ os.environ[k] = v
+
+ result = f(*args, **kwargs)
+
+ for key in orig.keys():
+ if orig[key] is None:
+ del os.environ[key]
+ else:
+ os.environ[key] = orig[key]
+
+ return result
+
+ return wrapper
+
class PullCLI(CLI):
""" Used to pull a remote copy of ansible on each managed node,
@@ -42,7 +70,7 @@ class PullCLI(CLI):
you should use an external scheduler and/or locking to ensure there are no clashing operations.
The setup playbook can be tuned to change the cron frequency, logging locations, and parameters to ansible-pull.
- This is useful both for extreme scale-out as well as periodic remediation.
+ This is useful both for extreme scale-out and periodic remediation.
Usage of the 'fetch' module to retrieve logs from ansible-pull runs would be an
excellent way to gather and analyze remote logs from ansible-pull.
"""
@@ -76,8 +104,9 @@ class PullCLI(CLI):
return inv_opts
def init_parser(self):
- """ create an options parser for bin/ansible """
+ """ Specific args/option parser for pull """
+ # signature is different from parent as caller should not need to add usage/desc
super(PullCLI, self).init_parser(
usage='%prog -U <repository> [options] [<playbook.yml>]',
desc="pulls playbooks from a VCS repo and executes them on target host")
@@ -106,10 +135,12 @@ class PullCLI(CLI):
help='path to the directory to which Ansible will checkout the repository.')
self.parser.add_argument('-U', '--url', dest='url', default=None, help='URL of the playbook repository')
self.parser.add_argument('--full', dest='fullclone', action='store_true', help='Do a full clone, instead of a shallow one.')
+ # TODO: resolve conflict with check mode, added manually below
self.parser.add_argument('-C', '--checkout', dest='checkout',
help='branch/tag/commit to checkout. Defaults to behavior of repository module.')
self.parser.add_argument('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
help='adds the hostkey for the repo url if not already added')
+ # Overloads the adhoc -m/--module-name option, but is really a passthrough to adhoc
self.parser.add_argument('-m', '--module-name', dest='module_name', default=self.DEFAULT_REPO_TYPE,
help='Repository module name, which ansible will use to check out the repo. Choices are %s. Default is %s.'
% (self.REPO_CHOICES, self.DEFAULT_REPO_TYPE))
@@ -121,7 +152,7 @@ class PullCLI(CLI):
self.parser.add_argument('--track-subs', dest='tracksubs', default=False, action='store_true',
help='submodules will track the latest changes. This is equivalent to specifying the --remote flag to git submodule update')
# add a subset of the check_opts flag group manually, as the full set's
- # shortcodes conflict with above --checkout/-C
+ # shortcodes conflict with above --checkout/-C, see to-do above
self.parser.add_argument("--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur")
self.parser.add_argument("--diff", default=C.DIFF_ALWAYS, dest='diff', action='store_true',
@@ -177,7 +208,7 @@ class PullCLI(CLI):
limit_opts = 'localhost,127.0.0.1'
base_opts = '-c local '
if context.CLIARGS['verbosity'] > 0:
- base_opts += ' -%s' % ''.join(["v" for x in range(0, context.CLIARGS['verbosity'])])
+ base_opts += ' -%s' % ''.join(["v" for dummy in range(0, context.CLIARGS['verbosity'])])
# Attempt to use the inventory passed in as an argument
# It might not yet have been downloaded so use localhost as default
@@ -250,16 +281,22 @@ class PullCLI(CLI):
# RUN the Checkout command
display.debug("running ansible with VCS module to checkout repo")
display.vvvv('EXEC: %s' % cmd)
- rc, b_out, b_err = run_cmd(cmd, live=True)
+ rc, b_out, b_err = safe_output_env(run_cmd)(cmd, live=True)
if rc != 0:
if context.CLIARGS['force']:
display.warning("Unable to update repository. Continuing with (forced) run of playbook.")
else:
return rc
- elif context.CLIARGS['ifchanged'] and b'"changed": true' not in b_out:
- display.display("Repository has not changed, quitting.")
- return 0
+ elif context.CLIARGS['ifchanged']:
+ # detect json/yaml/header output; any of these counts as 'changed'
+ for detect in (b'"changed": true', b"changed: True", b"| CHANGED =>"):
+ if detect in b_out:
+ break
+ else:
+ # no change, we bail
+ display.display(f"Repository has not changed, quitting: {b_out!r}")
+ return 0
playbook = self.select_playbook(context.CLIARGS['dest'])
if playbook is None:
@@ -298,6 +335,9 @@ class PullCLI(CLI):
if context.CLIARGS['diff']:
cmd += ' -D'
+ if context.CLIARGS['flush_cache']:
+ cmd += ' --flush-cache'
+
os.chdir(context.CLIARGS['dest'])
# redo inventory options as new files might exist now
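
[review note] safe_output_env restores the environment only on the success path; if the wrapped call raises, the overrides leak. A sketch of an equivalent wrapper that restores unconditionally (a possible follow-up, not part of this patch):

    import functools
    import os

    def safe_output_env_strict(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            orig = {k: os.environ.get(k) for k in SAFE_OUTPUT_ENV}
            os.environ.update(SAFE_OUTPUT_ENV)
            try:
                return f(*args, **kwargs)
            finally:  # restore even when f() raises
                for k, v in orig.items():
                    if v is None:
                        os.environ.pop(k, None)
                    else:
                        os.environ[k] = v
        return wrapper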
diff --git a/lib/ansible/cli/scripts/ansible_connection_cli_stub.py b/lib/ansible/cli/scripts/ansible_connection_cli_stub.py
index 0c8baa9871f..adaaedc669d 100644
--- a/lib/ansible/cli/scripts/ansible_connection_cli_stub.py
+++ b/lib/ansible/cli/scripts/ansible_connection_cli_stub.py
@@ -21,7 +21,7 @@ from ansible.cli.arguments import option_helpers as opt_help
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.module_utils.connection import Connection, ConnectionError, send_data, recv_data
from ansible.module_utils.service import fork_process
-from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
+from ansible.module_utils._internal._json._profiles import _tagless
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import connection_loader, init_plugin_loader
from ansible.utils.path import unfrackpath, makedirs_safe
@@ -110,7 +110,7 @@ class ConnectionProcess(object):
result['exception'] = traceback.format_exc()
finally:
result['messages'] = messages
- self.fd.write(json.dumps(result, cls=AnsibleJSONEncoder))
+ self.fd.write(json.dumps(result, cls=_tagless.Encoder))
self.fd.close()
def run(self):
@@ -292,7 +292,7 @@ def main(args=None):
else:
os.close(w)
rfd = os.fdopen(r, 'r')
- data = json.loads(rfd.read(), cls=AnsibleJSONDecoder)
+ data = json.loads(rfd.read(), cls=_tagless.Decoder)
messages.extend(data.pop('messages'))
result.update(data)
@@ -330,10 +330,10 @@ def main(args=None):
sys.stdout = saved_stdout
if 'exception' in result:
rc = 1
- sys.stderr.write(json.dumps(result, cls=AnsibleJSONEncoder))
+ sys.stderr.write(json.dumps(result, cls=_tagless.Encoder))
else:
rc = 0
- sys.stdout.write(json.dumps(result, cls=AnsibleJSONEncoder))
+ sys.stdout.write(json.dumps(result, cls=_tagless.Encoder))
sys.exit(rc)
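
[review note] The _tagless profile classes are internal, but they plug into the stdlib the same way any cls= hook does; a rough standalone analogy, not the real profile:

    import json

    class ExampleEncoder(json.JSONEncoder):
        def default(self, o):       # called only for types json cannot serialize
            if isinstance(o, set):  # e.g. represent sets as sorted lists
                return sorted(o)
            return super().default(o)

    json.dumps({'messages': {'b', 'a'}}, cls=ExampleEncoder)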
diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py
index 8b6dc88a3de..6e3b56d002a 100755
--- a/lib/ansible/cli/vault.py
+++ b/lib/ansible/cli/vault.py
@@ -84,7 +84,7 @@ class VaultCLI(CLI):
create_parser.add_argument('--skip-tty-check', default=False, help='allows editor to be opened when no tty attached',
dest='skip_tty_check', action='store_true')
- decrypt_parser = subparsers.add_parser('decrypt', help='Decrypt vault encrypted file', parents=[output, common])
+ decrypt_parser = subparsers.add_parser('decrypt', help='Decrypt vault encrypted file or string', parents=[output, common])
decrypt_parser.set_defaults(func=self.execute_decrypt)
decrypt_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
@@ -138,11 +138,12 @@ class VaultCLI(CLI):
raise AnsibleOptionsError("At most one input file may be used with the --output option")
if options.action == 'encrypt_string':
- if '-' in options.args or not options.args or options.encrypt_string_stdin_name:
+ if '-' in options.args or options.encrypt_string_stdin_name or (not options.args and not options.encrypt_string_prompt):
+ # prompting and reading from stdin are mutually exclusive; if stdin input is still provided, it is ignored
self.encrypt_string_read_stdin = True
- # TODO: prompting from stdin and reading from stdin seem mutually exclusive, but verify that.
if options.encrypt_string_prompt and self.encrypt_string_read_stdin:
+ # should only trigger if --prompt was provided together with either '-' or a stdin name
raise AnsibleOptionsError('The --prompt option is not supported if also reading input from stdin')
return options
@@ -227,6 +228,7 @@ class VaultCLI(CLI):
vault_ids=new_vault_ids,
vault_password_files=new_vault_password_files,
ask_vault_pass=context.CLIARGS['ask_vault_pass'],
+ initialize_context=False,
create_new_password=True)
if not new_vault_secrets:
@@ -258,7 +260,7 @@ class VaultCLI(CLI):
display.display("Reading plaintext input from stdin", stderr=True)
for f in context.CLIARGS['args'] or ['-']:
- # Fixme: use the correct vau
+ # FIXME: use the correct vau
self.editor.encrypt_file(f, self.encrypt_secret,
vault_id=self.encrypt_vault_id,
output_file=context.CLIARGS['output_file'])
diff --git a/lib/ansible/compat/importlib_resources.py b/lib/ansible/compat/importlib_resources.py
index 0df95f0a518..9019812d582 100644
--- a/lib/ansible/compat/importlib_resources.py
+++ b/lib/ansible/compat/importlib_resources.py
@@ -3,17 +3,14 @@
from __future__ import annotations
-import sys
+from ansible.utils.display import Display as _Display
-HAS_IMPORTLIB_RESOURCES = False
+from importlib.resources import files # pylint: disable=unused-import
-if sys.version_info < (3, 10):
- try:
- from importlib_resources import files # type: ignore[import] # pylint: disable=unused-import
- except ImportError:
- files = None # type: ignore[assignment]
- else:
- HAS_IMPORTLIB_RESOURCES = True
-else:
- from importlib.resources import files
- HAS_IMPORTLIB_RESOURCES = True
+HAS_IMPORTLIB_RESOURCES = True
+
+_Display().deprecated(
+ msg="The `ansible.compat.importlib_resources` module is deprecated.",
+ help_text="Use `importlib.resources` from the Python standard library instead.",
+ version="2.23",
+)
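
[review note] The replacement suggested by the deprecation message is the stdlib API directly; importlib.resources.files() has been available since Python 3.9. A sketch mirroring the keyword_desc.yml lookup used elsewhere in this patch:

    from importlib.resources import files

    keyword_desc_bytes = files('ansible').joinpath('keyword_desc.yml').read_bytes()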
diff --git a/lib/ansible/config/base.yml b/lib/ansible/config/base.yml
index 2613e1812d1..3eeb0250f6c 100644
--- a/lib/ansible/config/base.yml
+++ b/lib/ansible/config/base.yml
@@ -1,6 +1,26 @@
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
---
+_ANSIBALLZ_COVERAGE_CONFIG:
+ name: Configure the AnsiballZ code coverage extension
+ description:
+ - Enables and configures the AnsiballZ code coverage extension.
+ - This is for internal use only.
+ env:
+ - {name: _ANSIBLE_ANSIBALLZ_COVERAGE_CONFIG}
+ vars:
+ - {name: _ansible_ansiballz_coverage_config}
+ version_added: '2.19'
+_ANSIBALLZ_DEBUGGER_CONFIG:
+ name: Configure the AnsiballZ remote debugging extension
+ description:
+ - Enables and configures the AnsiballZ remote debugging extension.
+ - This is for internal use only.
+ env:
+ - {name: _ANSIBLE_ANSIBALLZ_DEBUGGER_CONFIG}
+ vars:
+ - {name: _ansible_ansiballz_debugger_config}
+ version_added: '2.19'
_ANSIBLE_CONNECTION_PATH:
env:
- name: _ANSIBLE_CONNECTION_PATH
@@ -9,6 +29,59 @@ _ANSIBLE_CONNECTION_PATH:
- For internal use only.
type: path
version_added: "2.18"
+_CALLBACK_DISPATCH_ERROR_BEHAVIOR:
+ name: Callback dispatch error behavior
+ default: warning
+ description:
+ - Action to take when a callback dispatch results in an error.
+ type: choices
+ choices: &basic_error
+ error: issue a 'fatal' error and stop the play
+ warning: issue a warning but continue
+ ignore: just continue silently
+ env: [ { name: _ANSIBLE_CALLBACK_DISPATCH_ERROR_BEHAVIOR } ]
+ version_added: '2.19'
+_MODULE_METADATA:
+ name: Enable experimental module metadata
+ description:
+ - Enables experimental module-level metadata controls for serialization profile selection.
+ - This is for internal use only.
+ type: boolean
+ default: false
+ env: [ { name: _ANSIBLE_MODULE_METADATA } ]
+ version_added: '2.19'
+ALLOW_BROKEN_CONDITIONALS:
+ # This config option will be deprecated once it no longer has any effect (2.23).
+ name: Allow broken conditionals
+ default: false
+ description:
+ - When enabled, this option allows conditionals with non-boolean results to be used.
+ - A deprecation warning will be emitted in these cases.
+ - By default, non-boolean conditionals result in an error.
+ - Such results often indicate unintentional use of templates where they are not supported, resulting in a conditional that is always true.
+ - When this option is enabled, conditional expressions which are a literal ``None`` or empty string will evaluate as true for backwards compatibility.
+ env: [{name: ANSIBLE_ALLOW_BROKEN_CONDITIONALS}]
+ ini:
+ - {key: allow_broken_conditionals, section: defaults}
+ type: boolean
+ version_added: "2.19"
+ALLOW_EMBEDDED_TEMPLATES:
+ name: Allow embedded templates
+ default: true
+ description:
+ - When enabled, this option allows embedded templates to be used for specific backward compatibility scenarios.
+ - A deprecation warning will be emitted in these cases.
+ - First, conditionals (for example, ``failed_when``, ``until``, ``assert.that``) fully enclosed in template delimiters.
+ - "Second, string constants in conditionals (for example, ``when: some_var == '{{ some_other_var }}'``)."
+ - Finally, positional arguments to lookups (for example, ``lookup('pipe', 'echo {{ some_var }}')``).
+ - This feature is deprecated, since embedded templates are unnecessary in these cases.
+ - When disabled, use of embedded templates will result in an error.
+ - A future release will disable this feature by default.
+ env: [{name: ANSIBLE_ALLOW_EMBEDDED_TEMPLATES}]
+ ini:
+ - {key: allow_embedded_templates, section: defaults}
+ type: boolean
+ version_added: "2.19"
ANSIBLE_HOME:
name: The Ansible home path
description:
@@ -160,33 +233,33 @@ AGNOSTIC_BECOME_PROMPT:
yaml: {key: privilege_escalation.agnostic_become_prompt}
version_added: "2.5"
CACHE_PLUGIN:
- name: Persistent Cache plugin
+ name: Persistent Fact Cache plugin
default: memory
- description: Chooses which cache plugin to use, the default 'memory' is ephemeral.
+ description: Chooses which fact cache plugin to use. The default 'memory' plugin is ephemeral, so facts do not persist between runs.
env: [{name: ANSIBLE_CACHE_PLUGIN}]
ini:
- {key: fact_caching, section: defaults}
yaml: {key: facts.cache.plugin}
CACHE_PLUGIN_CONNECTION:
- name: Cache Plugin URI
+ name: Fact Cache Plugin URI
default: ~
- description: Defines connection or path information for the cache plugin.
+ description: Defines connection or path information for the fact cache plugin.
env: [{name: ANSIBLE_CACHE_PLUGIN_CONNECTION}]
ini:
- {key: fact_caching_connection, section: defaults}
yaml: {key: facts.cache.uri}
CACHE_PLUGIN_PREFIX:
- name: Cache Plugin table prefix
+ name: Fact Cache Plugin table prefix
default: ansible_facts
- description: Prefix to use for cache plugin files/tables.
+ description: Prefix to use for fact cache plugin files/tables.
env: [{name: ANSIBLE_CACHE_PLUGIN_PREFIX}]
ini:
- {key: fact_caching_prefix, section: defaults}
yaml: {key: facts.cache.prefix}
CACHE_PLUGIN_TIMEOUT:
- name: Cache Plugin expiration timeout
+ name: Fact Cache Plugin expiration timeout
default: 86400
- description: Expiration timeout for the cache plugin data.
+ description: Expiration timeout for the fact cache plugin data.
env: [{name: ANSIBLE_CACHE_PLUGIN_TIMEOUT}]
ini:
- {key: fact_caching_timeout, section: defaults}
@@ -212,18 +285,9 @@ COLLECTIONS_PATHS:
default: '{{ ANSIBLE_HOME ~ "/collections:/usr/share/ansible/collections" }}'
type: pathspec
env:
- - name: ANSIBLE_COLLECTIONS_PATHS
- deprecated:
- why: does not fit var naming standard, use the singular form ANSIBLE_COLLECTIONS_PATH instead
- version: "2.19"
- name: ANSIBLE_COLLECTIONS_PATH
version_added: '2.10'
ini:
- - key: collections_paths
- section: defaults
- deprecated:
- why: does not fit var naming standard, use the singular form collections_path instead
- version: "2.19"
- key: collections_path
section: defaults
version_added: '2.10'
@@ -233,10 +297,7 @@ COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH:
- When a collection is loaded that does not support the running Ansible version (with the collection metadata key `requires_ansible`).
env: [{name: ANSIBLE_COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH}]
ini: [{key: collections_on_ansible_version_mismatch, section: defaults}]
- choices: &basic_error
- error: issue a 'fatal' error and stop the play
- warning: issue a warning but continue
- ignore: just continue silently
+ choices: *basic_error
default: warning
COLOR_CHANGED:
name: Color for 'changed' task status
@@ -505,6 +566,10 @@ DEFAULT_ALLOW_UNSAFE_LOOKUPS:
- {key: allow_unsafe_lookups, section: defaults}
type: boolean
version_added: "2.2.3"
+ deprecated:
+ why: This option is no longer used in the Ansible Core code base.
+ version: "2.23"
+ alternatives: Lookup plugins are responsible for tagging strings containing templates to allow evaluation as a template.
DEFAULT_ASK_PASS:
name: Ask for the login password
default: False
@@ -721,7 +786,7 @@ DEFAULT_HASH_BEHAVIOUR:
- {key: hash_behaviour, section: defaults}
DEFAULT_HOST_LIST:
name: Inventory Source
- default: /etc/ansible/hosts
+ default: [/etc/ansible/hosts]
description: Comma-separated list of Ansible inventory sources
env:
- name: ANSIBLE_INVENTORY
@@ -764,15 +829,20 @@ DEFAULT_INVENTORY_PLUGIN_PATH:
DEFAULT_JINJA2_EXTENSIONS:
name: Enabled Jinja2 extensions
default: []
+ type: list
description:
- This is a developer-specific feature that allows enabling additional Jinja2 extensions.
- "See the Jinja2 documentation for details. If you do not know what these do, you probably don't need to change this setting :)"
env: [{name: ANSIBLE_JINJA2_EXTENSIONS}]
ini:
- {key: jinja2_extensions, section: defaults}
+ deprecated:
+ why: Custom Jinja2 extensions are no longer supported by the templating engine
+ version: "2.23"
+ alternatives: Ansible-supported Jinja plugins (tests, filters, lookups)
DEFAULT_JINJA2_NATIVE:
name: Use Jinja2's NativeEnvironment for templating
- default: False
+ default: True
description: This option preserves variable types during template operations.
env: [{name: ANSIBLE_JINJA2_NATIVE}]
ini:
@@ -780,6 +850,10 @@ DEFAULT_JINJA2_NATIVE:
type: boolean
yaml: {key: jinja2_native}
version_added: 2.7
+ deprecated:
+ why: This option is no longer used in the Ansible Core code base.
+ version: "2.23"
+ alternatives: Jinja2 native mode is now the default and only option.
DEFAULT_KEEP_REMOTE_FILES:
name: Keep remote files
default: False
@@ -791,7 +865,6 @@ DEFAULT_KEEP_REMOTE_FILES:
- {key: keep_remote_files, section: defaults}
type: boolean
DEFAULT_LIBVIRT_LXC_NOSECLABEL:
- # TODO: move to plugin
name: No security label on Lxc
default: False
description:
@@ -803,6 +876,10 @@ DEFAULT_LIBVIRT_LXC_NOSECLABEL:
- {key: libvirt_lxc_noseclabel, section: selinux}
type: boolean
version_added: "2.1"
+ deprecated:
+ why: This option was moved to the plugin itself
+ version: "2.22"
+ alternatives: Use the option from the plugin itself.
DEFAULT_LOAD_CALLBACK_PLUGINS:
name: Load callbacks for adhoc
default: False
@@ -858,6 +935,10 @@ DEFAULT_MANAGED_STR:
ini:
- {key: ansible_managed, section: defaults}
yaml: {key: defaults.ansible_managed}
+ deprecated:
+ why: The `ansible_managed` variable can be set just like any other variable, or a different variable can be used.
+ version: "2.23"
+ alternatives: Set the `ansible_managed` variable, or use any custom variable in templates.
DEFAULT_MODULE_ARGS:
name: Adhoc default arguments
default: ~
@@ -936,6 +1017,10 @@ DEFAULT_NULL_REPRESENTATION:
ini:
- {key: null_representation, section: defaults}
type: raw
+ deprecated:
+ why: This option is no longer used in the Ansible Core code base.
+ version: "2.23"
+ alternatives: There is no alternative at the moment. A different mechanism would have to be implemented in the current code base.
DEFAULT_POLL_INTERVAL:
name: Async poll interval
default: 15
@@ -1002,7 +1087,7 @@ DEFAULT_ROLES_PATH:
yaml: {key: defaults.roles_path}
DEFAULT_SELINUX_SPECIAL_FS:
name: Problematic file systems
- default: fuse, nfs, vboxsf, ramfs, 9p, vfat
+ default: [fuse, nfs, vboxsf, ramfs, 9p, vfat]
description:
- "Some filesystems do not support safe operations and/or return inconsistent errors,
this setting makes Ansible 'tolerate' those in the list without causing fatal errors."
@@ -1135,6 +1220,10 @@ DEFAULT_UNDEFINED_VAR_BEHAVIOR:
ini:
- {key: error_on_undefined_vars, section: defaults}
type: boolean
+ deprecated:
+ why: This option is no longer used in the Ansible Core code base.
+ version: "2.23"
+ alternatives: There is no alternative at the moment. A different mechanism would have to be implemented in the current code base.
DEFAULT_VARS_PLUGIN_PATH:
name: Vars Plugins Path
default: '{{ ANSIBLE_HOME ~ "/plugins/vars:/usr/share/ansible/plugins/vars" }}'
@@ -1143,15 +1232,6 @@ DEFAULT_VARS_PLUGIN_PATH:
ini:
- {key: vars_plugins, section: defaults}
type: pathspec
-# TODO: unused?
-#DEFAULT_VAR_COMPRESSION_LEVEL:
-# default: 0
-# description: 'TODO: write it'
-# env: [{name: ANSIBLE_VAR_COMPRESSION_LEVEL}]
-# ini:
-# - {key: var_compression_level, section: defaults}
-# type: integer
-# yaml: {key: defaults.var_compression_level}
DEFAULT_VAULT_ID_MATCH:
name: Force vault id match
default: False
@@ -1219,6 +1299,9 @@ DEPRECATION_WARNINGS:
ini:
- {key: deprecation_warnings, section: defaults}
type: boolean
+ vars:
+ - name: ansible_deprecation_warnings
+ version_added: '2.19'
DEVEL_WARNING:
name: Running devel warning
default: True
@@ -1272,6 +1355,23 @@ DISPLAY_SKIPPED_HOSTS:
ini:
- {key: display_skipped_hosts, section: defaults}
type: boolean
+DISPLAY_TRACEBACK:
+ name: Control traceback display
+ default: [never]
+ description: When to include tracebacks in extended error messages.
+ env:
+ - name: ANSIBLE_DISPLAY_TRACEBACK
+ ini:
+ - {key: display_traceback, section: defaults}
+ type: list
+ choices:
+ - error
+ - warning
+ - deprecated
+ - deprecated_value
+ - always
+ - never
+ version_added: "2.19"
DOCSITE_ROOT_URL:
name: Root docsite URL
default: https://docs.ansible.com/ansible-core/
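
A quick sanity check of the new DISPLAY_TRACEBACK entry above, as a minimal Python sketch (assumes a stock ansible-core install; the env var name and `list` coercion are per the definition):

    import os

    # comma-separated form, coerced by the 'list' type declared above
    os.environ['ANSIBLE_DISPLAY_TRACEBACK'] = 'error,warning'

    from ansible import constants as C  # config is resolved at import time

    print(C.DISPLAY_TRACEBACK)  # ['error', 'warning']
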
@@ -1405,15 +1505,6 @@ GALAXY_COLLECTIONS_PATH_WARNING:
ini:
- {key: collections_path_warning, section: galaxy}
version_added: "2.16"
-# TODO: unused?
-#GALAXY_SCMS:
-# name: Galaxy SCMS
-# default: git, hg
-# description: Available galaxy source control management systems.
-# env: [{name: ANSIBLE_GALAXY_SCMS}]
-# ini:
-# - {key: scms, section: galaxy}
-# type: list
GALAXY_SERVER:
default: https://galaxy.ansible.com
description: "URL to prepend when roles don't specify the full URI, assume they are referencing this server as the source."
@@ -1579,21 +1670,12 @@ INTERPRETER_PYTHON:
description:
- Path to the Python interpreter to be used for module execution on remote targets, or an automatic discovery mode.
Supported discovery modes are ``auto`` (the default), ``auto_silent``, ``auto_legacy``, and ``auto_legacy_silent``.
- All discovery modes employ a lookup table to use the included system Python (on distributions known to include one),
- falling back to a fixed ordered list of well-known Python interpreter locations if a platform-specific default is not
- available. The fallback behavior will issue a warning that the interpreter should be set explicitly (since interpreters
- installed later may change which one is used). This warning behavior can be disabled by setting ``auto_silent`` or
- ``auto_legacy_silent``. The value of ``auto_legacy`` provides all the same behavior, but for backward-compatibility
- with older Ansible releases that always defaulted to ``/usr/bin/python``, will use that interpreter if present.
-_INTERPRETER_PYTHON_DISTRO_MAP:
- name: Mapping of known included platform pythons for various Linux distros
- default:
- # Entry only for testing
- ansible test:
- '99': /usr/bin/python99
- version_added: "2.8"
- # FUTURE: add inventory override once we're sure it can't be abused by a rogue target
- # FUTURE: add a platform layer to the map so we could use for, eg, freebsd/macos/etc?
+ All discovery modes match against an ordered list of well-known Python interpreter locations.
+ The fallback behavior will issue a warning that the interpreter should be set explicitly (since interpreters
+ installed later may change which one is used). This warning behavior can be disabled by setting ``auto_silent``.
+ The ``auto_legacy`` modes are deprecated and behave the same as their respective ``auto`` modes.
+ They exist for backward-compatibility with older Ansible releases that always defaulted to ``/usr/bin/python``,
+ which will use that interpreter if present.
INTERPRETER_PYTHON_FALLBACK:
name: Ordered list of Python interpreters to check for in discovery
default:
@@ -1665,7 +1747,7 @@ INVENTORY_EXPORT:
type: bool
INVENTORY_IGNORE_EXTS:
name: Inventory ignore extensions
- default: "{{(REJECT_EXTS + ('.orig', '.cfg', '.retry'))}}"
+ default: "{{ REJECT_EXTS + ['.orig', '.cfg', '.retry'] }}"
description: List of extensions to ignore when using a directory as an inventory source.
env: [{name: ANSIBLE_INVENTORY_IGNORE}]
ini:
@@ -1722,7 +1804,7 @@ INJECT_FACTS_AS_VARS:
version_added: "2.5"
MODULE_IGNORE_EXTS:
name: Module ignore extensions
- default: "{{(REJECT_EXTS + ('.yaml', '.yml', '.ini'))}}"
+ default: "{{ REJECT_EXTS + ['.yaml', '.yml', '.ini'] }}"
description:
- List of extensions to ignore when looking for modules to load.
- This is for rejecting script and binary module fallback extensions.
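
The switch from tuple to list literals in these two templated defaults matches REJECT_EXTS becoming a list in constants.py below; concatenating mixed sequence types with `+` raises. A standalone illustration:

    reject_exts = ['.pyc', '.pyo']  # now a list, see constants.py below

    try:
        reject_exts + ('.orig', '.cfg')  # list + tuple
    except TypeError as ex:
        print(ex)  # can only concatenate list (not "tuple") to list

    print(reject_exts + ['.orig', '.cfg'])  # ['.pyc', '.pyo', '.orig', '.cfg']
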
@@ -1762,29 +1844,6 @@ PAGER:
- name: ANSIBLE_PAGER
version_added: '2.15'
- name: PAGER
-PARAMIKO_HOST_KEY_AUTO_ADD:
- default: False
- description: 'TODO: write it'
- env: [{name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD}]
- ini:
- - {key: host_key_auto_add, section: paramiko_connection}
- type: boolean
- deprecated:
- why: This option was moved to the plugin itself
- version: "2.20"
- alternatives: Use the option from the plugin itself.
-PARAMIKO_LOOK_FOR_KEYS:
- name: look for keys
- default: True
- description: 'TODO: write it'
- env: [{name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS}]
- ini:
- - {key: look_for_keys, section: paramiko_connection}
- type: boolean
- deprecated:
- why: This option was moved to the plugin itself
- version: "2.20"
- alternatives: Use the option from the plugin itself.
PERSISTENT_CONTROL_PATH_DIR:
name: Persistence socket path
default: '{{ ANSIBLE_HOME ~ "/pc" }}'
@@ -1903,6 +1962,32 @@ SHOW_CUSTOM_STATS:
ini:
- {key: show_custom_stats, section: defaults}
type: bool
+SSH_AGENT:
+ name: Manage an SSH Agent
+ description: Manage an SSH Agent via Ansible. A configuration of ``none`` will not interact with an agent,
+ ``auto`` will start and destroy an agent via the ``ssh-agent`` binary during the run, and a path
+ to an SSH_AUTH_SOCK will allow interaction with a pre-existing agent.
+ default: none
+ type: string
+ env: [{name: ANSIBLE_SSH_AGENT}]
+ ini: [{key: ssh_agent, section: connection}]
+ version_added: '2.19'
+SSH_AGENT_EXECUTABLE:
+ name: Executable to start for the ansible-managed SSH agent
+ description: When ``SSH_AGENT`` is ``auto``, the path or name of the ``ssh-agent`` executable to start.
+ default: ssh-agent
+ type: str
+ env: [{name: ANSIBLE_SSH_AGENT_EXECUTABLE}]
+ ini: [{key: ssh_agent_executable, section: connection}]
+ version_added: '2.19'
+SSH_AGENT_KEY_LIFETIME:
+ name: Set a maximum lifetime when adding identities to an agent
+ description: For keys inserted into an agent defined by ``SSH_AGENT``, define a lifetime, in seconds, that the key may remain
+ in the agent.
+ type: int
+ env: [{name: ANSIBLE_SSH_AGENT_KEY_LIFETIME}]
+ ini: [{key: ssh_agent_key_lifetime, section: connection}]
+ version_added: '2.19'
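
Read together, the three new agent entries can be exercised through their env vars; a hedged sketch (assumes these definitions are installed; the actual agent startup happens elsewhere in core):

    import os

    from ansible.config.manager import ConfigManager

    os.environ['ANSIBLE_SSH_AGENT'] = 'auto'              # start/stop an agent for this run
    os.environ['ANSIBLE_SSH_AGENT_KEY_LIFETIME'] = '300'  # added keys expire after 5 minutes

    manager = ConfigManager()
    print(manager.get_config_value('SSH_AGENT'))               # auto
    print(manager.get_config_value('SSH_AGENT_KEY_LIFETIME'))  # 300, coerced to int
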
STRING_TYPE_FILTERS:
name: Filters to preserve strings
default: [string, to_json, to_nice_json, to_yaml, to_nice_yaml, ppretty, json]
@@ -1913,6 +1998,10 @@ STRING_TYPE_FILTERS:
ini:
- {key: dont_type_filters, section: jinja2}
type: list
+ deprecated:
+ why: This option has no effect.
+ version: "2.23"
+ alternatives: None; native types returned from filters are always preserved.
SYSTEM_WARNINGS:
name: System warnings
default: True
@@ -1965,6 +2054,52 @@ TASK_TIMEOUT:
- {key: task_timeout, section: defaults}
type: integer
version_added: '2.10'
+_TEMPLAR_SANDBOX_MODE:
+ name: Control Jinja template sandbox behavior
+ default: default
+ description:
+ - The default Jinja sandbox behavior blocks template access to all `_` prefixed object attributes and known collection mutation methods (e.g., `dict.clear()`, `list.append()`).
+ type: choices
+ choices:
+ - default
+ - allow_unsafe_attributes
+ env: [{name: _ANSIBLE_TEMPLAR_SANDBOX_MODE}]
+ deprecated:
+ why: Controlling sandbox behavior is a temporary workaround.
+ version: '2.23'
+_TEMPLAR_UNKNOWN_TYPE_CONVERSION:
+ name: Templar unknown type conversion behavior
+ default: warning
+ description:
+ - Action to take when an unknown type is converted for variable storage during template finalization.
+ - This setting has no effect on the inability to store unsupported variable types as the result of templating.
+ - Experimental diagnostic feature, subject to change.
+ type: choices
+ choices: *basic_error
+ env: [{name: _ANSIBLE_TEMPLAR_UNKNOWN_TYPE_CONVERSION}]
+ version_added: '2.19'
+_TEMPLAR_UNKNOWN_TYPE_ENCOUNTERED:
+ name: Templar unknown type encountered behavior
+ default: ignore
+ description:
+ - Action to take when an unknown type is encountered inside a template pipeline.
+ - Experimental diagnostic feature, subject to change.
+ type: choices
+ choices: *basic_error
+ env: [{name: _ANSIBLE_TEMPLAR_UNKNOWN_TYPE_ENCOUNTERED}]
+ version_added: '2.19'
+_TEMPLAR_UNTRUSTED_TEMPLATE_BEHAVIOR:
+ name: Templar untrusted template behavior
+ default: ignore
+ description:
+ - Action to take when processing of an untrusted template is skipped.
+ - For `ignore` or `warning`, the input template string is returned as-is.
+ - This setting has no effect on expressions.
+ - Experimental diagnostic feature, subject to change.
+ type: choices
+ choices: *basic_error
+ env: [{name: _ANSIBLE_TEMPLAR_UNTRUSTED_TEMPLATE_BEHAVIOR}]
+ version_added: '2.19'
WORKER_SHUTDOWN_POLL_COUNT:
name: Worker Shutdown Poll Count
default: 0
@@ -2048,25 +2183,6 @@ NETCONF_SSH_CONFIG:
- {key: ssh_config, section: netconf_connection}
yaml: {key: netconf_connection.ssh_config}
default: null
-STRING_CONVERSION_ACTION:
- version_added: '2.8'
- description:
- - Action to take when a module parameter value is converted to a string (this does not affect variables).
- For string parameters, values such as '1.00', "['a', 'b',]", and 'yes', 'y', etc.
- will be converted by the YAML parser unless fully quoted.
- - Valid options are 'error', 'warn', and 'ignore'.
- - Since 2.8, this option defaults to 'warn' but will change to 'error' in 2.12.
- default: 'warn'
- env:
- - name: ANSIBLE_STRING_CONVERSION_ACTION
- ini:
- - section: defaults
- key: string_conversion_action
- type: string
- deprecated:
- why: This option is no longer used in the Ansible Core code base.
- version: "2.19"
- alternatives: There is no alternative at the moment. A different mechanism would have to be implemented in the current code base.
VALIDATE_ACTION_GROUP_METADATA:
version_added: '2.12'
description:
diff --git a/lib/ansible/config/manager.py b/lib/ansible/config/manager.py
index f71613bca62..c4b0ffbc362 100644
--- a/lib/ansible/config/manager.py
+++ b/lib/ansible/config/manager.py
@@ -6,29 +6,28 @@ from __future__ import annotations
import atexit
import decimal
import configparser
+import functools
import os
import os.path
import sys
import stat
import tempfile
+import typing as t
-from collections import namedtuple
from collections.abc import Mapping, Sequence
from jinja2.nativetypes import NativeEnvironment
-from ansible.errors import AnsibleOptionsError, AnsibleError, AnsibleRequiredOptionError
+from ansible._internal._datatag import _tags
+from ansible.errors import AnsibleOptionsError, AnsibleError, AnsibleUndefinedConfigEntry, AnsibleRequiredOptionError
+from ansible.module_utils._internal._datatag import AnsibleTagHelper
from ansible.module_utils.common.sentinel import Sentinel
from ansible.module_utils.common.text.converters import to_text, to_bytes, to_native
from ansible.module_utils.common.yaml import yaml_load
-from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.parsing.quoting import unquote
-from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
from ansible.utils.path import cleanup_tmp_file, makedirs_safe, unfrackpath
-Setting = namedtuple('Setting', 'name value origin type')
-
INTERNAL_DEFS = {'lookup': ('_terms',)}
GALAXY_SERVER_DEF = [
@@ -40,6 +39,7 @@ GALAXY_SERVER_DEF = [
('api_version', False, 'int'),
('validate_certs', False, 'bool'),
('client_id', False, 'str'),
+ ('client_secret', False, 'str'),
('timeout', False, 'int'),
]
@@ -52,144 +52,180 @@ GALAXY_SERVER_ADDITIONAL = {
}
-def _get_entry(plugin_type, plugin_name, config):
- """ construct entry for requested config """
- entry = ''
+@t.runtime_checkable
+class _EncryptedStringProtocol(t.Protocol):
+ """Protocol representing an `EncryptedString`, since it cannot be imported here."""
+ # DTFIX-FUTURE: collapse this with the one in collection loader, once we can
+
+ def _decrypt(self) -> str: ...
+
+
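
For readers unfamiliar with `typing.runtime_checkable`: it lets `isinstance()` match any object that exposes the protocol's methods, which is how the manager can detect an `EncryptedString` without importing it. A self-contained demo with hypothetical names:

    import typing as t

    @t.runtime_checkable
    class Decryptable(t.Protocol):
        def _decrypt(self) -> str: ...

    class FakeSecret:
        def _decrypt(self) -> str:
            return 'plaintext'

    print(isinstance(FakeSecret(), Decryptable))  # True; purely structural, no shared base class
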
+def _get_config_label(plugin_type: str, plugin_name: str, config: str) -> str:
+ """Return a label for the given config."""
+ entry = f'{config!r}'
+
if plugin_type:
- entry += 'plugin_type: %s ' % plugin_type
+ entry += ' for'
+
if plugin_name:
- entry += 'plugin: %s ' % plugin_name
- entry += 'setting: %s ' % config
+ entry += f' {plugin_name!r}'
+
+ entry += f' {plugin_type} plugin'
+
return entry
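
Assuming the two suffix appends are nested under the `plugin_type` check, the helper yields labels like:

    _get_config_label('', '', 'TIMEOUT')                         # "'TIMEOUT'"
    _get_config_label('connection', '', 'host_key_checking')     # "'host_key_checking' for connection plugin"
    _get_config_label('connection', 'ssh', 'host_key_checking')  # "'host_key_checking' for 'ssh' connection plugin"
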
-# FIXME: see if we can unify in module_utils with similar function used by argspec
-def ensure_type(value, value_type, origin=None, origin_ftype=None):
- """ return a configuration variable with casting
- :arg value: The value to ensure correct typing of
- :kwarg value_type: The type of the value. This can be any of the following strings:
- :boolean: sets the value to a True or False value
- :bool: Same as 'boolean'
- :integer: Sets the value to an integer or raises a ValueType error
- :int: Same as 'integer'
- :float: Sets the value to a float or raises a ValueType error
- :list: Treats the value as a comma separated list. Split the value
- and return it as a python list.
- :none: Sets the value to None
- :path: Expands any environment variables and tilde's in the value.
- :tmppath: Create a unique temporary directory inside of the directory
- specified by value and return its path.
- :temppath: Same as 'tmppath'
- :tmp: Same as 'tmppath'
- :pathlist: Treat the value as a typical PATH string. (On POSIX, this
- means comma separated strings.) Split the value and then expand
- each part for environment variables and tildes.
- :pathspec: Treat the value as a PATH string. Expands any environment variables
- tildes's in the value.
- :str: Sets the value to string types.
- :string: Same as 'str'
+def ensure_type(value: object, value_type: str | None, origin: str | None = None, origin_ftype: str | None = None) -> t.Any:
+ """
+ Converts `value` to the requested `value_type`; raises `ValueError` for failed conversions.
+
+ Values for `value_type` are:
+
+ * boolean/bool: Return a `bool` by applying non-strict `bool` filter rules:
+ 'y', 'yes', 'on', '1', 'true', 't', 1, 1.0, True return True, any other value is False.
+ * integer/int: Return an `int`. Accepts any `str` parseable by `int` or numeric value with a zero mantissa (including `bool`).
+ * float: Return a `float`. Accepts any `str` parseable by `float` or numeric value (including `bool`).
+ * list: Return a `list`. Accepts `list` or `Sequence`. Also accepts `str`, splitting on ',' while stripping whitespace and unquoting items.
+ * none: Return `None`. Accepts only the string "None".
+ * path: Return a resolved path. Accepts `str`.
+ * temppath/tmppath/tmp: Return a unique temporary directory inside the resolved path specified by the value.
+ * pathspec: Return a `list` of resolved paths. Accepts a `list` or `Sequence`. Also accepts `str`, splitting on ':'.
+ * pathlist: Return a `list` of resolved paths. Accepts a `list` or `Sequence`. Also accepts `str`, splitting on `,` while stripping whitespace from paths.
+ * dictionary/dict: Return a `dict`. Accepts `dict` or `Mapping`.
+ * string/str: Return a `str`. Accepts `bool`, `int`, `float`, `complex` or `str`.
+
+ Path resolution ensures paths are `str` with expansion of '{{CWD}}', environment variables and '~'.
+ Non-absolute paths are expanded relative to the basedir from `origin`, if specified.
+
+ No conversion is performed if `value_type` is unknown or `value` is `None`.
+ When `origin_ftype` is "ini", a `str` result will be unquoted.
"""
- errmsg = ''
- basedir = None
- if origin and os.path.isabs(origin) and os.path.exists(to_bytes(origin)):
- basedir = origin
+ if value is None:
+ return None
+
+ original_value = value
+ copy_tags = value_type not in ('temppath', 'tmppath', 'tmp')
+
+ value = _ensure_type(value, value_type, origin)
+
+ if copy_tags and value is not original_value:
+ if isinstance(value, list):
+ value = [AnsibleTagHelper.tag_copy(original_value, item) for item in value]
+
+ value = AnsibleTagHelper.tag_copy(original_value, value)
+
+ if isinstance(value, str) and origin_ftype and origin_ftype == 'ini':
+ value = unquote(value)
+
+ return value
+
+
+def _ensure_type(value: object, value_type: str | None, origin: str | None = None) -> t.Any:
+ """Internal implementation for `ensure_type`, call that function instead."""
+ original_value = value
+ basedir = origin if origin and os.path.isabs(origin) and os.path.exists(to_bytes(origin)) else None
if value_type:
value_type = value_type.lower()
- if value is not None:
- if value_type in ('boolean', 'bool'):
- value = boolean(value, strict=False)
+ match value_type:
+ case 'boolean' | 'bool':
+ return boolean(value, strict=False)
+
+ case 'integer' | 'int':
+ if isinstance(value, int): # handle both int and bool (which is an int)
+ return int(value)
- elif value_type in ('integer', 'int'):
- if not isinstance(value, int):
+ if isinstance(value, (float, str)):
try:
+ # use Decimal for all other source type conversions; non-zero mantissa is a failure
if (decimal_value := decimal.Decimal(value)) == (int_part := int(decimal_value)):
- value = int_part
- else:
- errmsg = 'int'
- except decimal.DecimalException as e:
- raise ValueError from e
+ return int_part
+ except (decimal.DecimalException, ValueError):
+ pass
- elif value_type == 'float':
- if not isinstance(value, float):
- value = float(value)
+ case 'float':
+ if isinstance(value, float):
+ return value
- elif value_type == 'list':
- if isinstance(value, string_types):
- value = [unquote(x.strip()) for x in value.split(',')]
- elif not isinstance(value, Sequence):
- errmsg = 'list'
+ if isinstance(value, (int, str)):
+ try:
+ return float(value)
+ except ValueError:
+ pass
+
+ case 'list':
+ if isinstance(value, list):
+ return value
+
+ if isinstance(value, str):
+ return [unquote(x.strip()) for x in value.split(',')]
- elif value_type == 'none':
+ if isinstance(value, Sequence) and not isinstance(value, bytes):
+ return list(value)
+
+ case 'none':
if value == "None":
- value = None
+ return None
- if value is not None:
- errmsg = 'None'
+ case 'path':
+ if isinstance(value, str):
+ return resolve_path(value, basedir=basedir)
- elif value_type == 'path':
- if isinstance(value, string_types):
+ case 'temppath' | 'tmppath' | 'tmp':
+ if isinstance(value, str):
value = resolve_path(value, basedir=basedir)
- else:
- errmsg = 'path'
- elif value_type in ('tmp', 'temppath', 'tmppath'):
- if isinstance(value, string_types):
- value = resolve_path(value, basedir=basedir)
if not os.path.exists(value):
makedirs_safe(value, 0o700)
+
prefix = 'ansible-local-%s' % os.getpid()
value = tempfile.mkdtemp(prefix=prefix, dir=value)
atexit.register(cleanup_tmp_file, value, warn=True)
- else:
- errmsg = 'temppath'
- elif value_type == 'pathspec':
- if isinstance(value, string_types):
+ return value
+
+ case 'pathspec':
+ if isinstance(value, str):
value = value.split(os.pathsep)
- if isinstance(value, Sequence):
- value = [resolve_path(x, basedir=basedir) for x in value]
- else:
- errmsg = 'pathspec'
+ if isinstance(value, Sequence) and not isinstance(value, bytes) and all(isinstance(x, str) for x in value):
+ return [resolve_path(x, basedir=basedir) for x in value]
- elif value_type == 'pathlist':
- if isinstance(value, string_types):
+ case 'pathlist':
+ if isinstance(value, str):
value = [x.strip() for x in value.split(',')]
- if isinstance(value, Sequence):
- value = [resolve_path(x, basedir=basedir) for x in value]
- else:
- errmsg = 'pathlist'
+ if isinstance(value, Sequence) and not isinstance(value, bytes) and all(isinstance(x, str) for x in value):
+ return [resolve_path(x, basedir=basedir) for x in value]
- elif value_type in ('dict', 'dictionary'):
- if not isinstance(value, Mapping):
- errmsg = 'dictionary'
+ case 'dictionary' | 'dict':
+ if isinstance(value, dict):
+ return value
- elif value_type in ('str', 'string'):
- if isinstance(value, (string_types, AnsibleVaultEncryptedUnicode, bool, int, float, complex)):
- value = to_text(value, errors='surrogate_or_strict')
- if origin_ftype and origin_ftype == 'ini':
- value = unquote(value)
- else:
- errmsg = 'string'
+ if isinstance(value, Mapping):
+ return dict(value)
+
+ case 'string' | 'str':
+ if isinstance(value, str):
+ return value
- # defaults to string type
- elif isinstance(value, (string_types, AnsibleVaultEncryptedUnicode)):
- value = to_text(value, errors='surrogate_or_strict')
- if origin_ftype and origin_ftype == 'ini':
- value = unquote(value)
+ if isinstance(value, (bool, int, float, complex)):
+ return str(value)
- if errmsg:
- raise ValueError(f'Invalid type provided for "{errmsg}": {value!r}')
+ if isinstance(value, _EncryptedStringProtocol):
+ return value._decrypt()
- return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
+ case _:
+ # FIXME: define and document a pass-through value_type (None, 'raw', 'object', '', ...) and then deprecate acceptance of unknown types
+ return value # return non-str values of unknown value_type as-is
+
+ raise ValueError(f'Invalid value provided for {value_type!r}: {original_value!r}')
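
A few spot checks of the rewritten conversions, with the expected results read off the match arms above:

    from ansible.config.manager import ensure_type

    print(ensure_type('23', 'int'))     # 23 (Decimal round-trip, zero mantissa)
    print(ensure_type('a, b', 'list'))  # ['a', 'b'] (split, strip, unquote)
    print(ensure_type(True, 'str'))     # True (the string 'True')

    try:
        ensure_type('1.5', 'int')       # non-zero mantissa
    except ValueError as ex:
        print(ex)                       # Invalid value provided for 'int': '1.5'
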
# FIXME: see if this can live in utils/path
-def resolve_path(path, basedir=None):
+def resolve_path(path: str, basedir: str | None = None) -> str:
""" resolve relative or 'variable' paths """
if '{{CWD}}' in path: # allow users to force CWD using 'magic' {{CWD}}
path = path.replace('{{CWD}}', os.getcwd())
@@ -213,18 +249,6 @@ def get_config_type(cfile):
return ftype
-# FIXME: can move to module_utils for use for ini plugins also?
-def get_ini_config_value(p, entry):
- """ returns the value of last ini entry found """
- value = None
- if p is not None:
- try:
- value = p.get(entry.get('section', 'defaults'), entry.get('key', ''), raw=True)
- except Exception: # FIXME: actually report issues here
- pass
- return value
-
-
def find_ini_config_file(warnings=None):
""" Load INI Config File order(first found is used): ENV, CWD, HOME, /etc/ansible """
# FIXME: eventually deprecate ini configs
@@ -302,12 +326,15 @@ def _add_base_defs_deprecations(base_defs):
process(entry)
-class ConfigManager(object):
+class ConfigManager:
DEPRECATED = [] # type: list[tuple[str, dict[str, str]]]
WARNINGS = set() # type: set[str]
+ _errors: list[tuple[str, Exception]]
+
def __init__(self, conf_file=None, defs_file=None):
+ self._get_ini_config_value = functools.cache(self._get_ini_config_value)
self._base_defs = {}
self._plugins = {}
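
Rebinding the bound method through `functools.cache` in `__init__` gives each manager its own memo table, rather than a class-level cache that would pin every instance (and its parsed files) in memory. The pattern in isolation:

    import functools

    class Reader:
        def __init__(self, path: str) -> None:
            self._path = path
            # wrap the *bound* method: the cache lives and dies with this instance
            self.read = functools.cache(self.read)

        def read(self, key: str) -> str:
            print(f'parsing {key} from {self._path}')  # runs once per key
            return key.upper()

    reader = Reader('/tmp/example.cfg')
    reader.read('a')  # parses
    reader.read('a')  # cache hit, no print
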
@@ -327,6 +354,9 @@ class ConfigManager(object):
# initialize parser and read config
self._parse_config_file()
+ self._errors = []
+ """Deferred errors that will be turned into warnings."""
+
# ensure we always have config def entry
self._base_defs['CONFIG_FILE'] = {'default': None, 'type': 'path'}
@@ -366,15 +396,16 @@ class ConfigManager(object):
defs = dict((k, server_config_def(server_key, k, req, value_type)) for k, req, value_type in GALAXY_SERVER_DEF)
self.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs)
- def template_default(self, value, variables):
- if isinstance(value, string_types) and (value.startswith('{{') and value.endswith('}}')) and variables is not None:
+ def template_default(self, value, variables, key_name: str = ''):
+ if isinstance(value, str) and (value.startswith('{{') and value.endswith('}}')) and variables is not None:
# template default values if possible
# NOTE: cannot use is_template due to circular dep
try:
- t = NativeEnvironment().from_string(value)
- value = t.render(variables)
- except Exception:
- pass # not templatable
+ # FIXME: This really should be using an immutable sandboxed native environment, not just native environment
+ template = NativeEnvironment().from_string(value)
+ value = template.render(variables)
+ except Exception as ex:
+ self._errors.append((f'Failed to template default for config {key_name}.', ex))
return value
def _read_config_yaml_file(self, yml_file):
@@ -477,9 +508,9 @@ class ConfigManager(object):
else:
ret = self._plugins.get(plugin_type, {}).get(name, {})
- if ignore_private:
+ if ignore_private: # ignore 'test' config entries, they should not change runtime behaviors
for cdef in list(ret.keys()):
- if cdef.startswith('_'):
+ if cdef.startswith('_Z_'):
del ret[cdef]
return ret
@@ -496,10 +527,6 @@ class ConfigManager(object):
self.WARNINGS.add(u'value for config entry {0} contains invalid characters, ignoring...'.format(to_text(name)))
continue
if temp_value is not None: # only set if entry is defined in container
- # inline vault variables should be converted to a text string
- if isinstance(temp_value, AnsibleVaultEncryptedUnicode):
- temp_value = to_text(temp_value, errors='surrogate_or_strict')
-
value = temp_value
origin = name
@@ -517,10 +544,14 @@ class ConfigManager(object):
keys=keys, variables=variables, direct=direct)
except AnsibleError:
raise
- except Exception as e:
- raise AnsibleError("Unhandled exception when retrieving %s:\n%s" % (config, to_native(e)), orig_exc=e)
+ except Exception as ex:
+ raise AnsibleError(f"Unhandled exception when retrieving {config!r}.") from ex
return value
+ def get_config_default(self, config: str, plugin_type: str | None = None, plugin_name: str | None = None) -> t.Any:
+ """Return the default value for the specified configuration."""
+ return self.get_configuration_definitions(plugin_type, plugin_name)[config]['default']
+
def get_config_value_and_origin(self, config, cfile=None, plugin_type=None, plugin_name=None, keys=None, variables=None, direct=None):
""" Given a config key figure out the actual value and report on the origin of the settings """
if cfile is None:
@@ -587,6 +618,7 @@ class ConfigManager(object):
# env vars are next precedence
if value is None and defs[config].get('env'):
value, origin = self._loop_entries(os.environ, defs[config]['env'])
+ value = _tags.TrustedAsTemplate().tag(value)
origin = 'env: %s' % origin
# try config file entries next, if we have one
@@ -601,7 +633,7 @@ class ConfigManager(object):
for entry in defs[config][ftype]:
# load from config
if ftype == 'ini':
- temp_value = get_ini_config_value(self._parsers[cfile], entry)
+ temp_value = self._get_ini_config_value(cfile, entry.get('section', 'defaults'), entry['key'])
elif ftype == 'yaml':
raise AnsibleError('YAML configuration type has not been implemented yet')
else:
@@ -625,22 +657,21 @@ class ConfigManager(object):
if value is None:
if defs[config].get('required', False):
if not plugin_type or config not in INTERNAL_DEFS.get(plugin_type, {}):
- raise AnsibleRequiredOptionError("No setting was provided for required configuration %s" %
- to_native(_get_entry(plugin_type, plugin_name, config)))
+ raise AnsibleRequiredOptionError(f"Required config {_get_config_label(plugin_type, plugin_name, config)} not provided.")
else:
origin = 'default'
- value = self.template_default(defs[config].get('default'), variables)
+ value = self.template_default(defs[config].get('default'), variables, key_name=_get_config_label(plugin_type, plugin_name, config))
+
try:
# ensure correct type, can raise exceptions on mismatched types
value = ensure_type(value, defs[config].get('type'), origin=origin, origin_ftype=origin_ftype)
- except ValueError as e:
+ except ValueError as ex:
if origin.startswith('env:') and value == '':
# this is empty env var for non string so we can set to default
origin = 'default'
value = ensure_type(defs[config].get('default'), defs[config].get('type'), origin=origin, origin_ftype=origin_ftype)
else:
- raise AnsibleOptionsError('Invalid type for configuration option %s (from %s): %s' %
- (to_native(_get_entry(plugin_type, plugin_name, config)).strip(), origin, to_native(e)))
+ raise AnsibleOptionsError(f'Config {_get_config_label(plugin_type, plugin_name, config)} from {origin!r} has an invalid value.') from ex
# deal with restricted values
if value is not None and 'choices' in defs[config] and defs[config]['choices'] is not None:
@@ -656,21 +687,24 @@ class ConfigManager(object):
if isinstance(defs[config]['choices'], Mapping):
valid = ', '.join([to_text(k) for k in defs[config]['choices'].keys()])
- elif isinstance(defs[config]['choices'], string_types):
+ elif isinstance(defs[config]['choices'], str):
valid = defs[config]['choices']
elif isinstance(defs[config]['choices'], Sequence):
valid = ', '.join([to_text(c) for c in defs[config]['choices']])
else:
valid = defs[config]['choices']
- raise AnsibleOptionsError('Invalid value "%s" for configuration option "%s", valid values are: %s' %
- (value, to_native(_get_entry(plugin_type, plugin_name, config)), valid))
+ raise AnsibleOptionsError(f'Invalid value {value!r} for config {_get_config_label(plugin_type, plugin_name, config)}.',
+ help_text=f'Valid values are: {valid}')
# deal with deprecation of the setting
if 'deprecated' in defs[config] and origin != 'default':
self.DEPRECATED.append((config, defs[config].get('deprecated')))
else:
- raise AnsibleError('Requested entry (%s) was not defined in configuration.' % to_native(_get_entry(plugin_type, plugin_name, config)))
+ raise AnsibleUndefinedConfigEntry(f'No config definition exists for {_get_config_label(plugin_type, plugin_name, config)}.')
+
+ if not _tags.Origin.is_tagged_on(value):
+ value = _tags.Origin(description=f'config entry {config!r} (origin: {origin})').tag(value)
return value, origin
@@ -681,13 +715,41 @@ class ConfigManager(object):
self._plugins[plugin_type][name] = defs
+ def _get_ini_config_value(self, config_file: str, section: str, option: str) -> t.Any:
+ """
+ Fetch `option` from the specified `section`.
+ Returns `None` if the specified `section` or `option` are not present.
+ Origin and TrustedAsTemplate tags are applied to returned values.
+
+ CAUTION: Although INI sourced configuration values are trusted for templating, that does not automatically mean they will be templated.
+ It is up to the code consuming configuration values to apply templating if required.
+ """
+ parser = self._parsers[config_file]
+ value = parser.get(section, option, raw=True, fallback=None)
+
+ if value is not None:
+ value = self._apply_tags(value, section, option)
+
+ return value
+
+ def _apply_tags(self, value: str, section: str, option: str) -> t.Any:
+ """Apply origin and trust to the given `value` sourced from the stated `section` and `option`."""
+ description = f'section {section!r} option {option!r}'
+ origin = _tags.Origin(path=self._config_file, description=description)
+ tags = [origin, _tags.TrustedAsTemplate()]
+ value = AnsibleTagHelper.tag(value, tags)
+
+ return value
+
@staticmethod
- def get_deprecated_msg_from_config(dep_docs, include_removal=False):
+ def get_deprecated_msg_from_config(dep_docs, include_removal=False, collection_name=None):
removal = ''
if include_removal:
if 'removed_at_date' in dep_docs:
removal = f"Will be removed in a release after {dep_docs['removed_at_date']}\n\t"
+ elif collection_name:
+ removal = f"Will be removed in: {collection_name} {dep_docs['removed_in']}\n\t"
else:
removal = f"Will be removed in: Ansible {dep_docs['removed_in']}\n\t"
diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py
index af60053a3dd..c2ce7e5ec9d 100644
--- a/lib/ansible/constants.py
+++ b/lib/ansible/constants.py
@@ -10,9 +10,7 @@ from string import ascii_letters, digits
from ansible.config.manager import ConfigManager
from ansible.module_utils.common.text.converters import to_text
-from ansible.module_utils.common.collections import Sequence
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
-from ansible.release import __version__
from ansible.utils.fqcn import add_internal_fqcns
# initialize config manager/config data to read/store global settings
@@ -20,68 +18,11 @@ from ansible.utils.fqcn import add_internal_fqcns
config = ConfigManager()
-def _warning(msg):
- """ display is not guaranteed here, nor it being the full class, but try anyways, fallback to sys.stderr.write """
- try:
- from ansible.utils.display import Display
- Display().warning(msg)
- except Exception:
- import sys
- sys.stderr.write(' [WARNING] %s\n' % (msg))
-
-
-def _deprecated(msg, version):
- """ display is not guaranteed here, nor it being the full class, but try anyways, fallback to sys.stderr.write """
- try:
- from ansible.utils.display import Display
- Display().deprecated(msg, version=version)
- except Exception:
- import sys
- sys.stderr.write(' [DEPRECATED] %s, to be removed in %s\n' % (msg, version))
-
-
-def handle_config_noise(display=None):
-
- if display is not None:
- w = display.warning
- d = display.deprecated
- else:
- w = _warning
- d = _deprecated
-
- while config.WARNINGS:
- warn = config.WARNINGS.pop()
- w(warn)
-
- while config.DEPRECATED:
- # tuple with name and options
- dep = config.DEPRECATED.pop(0)
- msg = config.get_deprecated_msg_from_config(dep[1])
- # use tabs only for ansible-doc?
- msg = msg.replace("\t", "")
- d(f"{dep[0]} option. {msg}", version=dep[1]['version'])
-
-
def set_constant(name, value, export=vars()):
""" sets constants and returns resolved options dict """
export[name] = value
-class _DeprecatedSequenceConstant(Sequence):
- def __init__(self, value, msg, version):
- self._value = value
- self._msg = msg
- self._version = version
-
- def __len__(self):
- _deprecated(self._msg, self._version)
- return len(self._value)
-
- def __getitem__(self, y):
- _deprecated(self._msg, self._version)
- return self._value[y]
-
-
# CONSTANTS ### yes, actual ones
# The following are hard-coded action names
@@ -119,7 +60,7 @@ COLOR_CODES = {
'magenta': u'0;35', 'bright magenta': u'1;35',
'normal': u'0',
}
-REJECT_EXTS = ('.pyc', '.pyo', '.swp', '.bak', '~', '.rpm', '.md', '.txt', '.rst')
+REJECT_EXTS = ['.pyc', '.pyo', '.swp', '.bak', '~', '.rpm', '.md', '.txt', '.rst'] # this is concatenated with other config settings as lists; cannot be tuple
BOOL_TRUE = BOOLEANS_TRUE
COLLECTION_PTYPE_COMPAT = {'module': 'modules'}
@@ -166,7 +107,6 @@ INTERNAL_STATIC_VARS = frozenset(
"inventory_hostname_short",
"groups",
"group_names",
- "omit",
"hostvars",
"playbook_dir",
"play_hosts",
@@ -246,6 +186,3 @@ MAGIC_VARIABLE_MAPPING = dict(
# POPULATE SETTINGS FROM CONFIG ###
for setting in config.get_configuration_definitions():
set_constant(setting, config.get_config_value(setting, variables=vars()))
-
-# emit any warnings or deprecations
-handle_config_noise()
diff --git a/lib/ansible/errors/__init__.py b/lib/ansible/errors/__init__.py
index 31ee4bdf1da..c866434129e 100644
--- a/lib/ansible/errors/__init__.py
+++ b/lib/ansible/errors/__init__.py
@@ -1,38 +1,35 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
-import re
-import traceback
+import collections.abc as _c
+import enum
+import types
+import typing as t
-from collections.abc import Sequence
+from json import JSONDecodeError
-from ansible.errors.yaml_strings import (
- YAML_COMMON_DICT_ERROR,
- YAML_COMMON_LEADING_TAB_ERROR,
- YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR,
- YAML_COMMON_UNBALANCED_QUOTES_ERROR,
- YAML_COMMON_UNQUOTED_COLON_ERROR,
- YAML_COMMON_UNQUOTED_VARIABLE_ERROR,
- YAML_POSITION_DETAILS,
- YAML_AND_SHORTHAND_ERROR,
-)
-from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.module_utils.common.text.converters import to_text
+from ..module_utils.datatag import native_type_name
+from ansible._internal._datatag import _tags
+from .._internal._errors import _error_utils
+from ansible.module_utils._internal import _text_utils
+
+if t.TYPE_CHECKING:
+ from ansible.plugins import loader as _t_loader
+
+
+class ExitCode(enum.IntEnum):
+ SUCCESS = 0 # used by TQM, must be bit-flag safe
+ GENERIC_ERROR = 1 # used by TQM, must be bit-flag safe
+ HOST_FAILED = 2 # TQM-sourced, must be bit-flag safe
+ HOST_UNREACHABLE = 4 # TQM-sourced, must be bit-flag safe
+ PARSER_ERROR = 4 # FIXME: CLI-sourced, conflicts with HOST_UNREACHABLE
+ INVALID_CLI_OPTION = 5
+ UNICODE_ERROR = 6 # obsolete, no longer used
+ KEYBOARD_INTERRUPT = 99
+ UNKNOWN_ERROR = 250
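
The 'bit-flag safe' notes matter because the task queue manager ORs codes together, so a run can report failed and unreachable hosts in one return code. Mirroring the values above:

    import enum

    class ExitCode(enum.IntEnum):
        HOST_FAILED = 2
        HOST_UNREACHABLE = 4

    rc = ExitCode.HOST_FAILED | ExitCode.HOST_UNREACHABLE
    print(rc)                               # 6
    print(bool(rc & ExitCode.HOST_FAILED))  # True
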
class AnsibleError(Exception):
@@ -44,257 +41,282 @@ class AnsibleError(Exception):
Usage:
- raise AnsibleError('some message here', obj=obj, show_content=True)
+ raise AnsibleError('some message here', obj=obj)
- Where "obj" is some subclass of ansible.parsing.yaml.objects.AnsibleBaseYAMLObject,
- which should be returned by the DataLoader() class.
+ Where "obj" may be tagged with Origin to provide context for error messages.
"""
- def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None):
- super(AnsibleError, self).__init__(message)
+ _exit_code = ExitCode.GENERIC_ERROR
+ _default_message = ''
+ _default_help_text: str | None = None
+ _include_cause_message = True
+ """
+ When `True`, the exception message will be augmented with cause message(s).
+ Subclasses doing complex error analysis can disable this to take responsibility for reporting cause messages as needed.
+ """
+
+ def __init__(
+ self,
+ message: str = "",
+ obj: t.Any = None,
+ show_content: bool = True,
+ suppress_extended_error: bool | types.EllipsisType = ...,
+ orig_exc: BaseException | None = None,
+ help_text: str | None = None,
+ ) -> None:
+ # DTFIX-FUTURE: these fallback cases mask incorrect use of AnsibleError.message, what should we do?
+ if message is None:
+ message = ''
+ elif not isinstance(message, str):
+ message = str(message)
+
+ if self._default_message and message:
+ message = _text_utils.concat_message(self._default_message, message)
+ elif self._default_message:
+ message = self._default_message
+ elif not message:
+ message = f'Unexpected {type(self).__name__} error.'
+
+ super().__init__(message)
self._show_content = show_content
- self._suppress_extended_error = suppress_extended_error
- self._message = to_native(message)
+ self._message = message
+ self._help_text_value = help_text or self._default_help_text
self.obj = obj
+
+ # deprecated: description='deprecate support for orig_exc, callers should use `raise ... from` only' core_version='2.23'
+ # deprecated: description='remove support for orig_exc' core_version='2.27'
self.orig_exc = orig_exc
- @property
- def message(self):
- # we import this here to prevent an import loop problem,
- # since the objects code also imports ansible.errors
- from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
+ if suppress_extended_error is not ...:
+ from ..utils.display import Display
- message = [self._message]
+ if suppress_extended_error:
+ self._show_content = False
- # Add from previous exceptions
- if self.orig_exc:
- message.append('. %s' % to_native(self.orig_exc))
+ Display().deprecated(
+ msg=f"The `suppress_extended_error` argument to `{type(self).__name__}` is deprecated.",
+ version="2.23",
+ help_text="Use `show_content=False` instead.",
+ )
- # Add from yaml to give specific file/line no
- if isinstance(self.obj, AnsibleBaseYAMLObject):
- extended_error = self._get_extended_error()
- if extended_error and not self._suppress_extended_error:
- message.append(
- '\n\n%s' % to_native(extended_error)
- )
+ @property
+ def _original_message(self) -> str:
+ return self._message
- return ''.join(message)
+ @property
+ def message(self) -> str:
+ """
+ Return the original message with cause message(s) appended.
+ The cause will not be followed on any `AnsibleError` with `_include_cause_message=False`.
+ """
+ return _error_utils.format_exception_message(self)
@message.setter
- def message(self, val):
+ def message(self, val) -> None:
self._message = val
- def __str__(self):
- return self.message
+ @property
+ def _formatted_source_context(self) -> str | None:
+ with _error_utils.RedactAnnotatedSourceContext.when(not self._show_content):
+ if source_context := _error_utils.SourceContext.from_value(self.obj):
+ return str(source_context)
- def __repr__(self):
- return self.message
+ return None
- def _get_error_lines_from_file(self, file_name, line_number):
- """
- Returns the line in the file which corresponds to the reported error
- location, as well as the line preceding it (if the error did not
- occur on the first line), to provide context to the error.
- """
+ @property
+ def _help_text(self) -> str | None:
+ return self._help_text_value
- target_line = ''
- prev_line = ''
+ @_help_text.setter
+ def _help_text(self, value: str | None) -> None:
+ self._help_text_value = value
- with open(file_name, 'r') as f:
- lines = f.readlines()
+ def __str__(self) -> str:
+ return self.message
- # In case of a YAML loading error, PyYAML will report the very last line
- # as the location of the error. Avoid an index error here in order to
- # return a helpful message.
- file_length = len(lines)
- if line_number >= file_length:
- line_number = file_length - 1
+ def __getstate__(self) -> dict[str, t.Any]:
+ """Augment object.__getstate__ to preserve additional values not represented in BaseException.__dict__."""
+ state = t.cast(dict[str, t.Any], super().__getstate__())
+ state.update(
+ args=self.args,
+ __cause__=self.__cause__,
+ __context__=self.__context__,
+ __suppress_context__=self.__suppress_context__,
+ )
- # If target_line contains only whitespace, move backwards until
- # actual code is found. If there are several empty lines after target_line,
- # the error lines would just be blank, which is not very helpful.
- target_line = lines[line_number]
- while not target_line.strip():
- line_number -= 1
- target_line = lines[line_number]
+ return state
- if line_number > 0:
- prev_line = lines[line_number - 1]
+ def __reduce__(self) -> tuple[t.Callable, tuple[type], dict[str, t.Any]]:
+ """
+ Enable copy/pickle of AnsibleError derived types by correcting for BaseException's ancient C __reduce__ impl that:
- return (target_line, prev_line)
+ * requires use of a type constructor with positional args
+ * assumes positional args are passed through from the derived type __init__ to BaseException.__init__ unmodified
+ * does not propagate args/__cause__/__context__/__suppress_context__
- def _get_extended_error(self):
+ NOTE: This does not preserve the dunder attributes on non-AnsibleError derived cause/context exceptions.
+ As a result, copy/pickle will discard chained exceptions after the first non-AnsibleError cause/context.
"""
- Given an object reporting the location of the exception in a file, return
- detailed information regarding it including:
+ return type(self).__new__, (type(self),), self.__getstate__()
- * the line which caused the error as well as the one preceding it
- * causes and suggested remedies for common syntax errors
- If this error was created with show_content=False, the reporting of content
- is suppressed, as the file contents may be sensitive (ie. vault data).
- """
+class AnsibleUndefinedConfigEntry(AnsibleError):
+ """The requested config entry is not defined."""
+
- error_message = ''
-
- try:
- (src_file, line_number, col_number) = self.obj.ansible_pos
- error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number)
- if src_file not in ('<string>', '<unicode>') and self._show_content:
- (target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1)
- target_line = to_text(target_line)
- prev_line = to_text(prev_line)
- if target_line:
- stripped_line = target_line.replace(" ", "")
-
- # Check for k=v syntax in addition to YAML syntax and set the appropriate error position,
- # arrow index
- if re.search(r'\w+(\s+)?=(\s+)?[\w/-]+', prev_line):
- error_position = prev_line.rstrip().find('=')
- arrow_line = (" " * error_position) + "^ here"
- error_message = YAML_POSITION_DETAILS % (src_file, line_number - 1, error_position + 1)
- error_message += "\nThe offending line appears to be:\n\n%s\n%s\n\n" % (prev_line.rstrip(), arrow_line)
- error_message += YAML_AND_SHORTHAND_ERROR
- else:
- arrow_line = (" " * (col_number - 1)) + "^ here"
- error_message += "\nThe offending line appears to be:\n\n%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line)
-
- # TODO: There may be cases where there is a valid tab in a line that has other errors.
- if '\t' in target_line:
- error_message += YAML_COMMON_LEADING_TAB_ERROR
- # common error/remediation checking here:
- # check for unquoted vars starting lines
- if ('{{' in target_line and '}}' in target_line) and ('"{{' not in target_line or "'{{" not in target_line):
- error_message += YAML_COMMON_UNQUOTED_VARIABLE_ERROR
- # check for common dictionary mistakes
- elif ":{{" in stripped_line and "}}" in stripped_line:
- error_message += YAML_COMMON_DICT_ERROR
- # check for common unquoted colon mistakes
- elif (len(target_line) and
- len(target_line) > 1 and
- len(target_line) > col_number and
- target_line[col_number] == ":" and
- target_line.count(':') > 1):
- error_message += YAML_COMMON_UNQUOTED_COLON_ERROR
- # otherwise, check for some common quoting mistakes
- else:
- # FIXME: This needs to split on the first ':' to account for modules like lineinfile
- # that may have lines that contain legitimate colons, e.g., line: 'i ALL= (ALL) NOPASSWD: ALL'
- # and throw off the quote matching logic.
- parts = target_line.split(":")
- if len(parts) > 1:
- middle = parts[1].strip()
- match = False
- unbalanced = False
-
- if middle.startswith("'") and not middle.endswith("'"):
- match = True
- elif middle.startswith('"') and not middle.endswith('"'):
- match = True
-
- if (len(middle) > 0 and
- middle[0] in ['"', "'"] and
- middle[-1] in ['"', "'"] and
- target_line.count("'") > 2 or
- target_line.count('"') > 2):
- unbalanced = True
-
- if match:
- error_message += YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR
- if unbalanced:
- error_message += YAML_COMMON_UNBALANCED_QUOTES_ERROR
-
- except (IOError, TypeError):
- error_message += '\n(could not open file to display line)'
- except IndexError:
- error_message += '\n(specified line no longer in file, maybe it changed?)'
-
- return error_message
+class AnsibleTaskError(AnsibleError):
+ """Task execution failed; provides contextual information about the task."""
+
+ _default_message = 'Task failed.'
class AnsiblePromptInterrupt(AnsibleError):
- """User interrupt"""
+ """User interrupt."""
class AnsiblePromptNoninteractive(AnsibleError):
- """Unable to get user input"""
+ """Unable to get user input."""
class AnsibleAssertionError(AnsibleError, AssertionError):
- """Invalid assertion"""
- pass
+ """Invalid assertion."""
class AnsibleOptionsError(AnsibleError):
- """ bad or incomplete options passed """
- pass
+ """Invalid options were passed."""
+
+ # FIXME: This exception is used for many non-CLI related errors.
+ # The few cases which are CLI related should really be handled by argparse instead, at which point the exit code here can be removed.
+ _exit_code = ExitCode.INVALID_CLI_OPTION
class AnsibleRequiredOptionError(AnsibleOptionsError):
- """ bad or incomplete options passed """
- pass
+ """Bad or incomplete options passed."""
class AnsibleParserError(AnsibleError):
- """ something was detected early that is wrong about a playbook or data file """
- pass
+ """A playbook or data file could not be parsed."""
+
+ _exit_code = ExitCode.PARSER_ERROR
+
+
+class AnsibleFieldAttributeError(AnsibleParserError):
+ """Errors caused during field attribute processing."""
+
+
+class AnsibleJSONParserError(AnsibleParserError):
+ """JSON-specific parsing failure wrapping an exception raised by the JSON parser."""
+
+ _default_message = 'JSON parsing failed.'
+ _include_cause_message = False # hide the underlying cause message, it's included by `handle_exception` as needed
+
+ @classmethod
+ def handle_exception(cls, exception: Exception, origin: _tags.Origin) -> t.NoReturn:
+ if isinstance(exception, JSONDecodeError):
+ origin = origin.replace(line_num=exception.lineno, col_num=exception.colno)
+
+ message = str(exception)
+
+ error = cls(message, obj=origin)
+
+ raise error from exception
class AnsibleInternalError(AnsibleError):
- """ internal safeguards tripped, something happened in the code that should never happen """
- pass
+ """Internal safeguards tripped, something happened in the code that should never happen."""
class AnsibleRuntimeError(AnsibleError):
- """ ansible had a problem while running a playbook """
- pass
+ """Ansible had a problem while running a playbook."""
class AnsibleModuleError(AnsibleRuntimeError):
- """ a module failed somehow """
- pass
+ """A module failed somehow."""
+
+
+class AnsibleConnectionFailure(AnsibleRuntimeError, _error_utils.ContributesToTaskResult):
+ """
+ The transport / connection_plugin had a fatal error.
+ This exception provides a result dictionary via the ContributesToTaskResult mixin.
+ """
+
+ @property
+ def result_contribution(self) -> t.Mapping[str, object]:
+ return dict(unreachable=True)
-class AnsibleConnectionFailure(AnsibleRuntimeError):
- """ the transport / connection_plugin had a fatal error """
- pass
+ @property
+ def omit_failed_key(self) -> bool:
+ return True
class AnsibleAuthenticationFailure(AnsibleConnectionFailure):
- """invalid username/password/key"""
- pass
+ """Invalid username/password/key."""
+
+ _default_message = "Failed to authenticate."
class AnsibleCallbackError(AnsibleRuntimeError):
- """ a callback failure """
- pass
+ """A callback failure."""
class AnsibleTemplateError(AnsibleRuntimeError):
- """A template related error"""
- pass
+ """A template related error."""
+
+
+class TemplateTrustCheckFailedError(AnsibleTemplateError):
+ """Raised when processing was requested on an untrusted template or expression."""
+
+ _default_message = 'Encountered untrusted template or expression.'
+ _default_help_text = ('Templates and expressions must be defined by trusted sources such as playbooks or roles, '
+ 'not untrusted sources such as module results.')
+
+
+class AnsibleTemplateTransformLimitError(AnsibleTemplateError):
+ """The internal template transform limit was exceeded."""
+
+ _default_message = "Template transform limit exceeded."
+
+class AnsibleTemplateSyntaxError(AnsibleTemplateError):
+ """A syntax error was encountered while parsing a Jinja template or expression."""
-class AnsibleFilterError(AnsibleTemplateError):
- """ a templating failure """
- pass
+class AnsibleBrokenConditionalError(AnsibleTemplateError):
+ """A broken conditional with non-boolean result was used."""
-class AnsibleLookupError(AnsibleTemplateError):
- """ a lookup failure """
- pass
+ _default_help_text = 'Broken conditionals can be temporarily allowed with the `ALLOW_BROKEN_CONDITIONALS` configuration option.'
class AnsibleUndefinedVariable(AnsibleTemplateError):
- """ a templating failure """
- pass
+ """An undefined variable was encountered while processing a template or expression."""
+
+
+class AnsibleValueOmittedError(AnsibleTemplateError):
+ """
+ Raised when the result of a template operation was the Omit singleton. Uncaught errors of this
+ type always indicate a bug in the code that requested templating.
+ """
+
+ _default_message = "A template was resolved to an Omit scalar."
+ _default_help_text = "Callers must be prepared to handle this value. This is most likely a bug in the code requesting templating."
+
+
+class AnsibleTemplatePluginError(AnsibleTemplateError):
+ """An error sourced by a template plugin (lookup/filter/test)."""
+
+
+# deprecated: description='add deprecation warnings for these aliases' core_version='2.23'
+AnsibleFilterError = AnsibleTemplatePluginError
+AnsibleLookupError = AnsibleTemplatePluginError
class AnsibleFileNotFound(AnsibleRuntimeError):
- """ a file missing failure """
+ """A file missing failure."""
- def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, paths=None, file_name=None):
+ def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=..., orig_exc=None, paths=None, file_name=None):
self.file_name = file_name
self.paths = paths
@@ -306,7 +328,7 @@ class AnsibleFileNotFound(AnsibleRuntimeError):
else:
message += "Could not find file"
- if self.paths and isinstance(self.paths, Sequence):
+ if self.paths and isinstance(self.paths, _c.Sequence):
searched = to_text('\n\t'.join(self.paths))
if message:
message += "\n"
@@ -318,71 +340,143 @@ class AnsibleFileNotFound(AnsibleRuntimeError):
suppress_extended_error=suppress_extended_error, orig_exc=orig_exc)
-# These Exceptions are temporary, using them as flow control until we can get a better solution.
-# DO NOT USE as they will probably be removed soon.
-# We will port the action modules in our tree to use a context manager instead.
-class AnsibleAction(AnsibleRuntimeError):
- """ Base Exception for Action plugin flow control """
+class AnsibleAction(AnsibleRuntimeError, _error_utils.ContributesToTaskResult):
+ """Base Exception for Action plugin flow control."""
- def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None):
+ def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=..., orig_exc=None, result=None):
+ super().__init__(message=message, obj=obj, show_content=show_content, suppress_extended_error=suppress_extended_error, orig_exc=orig_exc)
- super(AnsibleAction, self).__init__(message=message, obj=obj, show_content=show_content,
- suppress_extended_error=suppress_extended_error, orig_exc=orig_exc)
- if result is None:
- self.result = {}
- else:
- self.result = result
+ self._result = result or {}
+
+ @property
+ def result_contribution(self) -> _c.Mapping[str, object]:
+ return self._result
+
+ @property
+ def result(self) -> dict[str, object]:
+ """Backward compatibility property returning a mutable dictionary."""
+ return dict(self.result_contribution)
class AnsibleActionSkip(AnsibleAction):
- """ an action runtime skip"""
+ """
+ An action runtime skip.
+
+ This exception provides a result dictionary via the ContributesToTaskResult mixin.
+ """
+
+ @property
+ def result_contribution(self) -> _c.Mapping[str, object]:
+ return self._result | dict(
+ skipped=True,
+ msg=self.message,
+ )
- def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None):
- super(AnsibleActionSkip, self).__init__(message=message, obj=obj, show_content=show_content,
- suppress_extended_error=suppress_extended_error, orig_exc=orig_exc, result=result)
- self.result.update({'skipped': True, 'msg': message})
+ @property
+ def omit_failed_key(self) -> bool:
+ return True
+
+ @property
+ def omit_exception_key(self) -> bool:
+ return True
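
The mixin turns flow-control exceptions into result dictionaries; raising the skip exception yields, via the backward-compatible `result` property:

    from ansible.errors import AnsibleActionSkip

    try:
        raise AnsibleActionSkip('nothing to do on this host')
    except AnsibleActionSkip as ex:
        print(ex.result)  # {'skipped': True, 'msg': 'nothing to do on this host'}
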
class AnsibleActionFail(AnsibleAction):
- """ an action runtime failure"""
- def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None):
- super(AnsibleActionFail, self).__init__(message=message, obj=obj, show_content=show_content,
- suppress_extended_error=suppress_extended_error, orig_exc=orig_exc, result=result)
- self.result.update({'failed': True, 'msg': message, 'exception': traceback.format_exc()})
+ """
+ An action runtime failure.
+ This exception provides a result dictionary via the ContributesToTaskResult mixin.
+ """
-class _AnsibleActionDone(AnsibleAction):
- """ an action runtime early exit"""
- pass
+ @property
+ def result_contribution(self) -> _c.Mapping[str, object]:
+ return self._result | dict(
+ failed=True,
+ msg=self.message,
+ )
+
+
+class _ActionDone(AnsibleAction):
+ """
+ Imports as `_AnsibleActionDone` are deprecated. An action runtime early exit.
+
+ This exception provides a result dictionary via the ContributesToTaskResult mixin.
+ """
+
+ @property
+ def omit_failed_key(self) -> bool:
+ return not self._result.get('failed')
+
+ @property
+ def omit_exception_key(self) -> bool:
+ return not self._result.get('failed')
class AnsiblePluginError(AnsibleError):
- """ base class for Ansible plugin-related errors that do not need AnsibleError contextual data """
- def __init__(self, message=None, plugin_load_context=None):
- super(AnsiblePluginError, self).__init__(message)
+ """Base class for Ansible plugin-related errors that do not need AnsibleError contextual data."""
+
+ def __init__(self, message: str | None = None, plugin_load_context: _t_loader.PluginLoadContext | None = None, help_text: str | None = None) -> None:
+ super(AnsiblePluginError, self).__init__(message, help_text=help_text)
+
self.plugin_load_context = plugin_load_context
class AnsiblePluginRemovedError(AnsiblePluginError):
- """ a requested plugin has been removed """
- pass
+ """A requested plugin has been removed."""
class AnsiblePluginCircularRedirect(AnsiblePluginError):
- """a cycle was detected in plugin redirection"""
- pass
+ """A cycle was detected in plugin redirection."""
class AnsibleCollectionUnsupportedVersionError(AnsiblePluginError):
- """a collection is not supported by this version of Ansible"""
- pass
+ """A collection is not supported by this version of Ansible."""
-class AnsibleFilterTypeError(AnsibleTemplateError, TypeError):
- """ a Jinja filter templating failure due to bad type"""
- pass
+class AnsibleTypeError(AnsibleRuntimeError, TypeError):
+ """Ansible-augmented TypeError subclass."""
class AnsiblePluginNotFound(AnsiblePluginError):
- """ Indicates we did not find an Ansible plugin """
- pass
+ """Indicates we did not find an Ansible plugin."""
+
+
+class AnsibleConditionalError(AnsibleRuntimeError):
+ """Errors related to failed conditional expression evaluation."""
+
+
+class AnsibleVariableTypeError(AnsibleRuntimeError):
+ """An error due to attempted storage of an unsupported variable type."""
+
+ @classmethod
+ def from_value(cls, *, obj: t.Any) -> t.Self:
+ # avoid an incorrect error message when `obj` is a type
+ type_name = type(obj).__name__ if isinstance(obj, type) else native_type_name(obj)
+
+ return cls(message=f'Type {type_name!r} is unsupported for variable storage.', obj=obj)
+
+
+def __getattr__(name: str) -> t.Any:
+ """Inject import-time deprecation warnings."""
+ from ..utils.display import Display
+
+ match name:
+ case 'AnsibleFilterTypeError':
+ Display().deprecated(
+ msg=f"Importing {name!r} is deprecated.",
+ help_text=f"Import {AnsibleTypeError.__name__!r} instead.",
+ version="2.23",
+ )
+
+ return AnsibleTypeError
+
+ case '_AnsibleActionDone':
+ Display().deprecated(
+ msg=f"Importing {name!r} is deprecated.",
+ help_text="Return directly from action plugins instead.",
+ version="2.23",
+ )
+
+ return _ActionDone
+
+ raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
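The shim above relies on PEP 562: a module-level `__getattr__` fires only when normal attribute lookup fails, so current imports pay nothing while legacy names keep working and warn on access. A minimal standalone sketch of the same pattern, with illustrative names rather than ansible-core APIs:

    # errors.py - PEP 562 deprecation shim (illustrative sketch, not ansible-core code)
    import warnings


    class NewError(Exception):
        """The replacement class callers should import."""


    def __getattr__(name: str):
        # Invoked only when normal module attribute lookup fails.
        if name == 'OldError':
            warnings.warn(
                "Importing 'OldError' is deprecated; import 'NewError' instead.",
                DeprecationWarning,
                stacklevel=2,
            )
            return NewError  # the legacy name resolves to its replacement

        raise AttributeError(f'module {__name__!r} has no attribute {name!r}')

Both `import errors; errors.OldError` and `from errors import OldError` route through the hook, which is how `AnsibleFilterTypeError` and `_AnsibleActionDone` stay importable while steering callers toward the new names.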
diff --git a/lib/ansible/errors/yaml_strings.py b/lib/ansible/errors/yaml_strings.py
deleted file mode 100644
index cc5cfb6c45a..00000000000
--- a/lib/ansible/errors/yaml_strings.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import annotations
-
-__all__ = [
- 'YAML_SYNTAX_ERROR',
- 'YAML_POSITION_DETAILS',
- 'YAML_COMMON_DICT_ERROR',
- 'YAML_COMMON_UNQUOTED_VARIABLE_ERROR',
- 'YAML_COMMON_UNQUOTED_COLON_ERROR',
- 'YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR',
- 'YAML_COMMON_UNBALANCED_QUOTES_ERROR',
-]
-
-YAML_SYNTAX_ERROR = """\
-Syntax Error while loading YAML.
- %s"""
-
-YAML_POSITION_DETAILS = """\
-The error appears to be in '%s': line %s, column %s, but may
-be elsewhere in the file depending on the exact syntax problem.
-"""
-
-YAML_COMMON_DICT_ERROR = """\
-This one looks easy to fix. YAML thought it was looking for the start of a
-hash/dictionary and was confused to see a second "{". Most likely this was
-meant to be an ansible template evaluation instead, so we have to give the
-parser a small hint that we wanted a string instead. The solution here is to
-just quote the entire value.
-
-For instance, if the original line was:
-
- app_path: {{ base_path }}/foo
-
-It should be written as:
-
- app_path: "{{ base_path }}/foo"
-"""
-
-YAML_COMMON_UNQUOTED_VARIABLE_ERROR = """\
-We could be wrong, but this one looks like it might be an issue with
-missing quotes. Always quote template expression brackets when they
-start a value. For instance:
-
- with_items:
- - {{ foo }}
-
-Should be written as:
-
- with_items:
- - "{{ foo }}"
-"""
-
-YAML_COMMON_UNQUOTED_COLON_ERROR = """\
-This one looks easy to fix. There seems to be an extra unquoted colon in the line
-and this is confusing the parser. It was only expecting to find one free
-colon. The solution is just add some quotes around the colon, or quote the
-entire line after the first colon.
-
-For instance, if the original line was:
-
- copy: src=file.txt dest=/path/filename:with_colon.txt
-
-It can be written as:
-
- copy: src=file.txt dest='/path/filename:with_colon.txt'
-
-Or:
-
- copy: 'src=file.txt dest=/path/filename:with_colon.txt'
-"""
-
-YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR = """\
-This one looks easy to fix. It seems that there is a value started
-with a quote, and the YAML parser is expecting to see the line ended
-with the same kind of quote. For instance:
-
- when: "ok" in result.stdout
-
-Could be written as:
-
- when: '"ok" in result.stdout'
-
-Or equivalently:
-
- when: "'ok' in result.stdout"
-"""
-
-YAML_COMMON_UNBALANCED_QUOTES_ERROR = """\
-We could be wrong, but this one looks like it might be an issue with
-unbalanced quotes. If starting a value with a quote, make sure the
-line ends with the same set of quotes. For instance this arbitrary
-example:
-
- foo: "bad" "wolf"
-
-Could be written as:
-
- foo: '"bad" "wolf"'
-"""
-
-YAML_COMMON_LEADING_TAB_ERROR = """\
-There appears to be a tab character at the start of the line.
-
-YAML does not use tabs for formatting. Tabs should be replaced with spaces.
-
-For example:
- - name: update tooling
- vars:
- version: 1.2.3
-# ^--- there is a tab there.
-
-Should be written as:
- - name: update tooling
- vars:
- version: 1.2.3
-# ^--- all spaces here.
-"""
-
-YAML_AND_SHORTHAND_ERROR = """\
-There appears to be both 'k=v' shorthand syntax and YAML in this task. \
-Only one syntax may be used.
-"""
diff --git a/lib/ansible/executor/action_write_locks.py b/lib/ansible/executor/action_write_locks.py
deleted file mode 100644
index d2acae9b6ff..00000000000
--- a/lib/ansible/executor/action_write_locks.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# (c) 2016 - Red Hat, Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import annotations
-
-import multiprocessing.synchronize
-
-from multiprocessing import Lock
-
-from ansible.module_utils.facts.system.pkg_mgr import PKG_MGRS
-
-if 'action_write_locks' not in globals():
- # Do not initialize this more than once because it seems to bash
- # the existing one. multiprocessing must be reloading the module
- # when it forks?
- action_write_locks: dict[str | None, multiprocessing.synchronize.Lock] = dict()
-
- # Below is a Lock for use when we weren't expecting a named module. It gets used when an action
- # plugin invokes a module whose name does not match with the action's name. Slightly less
- # efficient as all processes with unexpected module names will wait on this lock
- action_write_locks[None] = Lock()
-
- # These plugins are known to be called directly by action plugins with names differing from the
- # action plugin name. We precreate them here as an optimization.
- # If a list of service managers is created in the future we can do the same for them.
- mods = set(p['name'] for p in PKG_MGRS)
-
- mods.update(('copy', 'file', 'setup', 'slurp', 'stat'))
- for mod_name in mods:
- action_write_locks[mod_name] = Lock()
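The deleted module pre-created one `multiprocessing.Lock` per well-known module name, with a shared fallback lock under the `None` key. A minimal sketch of that pattern, assuming fork-based workers so children inherit the locks:

    # Per-name write-lock pattern from the deleted module (sketch).
    import multiprocessing
    import multiprocessing.synchronize

    # One lock per expected name; None is the shared fallback for unexpected names.
    write_locks: dict[str | None, multiprocessing.synchronize.Lock] = {None: multiprocessing.Lock()}

    for known_name in ('copy', 'file', 'setup', 'slurp', 'stat'):
        write_locks[known_name] = multiprocessing.Lock()


    def write_module_payload(name: str, path: str, data: bytes) -> None:
        # Unexpected names serialize on the shared fallback lock: correct, just slower.
        with write_locks.get(name, write_locks[None]):
            with open(path, 'wb') as payload_file:
                payload_file.write(data)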
diff --git a/lib/ansible/executor/discovery/python_target.py b/lib/ansible/executor/discovery/python_target.py
deleted file mode 100644
index f66588dedc5..00000000000
--- a/lib/ansible/executor/discovery/python_target.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright: (c) 2018 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# FUTURE: this could be swapped out for our bundled version of distro to move more complete platform
-# logic to the targets, so long as we maintain Py2.6 compat and don't need to do any kind of script assembly
-
-from __future__ import annotations
-
-import json
-import platform
-import io
-import os
-
-
-def read_utf8_file(path, encoding='utf-8'):
- if not os.access(path, os.R_OK):
- return None
- with io.open(path, 'r', encoding=encoding) as fd:
- content = fd.read()
-
- return content
-
-
-def get_platform_info():
- result = dict(platform_dist_result=[])
-
- if hasattr(platform, 'dist'):
- result['platform_dist_result'] = platform.dist()
-
- osrelease_content = read_utf8_file('/etc/os-release')
- # try to fall back to /usr/lib/os-release
- if not osrelease_content:
- osrelease_content = read_utf8_file('/usr/lib/os-release')
-
- result['osrelease_content'] = osrelease_content
-
- return result
-
-
-def main():
- info = get_platform_info()
-
- print(json.dumps(info))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/executor/interpreter_discovery.py b/lib/ansible/executor/interpreter_discovery.py
index 24b2174d3c8..bf168f922e2 100644
--- a/lib/ansible/executor/interpreter_discovery.py
+++ b/lib/ansible/executor/interpreter_discovery.py
@@ -3,25 +3,17 @@
from __future__ import annotations
-import bisect
-import json
-import pkgutil
import re
from ansible import constants as C
from ansible.errors import AnsibleError
-from ansible.module_utils.common.text.converters import to_native, to_text
-from ansible.module_utils.distro import LinuxDistribution
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_versioned_doclink
-from ansible.module_utils.compat.version import LooseVersion
-from ansible.module_utils.facts.system.distribution import Distribution
-from traceback import format_exc
-OS_FAMILY_LOWER = {k.lower(): v.lower() for k, v in Distribution.OS_FAMILY.items()}
+_FALLBACK_INTERPRETER = '/usr/bin/python3'
display = Display()
-foundre = re.compile(r'(?s)PLATFORM[\r\n]+(.*)FOUND(.*)ENDFOUND')
+foundre = re.compile(r'FOUND(.*)ENDFOUND', flags=re.DOTALL)
class InterpreterDiscoveryRequiredError(Exception):
@@ -30,42 +22,28 @@ class InterpreterDiscoveryRequiredError(Exception):
self.interpreter_name = interpreter_name
self.discovery_mode = discovery_mode
- def __str__(self):
- return self.message
-
- def __repr__(self):
- # TODO: proper repr impl
- return self.message
-
def discover_interpreter(action, interpreter_name, discovery_mode, task_vars):
- # interpreter discovery is a 2-step process with the target. First, we use a simple shell-agnostic bootstrap to
- # get the system type from uname, and find any random Python that can get us the info we need. For supported
- # target OS types, we'll dispatch a Python script that calls platform.dist() (for older platforms, where available)
- # and brings back /etc/os-release (if present). The proper Python path is looked up in a table of known
- # distros/versions with included Pythons; if nothing is found, depending on the discovery mode, either the
- # default fallback of /usr/bin/python is used (if we know it's there), or discovery fails.
-
- # FUTURE: add logical equivalence for "python3" in the case of py3-only modules?
- if interpreter_name != 'python':
- raise ValueError('Interpreter discovery not supported for {0}'.format(interpreter_name))
-
+ """Probe the target host for a Python interpreter from the `INTERPRETER_PYTHON_FALLBACK` list, returning the first found or `/usr/bin/python3` if none."""
host = task_vars.get('inventory_hostname', 'unknown')
res = None
- platform_type = 'unknown'
- found_interpreters = [u'/usr/bin/python3'] # fallback value
- is_auto_legacy = discovery_mode.startswith('auto_legacy')
+ found_interpreters = [_FALLBACK_INTERPRETER] # fallback value
is_silent = discovery_mode.endswith('_silent')
+ if discovery_mode.startswith('auto_legacy'):
+ display.deprecated(
+ msg=f"The '{discovery_mode}' option for 'INTERPRETER_PYTHON' now has the same effect as 'auto'.",
+ version='2.21',
+ )
+
try:
- platform_python_map = C.config.get_config_value('_INTERPRETER_PYTHON_DISTRO_MAP', variables=task_vars)
bootstrap_python_list = C.config.get_config_value('INTERPRETER_PYTHON_FALLBACK', variables=task_vars)
- display.vvv(msg=u"Attempting {0} interpreter discovery".format(interpreter_name), host=host)
+ display.vvv(msg=f"Attempting {interpreter_name} interpreter discovery.", host=host)
# not all command -v impls accept a list of commands, so we have to call it once per python
command_list = ["command -v '%s'" % py for py in bootstrap_python_list]
- shell_bootstrap = "echo PLATFORM; uname; echo FOUND; {0}; echo ENDFOUND".format('; '.join(command_list))
+ shell_bootstrap = "echo FOUND; {0}; echo ENDFOUND".format('; '.join(command_list))
# FUTURE: in most cases we probably don't want to use become, but maybe sometimes we do?
res = action._low_level_execute_command(shell_bootstrap, sudoable=False)
@@ -78,131 +56,32 @@ def discover_interpreter(action, interpreter_name, discovery_mode, task_vars):
display.debug(u'raw interpreter discovery output: {0}'.format(raw_stdout), host=host)
raise ValueError('unexpected output from Python interpreter discovery')
- platform_type = match.groups()[0].lower().strip()
-
- found_interpreters = [interp.strip() for interp in match.groups()[1].splitlines() if interp.startswith('/')]
+ found_interpreters = [interp.strip() for interp in match.groups()[0].splitlines() if interp.startswith('/')]
display.debug(u"found interpreters: {0}".format(found_interpreters), host=host)
if not found_interpreters:
if not is_silent:
- action._discovery_warnings.append(u'No python interpreters found for '
- u'host {0} (tried {1})'.format(host, bootstrap_python_list))
- # this is lame, but returning None or throwing an exception is uglier
- return u'/usr/bin/python3'
-
- if platform_type != 'linux':
- raise NotImplementedError('unsupported platform for extended discovery: {0}'.format(to_native(platform_type)))
-
- platform_script = pkgutil.get_data('ansible.executor.discovery', 'python_target.py')
-
- # FUTURE: respect pipelining setting instead of just if the connection supports it?
- if action._connection.has_pipelining:
- res = action._low_level_execute_command(found_interpreters[0], sudoable=False, in_data=platform_script)
- else:
- # FUTURE: implement on-disk case (via script action or ?)
- raise NotImplementedError('pipelining support required for extended interpreter discovery')
-
- platform_info = json.loads(res.get('stdout'))
-
- distro, version = _get_linux_distro(platform_info)
- if not distro or not version:
- raise NotImplementedError('unable to get Linux distribution/version info')
-
- family = OS_FAMILY_LOWER.get(distro.lower().strip())
-
- version_map = platform_python_map.get(distro.lower().strip()) or platform_python_map.get(family)
- if not version_map:
- raise NotImplementedError('unsupported Linux distribution: {0}'.format(distro))
-
- platform_interpreter = to_text(_version_fuzzy_match(version, version_map), errors='surrogate_or_strict')
-
- # provide a transition period for hosts that were using /usr/bin/python previously (but shouldn't have been)
- if is_auto_legacy:
- if platform_interpreter != u'/usr/bin/python3' and u'/usr/bin/python3' in found_interpreters:
- if not is_silent:
- action._discovery_warnings.append(
- u"Distribution {0} {1} on host {2} should use {3}, but is using "
- u"/usr/bin/python3 for backward compatibility with prior Ansible releases. "
- u"See {4} for more information"
- .format(distro, version, host, platform_interpreter,
- get_versioned_doclink('reference_appendices/interpreter_discovery.html')))
- return u'/usr/bin/python3'
-
- if platform_interpreter not in found_interpreters:
- if platform_interpreter not in bootstrap_python_list:
- # sanity check to make sure we looked for it
- if not is_silent:
- action._discovery_warnings \
- .append(u"Platform interpreter {0} on host {1} is missing from bootstrap list"
- .format(platform_interpreter, host))
+ display.warning(msg=f'No python interpreters found for host {host!r} (tried {bootstrap_python_list!r}).')
- if not is_silent:
- action._discovery_warnings \
- .append(u"Distribution {0} {1} on host {2} should use {3}, but is using {4}, since the "
- u"discovered platform python interpreter was not present. See {5} "
- u"for more information."
- .format(distro, version, host, platform_interpreter, found_interpreters[0],
- get_versioned_doclink('reference_appendices/interpreter_discovery.html')))
- return found_interpreters[0]
-
- return platform_interpreter
- except NotImplementedError as ex:
- display.vvv(msg=u'Python interpreter discovery fallback ({0})'.format(to_text(ex)), host=host)
+ # this is lame, but returning None or throwing an exception is uglier
+ return _FALLBACK_INTERPRETER
except AnsibleError:
raise
except Exception as ex:
if not is_silent:
- display.warning(msg=u'Unhandled error in Python interpreter discovery for host {0}: {1}'.format(host, to_text(ex)))
- display.debug(msg=u'Interpreter discovery traceback:\n{0}'.format(to_text(format_exc())), host=host)
- if res and res.get('stderr'):
- display.vvv(msg=u'Interpreter discovery remote stderr:\n{0}'.format(to_text(res.get('stderr'))), host=host)
-
- if not is_silent:
- action._discovery_warnings \
- .append(u"Platform {0} on host {1} is using the discovered Python interpreter at {2}, but future installation of "
- u"another Python interpreter could change the meaning of that path. See {3} "
- u"for more information."
- .format(platform_type, host, found_interpreters[0],
- get_versioned_doclink('reference_appendices/interpreter_discovery.html')))
- return found_interpreters[0]
-
+ display.error_as_warning(msg=f'Unhandled error in Python interpreter discovery for host {host!r}.', exception=ex)
-def _get_linux_distro(platform_info):
- dist_result = platform_info.get('platform_dist_result', [])
+ if res and res.get('stderr'): # the current ssh plugin implementation always has stderr, making coverage of the false case difficult
+ display.vvv(msg=f"Interpreter discovery remote stderr:\n{res.get('stderr')}", host=host)
- if len(dist_result) == 3 and any(dist_result):
- return dist_result[0], dist_result[1]
-
- osrelease_content = platform_info.get('osrelease_content')
-
- if not osrelease_content:
- return u'', u''
-
- osr = LinuxDistribution._parse_os_release_content(osrelease_content)
-
- return osr.get('id', u''), osr.get('version_id', u'')
-
-
-def _version_fuzzy_match(version, version_map):
- # try exact match first
- res = version_map.get(version)
- if res:
- return res
-
- sorted_looseversions = sorted([LooseVersion(v) for v in version_map.keys()])
-
- find_looseversion = LooseVersion(version)
-
- # slot match; return nearest previous version we're newer than
- kpos = bisect.bisect(sorted_looseversions, find_looseversion)
-
- if kpos == 0:
- # older than everything in the list, return the oldest version
- # TODO: warning-worthy?
- return version_map.get(sorted_looseversions[0].vstring)
-
- # TODO: is "past the end of the list" warning-worthy too (at least if it's not a major version match)?
+ if not is_silent:
+ display.warning(
+ msg=(
+ f"Host {host!r} is using the discovered Python interpreter at {found_interpreters[0]!r}, "
+ "but future installation of another Python interpreter could cause a different interpreter to be discovered."
+ ),
+ help_text=f"See {get_versioned_doclink('reference_appendices/interpreter_discovery.html')} for more information.",
+ )
- # return the next-oldest entry that we're newer than...
- return version_map.get(sorted_looseversions[kpos - 1].vstring)
+ return found_interpreters[0]
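The simplified bootstrap now needs a single marker pair: `echo FOUND`, one `command -v` per candidate interpreter, `echo ENDFOUND`. An offline sketch of how that output is parsed with the new `foundre` pattern (the sample stdout is fabricated):

    # Parsing fabricated bootstrap output with the simplified regex.
    import re

    foundre = re.compile(r'FOUND(.*)ENDFOUND', flags=re.DOTALL)

    raw_stdout = 'FOUND\n/usr/bin/python3.12\n/usr/bin/python3\nENDFOUND\n'

    match = foundre.search(raw_stdout)
    found = [interp.strip() for interp in match.groups()[0].splitlines() if interp.startswith('/')]

    print(found)  # ['/usr/bin/python3.12', '/usr/bin/python3']

Lines that are not absolute paths (for example, diagnostics from a shell whose `command -v` prints error text) are filtered out by the `startswith('/')` check.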
diff --git a/lib/ansible/executor/module_common.py b/lib/ansible/executor/module_common.py
index d4c2eab600f..150ed9acc79 100644
--- a/lib/ansible/executor/module_common.py
+++ b/lib/ansible/executor/module_common.py
@@ -20,42 +20,79 @@ from __future__ import annotations
import ast
import base64
+import dataclasses
import datetime
import json
import os
+import pathlib
+import pickle
import shlex
-import time
import zipfile
import re
import pkgutil
+import types
+import typing as t
from ast import AST, Import, ImportFrom
from io import BytesIO
+from ansible._internal import _locking
+from ansible._internal._ansiballz import _builder
+from ansible._internal import _ansiballz
+from ansible._internal._datatag import _utils
+from ansible.module_utils._internal import _dataclass_validation
+from ansible.module_utils.common.yaml import yaml_load
+from ansible.module_utils.datatag import deprecator_from_collection_name
+from ansible._internal._datatag._tags import Origin
+from ansible.module_utils.common.json import Direction, get_module_encoder
from ansible.release import __version__, __author__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.interpreter_discovery import InterpreterDiscoveryRequiredError
from ansible.executor.powershell import module_manifest as ps_manifest
-from ansible.module_utils.common.json import AnsibleJSONEncoder
from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native
+from ansible.plugins.become import BecomeBase
from ansible.plugins.loader import module_utils_loader
+from ansible._internal._templating._engine import TemplateOptions, TemplateEngine
+from ansible.template import Templar
from ansible.utils.collection_loader._collection_finder import _get_collection_metadata, _nested_dict_get
+from ansible.module_utils._internal import _json
+from ansible.module_utils._internal._ansiballz import _loader
+from ansible.module_utils import basic as _basic
-# Must import strategy and use write_locks from there
-# If we import write_locks directly then we end up binding a
-# variable to the object and then it never gets updated.
-from ansible.executor import action_write_locks
+if t.TYPE_CHECKING:
+ from ansible import template as _template
+ from ansible.playbook.task import Task
from ansible.utils.display import Display
-from collections import namedtuple
import importlib.util
import importlib.machinery
display = Display()
-ModuleUtilsProcessEntry = namedtuple('ModuleUtilsProcessEntry', ['name_parts', 'is_ambiguous', 'has_redirected_child', 'is_optional'])
+
+@dataclasses.dataclass(frozen=True, order=True)
+class _ModuleUtilsProcessEntry:
+ """Represents a module/module_utils item awaiting import analysis."""
+ name_parts: tuple[str, ...]
+ is_ambiguous: bool = False
+ child_is_redirected: bool = False
+ is_optional: bool = False
+
+ @classmethod
+ def from_module(cls, module: types.ModuleType, append: str | None = None) -> t.Self:
+ name = module.__name__
+
+ if append:
+ name += '.' + append
+
+ return cls.from_module_name(name)
+
+ @classmethod
+ def from_module_name(cls, module_name: str) -> t.Self:
+ return cls(tuple(module_name.split('.')))
+
REPLACER = b"#<>"
REPLACER_VERSION = b"\"<>\""
@@ -64,348 +101,45 @@ REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"
-# We could end up writing out parameters with unicode characters so we need to
-# specify an encoding for the python source file
-ENCODING_STRING = u'# -*- coding: utf-8 -*-'
-b_ENCODING_STRING = b'# -*- coding: utf-8 -*-'
-
# module_common is relative to module_utils, so fix the path
_MODULE_UTILS_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
+_SHEBANG_PLACEHOLDER = '# shebang placeholder'
# ******************************************************************************
-ANSIBALLZ_TEMPLATE = u"""%(shebang)s
-%(coding)s
-_ANSIBALLZ_WRAPPER = True # For test-module.py script to tell this is a ANSIBALLZ_WRAPPER
-# This code is part of Ansible, but is an independent component.
-# The code in this particular templatable string, and this templatable string
-# only, is BSD licensed. Modules which end up using this snippet, which is
-# dynamically combined together by Ansible still belong to the author of the
-# module, and they may assign their own license to the complete work.
-#
-# Copyright (c), James Cammarata, 2016
-# Copyright (c), Toshio Kuratomi, 2016
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-def _ansiballz_main():
- import os
- import os.path
-
- # Access to the working directory is required by Python when using pipelining, as well as for the coverage module.
- # Some platforms, such as macOS, may not allow querying the working directory when using become to drop privileges.
- try:
- os.getcwd()
- except OSError:
- try:
- os.chdir(os.path.expanduser('~'))
- except OSError:
- os.chdir('/')
-
-%(rlimit)s
-
- import sys
- import __main__
-
- # For some distros and python versions we pick up this script in the temporary
- # directory. This leads to problems when the ansible module masks a python
- # library that another import needs. We have not figured out what about the
- # specific distros and python versions causes this to behave differently.
- #
- # Tested distros:
- # Fedora23 with python3.4 Works
- # Ubuntu15.10 with python2.7 Works
- # Ubuntu15.10 with python3.4 Fails without this
- # Ubuntu16.04.1 with python3.5 Fails without this
- # To test on another platform:
- # * use the copy module (since this shadows the stdlib copy module)
- # * Turn off pipelining
- # * Make sure that the destination file does not exist
- # * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m'
- # This will traceback in shutil. Looking at the complete traceback will show
- # that shutil is importing copy which finds the ansible module instead of the
- # stdlib module
- scriptdir = None
- try:
- scriptdir = os.path.dirname(os.path.realpath(__main__.__file__))
- except (AttributeError, OSError):
- # Some platforms don't set __file__ when reading from stdin
- # OSX raises OSError if using abspath() in a directory we don't have
- # permission to read (realpath calls abspath)
- pass
-
- # Strip cwd from sys.path to avoid potential permissions issues
- excludes = set(('', '.', scriptdir))
- sys.path = [p for p in sys.path if p not in excludes]
-
- import base64
- import runpy
- import shutil
- import tempfile
- import zipfile
-
- if sys.version_info < (3,):
- PY3 = False
- else:
- PY3 = True
-
- ZIPDATA = %(zipdata)r
-
- # Note: temp_path isn't needed once we switch to zipimport
- def invoke_module(modlib_path, temp_path, json_params):
- # When installed via setuptools (including python setup.py install),
- # ansible may be installed with an easy-install.pth file. That file
- # may load the system-wide install of ansible rather than the one in
- # the module. sitecustomize is the only way to override that setting.
- z = zipfile.ZipFile(modlib_path, mode='a')
-
- # py3: modlib_path will be text, py2: it's bytes. Need bytes at the end
- sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% modlib_path
- sitecustomize = sitecustomize.encode('utf-8')
- # Use a ZipInfo to work around zipfile limitation on hosts with
- # clocks set to a pre-1980 year (for instance, Raspberry Pi)
- zinfo = zipfile.ZipInfo()
- zinfo.filename = 'sitecustomize.py'
- zinfo.date_time = %(date_time)s
- z.writestr(zinfo, sitecustomize)
- z.close()
-
- # Put the zipped up module_utils we got from the controller first in the python path so that we
- # can monkeypatch the right basic
- sys.path.insert(0, modlib_path)
-
- # Monkeypatch the parameters into basic
- from ansible.module_utils import basic
- basic._ANSIBLE_ARGS = json_params
-%(coverage)s
- # Run the module! By importing it as '__main__', it thinks it is executing as a script
- runpy.run_module(mod_name=%(module_fqn)r, init_globals=dict(_module_fqn=%(module_fqn)r, _modlib_path=modlib_path),
- run_name='__main__', alter_sys=True)
-
- # Ansible modules must exit themselves
- print('{"msg": "New-style module did not handle its own exit", "failed": true}')
- sys.exit(1)
-
- def debug(command, zipped_mod, json_params):
- # The code here normally doesn't run. It's only used for debugging on the
- # remote machine.
- #
- # The subcommands in this function make it easier to debug ansiballz
- # modules. Here's the basic steps:
- #
- # Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
- # to save the module file remotely::
- # $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
- #
- # Part of the verbose output will tell you where on the remote machine the
- # module was written to::
- # [...]
- # SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
- # PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
- # ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
- # LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
- # [...]
- #
- # Login to the remote machine and run the module file via from the previous
- # step with the explode subcommand to extract the module payload into
- # source files::
- # $ ssh host1
- # $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
- # Module expanded into:
- # /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible
- #
- # You can now edit the source files to instrument the code or experiment with
- # different parameter values. When you're ready to run the code you've modified
- # (instead of the code from the actual zipped module), use the execute subcommand like this::
- # $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute
-
- # Okay to use __file__ here because we're running from a kept file
- basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
- args_path = os.path.join(basedir, 'args')
-
- if command == 'explode':
- # transform the ZIPDATA into an exploded directory of code and then
- # print the path to the code. This is an easy way for people to look
- # at the code on the remote machine for debugging it in that
- # environment
- z = zipfile.ZipFile(zipped_mod)
- for filename in z.namelist():
- if filename.startswith('/'):
- raise Exception('Something wrong with this module zip file: should not contain absolute paths')
-
- dest_filename = os.path.join(basedir, filename)
- if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
- os.makedirs(dest_filename)
- else:
- directory = os.path.dirname(dest_filename)
- if not os.path.exists(directory):
- os.makedirs(directory)
- f = open(dest_filename, 'wb')
- f.write(z.read(filename))
- f.close()
-
- # write the args file
- f = open(args_path, 'wb')
- f.write(json_params)
- f.close()
-
- print('Module expanded into:')
- print('%%s' %% basedir)
- exitcode = 0
-
- elif command == 'execute':
- # Execute the exploded code instead of executing the module from the
- # embedded ZIPDATA. This allows people to easily run their modified
- # code on the remote machine to see how changes will affect it.
-
- # Set pythonpath to the debug dir
- sys.path.insert(0, basedir)
-
- # read in the args file which the user may have modified
- with open(args_path, 'rb') as f:
- json_params = f.read()
-
- # Monkeypatch the parameters into basic
- from ansible.module_utils import basic
- basic._ANSIBLE_ARGS = json_params
-
- # Run the module! By importing it as '__main__', it thinks it is executing as a script
- runpy.run_module(mod_name=%(module_fqn)r, init_globals=None, run_name='__main__', alter_sys=True)
-
- # Ansible modules must exit themselves
- print('{"msg": "New-style module did not handle its own exit", "failed": true}')
- sys.exit(1)
-
- else:
- print('WARNING: Unknown debug command. Doing nothing.')
- exitcode = 0
-
- return exitcode
-
- #
- # See comments in the debug() method for information on debugging
- #
-
- ANSIBALLZ_PARAMS = %(params)s
- if PY3:
- ANSIBALLZ_PARAMS = ANSIBALLZ_PARAMS.encode('utf-8')
- try:
- # There's a race condition with the controller removing the
- # remote_tmpdir and this module executing under async. So we cannot
- # store this in remote_tmpdir (use system tempdir instead)
- # Only need to use [ansible_module]_payload_ in the temp_path until we move to zipimport
- # (this helps ansible-test produce coverage stats)
- temp_path = tempfile.mkdtemp(prefix='ansible_' + %(ansible_module)r + '_payload_')
-
- zipped_mod = os.path.join(temp_path, 'ansible_' + %(ansible_module)r + '_payload.zip')
-
- with open(zipped_mod, 'wb') as modlib:
- modlib.write(base64.b64decode(ZIPDATA))
-
- if len(sys.argv) == 2:
- exitcode = debug(sys.argv[1], zipped_mod, ANSIBALLZ_PARAMS)
- else:
- # Note: temp_path isn't needed once we switch to zipimport
- invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
- finally:
- try:
- shutil.rmtree(temp_path)
- except (NameError, OSError):
- # tempdir creation probably failed
- pass
- sys.exit(exitcode)
-
-if __name__ == '__main__':
- _ansiballz_main()
-"""
-
-ANSIBALLZ_COVERAGE_TEMPLATE = """
- os.environ['COVERAGE_FILE'] = %(coverage_output)r + '=python-%%s=coverage' %% '.'.join(str(v) for v in sys.version_info[:2])
-
- import atexit
-
- try:
- import coverage
- except ImportError:
- print('{"msg": "Could not import `coverage` module.", "failed": true}')
- sys.exit(1)
-
- cov = coverage.Coverage(config_file=%(coverage_config)r)
- def atexit_coverage():
- cov.stop()
- cov.save()
+def _strip_comments(source: str) -> str:
+ # Blank out comment and empty lines in the wrapper (keeping the shebang placeholder), preserving line numbers
+ buf = []
+ for line in source.splitlines():
+ l = line.strip()
+ if (not l or l.startswith('#')) and l != _SHEBANG_PLACEHOLDER:
+ line = ''
+ buf.append(line)
+ return '\n'.join(buf)
- atexit.register(atexit_coverage)
- cov.start()
-"""
-
-ANSIBALLZ_COVERAGE_CHECK_TEMPLATE = """
- try:
- if PY3:
- import importlib.util
- if importlib.util.find_spec('coverage') is None:
- raise ImportError
- else:
- import imp
- imp.find_module('coverage')
- except ImportError:
- print('{"msg": "Could not find `coverage` module.", "failed": true}')
- sys.exit(1)
-"""
+def _read_ansiballz_code() -> str:
+ code = (pathlib.Path(_ansiballz.__file__).parent / '_wrapper.py').read_text()
-ANSIBALLZ_RLIMIT_TEMPLATE = """
- import resource
+ if not C.DEFAULT_KEEP_REMOTE_FILES:
+ # Keep comments when KEEP_REMOTE_FILES is set. That way users will see
+ # the comments with some nice usage instructions.
+ # Otherwise, strip comments for a smaller over-the-wire size.
+ code = _strip_comments(code)
- existing_soft, existing_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
+ return code
- # adjust soft limit subject to existing hard limit
- requested_soft = min(existing_hard, %(rlimit_nofile)d)
- if requested_soft != existing_soft:
- try:
- resource.setrlimit(resource.RLIMIT_NOFILE, (requested_soft, existing_hard))
- except ValueError:
- # some platforms (eg macOS) lie about their hard limit
- pass
-"""
+_ANSIBALLZ_CODE = _read_ansiballz_code() # read during startup to prevent individual workers from doing so
-def _strip_comments(source):
- # Strip comments and blank lines from the wrapper
- buf = []
- for line in source.splitlines():
- l = line.strip()
- if not l or l.startswith(u'#'):
- continue
- buf.append(line)
- return u'\n'.join(buf)
+def _get_ansiballz_code(shebang: str) -> str:
+ code = _ANSIBALLZ_CODE
+ code = code.replace(_SHEBANG_PLACEHOLDER, shebang)
+ return code
-if C.DEFAULT_KEEP_REMOTE_FILES:
- # Keep comments when KEEP_REMOTE_FILES is set. That way users will see
- # the comments with some nice usage instructions
- ACTIVE_ANSIBALLZ_TEMPLATE = ANSIBALLZ_TEMPLATE
-else:
- # ANSIBALLZ_TEMPLATE stripped of comments for smaller over the wire size
- ACTIVE_ANSIBALLZ_TEMPLATE = _strip_comments(ANSIBALLZ_TEMPLATE)
# dirname(dirname(dirname(site-packages/ansible/executor/module_common.py))) == site-packages
# Do this instead of getting site-packages from distutils.sysconfig so we work when we
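Unlike the deleted `_strip_comments`, the new version blanks comment and empty lines instead of dropping them, keeping the wrapper's line numbers aligned with the source file (presumably so remote tracebacks still point at the right lines), and it preserves the shebang placeholder for later substitution. A standalone check of that behavior, with the function body mirroring the diff:

    # Standalone check of the blanking behavior.
    _SHEBANG_PLACEHOLDER = '# shebang placeholder'

    def _strip_comments(source: str) -> str:
        buf = []
        for line in source.splitlines():
            stripped = line.strip()
            # blank out comment/empty lines, but keep the shebang placeholder
            if (not stripped or stripped.startswith('#')) and stripped != _SHEBANG_PLACEHOLDER:
                line = ''
            buf.append(line)
        return '\n'.join(buf)

    sample = '# shebang placeholder\n# a comment\nx = 1\n\nprint(x)'
    assert _strip_comments(sample) == '# shebang placeholder\n\nx = 1\n\nprint(x)'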
@@ -435,6 +169,7 @@ NEW_STYLE_PYTHON_MODULE_RE = re.compile(
class ModuleDepFinder(ast.NodeVisitor):
+ # DTFIX-FUTURE: add support for ignoring imports with a "controller only" comment; this will allow replacing import_controller_module with standard imports
def __init__(self, module_fqn, tree, is_pkg_init=False, *args, **kwargs):
"""
Walk the ast tree for the python module.
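ModuleDepFinder builds on `ast.NodeVisitor`: the class walks a parsed module and collects the imports it cares about. A much-reduced sketch of the underlying technique (this is not the real ModuleDepFinder logic, which also resolves relative imports and collection paths):

    # Simplified import scan in the NodeVisitor style (illustrative only).
    import ast

    class ImportFinder(ast.NodeVisitor):
        def __init__(self) -> None:
            self.found: list[str] = []

        def visit_Import(self, node: ast.Import) -> None:
            self.found.extend(alias.name for alias in node.names)

        def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
            if node.module:  # skip relative imports such as 'from . import x'
                self.found.append(node.module)

    source = 'from ansible.module_utils.basic import AnsibleModule\nimport json\n'
    finder = ImportFinder()
    finder.visit(ast.parse(source))
    print(finder.found)  # ['ansible.module_utils.basic', 'json']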
@@ -581,7 +316,7 @@ def _slurp(path):
return data
-def _get_shebang(interpreter, task_vars, templar, args=tuple(), remote_is_local=False):
+def _get_shebang(interpreter, task_vars, templar: _template.Templar, args=tuple(), remote_is_local=False):
"""
Handles the different ways ansible allows overriding the shebang target for a module.
"""
@@ -606,7 +341,8 @@ def _get_shebang(interpreter, task_vars, templar, args=tuple(), remote_is_local=
elif C.config.get_configuration_definition(interpreter_config_key):
interpreter_from_config = C.config.get_config_value(interpreter_config_key, variables=task_vars)
- interpreter_out = templar.template(interpreter_from_config.strip())
+ interpreter_out = templar._engine.template(_utils.str_problematic_strip(interpreter_from_config),
+ options=TemplateOptions(value_for_omit=C.config.get_config_default(interpreter_config_key)))
# handle interpreter discovery if requested or empty interpreter was provided
if not interpreter_out or interpreter_out in ['auto', 'auto_legacy', 'auto_silent', 'auto_legacy_silent']:
@@ -624,10 +360,11 @@ def _get_shebang(interpreter, task_vars, templar, args=tuple(), remote_is_local=
elif interpreter_config in task_vars:
# for non python we consult vars for a possible direct override
- interpreter_out = templar.template(task_vars.get(interpreter_config).strip())
+ interpreter_out = templar._engine.template(_utils.str_problematic_strip(task_vars.get(interpreter_config)),
+ options=TemplateOptions(value_for_omit=None))
if not interpreter_out:
- # nothing matched(None) or in case someone configures empty string or empty intepreter
+ # nothing matched (None), or someone configured an empty string or empty interpreter
interpreter_out = interpreter
# set shebang
@@ -700,7 +437,13 @@ class ModuleUtilLocatorBase:
else:
msg += '.'
- display.deprecated(msg, removal_version, removed, removal_date, self._collection_name)
+ display.deprecated( # pylint: disable=ansible-deprecated-date-not-permitted,ansible-deprecated-unnecessary-collection-name
+ msg=msg,
+ version=removal_version,
+ removed=removed,
+ date=removal_date,
+ deprecator=deprecator_from_collection_name(self._collection_name),
+ )
if 'redirect' in routing_entry:
self.redirected = True
source_pkg = '.'.join(name_parts)
@@ -803,12 +546,12 @@ class LegacyModuleUtilLocator(ModuleUtilLocatorBase):
# find_spec needs the full module name
self._info = info = importlib.machinery.PathFinder.find_spec('.'.join(name_parts), paths)
- if info is not None and os.path.splitext(info.origin)[1] in importlib.machinery.SOURCE_SUFFIXES:
+ if info is not None and info.origin is not None and os.path.splitext(info.origin)[1] in importlib.machinery.SOURCE_SUFFIXES:
self.is_package = info.origin.endswith('/__init__.py')
path = info.origin
else:
return False
- self.source_code = _slurp(path)
+ self.source_code = Origin(path=path).tag(_slurp(path))
return True
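The added `info.origin is not None` guard matters because `find_spec` returns a spec whose `origin` is None for namespace packages, which would previously have crashed `os.path.splitext`. A standalone illustration of the lookup, using a stdlib module as a stand-in:

    # Spec lookup with the None-origin guard (stdlib module as a stand-in).
    import importlib.machinery
    import os

    spec = importlib.machinery.PathFinder.find_spec('json')

    if spec is not None and spec.origin is not None and os.path.splitext(spec.origin)[1] in importlib.machinery.SOURCE_SUFFIXES:
        print('source module at', spec.origin)
    else:
        print('not a plain source module (builtin, extension, or namespace package)')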
@@ -843,9 +586,18 @@ class CollectionModuleUtilLocator(ModuleUtilLocatorBase):
resource_base_path = os.path.join(*name_parts[3:])
src = None
+
# look for package_dir first, then module
+ src_path = to_native(os.path.join(resource_base_path, '__init__.py'))
+
+ try:
+ collection_pkg = importlib.import_module(collection_pkg_name)
+ pkg_path = os.path.dirname(collection_pkg.__file__)
+ except (ImportError, AttributeError):
+ pkg_path = None
+
try:
- src = pkgutil.get_data(collection_pkg_name, to_native(os.path.join(resource_base_path, '__init__.py')))
+ src = pkgutil.get_data(collection_pkg_name, src_path)
except ImportError:
pass
@@ -854,32 +606,123 @@ class CollectionModuleUtilLocator(ModuleUtilLocatorBase):
if src is not None: # empty string is OK
self.is_package = True
else:
+ src_path = to_native(resource_base_path + '.py')
+
try:
- src = pkgutil.get_data(collection_pkg_name, to_native(resource_base_path + '.py'))
+ src = pkgutil.get_data(collection_pkg_name, src_path)
except ImportError:
pass
if src is None: # empty string is OK
return False
- self.source_code = src
+ # TODO: this feels brittle and funky; we should be able to determine the source path more definitively
+
+ if pkg_path:
+ origin = Origin(path=os.path.join(pkg_path, src_path))
+ else:
+ # DTFIX-FUTURE: not sure if this case is even reachable
+ origin = Origin(description=f'<collection {collection_pkg_name!r} resource {src_path!r}>')
+
+ self.source_code = origin.tag(src)
return True
def _get_module_utils_remainder_parts(self, name_parts):
return name_parts[5:] # eg, foo.bar for ansible_collections.ns.coll.plugins.module_utils.foo.bar
-def _make_zinfo(filename, date_time, zf=None):
+def _make_zinfo(filename: str, date_time: datetime.datetime, zf: zipfile.ZipFile | None = None) -> zipfile.ZipInfo:
zinfo = zipfile.ZipInfo(
filename=filename,
- date_time=date_time
+ date_time=date_time.utctimetuple()[:6],
)
+
if zf:
zinfo.compress_type = zf.compression
+
return zinfo
-def recursive_finder(name, module_fqn, module_data, zf, date_time=None):
+@dataclasses.dataclass(frozen=True, kw_only=True, slots=True)
+class ModuleMetadata:
+ @classmethod
+ def __post_init__(cls):
+ _dataclass_validation.inject_post_init_validation(cls)
+
+
+@dataclasses.dataclass(frozen=True, kw_only=True, slots=True)
+class ModuleMetadataV1(ModuleMetadata):
+ serialization_profile: str
+
+
+metadata_versions: dict[t.Any, type[ModuleMetadata]] = {
+ 1: ModuleMetadataV1,
+}
+
+_DEFAULT_LEGACY_METADATA = ModuleMetadataV1(serialization_profile='legacy')
+
+
+def _get_module_metadata(module: ast.Module) -> ModuleMetadata:
+ # experimental module metadata; off by default
+ if not C.config.get_config_value('_MODULE_METADATA'):
+ return _DEFAULT_LEGACY_METADATA
+
+ metadata_nodes: list[ast.Assign] = []
+
+ for node in module.body:
+ if isinstance(node, ast.Assign):
+ if len(node.targets) == 1:
+ target = node.targets[0]
+
+ if isinstance(target, ast.Name):
+ if target.id == 'METADATA':
+ metadata_nodes.append(node)
+
+ if not metadata_nodes:
+ return _DEFAULT_LEGACY_METADATA
+
+ if len(metadata_nodes) > 1:
+ raise ValueError('Module METADATA must be defined only once.')
+
+ metadata_node = metadata_nodes[0]
+
+ if not isinstance(metadata_node.value, ast.Constant):
+ raise TypeError(f'Module METADATA node must be {ast.Constant} not {type(metadata_node.value)}.')
+
+ unparsed_metadata = metadata_node.value.value
+
+ if not isinstance(unparsed_metadata, str):
+ raise TypeError(f'Module METADATA must be {str} not {type(unparsed_metadata)}.')
+
+ try:
+ parsed_metadata = yaml_load(unparsed_metadata)
+ except Exception as ex:
+ raise ValueError('Module METADATA must be valid YAML.') from ex
+
+ if not isinstance(parsed_metadata, dict):
+ raise TypeError(f'Module METADATA must parse to {dict} not {type(parsed_metadata)}.')
+
+ schema_version = parsed_metadata.pop('schema_version', None)
+
+ if not (metadata_type := metadata_versions.get(schema_version)):
+ raise ValueError(f'Module METADATA schema_version {schema_version} is unknown.')
+
+ try:
+ metadata = metadata_type(**parsed_metadata) # type: ignore
+ except Exception as ex:
+ raise ValueError('Module METADATA is invalid.') from ex
+
+ return metadata
+
+
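Concretely, the experimental support looks for a single module-level `METADATA` string constant containing YAML. A sketch of the extraction steps above, using PyYAML's `yaml.safe_load` in place of ansible's `yaml_load` wrapper (assumes PyYAML is installed):

    # METADATA extraction sketch (yaml.safe_load stands in for yaml_load).
    import ast
    import textwrap

    import yaml

    module_source = textwrap.dedent('''
        METADATA = """
        schema_version: 1
        serialization_profile: legacy
        """
    ''')

    tree = ast.parse(module_source)
    assign = tree.body[0]  # the METADATA assignment node
    parsed = yaml.safe_load(assign.value.value)

    schema_version = parsed.pop('schema_version')
    print(schema_version, parsed)  # 1 {'serialization_profile': 'legacy'}

`_get_module_metadata` then uses the popped `schema_version` to select `ModuleMetadataV1` and instantiates it with the remaining keys, so unknown keys fail loudly instead of being silently ignored.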
+def recursive_finder(
+ name: str,
+ module_fqn: str,
+ module_data: str | bytes,
+ zf: zipfile.ZipFile,
+ date_time: datetime.datetime,
+ extension_manager: _builder.ExtensionManager,
+) -> ModuleMetadata:
"""
Using ModuleDepFinder, make sure we have all of the module_utils files that
the module and its module_utils files needs. (no longer actually recursive)
@@ -889,9 +732,6 @@ def recursive_finder(name, module_fqn, module_data, zf, date_time=None):
:arg zf: An open :python:class:`zipfile.ZipFile` object that holds the Ansible module payload
which we're assembling
"""
- if date_time is None:
- date_time = time.gmtime()[:6]
-
# py_module_cache maps python module names to a tuple of the code in the module
# and the pathname to the module.
# Here we pre-load it with modules which we create without bothering to
@@ -913,49 +753,59 @@ def recursive_finder(name, module_fqn, module_data, zf, date_time=None):
module_utils_paths = [p for p in module_utils_loader._get_paths(subdirs=False) if os.path.isdir(p)]
module_utils_paths.append(_MODULE_UTILS_PATH)
- # Parse the module code and find the imports of ansible.module_utils
- try:
- tree = compile(module_data, '', 'exec', ast.PyCF_ONLY_AST)
- except (SyntaxError, IndentationError) as e:
- raise AnsibleError("Unable to import %s due to %s" % (name, e.msg))
-
+ tree = _compile_module_ast(name, module_data)
+ module_metadata = _get_module_metadata(tree)
finder = ModuleDepFinder(module_fqn, tree)
- # the format of this set is a tuple of the module name and whether or not the import is ambiguous as a module name
- # or an attribute of a module (eg from x.y import z <-- is z a module or an attribute of x.y?)
- modules_to_process = [ModuleUtilsProcessEntry(m, True, False, is_optional=m in finder.optional_imports) for m in finder.submodules]
+ if not isinstance(module_metadata, ModuleMetadataV1):
+ raise NotImplementedError()
- # HACK: basic is currently always required since module global init is currently tied up with AnsiballZ arg input
- modules_to_process.append(ModuleUtilsProcessEntry(('ansible', 'module_utils', 'basic'), False, False, is_optional=False))
+ profile = module_metadata.serialization_profile
+
+ # the format of this set is a tuple of the module name and whether the import is ambiguous as a module name
+ # or an attribute of a module (e.g. from x.y import z <-- is z a module or an attribute of x.y?)
+ modules_to_process = [_ModuleUtilsProcessEntry(m, True, False, is_optional=m in finder.optional_imports) for m in finder.submodules]
+
+ # include module_utils that are always required
+ modules_to_process.extend((
+ _ModuleUtilsProcessEntry.from_module(_loader),
+ _ModuleUtilsProcessEntry.from_module(_basic),
+ _ModuleUtilsProcessEntry.from_module_name(_json.get_module_serialization_profile_module_name(profile, True)),
+ _ModuleUtilsProcessEntry.from_module_name(_json.get_module_serialization_profile_module_name(profile, False)),
+ ))
+
+ modules_to_process.extend(_ModuleUtilsProcessEntry.from_module_name(name) for name in extension_manager.module_names)
+
+ module_info: ModuleUtilLocatorBase
# we'll be adding new modules inline as we discover them, so just keep going til we've processed them all
while modules_to_process:
modules_to_process.sort() # not strictly necessary, but nice to process things in predictable and repeatable order
- py_module_name, is_ambiguous, child_is_redirected, is_optional = modules_to_process.pop(0)
+ entry = modules_to_process.pop(0)
- if py_module_name in py_module_cache:
+ if entry.name_parts in py_module_cache:
# this is normal; we'll often see the same module imported many times, but we only need to process it once
continue
- if py_module_name[0:2] == ('ansible', 'module_utils'):
- module_info = LegacyModuleUtilLocator(py_module_name, is_ambiguous=is_ambiguous,
- mu_paths=module_utils_paths, child_is_redirected=child_is_redirected)
- elif py_module_name[0] == 'ansible_collections':
- module_info = CollectionModuleUtilLocator(py_module_name, is_ambiguous=is_ambiguous,
- child_is_redirected=child_is_redirected, is_optional=is_optional)
+ if entry.name_parts[0:2] == ('ansible', 'module_utils'):
+ module_info = LegacyModuleUtilLocator(entry.name_parts, is_ambiguous=entry.is_ambiguous,
+ mu_paths=module_utils_paths, child_is_redirected=entry.child_is_redirected)
+ elif entry.name_parts[0] == 'ansible_collections':
+ module_info = CollectionModuleUtilLocator(entry.name_parts, is_ambiguous=entry.is_ambiguous,
+ child_is_redirected=entry.child_is_redirected, is_optional=entry.is_optional)
else:
# FIXME: dot-joined result
display.warning('ModuleDepFinder improperly found a non-module_utils import %s'
- % [py_module_name])
+ % [entry.name_parts])
continue
# Could not find the module. Construct a helpful error message.
if not module_info.found:
- if is_optional:
+ if entry.is_optional:
# this was a best-effort optional import that we couldn't find, oh well, move along...
continue
# FIXME: use dot-joined candidate names
- msg = 'Could not find imported module support code for {0}. Looked for ({1})'.format(module_fqn, module_info.candidate_names_joined)
+ msg = 'Could not find imported module support code for {0}. Looked for ({1})'.format(module_fqn, module_info.candidate_names_joined)
raise AnsibleError(msg)
# check the cache one more time with the module we actually found, since the name could be different than the input
@@ -963,14 +813,9 @@ def recursive_finder(name, module_fqn, module_data, zf, date_time=None):
if module_info.fq_name_parts in py_module_cache:
continue
- # compile the source, process all relevant imported modules
- try:
- tree = compile(module_info.source_code, '', 'exec', ast.PyCF_ONLY_AST)
- except (SyntaxError, IndentationError) as e:
- raise AnsibleError("Unable to import %s due to %s" % (module_info.fq_name_parts, e.msg))
-
+ tree = _compile_module_ast('.'.join(module_info.fq_name_parts), module_info.source_code)
finder = ModuleDepFinder('.'.join(module_info.fq_name_parts), tree, module_info.is_package)
- modules_to_process.extend(ModuleUtilsProcessEntry(m, True, False, is_optional=m in finder.optional_imports)
+ modules_to_process.extend(_ModuleUtilsProcessEntry(m, True, False, is_optional=m in finder.optional_imports)
for m in finder.submodules if m not in py_module_cache)
# we've processed this item, add it to the output list
@@ -982,20 +827,36 @@ def recursive_finder(name, module_fqn, module_data, zf, date_time=None):
accumulated_pkg_name.append(pkg) # we're accumulating this across iterations
normalized_name = tuple(accumulated_pkg_name) # extra machinations to get a hashable type (list is not)
if normalized_name not in py_module_cache:
- modules_to_process.append(ModuleUtilsProcessEntry(normalized_name, False, module_info.redirected, is_optional=is_optional))
+ modules_to_process.append(_ModuleUtilsProcessEntry(normalized_name, False, module_info.redirected, is_optional=entry.is_optional))
for py_module_name in py_module_cache:
- py_module_file_name = py_module_cache[py_module_name][1]
+ source_code, py_module_file_name = py_module_cache[py_module_name]
+
+ zf.writestr(_make_zinfo(py_module_file_name, date_time, zf=zf), source_code)
+
+ if extension_manager.debugger_enabled and (origin := Origin.get_tag(source_code)) and origin.path:
+ extension_manager.source_mapping[origin.path] = py_module_file_name
- zf.writestr(
- _make_zinfo(py_module_file_name, date_time, zf=zf),
- py_module_cache[py_module_name][0]
- )
mu_file = to_text(py_module_file_name, errors='surrogate_or_strict')
display.vvvvv("Including module_utils file %s" % mu_file)
+ return module_metadata
+
+
+def _compile_module_ast(module_name: str, source_code: str | bytes) -> ast.Module:
+ origin = Origin.get_tag(source_code) or Origin.UNKNOWN
+
+ # compile the source, process all relevant imported modules
+ try:
+ tree = t.cast(ast.Module, compile(source_code, str(origin), 'exec', ast.PyCF_ONLY_AST))
+ except SyntaxError as ex:
+ raise AnsibleError(f"Unable to compile {module_name!r}.", obj=origin.replace(line_num=ex.lineno, col_num=ex.offset)) from ex
+
+ return tree
+
def _is_binary(b_module_data):
+ """Heuristic to classify a file as binary by sniffing a 1k header; see https://stackoverflow.com/a/7392391"""
textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f]))
start = b_module_data[:1024]
return bool(start.translate(None, textchars))
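The heuristic deletes every byte considered "texty" (common control characters plus the printable range, minus DEL) from a 1 KiB prefix; any surviving byte marks the data as binary. Exercised standalone:

    # The sniffing heuristic exercised on sample data.
    textchars = bytearray({7, 8, 9, 10, 12, 13, 27} | set(range(0x20, 0x100)) - {0x7f})

    def is_binary(data: bytes) -> bool:
        # delete all text-ish bytes; any survivors imply binary content
        return bool(data[:1024].translate(None, textchars))

    print(is_binary(b'#!/usr/bin/env python3\nprint("hi")\n'))  # False
    print(is_binary(b'\x7fELF\x02\x01\x01\x00'))                # True: 0x7f and NUL survive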
@@ -1034,17 +895,29 @@ def _get_ansible_module_fqn(module_path):
return remote_module_fqn
-def _add_module_to_zip(zf, date_time, remote_module_fqn, b_module_data):
+def _add_module_to_zip(
+ zf: zipfile.ZipFile,
+ date_time: datetime.datetime,
+ remote_module_fqn: str,
+ b_module_data: bytes,
+ module_path: str,
+ extension_manager: _builder.ExtensionManager,
+) -> None:
"""Add a module from ansible or from an ansible collection into the module zip"""
module_path_parts = remote_module_fqn.split('.')
# Write the module
- module_path = '/'.join(module_path_parts) + '.py'
+ zip_module_path = '/'.join(module_path_parts) + '.py'
zf.writestr(
- _make_zinfo(module_path, date_time, zf=zf),
+ _make_zinfo(zip_module_path, date_time, zf=zf),
b_module_data
)
+ if extension_manager.debugger_enabled:
+ extension_manager.source_mapping[module_path] = zip_module_path
+
+ existing_paths: frozenset[str]
+
# Write the __init__.py's necessary to get there
if module_path_parts[0] == 'ansible':
# The ansible namespace is setup as part of the module_utils setup...
@@ -1068,12 +941,60 @@ def _add_module_to_zip(zf, date_time, remote_module_fqn, b_module_data):
)
-def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression, async_timeout, become,
- become_method, become_user, become_password, become_flags, environment, remote_is_local=False):
+@dataclasses.dataclass(kw_only=True, slots=True, frozen=True)
+class _BuiltModule:
+ """Payload required to execute an Ansible module, along with information required to do so."""
+ b_module_data: bytes
+ module_style: t.Literal['binary', 'new', 'non_native_want_json', 'old']
+ shebang: str | None
+ serialization_profile: str
+
+
+@dataclasses.dataclass(kw_only=True, slots=True, frozen=True)
+class _CachedModule:
+ """Cached Python module created by AnsiballZ."""
+
+ # FIXME: switch this to a locked-down pickle config, or stop using pickle; it's easy to mess up and reach objects that shouldn't be pickled
+
+ zip_data: bytes
+ metadata: ModuleMetadata
+ source_mapping: dict[str, str]
+ """A mapping of controller absolute source locations to target relative source locations within the AnsiballZ payload."""
+
+ def dump(self, path: str) -> None:
+ temp_path = pathlib.Path(path + '-part')
+
+ with temp_path.open('wb') as cache_file:
+ pickle.dump(self, cache_file)
+
+ temp_path.rename(path)
+
+ @classmethod
+ def load(cls, path: str) -> t.Self:
+ with pathlib.Path(path).open('rb') as cache_file:
+ return pickle.load(cache_file)
+
+
+def _find_module_utils(
+ *,
+ module_name: str,
+ b_module_data: bytes,
+ module_path: str,
+ module_args: dict[object, object],
+ task_vars: dict[str, object],
+ templar: Templar,
+ module_compression: str,
+ async_timeout: int,
+ become_plugin: BecomeBase | None,
+ environment: dict[str, str],
+ remote_is_local: bool = False
+) -> _BuiltModule:
"""
Given the source of the module, convert it to a Jinja2 template to insert
module code and return whether it's a new or old style module.
"""
+ module_substyle: t.Literal['binary', 'jsonargs', 'non_native_want_json', 'old', 'powershell', 'python']
+ module_style: t.Literal['binary', 'new', 'non_native_want_json', 'old']
module_substyle = module_style = 'old'
# module_style is something important to calling code (ActionBase). It
@@ -1096,12 +1017,10 @@ def _find_module_utils(module_name, b_module_data, module_path, module_args, tas
elif REPLACER_WINDOWS in b_module_data:
module_style = 'new'
module_substyle = 'powershell'
- b_module_data = b_module_data.replace(REPLACER_WINDOWS, b'#Requires -Module Ansible.ModuleUtils.Legacy')
+ b_module_data = b_module_data.replace(REPLACER_WINDOWS, b'#AnsibleRequires -PowerShell Ansible.ModuleUtils.Legacy')
elif re.search(b'#Requires -Module', b_module_data, re.IGNORECASE) \
- or re.search(b'#Requires -Version', b_module_data, re.IGNORECASE)\
- or re.search(b'#AnsibleRequires -OSVersion', b_module_data, re.IGNORECASE) \
- or re.search(b'#AnsibleRequires -Powershell', b_module_data, re.IGNORECASE) \
- or re.search(b'#AnsibleRequires -CSharpUtil', b_module_data, re.IGNORECASE):
+ or re.search(b'#Requires -Version', b_module_data, re.IGNORECASE) \
+ or re.search(b'#AnsibleRequires -(OSVersion|PowerShell|CSharpUtil|Wrapper)', b_module_data, re.IGNORECASE):
module_style = 'new'
module_substyle = 'powershell'
elif REPLACER_JSONARGS in b_module_data:
@@ -1114,7 +1033,12 @@ def _find_module_utils(module_name, b_module_data, module_path, module_args, tas
# Neither old-style, non_native_want_json nor binary modules should be modified
# except for the shebang line (Done by modify_module)
if module_style in ('old', 'non_native_want_json', 'binary'):
- return b_module_data, module_style, shebang
+ return _BuiltModule(
+ b_module_data=b_module_data,
+ module_style=module_style,
+ shebang=shebang,
+ serialization_profile='legacy',
+ )
output = BytesIO()
@@ -1127,18 +1051,15 @@ def _find_module_utils(module_name, b_module_data, module_path, module_args, tas
# People should start writing collections instead of modules in roles so we
# may never fix this
display.debug('ANSIBALLZ: Could not determine module FQN')
- remote_module_fqn = 'ansible.modules.%s' % module_name
+    # FIXME: add an integration test to validate that builtin and legacy modules with the same name are tracked separately by the caching mechanism
+    # FIXME: the surrogate FQN should be unique per source path; role-packaged modules with name collisions can still be aliased
+ remote_module_fqn = 'ansible.legacy.%s' % module_name
if module_substyle == 'python':
- date_time = time.gmtime()[:6]
- if date_time[0] < 1980:
- date_string = datetime.datetime(*date_time, tzinfo=datetime.timezone.utc).strftime('%c')
- raise AnsibleError(f'Cannot create zipfile due to pre-1980 configured date: {date_string}')
- params = dict(ANSIBLE_MODULE_ARGS=module_args,)
- try:
- python_repred_params = repr(json.dumps(params, cls=AnsibleJSONEncoder, vault_to_text=True))
- except TypeError as e:
- raise AnsibleError("Unable to pass options to module, they must be JSON serializable: %s" % to_native(e))
+ date_time = datetime.datetime.now(datetime.timezone.utc)
+
+ if date_time.year < 1980:
+ raise AnsibleError(f'Cannot create zipfile due to pre-1980 configured date: {date_time}')
try:
compression_method = getattr(zipfile, module_compression)
@@ -1146,30 +1067,24 @@ def _find_module_utils(module_name, b_module_data, module_path, module_args, tas
display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression)
compression_method = zipfile.ZIP_STORED
- lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache')
- cached_module_filename = os.path.join(lookup_path, "%s-%s" % (remote_module_fqn, module_compression))
+ extension_manager = _builder.ExtensionManager.create(task_vars=task_vars)
+ extension_key = '~'.join(extension_manager.extension_names) if extension_manager.extension_names else 'none'
+ lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache') # type: ignore[attr-defined]
+ cached_module_filename = os.path.join(lookup_path, '-'.join((remote_module_fqn, module_compression, extension_key)))
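+    # e.g. (hypothetical) '<local_tmp>/ansiballz_cache/ansible.legacy.ping-ZIP_DEFLATED-none'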
+
+ os.makedirs(os.path.dirname(cached_module_filename), exist_ok=True)
+
+ cached_module: _CachedModule | None = None
- zipdata = None
# Optimization -- don't lock if the module has already been cached
if os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename)
- with open(cached_module_filename, 'rb') as module_data:
- zipdata = module_data.read()
+ cached_module = _CachedModule.load(cached_module_filename)
else:
- if module_name in action_write_locks.action_write_locks:
- display.debug('ANSIBALLZ: Using lock for %s' % module_name)
- lock = action_write_locks.action_write_locks[module_name]
- else:
- # If the action plugin directly invokes the module (instead of
- # going through a strategy) then we don't have a cross-process
- # Lock specifically for this module. Use the "unexpected
- # module" lock instead
- display.debug('ANSIBALLZ: Using generic lock for %s' % module_name)
- lock = action_write_locks.action_write_locks[None]
-
display.debug('ANSIBALLZ: Acquiring lock')
- with lock:
- display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock))
+ lock_path = f'{cached_module_filename}.lock'
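+        # Each cache entry has its own lock file, so concurrent workers
+        # building the same module payload serialize against each other.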
+ with _locking.named_mutex(lock_path):
+ display.debug(f'ANSIBALLZ: Lock acquired: {lock_path}')
# Check that no other process has created this while we were
# waiting for the lock
if not os.path.exists(cached_module_filename):
@@ -1179,53 +1094,40 @@ def _find_module_utils(module_name, b_module_data, module_path, module_args, tas
zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
# walk the module imports, looking for module_utils to send; they'll be added to the zipfile
- recursive_finder(module_name, remote_module_fqn, b_module_data, zf, date_time)
+ module_metadata = recursive_finder(
+ module_name,
+ remote_module_fqn,
+ Origin(path=module_path).tag(b_module_data),
+ zf,
+ date_time,
+ extension_manager,
+ )
display.debug('ANSIBALLZ: Writing module into payload')
- _add_module_to_zip(zf, date_time, remote_module_fqn, b_module_data)
+ _add_module_to_zip(zf, date_time, remote_module_fqn, b_module_data, module_path, extension_manager)
zf.close()
- zipdata = base64.b64encode(zipoutput.getvalue())
+ zip_data = base64.b64encode(zipoutput.getvalue())
# Write the assembled module to a temp file (write to temp
# so that no one looking for the file reads a partially
# written file)
- #
- # FIXME: Once split controller/remote is merged, this can be simplified to
- # os.makedirs(lookup_path, exist_ok=True)
- if not os.path.exists(lookup_path):
- try:
- # Note -- if we have a global function to setup, that would
- # be a better place to run this
- os.makedirs(lookup_path)
- except OSError:
- # Multiple processes tried to create the directory. If it still does not
- # exist, raise the original exception.
- if not os.path.exists(lookup_path):
- raise
+ os.makedirs(lookup_path, exist_ok=True)
display.debug('ANSIBALLZ: Writing module')
- with open(cached_module_filename + '-part', 'wb') as f:
- f.write(zipdata)
-
- # Rename the file into its final position in the cache so
- # future users of this module can read it off the
- # filesystem instead of constructing from scratch.
- display.debug('ANSIBALLZ: Renaming module')
- os.rename(cached_module_filename + '-part', cached_module_filename)
+ cached_module = _CachedModule(zip_data=zip_data, metadata=module_metadata, source_mapping=extension_manager.source_mapping)
+ cached_module.dump(cached_module_filename)
display.debug('ANSIBALLZ: Done creating module')
- if zipdata is None:
+ if not cached_module:
display.debug('ANSIBALLZ: Reading module after lock')
# Another process wrote the file while we were waiting for
# the write lock. Go ahead and read the data from disk
# instead of re-creating it.
try:
- with open(cached_module_filename, 'rb') as f:
- zipdata = f.read()
- except IOError:
+ cached_module = _CachedModule.load(cached_module_filename)
+ except OSError as ex:
raise AnsibleError('A different worker process failed to create module file. '
- 'Look at traceback for that process for debugging information.')
- zipdata = to_text(zipdata, errors='surrogate_or_strict')
+                                'Look at the traceback for that process for debugging information.') from ex
o_interpreter, o_args = _extract_interpreter(b_module_data)
if o_interpreter is None:
@@ -1237,63 +1139,75 @@ def _find_module_utils(module_name, b_module_data, module_path, module_args, tas
rlimit_nofile = C.config.get_config_value('PYTHON_MODULE_RLIMIT_NOFILE', variables=task_vars)
if not isinstance(rlimit_nofile, int):
- rlimit_nofile = int(templar.template(rlimit_nofile))
+ rlimit_nofile = int(templar._engine.template(rlimit_nofile, options=TemplateOptions(value_for_omit=0)))
- if rlimit_nofile:
- rlimit = ANSIBALLZ_RLIMIT_TEMPLATE % dict(
- rlimit_nofile=rlimit_nofile,
- )
- else:
- rlimit = ''
+ if not isinstance(cached_module.metadata, ModuleMetadataV1):
+ raise NotImplementedError()
- coverage_config = os.environ.get('_ANSIBLE_COVERAGE_CONFIG')
+ params = dict(ANSIBLE_MODULE_ARGS=module_args,)
+ encoder = get_module_encoder(cached_module.metadata.serialization_profile, Direction.CONTROLLER_TO_MODULE)
- if coverage_config:
- coverage_output = os.environ['_ANSIBLE_COVERAGE_OUTPUT']
+ try:
+ encoded_params = json.dumps(params, cls=encoder)
+ except TypeError as ex:
+ raise AnsibleError(f'Failed to serialize arguments for the {module_name!r} module.') from ex
- if coverage_output:
- # Enable code coverage analysis of the module.
- # This feature is for internal testing and may change without notice.
- coverage = ANSIBALLZ_COVERAGE_TEMPLATE % dict(
- coverage_config=coverage_config,
- coverage_output=coverage_output,
- )
- else:
- # Verify coverage is available without importing it.
- # This will detect when a module would fail with coverage enabled with minimal overhead.
- coverage = ANSIBALLZ_COVERAGE_CHECK_TEMPLATE
- else:
- coverage = ''
+ extension_manager.source_mapping = cached_module.source_mapping
- output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
- zipdata=zipdata,
+ code = _get_ansiballz_code(shebang)
+ args = dict(
ansible_module=module_name,
module_fqn=remote_module_fqn,
- params=python_repred_params,
- shebang=shebang,
- coding=ENCODING_STRING,
+ profile=cached_module.metadata.serialization_profile,
date_time=date_time,
- coverage=coverage,
- rlimit=rlimit,
- )))
+ rlimit_nofile=rlimit_nofile,
+ params=encoded_params,
+ extensions=extension_manager.get_extensions(),
+ zip_data=to_text(cached_module.zip_data),
+ )
+
+ args_string = '\n'.join(f'{key}={value!r},' for key, value in args.items())
+
+ wrapper = f"""{code}
+
+
+if __name__ == "__main__":
+ _ansiballz_main(
+{args_string}
+)
+"""
+
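+    # The generated wrapper is plain Python source; each argument above is
+    # embedded via repr(), so no further templating happens on the target.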
+ output.write(to_bytes(wrapper))
+
+ module_metadata = cached_module.metadata
b_module_data = output.getvalue()
elif module_substyle == 'powershell':
+ module_metadata = ModuleMetadataV1(serialization_profile='legacy') # DTFIX-FUTURE: support serialization profiles for PowerShell modules
+
# Powershell/winrm don't actually make use of shebang so we can
# safely set this here. If we let the fallback code handle this
# it can fail in the presence of the UTF8 BOM commonly added by
# Windows text editors
- shebang = u'#!powershell'
+ shebang = '#!powershell'
# create the common exec wrapper payload and set that as the module_data
# bytes
b_module_data = ps_manifest._create_powershell_wrapper(
- b_module_data, module_path, module_args, environment,
- async_timeout, become, become_method, become_user, become_password,
- become_flags, module_substyle, task_vars, remote_module_fqn
+ name=remote_module_fqn,
+ module_data=b_module_data,
+ module_path=module_path,
+ module_args=module_args,
+ environment=environment,
+ async_timeout=async_timeout,
+ become_plugin=become_plugin,
+ substyle=module_substyle,
+ task_vars=task_vars,
+ profile=module_metadata.serialization_profile,
)
elif module_substyle == 'jsonargs':
- module_args_json = to_bytes(json.dumps(module_args, cls=AnsibleJSONEncoder, vault_to_text=True))
+ encoder = get_module_encoder('legacy', Direction.CONTROLLER_TO_MODULE)
+ module_args_json = to_bytes(json.dumps(module_args, cls=encoder))
# these strings could be included in a third-party module but
# officially they were included in the 'basic' snippet for new-style
@@ -1303,15 +1217,32 @@ def _find_module_utils(module_name, b_module_data, module_path, module_args, tas
python_repred_args = to_bytes(repr(module_args_json))
b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args)
- b_module_data = b_module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))
+ b_module_data = b_module_data.replace(
+ REPLACER_SELINUX,
+ to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS))) # type: ignore[attr-defined]
# The main event -- substitute the JSON args string into the module
b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json)
- facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='surrogate_or_strict')
+ syslog_facility = task_vars.get(
+ 'ansible_syslog_facility',
+ C.DEFAULT_SYSLOG_FACILITY) # type: ignore[attr-defined]
+ facility = b'syslog.' + to_bytes(syslog_facility, errors='surrogate_or_strict')
b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility)
- return (b_module_data, module_style, shebang)
+ module_metadata = ModuleMetadataV1(serialization_profile='legacy')
+ else:
+ module_metadata = ModuleMetadataV1(serialization_profile='legacy')
+
+ if not isinstance(module_metadata, ModuleMetadataV1):
+ raise NotImplementedError(type(module_metadata))
+
+ return _BuiltModule(
+ b_module_data=b_module_data,
+ module_style=module_style,
+ shebang=shebang,
+ serialization_profile=module_metadata.serialization_profile,
+ )
def _extract_interpreter(b_module_data):
@@ -1337,8 +1268,19 @@ def _extract_interpreter(b_module_data):
return interpreter, args
-def modify_module(module_name, module_path, module_args, templar, task_vars=None, module_compression='ZIP_STORED', async_timeout=0, become=False,
- become_method=None, become_user=None, become_password=None, become_flags=None, environment=None, remote_is_local=False):
+def modify_module(
+ *,
+ module_name: str,
+ module_path,
+ module_args,
+ templar,
+ task_vars=None,
+ module_compression='ZIP_STORED',
+ async_timeout=0,
+ become_plugin=None,
+ environment=None,
+ remote_is_local=False,
+) -> _BuiltModule:
"""
Used to insert chunks of code into modules before transfer rather than
doing regular python imports. This allows for more efficient transfer in
@@ -1367,13 +1309,30 @@ def modify_module(module_name, module_path, module_args, templar, task_vars=None
# read in the module source
b_module_data = f.read()
- (b_module_data, module_style, shebang) = _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression,
- async_timeout=async_timeout, become=become, become_method=become_method,
- become_user=become_user, become_password=become_password, become_flags=become_flags,
- environment=environment, remote_is_local=remote_is_local)
+ module_bits = _find_module_utils(
+ module_name=module_name,
+ b_module_data=b_module_data,
+ module_path=module_path,
+ module_args=module_args,
+ task_vars=task_vars,
+ templar=templar,
+ module_compression=module_compression,
+ async_timeout=async_timeout,
+ become_plugin=become_plugin,
+ environment=environment,
+ remote_is_local=remote_is_local,
+ )
+
+ b_module_data = module_bits.b_module_data
+ shebang = module_bits.shebang
- if module_style == 'binary':
- return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))
+ if module_bits.module_style == 'binary':
+ return _BuiltModule(
+ b_module_data=module_bits.b_module_data,
+ module_style=module_bits.module_style,
+ shebang=to_text(module_bits.shebang, nonstring='passthru'),
+ serialization_profile=module_bits.serialization_profile,
+ )
elif shebang is None:
interpreter, args = _extract_interpreter(b_module_data)
# No interpreter/shebang, assume a binary module?
@@ -1387,15 +1346,20 @@ def modify_module(module_name, module_path, module_args, templar, task_vars=None
if interpreter != new_interpreter:
b_lines[0] = to_bytes(shebang, errors='surrogate_or_strict', nonstring='passthru')
- if os.path.basename(interpreter).startswith(u'python'):
- b_lines.insert(1, b_ENCODING_STRING)
-
b_module_data = b"\n".join(b_lines)
- return (b_module_data, module_style, shebang)
+ return _BuiltModule(
+ b_module_data=b_module_data,
+ module_style=module_bits.module_style,
+ shebang=shebang,
+ serialization_profile=module_bits.serialization_profile,
+ )
+
+def _get_action_arg_defaults(action: str, task: Task, templar: TemplateEngine) -> dict[str, t.Any]:
+ action_groups = task._parent._play._action_groups
+ defaults = task.module_defaults
-def get_action_args_with_defaults(action, args, defaults, templar, action_groups=None):
# Get the list of groups that contain this action
if action_groups is None:
msg = (
@@ -1408,7 +1372,7 @@ def get_action_args_with_defaults(action, args, defaults, templar, action_groups
else:
group_names = action_groups.get(action, [])
- tmp_args = {}
+ tmp_args: dict[str, t.Any] = {}
module_defaults = {}
# Merge latest defaults into dict, since they are a list of dicts
@@ -1416,18 +1380,20 @@ def get_action_args_with_defaults(action, args, defaults, templar, action_groups
for default in defaults:
module_defaults.update(default)
- # module_defaults keys are static, but the values may be templated
- module_defaults = templar.template(module_defaults)
for default in module_defaults:
if default.startswith('group/'):
group_name = default.split('group/')[-1]
if group_name in group_names:
- tmp_args.update((module_defaults.get('group/%s' % group_name) or {}).copy())
+ tmp_args.update(templar.resolve_to_container(module_defaults.get(f'group/{group_name}', {})))
# handle specific action defaults
- tmp_args.update(module_defaults.get(action, {}).copy())
-
- # direct args override all
- tmp_args.update(args)
+ tmp_args.update(templar.resolve_to_container(module_defaults.get(action, {})))
return tmp_args
+
+
+def _apply_action_arg_defaults(action: str, task: Task, action_args: dict[str, t.Any], templar: Templar) -> dict[str, t.Any]:
+ args = _get_action_arg_defaults(action, task, templar._engine)
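+    # direct task args override any group/action defaults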
+ args.update(action_args)
+
+ return args
diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py
index e512b64b840..69d0b00b0e7 100644
--- a/lib/ansible/executor/play_iterator.py
+++ b/lib/ansible/executor/play_iterator.py
@@ -155,9 +155,6 @@ class PlayIterator:
setup_block.run_once = False
setup_task = Task(block=setup_block)
setup_task.action = 'gather_facts'
- # TODO: hardcoded resolution here, but should use actual resolution code in the end,
- # in case of 'legacy' mismatch
- setup_task.resolved_action = 'ansible.builtin.gather_facts'
setup_task.name = 'Gathering Facts'
setup_task.args = {}
@@ -255,7 +252,6 @@ class PlayIterator:
self.set_state_for_host(host.name, s)
display.debug("done getting next task for host %s" % host.name)
- display.debug(" ^ task is: %s" % task)
display.debug(" ^ state is: %s" % s)
return (s, task)
@@ -292,7 +288,7 @@ class PlayIterator:
if (gathering == 'implicit' and implied) or \
(gathering == 'explicit' and boolean(self._play.gather_facts, strict=False)) or \
- (gathering == 'smart' and implied and not (self._variable_manager._fact_cache.get(host.name, {}).get('_ansible_facts_gathered', False))):
+ (gathering == 'smart' and implied and not self._variable_manager._facts_gathered_for_host(host.name)):
# The setup block is always self._blocks[0], as we inject it
# during the play compilation in __init__ above.
setup_block = self._blocks[0]
@@ -450,8 +446,7 @@ class PlayIterator:
# skip implicit flush_handlers if there are no handlers notified
if (
task.implicit
- and task.action in C._ACTION_META
- and task.args.get('_raw_params', None) == 'flush_handlers'
+ and task._get_meta() == 'flush_handlers'
and (
# the state store in the `state` variable could be a nested state,
# notifications are always stored in the top level state, get it here
@@ -598,28 +593,22 @@ class PlayIterator:
if state.tasks_child_state:
state.tasks_child_state = self._insert_tasks_into_state(state.tasks_child_state, task_list)
else:
- target_block = state._blocks[state.cur_block].copy()
- before = target_block.block[:state.cur_regular_task]
- after = target_block.block[state.cur_regular_task:]
- target_block.block = before + task_list + after
+ target_block = state._blocks[state.cur_block].copy(exclude_tasks=True)
+ target_block.block[state.cur_regular_task:state.cur_regular_task] = task_list
state._blocks[state.cur_block] = target_block
elif state.run_state == IteratingStates.RESCUE:
if state.rescue_child_state:
state.rescue_child_state = self._insert_tasks_into_state(state.rescue_child_state, task_list)
else:
- target_block = state._blocks[state.cur_block].copy()
- before = target_block.rescue[:state.cur_rescue_task]
- after = target_block.rescue[state.cur_rescue_task:]
- target_block.rescue = before + task_list + after
+ target_block = state._blocks[state.cur_block].copy(exclude_tasks=True)
+ target_block.rescue[state.cur_rescue_task:state.cur_rescue_task] = task_list
state._blocks[state.cur_block] = target_block
elif state.run_state == IteratingStates.ALWAYS:
if state.always_child_state:
state.always_child_state = self._insert_tasks_into_state(state.always_child_state, task_list)
else:
- target_block = state._blocks[state.cur_block].copy()
- before = target_block.always[:state.cur_always_task]
- after = target_block.always[state.cur_always_task:]
- target_block.always = before + task_list + after
+ target_block = state._blocks[state.cur_block].copy(exclude_tasks=True)
+ target_block.always[state.cur_always_task:state.cur_always_task] = task_list
state._blocks[state.cur_block] = target_block
elif state.run_state == IteratingStates.HANDLERS:
state.handlers[state.cur_handlers_task:state.cur_handlers_task] = [h for b in task_list for h in b.block]
diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py
index 468c4bdc709..78329df342f 100644
--- a/lib/ansible/executor/playbook_executor.py
+++ b/lib/ansible/executor/playbook_executor.py
@@ -26,7 +26,7 @@ from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.loader import become_loader, connection_loader, shell_loader
from ansible.playbook import Playbook
-from ansible.template import Templar
+from ansible._internal._templating._engine import TemplateEngine
from ansible.utils.helpers import pct_to_int
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path, _get_collection_playbook_path
@@ -132,7 +132,7 @@ class PlaybookExecutor:
# Allow variables to be used in vars_prompt fields.
all_vars = self._variable_manager.get_vars(play=play)
- templar = Templar(loader=self._loader, variables=all_vars)
+ templar = TemplateEngine(loader=self._loader, variables=all_vars)
setattr(play, 'vars_prompt', templar.template(play.vars_prompt))
# FIXME: this should be a play 'sub object' like loop_control
@@ -158,7 +158,7 @@ class PlaybookExecutor:
# Post validate so any play level variables are templated
all_vars = self._variable_manager.get_vars(play=play)
- templar = Templar(loader=self._loader, variables=all_vars)
+ templar = TemplateEngine(loader=self._loader, variables=all_vars)
play.post_validate(templar)
if context.CLIARGS['syntax']:
diff --git a/lib/ansible/executor/powershell/async_watchdog.ps1 b/lib/ansible/executor/powershell/async_watchdog.ps1
index c2138e35914..391016de563 100644
--- a/lib/ansible/executor/powershell/async_watchdog.ps1
+++ b/lib/ansible/executor/powershell/async_watchdog.ps1
@@ -1,117 +1,112 @@
-# (c) 2018 Ansible Project
+# (c) 2025 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-param(
- [Parameter(Mandatory = $true)][System.Collections.IDictionary]$Payload
+using namespace Microsoft.Win32.SafeHandles
+using namespace System.Collections
+using namespace System.IO
+using namespace System.Management.Automation
+using namespace System.Text
+using namespace System.Threading
+
+[CmdletBinding()]
+param (
+ [Parameter(Mandatory)]
+ [string]
+ $ResultPath,
+
+ [Parameter(Mandatory)]
+ [int]
+ $Timeout,
+
+ [Parameter(Mandatory)]
+ [Int64]
+ $WaitHandleId
)
-# help with debugging errors as we don't have visibility of this running process
-trap {
- $watchdog_path = "$($env:TEMP)\ansible-async-watchdog-error-$(Get-Date -Format "yyyy-MM-ddTHH-mm-ss.ffffZ").txt"
- $error_msg = "Error while running the async exec wrapper`r`n$(Format-AnsibleException -ErrorRecord $_)"
- Set-Content -Path $watchdog_path -Value $error_msg
- break
+if (-not (Test-Path -LiteralPath $ResultPath)) {
+ throw "async result file at '$ResultPath' does not exist"
}
+$result = Get-Content -LiteralPath $ResultPath | ConvertFrom-Json | Convert-JsonObject
-$ErrorActionPreference = "Stop"
-
-Write-AnsibleLog "INFO - starting async_watchdog" "async_watchdog"
-
-# pop 0th action as entrypoint
-$payload.actions = $payload.actions[1..99]
-
-$actions = $Payload.actions
-$entrypoint = $payload.($actions[0])
-$entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($entrypoint))
-
-$resultfile_path = $payload.async_results_path
-$max_exec_time_sec = $payload.async_timeout_sec
-
-Write-AnsibleLog "INFO - deserializing existing result file args at: '$resultfile_path'" "async_watchdog"
-if (-not (Test-Path -Path $resultfile_path)) {
- $msg = "result file at '$resultfile_path' does not exist"
- Write-AnsibleLog "ERROR - $msg" "async_watchdog"
- throw $msg
-}
-$result_json = Get-Content -Path $resultfile_path -Raw
-Write-AnsibleLog "INFO - result file json is: $result_json" "async_watchdog"
-$result = ConvertFrom-AnsibleJson -InputObject $result_json
-
-Write-AnsibleLog "INFO - creating async runspace" "async_watchdog"
-$rs = [RunspaceFactory]::CreateRunspace()
-$rs.Open()
-
-Write-AnsibleLog "INFO - creating async PowerShell pipeline" "async_watchdog"
+# The intermediate script is used so that everything is set up as it normally
+# would be. The new Runspace is used to ensure we can stop it once the async
+# timeout is exceeded.
+$execInfo = Get-AnsibleExecWrapper -ManifestAsParam -IncludeScriptBlock
$ps = [PowerShell]::Create()
-$ps.Runspace = $rs
-
-# these functions are set in exec_wrapper
-Write-AnsibleLog "INFO - adding global functions to PowerShell pipeline script" "async_watchdog"
-$ps.AddScript($script:common_functions).AddStatement() > $null
-$ps.AddScript($script:wrapper_functions).AddStatement() > $null
-$function_params = @{
- Name = "common_functions"
- Value = $script:common_functions
- Scope = "script"
+$null = $ps.AddScript(@'
+[CmdletBinding()]
+param([ScriptBlock]$ScriptBlock, $Param)
+
+& $ScriptBlock.Ast.GetScriptBlock() @Param
+'@).AddParameters(
+ @{
+ ScriptBlock = $execInfo.ScriptInfo.ScriptBlock
+ Param = $execInfo.Parameters
+ })
+
+# It is important that we run with the invocation settings so the pipeline has
+# access to the same PSHost. The pipeline input also needs to be marked as
+# complete so the exec_wrapper isn't waiting for input indefinitely.
+$pipelineInput = [PSDataCollection[object]]::new()
+$pipelineInput.Complete()
+$invocationSettings = [PSInvocationSettings]@{
+ Host = $host
}
-$ps.AddCommand("Set-Variable").AddParameters($function_params).AddStatement() > $null
-
-Write-AnsibleLog "INFO - adding $($actions[0]) to PowerShell pipeline script" "async_watchdog"
-$ps.AddScript($entrypoint).AddArgument($payload) > $null
-
-Write-AnsibleLog "INFO - async job start, calling BeginInvoke()" "async_watchdog"
-$job_async_result = $ps.BeginInvoke()
-
-Write-AnsibleLog "INFO - waiting '$max_exec_time_sec' seconds for async job to complete" "async_watchdog"
-$job_async_result.AsyncWaitHandle.WaitOne($max_exec_time_sec * 1000) > $null
-$result.finished = 1
-if ($job_async_result.IsCompleted) {
- Write-AnsibleLog "INFO - async job completed, calling EndInvoke()" "async_watchdog"
-
- $job_output = $ps.EndInvoke($job_async_result)
- $job_error = $ps.Streams.Error
-
- Write-AnsibleLog "INFO - raw module stdout:`r`n$($job_output | Out-String)" "async_watchdog"
- if ($job_error) {
- Write-AnsibleLog "WARN - raw module stderr:`r`n$($job_error | Out-String)" "async_watchdog"
- }
-
- # write success/output/error to result object
- # TODO: cleanse leading/trailing junk
- try {
- Write-AnsibleLog "INFO - deserializing Ansible stdout" "async_watchdog"
- $module_result = ConvertFrom-AnsibleJson -InputObject $job_output
+# Signal async_wrapper that we are ready to start the job so it can stop waiting.
+$waitHandle = [SafeWaitHandle]::new([IntPtr]$WaitHandleId, $true)
+$waitEvent = [ManualResetEvent]::new($false)
+$waitEvent.SafeWaitHandle = $waitHandle
+$null = $waitEvent.Set()
+
+$jobOutput = $null
+$jobError = $null
+try {
+ $jobAsyncResult = $ps.BeginInvoke($pipelineInput, $invocationSettings, $null, $null)
+ $jobAsyncResult.AsyncWaitHandle.WaitOne($Timeout * 1000) > $null
+ $result.finished = $true
+
+ if ($jobAsyncResult.IsCompleted) {
+ $jobOutput = $ps.EndInvoke($jobAsyncResult)
+ $jobError = $ps.Streams.Error
+
+ # write success/output/error to result object
+ # TODO: cleanse leading/trailing junk
+ $moduleResult = $jobOutput | ConvertFrom-Json | Convert-JsonObject
# TODO: check for conflicting keys
- $result = $result + $module_result
- }
- catch {
- $result.failed = $true
- $result.msg = "failed to parse module output: $($_.Exception.Message)"
- # return output back to Ansible to help with debugging errors
- $result.stdout = $job_output | Out-String
- $result.stderr = $job_error | Out-String
+ $result = $result + $moduleResult
}
+ else {
+ # We can't call Stop() as pwsh won't respond if it is busy calling a .NET
+ # method. The process end will shut everything down instead.
+ $ps.BeginStop($null, $null) > $null
- $result_json = ConvertTo-Json -InputObject $result -Depth 99 -Compress
- Set-Content -Path $resultfile_path -Value $result_json
-
- Write-AnsibleLog "INFO - wrote output to $resultfile_path" "async_watchdog"
+ throw "timed out waiting for module completion"
+ }
}
-else {
- Write-AnsibleLog "ERROR - reached timeout on async job, stopping job" "async_watchdog"
- $ps.BeginStop($null, $null) > $null # best effort stop
-
- # write timeout to result object
+catch {
+ $exception = @(
+ "$_"
+ "$($_.InvocationInfo.PositionMessage)"
+ "+ CategoryInfo : $($_.CategoryInfo)"
+ "+ FullyQualifiedErrorId : $($_.FullyQualifiedErrorId)"
+ ""
+ "ScriptStackTrace:"
+ "$($_.ScriptStackTrace)"
+
+ if ($_.Exception.StackTrace) {
+ "$($_.Exception.StackTrace)"
+ }
+ ) -join ([Environment]::NewLine)
+
+ $result.exception = $exception
$result.failed = $true
- $result.msg = "timed out waiting for module completion"
- $result_json = ConvertTo-Json -InputObject $result -Depth 99 -Compress
- Set-Content -Path $resultfile_path -Value $result_json
-
- Write-AnsibleLog "INFO - wrote timeout to '$resultfile_path'" "async_watchdog"
+ $result.msg = "failure during async watchdog: $_"
+    # return any available output to Ansible to help with debugging errors
+ $result.stdout = $jobOutput | Out-String
+ $result.stderr = $jobError | Out-String
+}
+finally {
+ $resultJson = ConvertTo-Json -InputObject $result -Depth 99 -Compress
+ Set-Content -LiteralPath $ResultPath -Value $resultJson -Encoding UTF8
}
-
-# in the case of a hung pipeline, this will cause the process to stay alive until it's un-hung...
-#$rs.Close() | Out-Null
-
-Write-AnsibleLog "INFO - ending async_watchdog" "async_watchdog"
diff --git a/lib/ansible/executor/powershell/async_wrapper.ps1 b/lib/ansible/executor/powershell/async_wrapper.ps1
index dd5a9becc5b..dbd21e98f95 100644
--- a/lib/ansible/executor/powershell/async_wrapper.ps1
+++ b/lib/ansible/executor/powershell/async_wrapper.ps1
@@ -1,174 +1,225 @@
-# (c) 2018 Ansible Project
+# (c) 2025 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-param(
- [Parameter(Mandatory = $true)][System.Collections.IDictionary]$Payload
-)
-
-$ErrorActionPreference = "Stop"
+#AnsibleRequires -CSharpUtil Ansible._Async
-Write-AnsibleLog "INFO - starting async_wrapper" "async_wrapper"
+using namespace System.Collections
+using namespace System.Diagnostics
+using namespace System.IO
+using namespace System.IO.Pipes
+using namespace System.Text
+using namespace System.Threading
-if (-not $Payload.environment.ContainsKey("ANSIBLE_ASYNC_DIR")) {
- Write-AnsibleError -Message "internal error: the environment variable ANSIBLE_ASYNC_DIR is not set and is required for an async task"
- $host.SetShouldExit(1)
- return
-}
-$async_dir = [System.Environment]::ExpandEnvironmentVariables($Payload.environment.ANSIBLE_ASYNC_DIR)
-
-# calculate the result path so we can include it in the worker payload
-$jid = $Payload.async_jid
-$local_jid = $jid + "." + $pid
-
-$results_path = [System.IO.Path]::Combine($async_dir, $local_jid)
-
-Write-AnsibleLog "INFO - creating async results path at '$results_path'" "async_wrapper"
-
-$Payload.async_results_path = $results_path
-[System.IO.Directory]::CreateDirectory([System.IO.Path]::GetDirectoryName($results_path)) > $null
-
-# we use Win32_Process to escape the current process job, CreateProcess with a
-# breakaway flag won't work for psrp as the psrp process does not have breakaway
-# rights. Unfortunately we can't read/write to the spawned process as we can't
-# inherit the handles. We use a locked down named pipe to send the exec_wrapper
-# payload. Anonymous pipes won't work as the spawned process will not be a child
-# of the current one and will not be able to inherit the handles
-
-# pop the async_wrapper action so we don't get stuck in a loop and create new
-# exec_wrapper for our async process
-$Payload.actions = $Payload.actions[1..99]
-$payload_json = ConvertTo-Json -InputObject $Payload -Depth 99 -Compress
-
-#
-$exec_wrapper = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.exec_wrapper))
-$exec_wrapper += "`0`0`0`0" + $payload_json
-$payload_bytes = [System.Text.Encoding]::UTF8.GetBytes($exec_wrapper)
-$pipe_name = "ansible-async-$jid-$([guid]::NewGuid())"
-
-# template the async process command line with the payload details
-$bootstrap_wrapper = {
- # help with debugging errors as we loose visibility of the process output
- # from here on
- trap {
- $wrapper_path = "$($env:TEMP)\ansible-async-wrapper-error-$(Get-Date -Format "yyyy-MM-ddTHH-mm-ss.ffffZ").txt"
- $error_msg = "Error while running the async exec wrapper`r`n$($_ | Out-String)`r`n$($_.ScriptStackTrace)"
- Set-Content -Path $wrapper_path -Value $error_msg
- break
- }
+[CmdletBinding()]
+param (
+ [Parameter(Mandatory)]
+ [string]
+ $AsyncDir,
- &chcp.com 65001 > $null
-
- # store the pipe name and no. of bytes to read, these are populated before
- # before the process is created - do not remove or changed
- $pipe_name = ""
- $bytes_length = 0
-
- $input_bytes = New-Object -TypeName byte[] -ArgumentList $bytes_length
- $pipe = New-Object -TypeName System.IO.Pipes.NamedPipeClientStream -ArgumentList @(
- ".", # localhost
- $pipe_name,
- [System.IO.Pipes.PipeDirection]::In,
- [System.IO.Pipes.PipeOptions]::None,
- [System.Security.Principal.TokenImpersonationLevel]::Anonymous
- )
- try {
- $pipe.Connect()
- $pipe.Read($input_bytes, 0, $bytes_length) > $null
- }
- finally {
- $pipe.Close()
- }
- $exec = [System.Text.Encoding]::UTF8.GetString($input_bytes)
- $exec_parts = $exec.Split(@("`0`0`0`0"), 2, [StringSplitOptions]::RemoveEmptyEntries)
- Set-Variable -Name json_raw -Value $exec_parts[1]
- $exec = [ScriptBlock]::Create($exec_parts[0])
- &$exec
-}
+ [Parameter(Mandatory)]
+ [string]
+ $AsyncJid,
-$bootstrap_wrapper = $bootstrap_wrapper.ToString().Replace('$pipe_name = ""', "`$pipe_name = `"$pipe_name`"")
-$bootstrap_wrapper = $bootstrap_wrapper.Replace('$bytes_length = 0', "`$bytes_length = $($payload_bytes.Count)")
-$encoded_command = [System.Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($bootstrap_wrapper))
-$pwsh_path = "$env:SystemRoot\System32\WindowsPowerShell\v1.0\powershell.exe"
-$exec_args = "`"$pwsh_path`" -NonInteractive -NoProfile -ExecutionPolicy Bypass -EncodedCommand $encoded_command"
-
-# create a named pipe that is set to allow only the current user read access
-$current_user = ([Security.Principal.WindowsIdentity]::GetCurrent()).User
-$pipe_sec = New-Object -TypeName System.IO.Pipes.PipeSecurity
-$pipe_ar = New-Object -TypeName System.IO.Pipes.PipeAccessRule -ArgumentList @(
- $current_user,
- [System.IO.Pipes.PipeAccessRights]::Read,
- [System.Security.AccessControl.AccessControlType]::Allow
-)
-$pipe_sec.AddAccessRule($pipe_ar)
-
-Write-AnsibleLog "INFO - creating named pipe '$pipe_name'" "async_wrapper"
-$pipe = New-Object -TypeName System.IO.Pipes.NamedPipeServerStream -ArgumentList @(
- $pipe_name,
- [System.IO.Pipes.PipeDirection]::Out,
- 1,
- [System.IO.Pipes.PipeTransmissionMode]::Byte,
- [System.IO.Pipes.PipeOptions]::Asynchronous,
- 0,
- 0,
- $pipe_sec
+ [Parameter(Mandatory)]
+ [int]
+ $StartupTimeout
)
+Import-CSharpUtil -Name 'Ansible._Async.cs'
+
+$AsyncDir = [Environment]::ExpandEnvironmentVariables($AsyncDir)
+if (-not [Directory]::Exists($asyncDir)) {
+ $null = [Directory]::CreateDirectory($asyncDir)
+}
+
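+# Pre-declare everything the finally block below may need to dispose.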
+$parentProcessId = 0
+$parentProcessHandle = $stdoutReader = $stderrReader = $stdinPipe = $stdoutPipe = $stderrPipe = $asyncProcess = $waitHandle = $null
try {
- Write-AnsibleLog "INFO - creating async process '$exec_args'" "async_wrapper"
- $process = Invoke-CimMethod -ClassName Win32_Process -Name Create -Arguments @{CommandLine = $exec_args }
- $rc = $process.ReturnValue
-
- Write-AnsibleLog "INFO - return value from async process exec: $rc" "async_wrapper"
- if ($rc -ne 0) {
- $error_msg = switch ($rc) {
- 2 { "Access denied" }
- 3 { "Insufficient privilege" }
- 8 { "Unknown failure" }
- 9 { "Path not found" }
- 21 { "Invalid parameter" }
- default { "Other" }
+ $utf8 = [UTF8Encoding]::new($false)
+ $stdinPipe = [AnonymousPipeServerStream]::new([PipeDirection]::Out, [HandleInheritability]::Inheritable)
+ $stdoutPipe = [AnonymousPipeServerStream]::new([PipeDirection]::In, [HandleInheritability]::Inheritable)
+ $stderrPipe = [AnonymousPipeServerStream]::new([PipeDirection]::In, [HandleInheritability]::Inheritable)
+ $stdoutReader = [StreamReader]::new($stdoutPipe, $utf8, $false)
+ $stderrReader = [StreamReader]::new($stderrPipe, $utf8, $false)
+ $clientWaitHandle = $waitHandle = [Ansible._Async.AsyncUtil]::CreateInheritableEvent()
+
+ $stdinHandle = $stdinPipe.ClientSafePipeHandle
+ $stdoutHandle = $stdoutPipe.ClientSafePipeHandle
+ $stderrHandle = $stderrPipe.ClientSafePipeHandle
+
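+    # Windows PowerShell 5.1 ships as powershell.exe under $PSHome; PowerShell 7+ ships as pwsh.exe.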
+ $executable = if ($PSVersionTable.PSVersion -lt '6.0') {
+ 'powershell.exe'
+ }
+ else {
+ 'pwsh.exe'
+ }
+ $executablePath = Join-Path -Path $PSHome -ChildPath $executable
+
+    # We need to escape the job of the current process to allow the async
+    # process to outlive the Windows job. If the current process is not part
+    # of a job, or the job allows us to break away, we can spawn the process
+    # directly. Otherwise we use WMI Win32_Process.Create to create a process
+    # as our user outside the job and use that as the async process parent.
+    # The winrm and ssh connection plugins allow breaking away from the job
+    # but psrp does not.
+ if (-not [Ansible._Async.AsyncUtil]::CanCreateBreakawayProcess()) {
+        # We hide the console window and suspend the process so it doesn't run
+        # anything. We only need the process to be created outside the job,
+        # not for it to run.
+ $psi = New-CimInstance -ClassName Win32_ProcessStartup -ClientOnly -Property @{
+ CreateFlags = [uint32]4 # CREATE_SUSPENDED
+ ShowWindow = [uint16]0 # SW_HIDE
+ }
+ $procInfo = Invoke-CimMethod -ClassName Win32_Process -Name Create -Arguments @{
+ CommandLine = $executablePath
+ ProcessStartupInformation = $psi
+ }
+ $rc = $procInfo.ReturnValue
+ if ($rc -ne 0) {
+ $msg = switch ($rc) {
+ 2 { "Access denied" }
+ 3 { "Insufficient privilege" }
+ 8 { "Unknown failure" }
+ 9 { "Path not found" }
+ 21 { "Invalid parameter" }
+ default { "Other" }
+ }
+ throw "Failed to start async parent process: $rc $msg"
}
- throw "Failed to start async process: $rc ($error_msg)"
+
+    # WMI returns a UInt32; we want the signed equivalent of those bytes (e.g. 4294967295 becomes -1).
+ $parentProcessId = [Convert]::ToInt32(
+ [Convert]::ToString($procInfo.ProcessId, 16),
+ 16)
+
+ $parentProcessHandle = [Ansible._Async.AsyncUtil]::OpenProcessAsParent($parentProcessId)
+ $clientWaitHandle = [Ansible._Async.AsyncUtil]::DuplicateHandleToProcess($waitHandle, $parentProcessHandle)
+ $stdinHandle = [Ansible._Async.AsyncUtil]::DuplicateHandleToProcess($stdinHandle, $parentProcessHandle)
+ $stdoutHandle = [Ansible._Async.AsyncUtil]::DuplicateHandleToProcess($stdoutHandle, $parentProcessHandle)
+ $stderrHandle = [Ansible._Async.AsyncUtil]::DuplicateHandleToProcess($stderrHandle, $parentProcessHandle)
+ $stdinPipe.DisposeLocalCopyOfClientHandle()
+ $stdoutPipe.DisposeLocalCopyOfClientHandle()
+ $stderrPipe.DisposeLocalCopyOfClientHandle()
}
- $watchdog_pid = $process.ProcessId
- Write-AnsibleLog "INFO - created async process PID: $watchdog_pid" "async_wrapper"
- # populate initial results before we send the async data to avoid result race
+ $localJid = "$AsyncJid.$pid"
+ $resultsPath = [Path]::Combine($AsyncDir, $localJid)
+
+ $bootstrapWrapper = Get-AnsibleScript -Name bootstrap_wrapper.ps1
+ $execAction = Get-AnsibleExecWrapper -EncodeInputOutput
+
+ $execAction.Parameters.ActionParameters = @{
+ ResultPath = $resultsPath
+ WaitHandleId = [Int64]$clientWaitHandle.DangerousGetHandle()
+ }
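+    # The raw handle value lets the watchdog rebuild the inherited event (see
+    # async_watchdog.ps1) and signal once the async job has started.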
+ $execWrapper = @{
+ name = 'exec_wrapper-async.ps1'
+ script = $execAction.ScriptInfo.Script
+ params = $execAction.Parameters
+ } | ConvertTo-Json -Compress -Depth 99
+ $asyncInput = "$execWrapper`n`0`0`0`0`n$($execAction.InputData)"
+
+ $encCommand = [Convert]::ToBase64String([Encoding]::Unicode.GetBytes($bootstrapWrapper.Script))
+ $asyncCommand = "`"$executablePath`" -NonInteractive -NoProfile -ExecutionPolicy Bypass -EncodedCommand $encCommand"
+
+ $asyncProcess = [Ansible._Async.AsyncUtil]::CreateAsyncProcess(
+ $executablePath,
+ $asyncCommand,
+ $stdinHandle,
+ $stdoutHandle,
+ $stderrHandle,
+ $clientWaitHandle,
+ $parentProcessHandle,
+ $stdoutReader,
+ $stderrReader)
+
+ # We need to write the result file before the process is started to ensure
+ # it can read the file.
$result = @{
- started = 1
- finished = 0
- results_file = $results_path
- ansible_job_id = $local_jid
+ started = $true
+ finished = $false
+ results_file = $resultsPath
+ ansible_job_id = $localJid
_ansible_suppress_tmpdir_delete = $true
- ansible_async_watchdog_pid = $watchdog_pid
+ ansible_async_watchdog_pid = $asyncProcess.ProcessId
+ }
+ $resultJson = ConvertTo-Json -InputObject $result -Depth 99 -Compress
+ [File]::WriteAllText($resultsPath, $resultJson, $utf8)
+
+ if ($parentProcessHandle) {
+ [Ansible._Async.AsyncUtil]::CloseHandleInProcess($stdinHandle, $parentProcessHandle)
+ [Ansible._Async.AsyncUtil]::CloseHandleInProcess($stdoutHandle, $parentProcessHandle)
+ [Ansible._Async.AsyncUtil]::CloseHandleInProcess($stderrHandle, $parentProcessHandle)
+ [Ansible._Async.AsyncUtil]::CloseHandleInProcess($clientWaitHandle, $parentProcessHandle)
+ }
+ else {
+ $stdinPipe.DisposeLocalCopyOfClientHandle()
+ $stdoutPipe.DisposeLocalCopyOfClientHandle()
+ $stderrPipe.DisposeLocalCopyOfClientHandle()
+ }
+
+ [Ansible._Async.AsyncUtil]::ResumeThread($asyncProcess.Thread)
+
+    # If writing to the pipe fails, the process has already ended.
+ $procAlive = $true
+ $procIn = [StreamWriter]::new($stdinPipe, $utf8)
+ try {
+ $procIn.WriteLine($asyncInput)
+ $procIn.Flush()
+ $procIn.Dispose()
+ }
+ catch [IOException] {
+ $procAlive = $false
}
- Write-AnsibleLog "INFO - writing initial async results to '$results_path'" "async_wrapper"
- $result_json = ConvertTo-Json -InputObject $result -Depth 99 -Compress
- Set-Content $results_path -Value $result_json
-
- $np_timeout = $Payload.async_startup_timeout * 1000
- Write-AnsibleLog "INFO - waiting for async process to connect to named pipe for $np_timeout milliseconds" "async_wrapper"
- $wait_async = $pipe.BeginWaitForConnection($null, $null)
- $wait_async.AsyncWaitHandle.WaitOne($np_timeout) > $null
- if (-not $wait_async.IsCompleted) {
- $msg = "Ansible encountered a timeout while waiting for the async task to start and connect to the named"
- $msg += "pipe. This can be affected by the performance of the target - you can increase this timeout using"
- $msg += "WIN_ASYNC_STARTUP_TIMEOUT or just for this host using the win_async_startup_timeout hostvar if "
- $msg += "this keeps happening."
- throw $msg
+ if ($procAlive) {
+        # Wait for the process to signal that it has started the async task,
+        # or detect that it has ended early or timed out.
+ $waitTimespan = [TimeSpan]::FromSeconds($StartupTimeout)
+ $handleIdx = [WaitHandle]::WaitAny(
+ @(
+ [Ansible._Async.ManagedWaitHandle]::new($waitHandle),
+ [Ansible._Async.ManagedWaitHandle]::new($asyncProcess.Process)
+ ),
+ $waitTimespan)
+ if ($handleIdx -eq [WaitHandle]::WaitTimeout) {
+ $msg = -join @(
+ "Ansible encountered a timeout while waiting for the async task to start and signal it has started. "
+ "This can be affected by the performance of the target - you can increase this timeout using "
+ "WIN_ASYNC_STARTUP_TIMEOUT or just for this host using the ansible_win_async_startup_timeout hostvar "
+ "if this keeps happening."
+ )
+ throw $msg
+ }
+ $procAlive = $handleIdx -eq 0
}
- $pipe.EndWaitForConnection($wait_async)
- Write-AnsibleLog "INFO - writing exec_wrapper and payload to async process" "async_wrapper"
- $pipe.Write($payload_bytes, 0, $payload_bytes.Count)
- $pipe.Flush()
- $pipe.WaitForPipeDrain()
+ if ($procAlive) {
+ $resultJson
+ }
+ else {
+        # If the process ended before it signaled it was ready, we return the
+        # raw output and hope it contains an error.
+ Remove-Item -LiteralPath $resultsPath -ErrorAction Ignore
+
+ $stdout = $asyncProcess.StdoutReader.GetAwaiter().GetResult()
+ $stderr = $asyncProcess.StderrReader.GetAwaiter().GetResult()
+ $rc = [Ansible._Async.AsyncUtil]::GetProcessExitCode($asyncProcess.Process)
+
+ $host.UI.WriteLine($stdout)
+ Write-PowerShellClixmlStderr -Output $stderr
+ $host.SetShouldExit($rc)
+ }
}
finally {
- $pipe.Close()
+ if ($parentProcessHandle) { $parentProcessHandle.Dispose() }
+ if ($parentProcessId) {
+ Stop-Process -Id $parentProcessId -Force -ErrorAction Ignore
+ }
+ if ($stdoutReader) { $stdoutReader.Dispose() }
+ if ($stderrReader) { $stderrReader.Dispose() }
+ if ($stdinPipe) { $stdinPipe.Dispose() }
+ if ($stdoutPipe) { $stdoutPipe.Dispose() }
+ if ($stderrPipe) { $stderrPipe.Dispose() }
+ if ($asyncProcess) { $asyncProcess.Dispose() }
+ if ($waitHandle) { $waitHandle.Dispose() }
}
-
-Write-AnsibleLog "INFO - outputting initial async result: $result_json" "async_wrapper"
-Write-Output -InputObject $result_json
-Write-AnsibleLog "INFO - ending async_wrapper" "async_wrapper"
diff --git a/lib/ansible/executor/powershell/become_wrapper.ps1 b/lib/ansible/executor/powershell/become_wrapper.ps1
index cea42c128aa..6d2c250e58b 100644
--- a/lib/ansible/executor/powershell/become_wrapper.ps1
+++ b/lib/ansible/executor/powershell/become_wrapper.ps1
@@ -1,162 +1,125 @@
-# (c) 2018 Ansible Project
+# (c) 2025 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-param(
- [Parameter(Mandatory = $true)][System.Collections.IDictionary]$Payload
-)
-
-#Requires -Module Ansible.ModuleUtils.AddType
-#AnsibleRequires -CSharpUtil Ansible.AccessToken
#AnsibleRequires -CSharpUtil Ansible.Become
-$ErrorActionPreference = "Stop"
-
-Write-AnsibleLog "INFO - starting become_wrapper" "become_wrapper"
-
-Function Get-EnumValue($enum, $flag_type, $value) {
- $raw_enum_value = $value.Replace('_', '')
- try {
- $enum_value = [Enum]::Parse($enum, $raw_enum_value, $true)
- }
- catch [System.ArgumentException] {
- $valid_options = [Enum]::GetNames($enum) | ForEach-Object -Process {
- (($_ -creplace "(.)([A-Z][a-z]+)", '$1_$2') -creplace "([a-z0-9])([A-Z])", '$1_$2').ToString().ToLower()
- }
- throw "become_flags $flag_type value '$value' is not valid, valid values are: $($valid_options -join ", ")"
- }
- return $enum_value
-}
+using namespace System.Collections
+using namespace System.Diagnostics
+using namespace System.IO
+using namespace System.Management.Automation
+using namespace System.Management.Automation.Security
+using namespace System.Net
+using namespace System.Text
+
+[CmdletBinding()]
+param (
+ [Parameter()]
+ [AllowEmptyString()]
+ [string]
+ $BecomeUser,
+
+ [Parameter()]
+ [SecureString]
+ $BecomePassword,
+
+ [Parameter()]
+ [string]
+ $LogonType = 'Interactive',
+
+ [Parameter()]
+ [string]
+ $LogonFlags = 'WithProfile'
+)
-Function Get-BecomeFlag($flags) {
- $logon_type = [Ansible.AccessToken.LogonType]::Interactive
- $logon_flags = [Ansible.Become.LogonFlags]::WithProfile
-
- if ($null -eq $flags -or $flags -eq "") {
- $flag_split = @()
- }
- elseif ($flags -is [string]) {
- $flag_split = $flags.Split(" ")
- }
- else {
- throw "become_flags must be a string, was $($flags.GetType())"
- }
-
- foreach ($flag in $flag_split) {
- $split = $flag.Split("=")
- if ($split.Count -ne 2) {
- throw "become_flags entry '$flag' is in an invalid format, must be a key=value pair"
- }
- $flag_key = $split[0]
- $flag_value = $split[1]
- if ($flag_key -eq "logon_type") {
- $enum_details = @{
- enum = [Ansible.AccessToken.LogonType]
- flag_type = $flag_key
- value = $flag_value
- }
- $logon_type = Get-EnumValue @enum_details
- }
- elseif ($flag_key -eq "logon_flags") {
- $logon_flag_values = $flag_value.Split(",")
- $logon_flags = 0 -as [Ansible.Become.LogonFlags]
- foreach ($logon_flag_value in $logon_flag_values) {
- if ($logon_flag_value -eq "") {
- continue
- }
- $enum_details = @{
- enum = [Ansible.Become.LogonFlags]
- flag_type = $flag_key
- value = $logon_flag_value
- }
- $logon_flag = Get-EnumValue @enum_details
- $logon_flags = $logon_flags -bor $logon_flag
- }
- }
- else {
- throw "become_flags key '$flag_key' is not a valid runas flag, must be 'logon_type' or 'logon_flags'"
- }
- }
-
- return $logon_type, [Ansible.Become.LogonFlags]$logon_flags
-}
+Import-CSharpUtil -Name 'Ansible.AccessToken.cs', 'Ansible.Become.cs', 'Ansible.Process.cs'
-Write-AnsibleLog "INFO - loading C# become code" "become_wrapper"
-$add_type_b64 = $Payload.powershell_modules["Ansible.ModuleUtils.AddType"]
-$add_type = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($add_type_b64))
-New-Module -Name Ansible.ModuleUtils.AddType -ScriptBlock ([ScriptBlock]::Create($add_type)) | Import-Module > $null
-
-$new_tmp = [System.Environment]::ExpandEnvironmentVariables($Payload.module_args["_ansible_remote_tmp"])
-$access_def = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.csharp_utils["Ansible.AccessToken"]))
-$become_def = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.csharp_utils["Ansible.Become"]))
-$process_def = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.csharp_utils["Ansible.Process"]))
-Add-CSharpType -References $access_def, $become_def, $process_def -TempPath $new_tmp -IncludeDebugInfo
-
-$username = $Payload.become_user
-$password = $Payload.become_password
-# We need to set password to the value of NullString so a null password is preserved when crossing the .NET
-# boundary. If we pass $null it will automatically be converted to "" and we need to keep the distinction for
-# accounts that don't have a password and when someone wants to become without knowing the password.
-if ($null -eq $password) {
- $password = [NullString]::Value
+# We need to set password to the value of NullString so a null password is
+# preserved when crossing the .NET boundary. If we pass $null it will
+# automatically be converted to "" and we need to keep the distinction for
+# accounts that don't have a password and when someone wants to become without
+# knowing the password.
+$password = [NullString]::Value
+if ($null -ne $BecomePassword) {
+ $password = [NetworkCredential]::new("", $BecomePassword).Password
}
-try {
- $logon_type, $logon_flags = Get-BecomeFlag -flags $Payload.become_flags
+$executable = if ($PSVersionTable.PSVersion -lt '6.0') {
+ 'powershell.exe'
}
-catch {
- Write-AnsibleError -Message "internal error: failed to parse become_flags '$($Payload.become_flags)'" -ErrorRecord $_
- $host.SetShouldExit(1)
- return
+else {
+ 'pwsh.exe'
}
-Write-AnsibleLog "INFO - parsed become input, user: '$username', type: '$logon_type', flags: '$logon_flags'" "become_wrapper"
+$executablePath = Join-Path -Path $PSHome -ChildPath $executable
+
+$actionInfo = Get-AnsibleExecWrapper -EncodeInputOutput
+$bootstrapManifest = ConvertTo-Json -InputObject @{
+ n = "exec_wrapper-become-$([Guid]::NewGuid()).ps1"
+ s = $actionInfo.ScriptInfo.Script
+ p = $actionInfo.Parameters
+} -Depth 99 -Compress
# NB: CreateProcessWithTokenW commandline maxes out at 1024 chars, must
-# bootstrap via small wrapper which contains the exec_wrapper passed through the
-# stdin pipe. Cannot use 'powershell -' as the $ErrorActionPreference is always
-# set to Stop and cannot be changed. Also need to split the payload from the wrapper to prevent potentially
-# sensitive content from being logged by the scriptblock logger.
-$bootstrap_wrapper = {
- [Console]::InputEncoding = [Console]::OutputEncoding = New-Object System.Text.UTF8Encoding
- $ew = [System.Console]::In.ReadToEnd()
- $split_parts = $ew.Split(@("`0`0`0`0"), 2, [StringSplitOptions]::RemoveEmptyEntries)
- Set-Variable -Name json_raw -Value $split_parts[1]
- &([ScriptBlock]::Create($split_parts[0]))
+# bootstrap via a small wrapper that invokes the exec_wrapper. Strings are
+# used so the embedded code avoids sanity-test rules such as alias and
+# whitespace checks.
+[string]$command = @'
+$m=foreach($i in $input){
+ if([string]::Equals($i,"`0`0`0`0")){break}
+ $i
+}
+$m=$m|ConvertFrom-Json
+$p=@{}
+foreach($o in $m.p.PSObject.Properties){$p[$o.Name]=$o.Value}
+'@
+
+if ([SystemPolicy]::GetSystemLockdownPolicy() -eq 'Enforce') {
+    # If we started in CLM (Constrained Language Mode) we need to execute the
+    # script from a file so that PowerShell validates our exec_wrapper is
+    # trusted and will run in FLM (Full Language Mode).
+ $command += @'
+$n=Join-Path $env:TEMP $m.n
+$null=New-Item $n -Value $m.s -Type File -Force
+try{$input|&$n @p}
+finally{if(Test-Path -LiteralPath $n){Remove-Item -LiteralPath $n -Force}}
+'@
+}
+else {
+ # If we started in FLM we pass the script through stdin and execute in
+ # memory.
+ $command += @'
+$c=[System.Management.Automation.Language.Parser]::ParseInput($m.s,$m.n,[ref]$null,[ref]$null).GetScriptBlock()
+$input|&$c @p
+'@
}
-$exec_command = [System.Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($bootstrap_wrapper.ToString()))
-$lp_command_line = "powershell.exe -NonInteractive -NoProfile -ExecutionPolicy Bypass -EncodedCommand $exec_command"
-$lp_current_directory = $env:SystemRoot # TODO: should this be set to the become user's profile dir?
-
-# pop the become_wrapper action so we don't get stuck in a loop
-$Payload.actions = $Payload.actions[1..99]
-# we want the output from the exec_wrapper to be base64 encoded to preserve unicode chars
-$Payload.encoded_output = $true
-
-$payload_json = ConvertTo-Json -InputObject $Payload -Depth 99 -Compress
-# delimit the payload JSON from the wrapper to keep sensitive contents out of scriptblocks (which can be logged)
-$exec_wrapper = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.exec_wrapper))
-$exec_wrapper += "`0`0`0`0" + $payload_json
+# Strip out any leading or trailing whitespace and remove empty lines.
+$command = @(
+ ($command -split "\r?\n") |
+ ForEach-Object { $_.Trim() } |
+ Where-Object { -not [string]::IsNullOrWhiteSpace($_) }
+) -join "`n"
+
+$encCommand = [Convert]::ToBase64String([Encoding]::Unicode.GetBytes($command))
+# Shortened version of '-NonInteractive -NoProfile -ExecutionPolicy Bypass -EncodedCommand $encCommand'
+$commandLine = "$executable -noni -nop -ex Bypass -e $encCommand"
+$result = [Ansible.Become.BecomeUtil]::CreateProcessAsUser(
+ $BecomeUser,
+ $password,
+ $LogonFlags,
+ $LogonType,
+ $executablePath,
+ $commandLine,
+ $env:SystemRoot,
+ $null,
+ "$bootstrapManifest`n`0`0`0`0`n$($actionInfo.InputData)")
+
+$stdout = $result.StandardOut
try {
- Write-AnsibleLog "INFO - starting become process '$lp_command_line'" "become_wrapper"
- $result = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($username, $password, $logon_flags, $logon_type,
- $null, $lp_command_line, $lp_current_directory, $null, $exec_wrapper)
- Write-AnsibleLog "INFO - become process complete with rc: $($result.ExitCode)" "become_wrapper"
- $stdout = $result.StandardOut
- try {
- $stdout = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($stdout))
- }
- catch [FormatException] {
- # output wasn't Base64, ignore as it may contain an error message we want to pass to Ansible
- Write-AnsibleLog "WARN - become process stdout was not base64 encoded as expected: $stdout"
- }
-
- $host.UI.WriteLine($stdout)
- $host.UI.WriteErrorLine($result.StandardError.Trim())
- $host.SetShouldExit($result.ExitCode)
+ $stdout = [Encoding]::UTF8.GetString([Convert]::FromBase64String($stdout))
}
-catch {
- Write-AnsibleError -Message "internal error: failed to become user '$username'" -ErrorRecord $_
- $host.SetShouldExit(1)
+catch [FormatException] {
+ # output wasn't Base64, ignore as it may contain an error message we want to pass to Ansible
+ $null = $_
}
-Write-AnsibleLog "INFO - ending become_wrapper" "become_wrapper"
+$Host.UI.WriteLine($stdout)
+Write-PowerShellClixmlStderr -Output $result.StandardError
+$Host.SetShouldExit($result.ExitCode)
diff --git a/lib/ansible/executor/powershell/bootstrap_wrapper.ps1 b/lib/ansible/executor/powershell/bootstrap_wrapper.ps1
index 8e7141eb515..fec00b2772e 100644
--- a/lib/ansible/executor/powershell/bootstrap_wrapper.ps1
+++ b/lib/ansible/executor/powershell/bootstrap_wrapper.ps1
@@ -1,12 +1,49 @@
-try { [Console]::InputEncoding = [Console]::OutputEncoding = New-Object System.Text.UTF8Encoding } catch { $null = $_ }
-
-if ($PSVersionTable.PSVersion -lt [Version]"3.0") {
- '{"failed":true,"msg":"Ansible requires PowerShell v3.0 or newer"}'
+if ($PSVersionTable.PSVersion -lt [Version]"5.1") {
+ '{"failed":true,"msg":"Ansible requires PowerShell v5.1"}'
exit 1
}
-$exec_wrapper_str = $input | Out-String
-$split_parts = $exec_wrapper_str.Split(@("`0`0`0`0"), 2, [StringSplitOptions]::RemoveEmptyEntries)
-If (-not $split_parts.Length -eq 2) { throw "invalid payload" }
-Set-Variable -Name json_raw -Value $split_parts[1]
-& ([ScriptBlock]::Create($split_parts[0]))
+# First input is a JSON string with name/script/params of what to run. This
+# ends with a line of 4 null bytes and subsequent input is piped to the code
+# provided.
+$codeJson = foreach ($in in $input) {
+ if ([string]::Equals($in, "`0`0`0`0")) {
+ break
+ }
+ $in
+}
+$code = ConvertFrom-Json -InputObject $codeJson
+$splat = @{}
+foreach ($obj in $code.params.PSObject.Properties) {
+ $splat[$obj.Name] = $obj.Value
+}
+
+$filePath = $null
+try {
+ $cmd = if ($ExecutionContext.SessionState.LanguageMode -eq 'FullLanguage') {
+ # In FLM we can just invoke the code as a scriptblock without touching the
+ # disk.
+ [System.Management.Automation.Language.Parser]::ParseInput(
+ $code.script,
+ "$($code.name).ps1", # Name is used in stack traces.
+ [ref]$null,
+ [ref]$null).GetScriptBlock()
+ }
+ else {
+ # CLM needs to execute code from a file for it to run in FLM when trusted.
+ # Set-Item on 5.1 doesn't have a way to use UTF-8 without a BOM but luckily
+ # New-Item does that by default for both 5.1 and 7. We need to ensure we
+ # use UTF-8 without BOM so the signature is correct.
+ $filePath = Join-Path -Path $env:TEMP -ChildPath "$($code.name)-$(New-Guid).ps1"
+ $null = New-Item -Path $filePath -Value $code.script -ItemType File -Force
+
+ $filePath
+ }
+
+ $input | & $cmd @splat
+}
+finally {
+ if ($filePath -and (Test-Path -LiteralPath $filePath)) {
+ Remove-Item -LiteralPath $filePath -Force
+ }
+}
diff --git a/lib/ansible/executor/powershell/coverage_wrapper.ps1 b/lib/ansible/executor/powershell/coverage_wrapper.ps1
index 26cbe6603eb..d2421d7ae69 100644
--- a/lib/ansible/executor/powershell/coverage_wrapper.ps1
+++ b/lib/ansible/executor/powershell/coverage_wrapper.ps1
@@ -1,56 +1,83 @@
-# (c) 2019 Ansible Project
+# (c) 2025 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-param(
- [Parameter(Mandatory = $true)][System.Collections.IDictionary]$Payload
-)
+using namespace System.Collections.Generic
+using namespace System.IO
+using namespace System.Management.Automation
+using namespace System.Management.Automation.Language
+using namespace System.Reflection
+using namespace System.Text
-#AnsibleRequires -Wrapper module_wrapper
+param(
+ [Parameter(Mandatory)]
+ [string]
+ $ModuleName,
-$ErrorActionPreference = "Stop"
+ [Parameter(Mandatory)]
+ [string]
+ $OutputPath,
-Write-AnsibleLog "INFO - starting coverage_wrapper" "coverage_wrapper"
+ [Parameter(Mandatory)]
+ [string]
+ $PathFilter
+)
-# Required to be set for psrp to we can set a breakpoint in the remote runspace
-if ($PSVersionTable.PSVersion -ge [Version]'4.0') {
- $host.Runspace.Debugger.SetDebugMode([System.Management.Automation.DebugModes]::RemoteScript)
-}
+# Required to be set for psrp so we can set a breakpoint in the remote runspace
+$Host.Runspace.Debugger.SetDebugMode([DebugModes]::RemoteScript)
-Function New-CoverageBreakpoint {
+Function New-CoverageBreakpointsForScriptBlock {
Param (
- [String]$Path,
- [ScriptBlock]$Code,
- [String]$AnsiblePath
+ [Parameter(Mandatory)]
+ [string]
+ $ScriptName,
+
+ [Parameter(Mandatory)]
+ [ScriptBlockAst]
+ $ScriptBlockAst,
+
+ [Parameter(Mandatory)]
+ [String]
+ $AnsiblePath
)
- # It is quicker to pass in the code as a string instead of calling ParseFile as we already know the contents
$predicate = {
- $args[0] -is [System.Management.Automation.Language.CommandBaseAst]
+ $args[0] -is [CommandBaseAst]
}
- $script_cmds = $Code.Ast.FindAll($predicate, $true)
+ $scriptCmds = $ScriptBlockAst.FindAll($predicate, $true)
# Create an object that tracks the Ansible path of the file and the breakpoints that have been set in it
$info = [PSCustomObject]@{
Path = $AnsiblePath
- Breakpoints = [System.Collections.Generic.List`1[System.Management.Automation.Breakpoint]]@()
+ Breakpoints = [List[Breakpoint]]@()
+ }
+
+ # LineBreakpoint was only made public in PowerShell 6.0 so we need to use
+ # reflection to achieve the same thing in 5.1.
+ $lineCtor = if ($PSVersionTable.PSVersion -lt '6.0') {
+ [LineBreakpoint].GetConstructor(
+ [BindingFlags]'NonPublic, Instance',
+ $null,
+ [type[]]@([string], [int], [int], [scriptblock]),
+ $null)
+ }
+ else {
+ [LineBreakpoint]::new
}
# Keep track of lines that are already scanned. PowerShell can contains multiple commands in 1 line
- $scanned_lines = [System.Collections.Generic.HashSet`1[System.Int32]]@()
- foreach ($cmd in $script_cmds) {
- if (-not $scanned_lines.Add($cmd.Extent.StartLineNumber)) {
+ $scannedLines = [HashSet[int]]@()
+ foreach ($cmd in $scriptCmds) {
+ if (-not $scannedLines.Add($cmd.Extent.StartLineNumber)) {
continue
}
- # Do not add any -Action value, even if it is $null or {}. Doing so will balloon the runtime.
- $params = @{
- Script = $Path
- Line = $cmd.Extent.StartLineNumber
- Column = $cmd.Extent.StartColumnNumber
- }
- $info.Breakpoints.Add((Set-PSBreakpoint @params))
+ # Action is explicitly $null as providing one slows down the runtime quite dramatically.
+ $b = $lineCtor.Invoke(@($ScriptName, $cmd.Extent.StartLineNumber, $cmd.Extent.StartColumnNumber, $null))
+ $info.Breakpoints.Add($b)
}
+ [Runspace]::DefaultRunspace.Debugger.SetBreakpoints($info.Breakpoints)
+
$info
}
@@ -68,132 +95,61 @@ Function Compare-PathFilterPattern {
return $false
}
-$module_name = $Payload.module_args["_ansible_module_name"]
-Write-AnsibleLog "INFO - building coverage payload for '$module_name'" "coverage_wrapper"
+$actionInfo = Get-NextAnsibleAction
+$actionParams = $actionInfo.Parameters
-# A PS Breakpoint needs an actual path to work properly, we create a temp directory that will store the module and
-# module_util code during execution
-$temp_path = Join-Path -Path ([System.IO.Path]::GetTempPath()) -ChildPath "ansible-coverage-$([System.IO.Path]::GetRandomFileName())"
-Write-AnsibleLog "INFO - Creating temp path for coverage files '$temp_path'" "coverage_wrapper"
-New-Item -Path $temp_path -ItemType Directory > $null
-$breakpoint_info = [System.Collections.Generic.List`1[PSObject]]@()
-
-# Ensures we create files with UTF-8 encoding and a BOM. This is critical to force the powershell engine to read files
-# as UTF-8 and not as the system's codepage.
-$file_encoding = 'UTF8'
+# A PS Breakpoint needs a path to be associated with the ScriptBlock; luckily
+# Get-AnsibleScript does this for us.
+$breakpointInfo = @()
try {
- $scripts = [System.Collections.Generic.List`1[System.Object]]@($script:common_functions)
-
- $coverage_path_filter = $Payload.coverage.path_filter.Split(":", [StringSplitOptions]::RemoveEmptyEntries)
-
- # We need to track what utils have already been added to the script for loading. This is because the load
- # order is important and can have module_utils that rely on other utils.
- $loaded_utils = [System.Collections.Generic.HashSet`1[System.String]]@()
- $parse_util = {
- $util_name = $args[0]
- if (-not $loaded_utils.Add($util_name)) {
- return
- }
-
- $util_code = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.powershell_modules.$util_name))
- $util_sb = [ScriptBlock]::Create($util_code)
- $util_path = Join-Path -Path $temp_path -ChildPath "$($util_name).psm1"
-
- Write-AnsibleLog "INFO - Outputting module_util $util_name to temp file '$util_path'" "coverage_wrapper"
- Set-Content -LiteralPath $util_path -Value $util_code -Encoding $file_encoding
-
- $ansible_path = $Payload.coverage.module_util_paths.$util_name
- if ((Compare-PathFilterPattern -Patterns $coverage_path_filter -Path $ansible_path)) {
- $cov_params = @{
- Path = $util_path
- Code = $util_sb
- AnsiblePath = $ansible_path
+ $coveragePathFilter = $PathFilter.Split(":", [StringSplitOptions]::RemoveEmptyEntries)
+ $breakpointInfo = @(
+ foreach ($scriptName in @($ModuleName; $actionParams.PowerShellModules)) {
+ # We don't use -IncludeScriptBlock as the script might be untrusted
+ # and would run under CLM. While we recreate the ScriptBlock here it
+ # is only to get the AST with the statements and their line numbers
+ # needed to create the breakpoint info.
+ $scriptInfo = Get-AnsibleScript -Name $scriptName
+
+ if (Compare-PathFilterPattern -Patterns $coveragePathFilter -Path $scriptInfo.Path) {
+ $covParams = @{
+ ScriptName = $scriptInfo.Name
+ ScriptBlockAst = [ScriptBlock]::Create($scriptInfo.Script).Ast
+ AnsiblePath = $scriptInfo.Path
+ }
+ New-CoverageBreakpointsForScriptBlock @covParams
}
- $breakpoints = New-CoverageBreakpoint @cov_params
- $breakpoint_info.Add($breakpoints)
}
+ )
- if ($null -ne $util_sb.Ast.ScriptRequirements) {
- foreach ($required_util in $util_sb.Ast.ScriptRequirements.RequiredModules) {
- &$parse_util $required_util.Name
- }
- }
- Write-AnsibleLog "INFO - Adding util $util_name to scripts to run" "coverage_wrapper"
- $scripts.Add("Import-Module -Name '$util_path'")
- }
- foreach ($util in $Payload.powershell_modules.Keys) {
- &$parse_util $util
- }
-
- $module = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.module_entry))
- $module_path = Join-Path -Path $temp_path -ChildPath "$($module_name).ps1"
- Write-AnsibleLog "INFO - Ouputting module $module_name to temp file '$module_path'" "coverage_wrapper"
- Set-Content -LiteralPath $module_path -Value $module -Encoding $file_encoding
- $scripts.Add($module_path)
-
- $ansible_path = $Payload.coverage.module_path
- if ((Compare-PathFilterPattern -Patterns $coverage_path_filter -Path $ansible_path)) {
- $cov_params = @{
- Path = $module_path
- Code = [ScriptBlock]::Create($module)
- AnsiblePath = $Payload.coverage.module_path
- }
- $breakpoints = New-CoverageBreakpoint @cov_params
- $breakpoint_info.Add($breakpoints)
- }
-
- $variables = [System.Collections.ArrayList]@(@{ Name = "complex_args"; Value = $Payload.module_args; Scope = "Global" })
- $entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($payload.module_wrapper))
- $entrypoint = [ScriptBlock]::Create($entrypoint)
-
- $params = @{
- Scripts = $scripts
- Variables = $variables
- Environment = $Payload.environment
- ModuleName = $module_name
- }
- if ($breakpoint_info) {
- $params.Breakpoints = $breakpoint_info.Breakpoints
+ if ($breakpointInfo) {
+ $actionParams.Breakpoints = $breakpointInfo.Breakpoints
}
try {
- &$entrypoint @params
+ & $actionInfo.ScriptBlock @actionParams
}
finally {
# Processing here is kept to an absolute minimum to make sure each task runtime is kept as small as
# possible. Once all the tests have been run ansible-test will collect this info and process it locally in
# one go.
- Write-AnsibleLog "INFO - Creating coverage result output" "coverage_wrapper"
- $coverage_info = @{}
- foreach ($info in $breakpoint_info) {
- $coverage_info.($info.Path) = $info.Breakpoints | Select-Object -Property Line, HitCount
+ $coverageInfo = @{}
+ foreach ($info in $breakpointInfo) {
+ $coverageInfo[$info.Path] = $info.Breakpoints | Select-Object -Property Line, HitCount
}
- # The coverage.output value is a filename set by the Ansible controller. We append some more remote side
- # info to the filename to make it unique and identify the remote host a bit more.
- $ps_version = "$($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)"
- $coverage_output_path = "$($Payload.coverage.output)=powershell-$ps_version=coverage.$($env:COMPUTERNAME).$PID.$(Get-Random)"
- $code_cov_json = ConvertTo-Json -InputObject $coverage_info -Compress
+ $psVersion = "$($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)"
+ $coverageOutputPath = "$OutputPath=powershell-$psVersion=coverage.$($env:COMPUTERNAME).$PID.$(Get-Random)"
+ $codeCovJson = ConvertTo-Json -InputObject $coverageInfo -Compress
- Write-AnsibleLog "INFO - Outputting coverage json to '$coverage_output_path'" "coverage_wrapper"
# Ansible controller expects these files to be UTF-8 without a BOM, use .NET for this.
- $utf8_no_bom = New-Object -TypeName System.Text.UTF8Encoding -ArgumentList $false
- [System.IO.File]::WriteAllbytes($coverage_output_path, $utf8_no_bom.GetBytes($code_cov_json))
+ $utf8 = [UTF8Encoding]::new($false)
+ [File]::WriteAllText($coverageOutputPath, $codeCovJson, $utf8)
}
}
finally {
- try {
- if ($breakpoint_info) {
- foreach ($b in $breakpoint_info.Breakpoints) {
- Remove-PSBreakpoint -Breakpoint $b
- }
- }
- }
- finally {
- Write-AnsibleLog "INFO - Remove temp coverage folder '$temp_path'" "coverage_wrapper"
- Remove-Item -LiteralPath $temp_path -Force -Recurse
+ foreach ($b in $breakpointInfo.Breakpoints) {
+ Remove-PSBreakpoint -Breakpoint $b
}
}
-
-Write-AnsibleLog "INFO - ending coverage_wrapper" "coverage_wrapper"
diff --git a/lib/ansible/executor/powershell/exec_wrapper.ps1 b/lib/ansible/executor/powershell/exec_wrapper.ps1
index 4ecc1367c84..17eb2181b4b 100644
--- a/lib/ansible/executor/powershell/exec_wrapper.ps1
+++ b/lib/ansible/executor/powershell/exec_wrapper.ps1
@@ -1,238 +1,717 @@
-# (c) 2018 Ansible Project
+# (c) 2025 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+using namespace System.Collections
+using namespace System.Collections.Generic
+using namespace System.Diagnostics.CodeAnalysis
+using namespace System.IO
+using namespace System.Linq
+using namespace System.Management.Automation
+using namespace System.Management.Automation.Language
+using namespace System.Management.Automation.Security
+using namespace System.Reflection
+using namespace System.Security.Cryptography
+using namespace System.Text
+
+[SuppressMessageAttribute(
+ "PSUseCmdletCorrectly",
+ "",
+ Justification = "ConvertFrom-Json is being used in a steppable pipeline and works this way."
+)]
+[CmdletBinding()]
+param (
+ [Parameter(ValueFromPipeline)]
+ [string]
+ $InputObject,
+
+ [Parameter()]
+ [IDictionary]
+ $Manifest,
+
+ [Parameter()]
+ [switch]
+ $EncodeInputOutput,
+
+ [Parameter()]
+ [Version]
+ $MinOSVersion,
+
+ [Parameter()]
+ [Version]
+ $MinPSVersion,
+
+ [Parameter()]
+ [string]
+ $TempPath,
+
+ [Parameter()]
+ [PSObject]
+ $ActionParameters
+)
+
begin {
- $DebugPreference = "Continue"
- $ProgressPreference = "SilentlyContinue"
+ $DebugPreference = "SilentlyContinue"
$ErrorActionPreference = "Stop"
- Set-StrictMode -Version 2
-
- # common functions that are loaded in exec and module context, this is set
- # as a script scoped variable so async_watchdog and module_wrapper can
- # access the functions when creating their Runspaces
- $script:common_functions = {
- Function ConvertFrom-AnsibleJson {
- <#
- .SYNOPSIS
- Converts a JSON string to a Hashtable/Array in the fastest way
- possible. Unfortunately ConvertFrom-Json is still faster but outputs
- a PSCustomObject which is cumbersome for module consumption.
-
- .PARAMETER InputObject
- [String] The JSON string to deserialize.
- #>
- param(
- [Parameter(Mandatory = $true, Position = 0)][String]$InputObject
- )
-
- # we can use -AsHashtable to get PowerShell to convert the JSON to
- # a Hashtable and not a PSCustomObject. This was added in PowerShell
- # 6.0, fall back to a manual conversion for older versions
- $cmdlet = Get-Command -Name ConvertFrom-Json -CommandType Cmdlet
- if ("AsHashtable" -in $cmdlet.Parameters.Keys) {
- return , (ConvertFrom-Json -InputObject $InputObject -AsHashtable)
+ $ProgressPreference = "SilentlyContinue"
+
+ if ($PSCommandPath -and (Test-Path -LiteralPath $PSCommandPath)) {
+ Remove-Item -LiteralPath $PSCommandPath -Force
+ }
+
+ # Try and set the console encoding to UTF-8 allowing Ansible to read the
+ # output of the wrapper as UTF-8 bytes.
+ try {
+ [Console]::InputEncoding = [Console]::OutputEncoding = [UTF8Encoding]::new()
+ }
+ catch {
+ # PSRP will not have a console host so this will fail. The assignment
+ # below exists only to satisfy the empty catch sanity check.
+ $null = $_
+ }
+
+ if ($MinOSVersion) {
+ [version]$actualOSVersion = (Get-Item -LiteralPath $env:SystemRoot\System32\kernel32.dll).VersionInfo.ProductVersion
+
+ if ($actualOSVersion -lt $MinOSVersion) {
+ @{
+ failed = $true
+ msg = "This module cannot run on this OS as it requires a minimum version of $MinOSVersion, actual was $actualOSVersion"
+ } | ConvertTo-Json -Compress
+ $Host.SetShouldExit(1)
+ return
+ }
+ }
+
+ if ($MinPSVersion) {
+ if ($PSVersionTable.PSVersion -lt $MinPSVersion) {
+ @{
+ failed = $true
+ msg = "This module cannot run as it requires a minimum PowerShell version of $MinPSVersion, actual was ""$($PSVersionTable.PSVersion)"""
+ } | ConvertTo-Json -Compress
+ $Host.SetShouldExit(1)
+ return
+ }
+ }
+
+ # $Script:AnsibleManifest = @{} # Defined in process/end.
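+ # Tracks whether WDAC lockdown is enforced and the SHA256 hashes that
+ # signed hash lists mark as trusted or unsupported under CLM.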
+ $Script:AnsibleShouldConstrain = [SystemPolicy]::GetSystemLockdownPolicy() -eq 'Enforce'
+ $Script:AnsibleTrustedHashList = [HashSet[string]]::new([StringComparer]::OrdinalIgnoreCase)
+ $Script:AnsibleUnsupportedHashList = [HashSet[string]]::new([StringComparer]::OrdinalIgnoreCase)
+ $Script:AnsibleWrapperWarnings = [List[string]]::new()
+ $Script:AnsibleTempPath = @(
+ # Wrapper defined tmpdir
+ [Environment]::ExpandEnvironmentVariables($TempPath)
+ # Fallback to user's tmpdir
+ [Path]::GetTempPath()
+ # Should not happen but just in case use the current dir.
+ $pwd.Path
+ ) | Where-Object {
+ if (-not $_) {
+ return $false
+ }
+
+ try {
+ Test-Path -LiteralPath $_ -ErrorAction Ignore
+ }
+ catch {
+ # Access denied could cause Test-Path to throw an exception.
+ $false
+ }
+ } | Select-Object -First 1
+ $Script:AnsibleTempScripts = [List[string]]::new()
+ $Script:AnsibleClrFacadeSet = $false
+
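+ # Recursively converts the PSCustomObject graph emitted by ConvertFrom-Json
+ # into plain hashtables and arrays that are easier to work with.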
+ Function Convert-JsonObject {
+ param(
+ [Parameter(Mandatory, ValueFromPipeline)]
+ [AllowNull()]
+ [object]
+ $InputObject
+ )
+
+ process {
+ # Using the full type name is important as PSCustomObject is an
+ # alias for PSObject which all piped objects are.
+ if ($InputObject -is [System.Management.Automation.PSCustomObject]) {
+ $value = @{}
+ foreach ($prop in $InputObject.PSObject.Properties) {
+ $value[$prop.Name] = Convert-JsonObject -InputObject $prop.Value
+ }
+ $value
+ }
+ elseif ($InputObject -is [Array]) {
+ , @($InputObject | Convert-JsonObject)
}
else {
- # get the PSCustomObject and then manually convert from there
- $raw_obj = ConvertFrom-Json -InputObject $InputObject
+ $InputObject
+ }
+ }
+ }
- Function ConvertTo-Hashtable {
- param($InputObject)
+ Function Get-AnsibleScript {
+ [CmdletBinding()]
+ param (
+ [Parameter(Mandatory)]
+ [string]
+ $Name,
- if ($null -eq $InputObject) {
- return $null
- }
+ [Parameter()]
+ [switch]
+ $IncludeScriptBlock,
- if ($InputObject -is [PSCustomObject]) {
- $new_value = @{}
- foreach ($prop in $InputObject.PSObject.Properties.GetEnumerator()) {
- $new_value.($prop.Name) = (ConvertTo-Hashtable -InputObject $prop.Value)
- }
- return , $new_value
- }
- elseif ($InputObject -is [Array]) {
- $new_value = [System.Collections.ArrayList]@()
- foreach ($val in $InputObject) {
- $new_value.Add((ConvertTo-Hashtable -InputObject $val)) > $null
- }
- return , $new_value.ToArray()
- }
- else {
- return , $InputObject
- }
+ [Parameter()]
+ [switch]
+ $SkipHashCheck
+ )
+
+ if (-not $Script:AnsibleManifest.scripts.Contains($Name)) {
+ $err = [ErrorRecord]::new(
+ [Exception]::new("Could not find the script '$Name'."),
+ "ScriptNotFound",
+ [ErrorCategory]::ObjectNotFound,
+ $Name)
+ $PSCmdlet.ThrowTerminatingError($err)
+ }
+
+ $scriptInfo = $Script:AnsibleManifest.scripts[$Name]
+ $scriptBytes = [Convert]::FromBase64String($scriptInfo.script)
+ $scriptContents = [Encoding]::UTF8.GetString($scriptBytes)
+
+ $sbk = $null
+ if ($IncludeScriptBlock) {
+ $sbk = [Parser]::ParseInput(
+ $scriptContents,
+ $Name,
+ [ref]$null,
+ [ref]$null).GetScriptBlock()
+ }
+
+ $outputValue = [PSCustomObject]@{
+ Name = $Name
+ Script = $scriptContents
+ Path = $scriptInfo.path
+ ScriptBlock = $sbk
+ ShouldConstrain = $false
+ }
+
+ if (-not $Script:AnsibleShouldConstrain) {
+ $outputValue
+ return
+ }
+
+ if (-not $SkipHashCheck) {
+ $sha256 = [SHA256]::Create()
+ $scriptHash = [BitConverter]::ToString($sha256.ComputeHash($scriptBytes)).Replace("-", "")
+ $sha256.Dispose()
+
+ if ($Script:AnsibleUnsupportedHashList.Contains($scriptHash)) {
+ $err = [ErrorRecord]::new(
+ [Exception]::new("Provided script for '$Name' is marked as unsupported in CLM mode."),
+ "ScriptUnsupported",
+ [ErrorCategory]::SecurityError,
+ $Name)
+ $PSCmdlet.ThrowTerminatingError($err)
+ }
+ elseif ($Script:AnsibleTrustedHashList.Contains($scriptHash)) {
+ $outputValue
+ return
+ }
+ }
+
+ # If we have reached here we are running in a locked down environment
+ # and the script is not trusted in the signed hashlists. Check if it
+ # contains the authenticode signature and verify that using PowerShell.
+ # [SystemPolicy]::GetFilePolicyEnforcement(...) is a new API but only
+ # present in Server 2025+ so we need to rely on the known behaviour of
+ # Get-Command to fail with CommandNotFoundException if the script is
+ # not allowed to run.
+ $outputValue.ShouldConstrain = $true
+ if ($scriptContents -like "*`r`n# SIG # Begin signature block`r`n*") {
+ Set-WinPSDefaultFileEncoding
+
+ # If the script is manually signed we need to ensure the signature
+ # is valid and trusted by the OS policy.
+ # We must use '.ps1' so the ExternalScript WDAC check will apply.
+ $tmpFile = [Path]::Combine($Script:AnsibleTempPath, "ansible-tmp-$([Guid]::NewGuid()).ps1")
+ try {
+ [File]::WriteAllBytes($tmpFile, $scriptBytes)
+ $cmd = Get-Command -Name $tmpFile -CommandType ExternalScript -ErrorAction Stop
+
+ # Get-Command caches the file contents after loading which we
+ # use to verify it was not modified before the signature check.
+ $expectedScript = $cmd.OriginalEncoding.GetString($scriptBytes)
+ if ($expectedScript -ne $cmd.ScriptContents) {
+ $err = [ErrorRecord]::new(
+ [Exception]::new("Script has been modified during signature check."),
+ "ScriptModifiedTrusted",
+ [ErrorCategory]::SecurityError,
+ $Name)
+ $PSCmdlet.ThrowTerminatingError($err)
}
- return , (ConvertTo-Hashtable -InputObject $raw_obj)
- }
- }
-
- Function Format-AnsibleException {
- <#
- .SYNOPSIS
- Formats a PowerShell ErrorRecord to a string that's fit for human
- consumption.
-
- .NOTES
- Using Out-String can give us the first part of the exception but it
- also wraps the messages at 80 chars which is not ideal. We also
- append the ScriptStackTrace and the .NET StackTrace if present.
- #>
- param([System.Management.Automation.ErrorRecord]$ErrorRecord)
-
- $exception = @"
-$($ErrorRecord.ToString())
-$($ErrorRecord.InvocationInfo.PositionMessage)
- + CategoryInfo : $($ErrorRecord.CategoryInfo.ToString())
- + FullyQualifiedErrorId : $($ErrorRecord.FullyQualifiedErrorId.ToString())
-"@
- # module_common strip comments and empty newlines, need to manually
- # add a preceding newline using `r`n
- $exception += "`r`n`r`nScriptStackTrace:`r`n$($ErrorRecord.ScriptStackTrace)`r`n"
-
- # exceptions from C# will also have a StackTrace which we
- # append if found
- if ($null -ne $ErrorRecord.Exception.StackTrace) {
- $exception += "`r`n$($ErrorRecord.Exception.ToString())"
- }
-
- return $exception
- }
- }
- .$common_functions
-
- # common wrapper functions used in the exec wrappers, this is defined in a
- # script scoped variable so async_watchdog can pass them into the async job
- $script:wrapper_functions = {
- Function Write-AnsibleError {
- <#
- .SYNOPSIS
- Writes an error message to a JSON string in the format that Ansible
- understands. Also optionally adds an exception record if the
- ErrorRecord is passed through.
- #>
- param(
- [Parameter(Mandatory = $true)][String]$Message,
- [System.Management.Automation.ErrorRecord]$ErrorRecord = $null
- )
- $result = @{
- msg = $Message
- failed = $true
+
+ $outputValue.ShouldConstrain = $false
+ }
+ catch [CommandNotFoundException] {
+ $null = $null # No-op but satisfies the linter.
+ }
+ finally {
+ if (Test-Path -LiteralPath $tmpFile) {
+ Remove-Item -LiteralPath $tmpFile -Force
+ }
+ }
+ }
+
+ if ($outputValue.ShouldConstrain -and $IncludeScriptBlock) {
+ # If the script is untrusted and a scriptblock was requested we
+ # error out as the sbk would have run in FLM.
+ $err = [ErrorRecord]::new(
+ [Exception]::new("Provided script for '$Name' is not trusted to run."),
+ "ScriptNotTrusted",
+ [ErrorCategory]::SecurityError,
+ $Name)
+ $PSCmdlet.ThrowTerminatingError($err)
+ }
+ else {
+ $outputValue
+ }
+ }
+
+ Function Get-NextAnsibleAction {
+ [CmdletBinding()]
+ param ()
+
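+ # Multiple assignment pops the next action off the queue; the remaining
+ # actions become the new manifest action list.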
+ $action, $newActions = $Script:AnsibleManifest.actions
+ $Script:AnsibleManifest.actions = @($newActions | Select-Object)
+
+ $actionName = $action.name
+ $actionParams = $action.params
+ $actionScript = Get-AnsibleScript -Name $actionName -IncludeScriptBlock
+
+ foreach ($kvp in $action.secure_params.GetEnumerator()) {
+ if (-not $kvp.Value) {
+ continue
}
- if ($null -ne $ErrorRecord) {
- $result.msg += ": $($ErrorRecord.Exception.Message)"
- $result.exception = (Format-AnsibleException -ErrorRecord $ErrorRecord)
- }
- Write-Output -InputObject (ConvertTo-Json -InputObject $result -Depth 99 -Compress)
- }
-
- Function Write-AnsibleLog {
- <#
- .SYNOPSIS
- Used as a debugging tool to log events to a file as they run in the
- exec wrappers. By default this is a noop function but the $log_path
- can be manually set to enable it. Manually set ANSIBLE_EXEC_DEBUG as
- an env value on the Windows host that this is run on to enable.
- #>
- param(
- [Parameter(Mandatory = $true, Position = 0)][String]$Message,
- [Parameter(Position = 1)][String]$Wrapper
- )
-
- $log_path = $env:ANSIBLE_EXEC_DEBUG
- if ($log_path) {
- $log_path = [System.Environment]::ExpandEnvironmentVariables($log_path)
- $parent_path = [System.IO.Path]::GetDirectoryName($log_path)
- if (Test-Path -LiteralPath $parent_path -PathType Container) {
- $msg = "{0:u} - {1} - {2} - " -f (Get-Date), $pid, ([System.Security.Principal.WindowsIdentity]::GetCurrent().Name)
- if ($null -ne $Wrapper) {
- $msg += "$Wrapper - "
+
+ $name = $kvp.Key
+ $actionParams.$name = $kvp.Value | ConvertTo-SecureString -AsPlainText -Force
+ }
+
+ [PSCustomObject]@{
+ Name = $actionName
+ ScriptBlock = $actionScript.ScriptBlock
+ Parameters = $actionParams
+ }
+ }
+
+ Function Get-AnsibleExecWrapper {
+ [CmdletBinding()]
+ param (
+ [Parameter()]
+ [switch]
+ $ManifestAsParam,
+
+ [Parameter()]
+ [switch]
+ $EncodeInputOutput,
+
+ [Parameter()]
+ [switch]
+ $IncludeScriptBlock
+ )
+
+ $scriptInfo = Get-AnsibleScript -Name exec_wrapper.ps1 -IncludeScriptBlock:$IncludeScriptBlock
+ $params = @{
+ # TempPath may contain env vars that change based on the runtime
+ # environment. Ensure we use that and not the $script:AnsibleTempPath
+ # when starting the exec wrapper.
+ TempPath = $TempPath
+ EncodeInputOutput = $EncodeInputOutput.IsPresent
+ }
+
+ $inputData = $null
+ if ($ManifestAsParam) {
+ $params.Manifest = $Script:AnsibleManifest
+ }
+ else {
+ $inputData = ConvertTo-Json -InputObject $Script:AnsibleManifest -Depth 99 -Compress
+ if ($EncodeInputOutput) {
+ $inputData = [Convert]::ToBase64String([Encoding]::UTF8.GetBytes($inputData))
+ }
+ }
+
+ [PSCustomObject]@{
+ ScriptInfo = $scriptInfo
+ Parameters = $params
+ InputData = $inputData
+ }
+ }
+
+ Function Import-PowerShellUtil {
+ [CmdletBinding()]
+ param (
+ [Parameter(Mandatory)]
+ [string[]]
+ $Name
+ )
+
+ foreach ($moduleName in $Name) {
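+ # Each util is loaded as an in-memory dynamic module imported into the
+ # global scope so module code can call its functions directly.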
+ $moduleInfo = Get-AnsibleScript -Name $moduleName -IncludeScriptBlock
+ $moduleShortName = [Path]::GetFileNameWithoutExtension($moduleName)
+ $null = New-Module -Name $moduleShortName -ScriptBlock $moduleInfo.ScriptBlock |
+ Import-Module -Scope Global
+ }
+ }
+
+ Function Import-CSharpUtil {
+ [CmdletBinding()]
+ param (
+ [Parameter(Mandatory)]
+ [string[]]
+ $Name
+ )
+
+ Import-PowerShellUtil -Name Ansible.ModuleUtils.AddType.psm1
+
+ $isBasicUtil = $false
+ $csharpModules = foreach ($moduleName in $Name) {
+ $scriptInfo = Get-AnsibleScript -Name $moduleName
+
+ if ($scriptInfo.ShouldConstrain) {
+ throw "C# module util '$Name' is not trusted and cannot be loaded."
+ }
+ if ($moduleName -eq 'Ansible.Basic.cs') {
+ $isBasicUtil = $true
+ }
+
+ $scriptInfo.Script
+ }
+
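+ # Add-CSharpType expects an AnsibleModule-like object; this stand-in only
+ # exposes the Tmpdir property and Warn method that it uses.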
+ $fakeModule = [PSCustomObject]@{
+ Tmpdir = $Script:AnsibleTempPath
+ }
+ $warningFunc = [PSScriptMethod]::new('Warn', {
+ param($message)
+ $Script:AnsibleWrapperWarnings.Add($message)
+ })
+ $fakeModule.PSObject.Members.Add($warningFunc)
+ Add-CSharpType -References $csharpModules -AnsibleModule $fakeModule
+
+ if ($isBasicUtil) {
+ # Ansible.Basic.cs is a special case where we need to provide it
+ # with the wrapper warnings list so it injects it into the result.
+ [Ansible.Basic.AnsibleModule]::_WrapperWarnings = $Script:AnsibleWrapperWarnings
+ }
+ }
+
+ Function Import-SignedHashList {
+ [CmdletBinding()]
+ param (
+ [Parameter(Mandatory, ValueFromPipeline)]
+ [string]
+ $Name
+ )
+
+ process {
+ try {
+ # We skip the hash check to ensure we verify based on the
+ # authenticode signature and not whether it's trusted by an
+ # existing signed hash list.
+ $scriptInfo = Get-AnsibleScript -Name $Name -SkipHashCheck
+ if ($scriptInfo.ShouldConstrain) {
+ throw "script is not signed or not trusted to run."
+ }
+
+ $hashListAst = [Parser]::ParseInput(
+ $scriptInfo.Script,
+ $Name,
+ [ref]$null,
+ [ref]$null)
+ $manifestAst = $hashListAst.Find({ $args[0] -is [HashtableAst] }, $false)
+ if ($null -eq $manifestAst) {
+ throw "expecting a single hashtable in the signed manifest."
+ }
+
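+ # SafeGetValue() evaluates the hashtable literal without executing any
+ # code embedded in the manifest.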
+ $out = $manifestAst.SafeGetValue()
+ if (-not $out.Contains('Version')) {
+ throw "expecting hash list to contain 'Version' key."
+ }
+ if ($out.Version -ne 1) {
+ throw "unsupported hash list Version $($out.Version), expecting 1."
+ }
+
+ if (-not $out.Contains('HashList')) {
+ throw "expecting hash list to contain 'HashList' key."
+ }
+
+ $out.HashList | ForEach-Object {
+ if ($_ -isnot [hashtable] -or -not $_.ContainsKey('Hash') -or $_.Hash -isnot [string] -or $_.Hash.Length -ne 64) {
+ throw "expecting hash list to contain hashtable with Hash key with a value of a SHA256 strings."
}
- $msg += $Message + "`r`n"
- $msg_bytes = [System.Text.Encoding]::UTF8.GetBytes($msg)
- $fs = [System.IO.File]::Open($log_path, [System.IO.FileMode]::Append,
- [System.IO.FileAccess]::Write, [System.IO.FileShare]::ReadWrite)
- try {
- $fs.Write($msg_bytes, 0, $msg_bytes.Length)
+ if ($_.Mode -eq 'Trusted') {
+ $null = $Script:AnsibleTrustedHashList.Add($_.Hash)
+ }
+ elseif ($_.Mode -eq 'Unsupported') {
+ # Allows us to provide a better error when trying to run
+ # something in CLM that is marked as unsupported.
+ $null = $Script:AnsibleUnsupportedHashList.Add($_.Hash)
}
- finally {
- $fs.Close()
+ else {
+ throw "expecting hash list entry for $($_.Hash) to contain a mode of 'Trusted' or 'Unsupported' but got '$($_.Mode)'."
}
}
}
+ catch {
+ $_.ErrorDetails = [ErrorDetails]::new("Failed to process signed manifest '$Name': $_")
+ $PSCmdlet.WriteError($_)
+ }
}
}
- .$wrapper_functions
- # only init and stream in $json_raw if it wasn't set by the enclosing scope
- if (-not $(Get-Variable "json_raw" -ErrorAction SilentlyContinue)) {
- $json_raw = ''
+ Function New-TempAnsibleFile {
+ [OutputType([string])]
+ [CmdletBinding()]
+ param (
+ [Parameter(Mandatory)]
+ [string]
+ $FileName,
+
+ [Parameter(Mandatory)]
+ [string]
+ $Content
+ )
+
+ $name = [Path]::GetFileNameWithoutExtension($FileName)
+ $ext = [Path]::GetExtension($FileName)
+ $newName = "$($name)-$([Guid]::NewGuid())$ext"
+
+ $path = Join-Path -Path $Script:AnsibleTempPath $newName
+ Set-WinPSDefaultFileEncoding
+ [File]::WriteAllText($path, $Content, [UTF8Encoding]::new($false))
+
+ $path
}
-} process {
- $json_raw += [String]$input
-} end {
- Write-AnsibleLog "INFO - starting exec_wrapper" "exec_wrapper"
- if (-not $json_raw) {
- Write-AnsibleError -Message "internal error: no input given to PowerShell exec wrapper"
- exit 1
+
+ Function Set-WinPSDefaultFileEncoding {
+ [CmdletBinding()]
+ param ()
+
+ # WinPS defaults to the locale encoding when loading a script from the
+ # file path but in Ansible we expect it to always be UTF-8 without a
+ # BOM. This lazily sets an internal field so WinPS reads it as UTF-8.
+ # If we don't do this then scripts saved as UTF-8 on the Ansible
+ # controller will not run as expected.
+ if ($PSVersionTable.PSVersion -lt '6.0' -and -not $Script:AnsibleClrFacadeSet) {
+ $clrFacade = [PSObject].Assembly.GetType('System.Management.Automation.ClrFacade')
+ $defaultEncodingField = $clrFacade.GetField('_defaultEncoding', [BindingFlags]'NonPublic, Static')
+ $defaultEncodingField.SetValue($null, [UTF8Encoding]::new($false))
+ $Script:AnsibleClrFacadeSet = $true
+ }
+ }
+
+ Function Write-AnsibleErrorJson {
+ [CmdletBinding()]
+ param (
+ [Parameter(Mandatory)]
+ [ErrorRecord]
+ $ErrorRecord,
+
+ [Parameter()]
+ [string]
+ $Message = "failure during exec_wrapper"
+ )
+
+ $exception = @(
+ "$ErrorRecord"
+ "$($ErrorRecord.InvocationInfo.PositionMessage)"
+ "+ CategoryInfo : $($ErrorRecord.CategoryInfo)"
+ "+ FullyQualifiedErrorId : $($ErrorRecord.FullyQualifiedErrorId)"
+ ""
+ "ScriptStackTrace:"
+ "$($ErrorRecord.ScriptStackTrace)"
+
+ if ($ErrorRecord.Exception.StackTrace) {
+ "$($ErrorRecord.Exception.StackTrace)"
+ }
+ ) -join ([Environment]::NewLine)
+
+ @{
+ failed = $true
+ msg = "${Message}: $ErrorRecord"
+ exception = $exception
+ } | ConvertTo-Json -Compress
+ $host.SetShouldExit(1)
}
- Write-AnsibleLog "INFO - converting json raw to a payload" "exec_wrapper"
- $payload = ConvertFrom-AnsibleJson -InputObject $json_raw
- $payload.module_args._ansible_exec_wrapper_warnings = [System.Collections.Generic.List[string]]@()
+ Function Write-PowerShellClixmlStderr {
+ [CmdletBinding()]
+ param (
+ [Parameter(Mandatory)]
+ [AllowEmptyString()]
+ [string]
+ $Output
+ )
+
+ if (-not $Output) {
+ return
+ }
- # TODO: handle binary modules
- # TODO: handle persistence
+ # -EncodedCommand in WinPS will output CLIXML to stderr. This attempts to parse
+ # it into a human readable format otherwise it'll just output the raw CLIXML.
+ $wroteStderr = $false
+ if ($Output.StartsWith('#< CLIXML')) {
+ $clixml = $Output -split "\r?\n"
+ if ($clixml.Count -eq 2) {
+ try {
+ # PSSerializer.Deserialize doesn't tell us what stream each record
+ # is for so we get the S attribute manually.
+ $streams = @(([xml]$clixml[1]).Objs.GetEnumerator() | ForEach-Object { $_.S })
+ $objects = @([PSSerializer]::Deserialize($clixml[1]))
- if ($payload.min_os_version) {
- $min_os_version = [Version]$payload.min_os_version
- # Environment.OSVersion.Version is deprecated and may not return the
- # right version
- $actual_os_version = [Version](Get-Item -Path $env:SystemRoot\System32\kernel32.dll).VersionInfo.ProductVersion
+ for ($i = 0; $i -lt $objects.Count; $i++) {
+ $msg = $objects[$i]
+ if ($msg -isnot [string] -or $streams.Length -le $i) {
+ continue
+ }
- Write-AnsibleLog "INFO - checking if actual os version '$actual_os_version' is less than the min os version '$min_os_version'" "exec_wrapper"
- if ($actual_os_version -lt $min_os_version) {
- $msg = "internal error: This module cannot run on this OS as it requires a minimum version of $min_os_version, actual was $actual_os_version"
- Write-AnsibleError -Message $msg
- exit 1
+ # Don't use TrimEnd() so that only the final newline is removed
+ if ($msg.EndsWith([Environment]::NewLine)) {
+ $msg = $msg.Substring(0, $msg.Length - [Environment]::NewLine.Length)
+ }
+ $stream = $streams[$i]
+ switch ($stream) {
+ 'error' { $host.UI.WriteErrorLine($msg) }
+ 'debug' { $host.UI.WriteDebugLine($msg) }
+ 'verbose' { $host.UI.WriteVerboseLine($msg) }
+ 'warning' { $host.UI.WriteWarningLine($msg) }
+ }
+ }
+ $wroteStderr = $true
+ }
+ catch {
+ $null = $_
+ }
+ }
+ }
+ if (-not $wroteStderr) {
+ $host.UI.WriteErrorLine($Output.TrimEnd())
}
}
- if ($payload.min_ps_version) {
- $min_ps_version = [Version]$payload.min_ps_version
- $actual_ps_version = $PSVersionTable.PSVersion
- Write-AnsibleLog "INFO - checking if actual PS version '$actual_ps_version' is less than the min PS version '$min_ps_version'" "exec_wrapper"
- if ($actual_ps_version -lt $min_ps_version) {
- $msg = "internal error: This module cannot run as it requires a minimum PowerShell version of $min_ps_version, actual was $actual_ps_version"
- Write-AnsibleError -Message $msg
- exit 1
+ # To handle optional input for the incoming manifest and optional input to
+ # the subsequent action, this setup step may run in the begin, process, or
+ # end block depending on when the manifest becomes available.
+ $jsonPipeline = $null
+ $actionPipeline = $null
+ $setupManifest = {
+ [CmdletBinding()]
+ param (
+ [Parameter()]
+ [switch]
+ $ExpectingInput
+ )
+
+ if ($jsonPipeline) {
+ $Script:AnsibleManifest = $jsonPipeline.End()[0]
+ $jsonPipeline.Dispose()
+ $jsonPipeline = $null
+ }
+ else {
+ $Script:AnsibleManifest = $Manifest
+ }
+
+ if ($Script:AnsibleShouldConstrain) {
+ $Script:AnsibleManifest.signed_hashlist | Import-SignedHashList
+ }
+
+ $actionInfo = Get-NextAnsibleAction
+ $actionParams = $actionInfo.Parameters
+
+ if ($ActionParameters) {
+ foreach ($prop in $ActionParameters.PSObject.Properties) {
+ $actionParams[$prop.Name] = $prop.Value
+ }
+ }
+
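+ # A steppable pipeline lets the process block stream stdin records into
+ # the action one at a time as they arrive.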
+ $actionPipeline = { & $actionInfo.ScriptBlock @actionParams }.GetSteppablePipeline()
+ $actionPipeline.Begin($ExpectingInput)
+ if (-not $ExpectingInput) {
+ $null = $actionPipeline.Process()
}
}
- # pop 0th action as entrypoint
- $action = $payload.actions[0]
- Write-AnsibleLog "INFO - running action $action" "exec_wrapper"
+ try {
+ if ($Manifest) {
+ # If the manifest was provided through the parameter, we can start the
+ # action pipeline and all subsequent input (if any) will be sent to the
+ # action.
+ # It is important that $setupManifest is called by dot sourcing or
+ # else the pipelines started in it lose access to all parent scopes.
+ # https://github.com/PowerShell/PowerShell/issues/17868
+ . $setupManifest -ExpectingInput:$MyInvocation.ExpectingInput
+ }
+ else {
+ # Otherwise the first part of the input is the manifest json with the
+ # chance for extra data afterwards.
+ $jsonPipeline = { ConvertFrom-Json | Convert-JsonObject }.GetSteppablePipeline()
+ $jsonPipeline.Begin($true)
+ }
+ }
+ catch {
+ Write-AnsibleErrorJson -ErrorRecord $_
+ }
+}
- $entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($payload.($action)))
- $entrypoint = [ScriptBlock]::Create($entrypoint)
- # so we preserve the formatting and don't fall prey to locale issues, some
- # wrappers want the output to be in base64 form, we store the value here in
- # case the wrapper changes the value when they create a payload for their
- # own exec_wrapper
- $encoded_output = $payload.encoded_output
+process {
+ try {
+ if ($actionPipeline) {
+ # We received our manifest and started the action pipeline, redirect
+ # all further input to that pipeline.
+ $null = $actionPipeline.Process($InputObject)
+ }
+ elseif ([string]::Equals($InputObject, "`0`0`0`0")) {
+ # Special marker used to indicate all subsequent input is for the
+ # action. Set up that pipeline and finalise the manifest.
+ . $setupManifest -ExpectingInput
+ }
+ elseif ($jsonPipeline) {
+ # Data is for the JSON manifest, decode if needed.
+ if ($EncodeInputOutput) {
+ $jsonPipeline.Process([Encoding]::UTF8.GetString([Convert]::FromBase64String($InputObject)))
+ }
+ else {
+ $jsonPipeline.Process($InputObject)
+ }
+ }
+ }
+ catch {
+ Write-AnsibleErrorJson -ErrorRecord $_
+ }
+}
+end {
try {
- $output = &$entrypoint -Payload $payload
- if ($encoded_output -and $null -ne $output) {
- $b64_output = [System.Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes($output))
- Write-Output -InputObject $b64_output
+ if ($jsonPipeline) {
+ # Only manifest input was received, process it now and start the
+ # action pipeline with no input being provided.
+ . $setupManifest
+ }
+
+ $out = $actionPipeline.End()
+ if ($EncodeInputOutput) {
+ [Convert]::ToBase64String([Encoding]::UTF8.GetBytes($out))
}
else {
- $output
+ $out
}
}
catch {
- Write-AnsibleError -Message "internal error: failed to run exec_wrapper action $action" -ErrorRecord $_
- exit 1
+ Write-AnsibleErrorJson -ErrorRecord $_
+ }
+ finally {
+ $actionPipeline.Dispose()
+ if ($Script:AnsibleTempScripts) {
+ Remove-Item -LiteralPath $Script:AnsibleTempScripts -Force -ErrorAction Ignore
+ }
}
- Write-AnsibleLog "INFO - ending exec_wrapper" "exec_wrapper"
}
diff --git a/lib/ansible/executor/powershell/module_manifest.py b/lib/ansible/executor/powershell/module_manifest.py
index da69c9dacb5..94bb8fd9d22 100644
--- a/lib/ansible/executor/powershell/module_manifest.py
+++ b/lib/ansible/executor/powershell/module_manifest.py
@@ -4,38 +4,67 @@
from __future__ import annotations
import base64
+import dataclasses
import errno
import json
import os
import pkgutil
import secrets
import re
+import typing as t
+
from importlib import import_module
from ansible.module_utils.compat.version import LooseVersion
from ansible import constants as C
-from ansible.errors import AnsibleError
-from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.module_utils.common.json import Direction, get_module_encoder
+from ansible.errors import AnsibleError, AnsibleFileNotFound
+from ansible.module_utils.common.text.converters import to_bytes, to_text
+from ansible.plugins.become import BecomeBase
+from ansible.plugins.become.runas import BecomeModule as RunasBecomeModule
from ansible.plugins.loader import ps_module_utils_loader
-from ansible.utils.collection_loader import resource_from_fqcr
+
+
+@dataclasses.dataclass(frozen=True)
+class _ExecManifest:
+ scripts: dict[str, _ScriptInfo] = dataclasses.field(default_factory=dict)
+ actions: list[_ManifestAction] = dataclasses.field(default_factory=list)
+ signed_hashlist: list[str] = dataclasses.field(default_factory=list)
+
+
+@dataclasses.dataclass(frozen=True, kw_only=True)
+class _ScriptInfo:
+ content: dataclasses.InitVar[bytes]
+ path: str
+ script: str = dataclasses.field(init=False)
+
+ def __post_init__(self, content: bytes) -> None:
+ object.__setattr__(self, 'script', base64.b64encode(content).decode())
+
+
+@dataclasses.dataclass(frozen=True, kw_only=True)
+class _ManifestAction:
+ name: str
+ params: dict[str, object] = dataclasses.field(default_factory=dict)
+ secure_params: dict[str, object] = dataclasses.field(default_factory=dict)
class PSModuleDepFinder(object):
- def __init__(self):
+ def __init__(self) -> None:
# This is also used by validate-modules to get a module's required utils in base and a collection.
- self.ps_modules = dict()
- self.exec_scripts = dict()
+ self.scripts: dict[str, _ScriptInfo] = {}
+ self.signed_hashlist: set[str] = set()
+
+ if builtin_hashlist := _get_powershell_signed_hashlist():
+ self.signed_hashlist.add(builtin_hashlist.path)
+ self.scripts[builtin_hashlist.path] = builtin_hashlist
- # by defining an explicit dict of cs utils and where they are used, we
- # can potentially save time by not adding the type multiple times if it
- # isn't needed
- self.cs_utils_wrapper = dict()
- self.cs_utils_module = dict()
+ self._util_deps: dict[str, set[str]] = {}
- self.ps_version = None
- self.os_version = None
+ self.ps_version: str | None = None
+ self.os_version: str | None = None
self.become = False
self._re_cs_module = [
@@ -70,36 +99,58 @@ class PSModuleDepFinder(object):
r'(\.[\w\.]+))(?P\s+-Optional){0,1}')),
]
- self._re_wrapper = re.compile(to_bytes(r'(?i)^#\s*ansiblerequires\s+-wrapper\s+(\w*)'))
self._re_ps_version = re.compile(to_bytes(r'(?i)^#requires\s+\-version\s+([0-9]+(\.[0-9]+){0,3})$'))
self._re_os_version = re.compile(to_bytes(r'(?i)^#ansiblerequires\s+\-osversion\s+([0-9]+(\.[0-9]+){0,3})$'))
self._re_become = re.compile(to_bytes(r'(?i)^#ansiblerequires\s+\-become$'))
- def scan_module(self, module_data, fqn=None, wrapper=False, powershell=True):
+ def scan_exec_script(self, name: str) -> None:
+ # Scans lib/ansible/executor/powershell for scripts used on the module
+ # exec side. It also scans those scripts for any further dependencies.
+ if name in self.scripts:
+ return
+
+ exec_code = _get_powershell_script(name)
+ self.scripts[name] = _ScriptInfo(
+ content=exec_code,
+ path=name,
+ )
+ self.scan_module(exec_code, powershell=True)
+
+ def scan_module(
+ self,
+ module_data: bytes,
+ fqn: str | None = None,
+ powershell: bool = True,
+ ) -> set[str]:
lines = module_data.split(b'\n')
- module_utils = set()
- if wrapper:
- cs_utils = self.cs_utils_wrapper
- else:
- cs_utils = self.cs_utils_module
+ module_utils: set[tuple[str, str, bool]] = set()
+
+ if fqn and fqn.startswith("ansible_collections."):
+ submodules = fqn.split('.')
+ collection_name = '.'.join(submodules[:3])
+
+ collection_hashlist = _get_powershell_signed_hashlist(collection_name)
+ if collection_hashlist and collection_hashlist.path not in self.signed_hashlist:
+ self.signed_hashlist.add(collection_hashlist.path)
+ self.scripts[collection_hashlist.path] = collection_hashlist
if powershell:
checks = [
# PS module contains '#Requires -Module Ansible.ModuleUtils.*'
# PS module contains '#AnsibleRequires -Powershell Ansible.*' (or collections module_utils ref)
- (self._re_ps_module, self.ps_modules, ".psm1"),
+ (self._re_ps_module, ".psm1"),
# PS module contains '#AnsibleRequires -CSharpUtil Ansible.*' (or collections module_utils ref)
- (self._re_cs_in_ps_module, cs_utils, ".cs"),
+ (self._re_cs_in_ps_module, ".cs"),
]
else:
checks = [
# CS module contains 'using Ansible.*;' or 'using ansible_collections.ns.coll.plugins.module_utils.*;'
- (self._re_cs_module, cs_utils, ".cs"),
+ (self._re_cs_module, ".cs"),
]
for line in lines:
- for check in checks:
- for pattern in check[0]:
+ for patterns, util_extension in checks:
+ for pattern in patterns:
match = pattern.match(line)
if match:
# tolerate windows line endings by stripping any remaining
@@ -107,82 +158,66 @@ class PSModuleDepFinder(object):
module_util_name = to_text(match.group(1).rstrip())
match_dict = match.groupdict()
optional = match_dict.get('optional', None) is not None
-
- if module_util_name not in check[1].keys():
- module_utils.add((module_util_name, check[2], fqn, optional))
-
+ module_utils.add((module_util_name, util_extension, optional))
break
- if powershell:
- ps_version_match = self._re_ps_version.match(line)
- if ps_version_match:
- self._parse_version_match(ps_version_match, "ps_version")
-
- os_version_match = self._re_os_version.match(line)
- if os_version_match:
- self._parse_version_match(os_version_match, "os_version")
-
- # once become is set, no need to keep on checking recursively
- if not self.become:
- become_match = self._re_become.match(line)
- if become_match:
- self.become = True
-
- if wrapper:
- wrapper_match = self._re_wrapper.match(line)
- if wrapper_match:
- self.scan_exec_script(wrapper_match.group(1).rstrip())
-
- # recursively drill into each Requires to see if there are any more
- # requirements
- for m in set(module_utils):
- self._add_module(*m, wrapper=wrapper)
-
- def scan_exec_script(self, name):
- # scans lib/ansible/executor/powershell for scripts used in the module
- # exec side. It also scans these scripts for any dependencies
- name = to_text(name)
- if name in self.exec_scripts.keys():
- return
-
- data = pkgutil.get_data("ansible.executor.powershell", to_native(name + ".ps1"))
- if data is None:
- raise AnsibleError("Could not find executor powershell script "
- "for '%s'" % name)
-
- b_data = to_bytes(data)
-
- # remove comments to reduce the payload size in the exec wrappers
- if C.DEFAULT_DEBUG:
- exec_script = b_data
- else:
- exec_script = _strip_comments(b_data)
- self.exec_scripts[name] = to_bytes(exec_script)
- self.scan_module(b_data, wrapper=True, powershell=True)
-
- def _add_module(self, name, ext, fqn, optional, wrapper=False):
- m = to_text(name)
-
- util_fqn = None
-
- if m.startswith("Ansible."):
- # Builtin util, use plugin loader to get the data
- mu_path = ps_module_utils_loader.find_plugin(m, ext)
-
- if not mu_path:
+ if not powershell:
+ continue
+
+ if ps_version_match := self._re_ps_version.match(line):
+ self._parse_version_match(ps_version_match, "ps_version")
+
+ if os_version_match := self._re_os_version.match(line):
+ self._parse_version_match(os_version_match, "os_version")
+
+ # once become is set, no need to keep on checking recursively
+ if not self.become and self._re_become.match(line):
+ self.become = True
+
+ dependencies: set[str] = set()
+ for name, ext, optional in set(module_utils):
+ util_name = self._scan_module_util(name, ext, fqn, optional)
+ if util_name:
+ dependencies.add(util_name)
+ util_deps = self._util_deps[util_name]
+ dependencies.update(util_deps)
+
+ return dependencies
+
+ def _scan_module_util(
+ self,
+ name: str,
+ ext: str,
+ module_fqn: str | None,
+ optional: bool,
+ ) -> str | None:
+ util_name: str
+ util_path: str
+ util_data: bytes
+ util_fqn: str | None = None
+
+ if name.startswith("Ansible."):
+ # Builtin util, or the old role module_utils reference.
+ util_name = f"{name}{ext}"
+
+ if util_name in self._util_deps:
+ return util_name
+
+ util_path = ps_module_utils_loader.find_plugin(name, ext)
+ if not util_path or not os.path.exists(util_path):
if optional:
- return
+ return None
- raise AnsibleError('Could not find imported module support code '
- 'for \'%s\'' % m)
+ raise AnsibleError(f"Could not find imported module util '{name}'")
+
+ with open(util_path, 'rb') as mu_file:
+ util_data = mu_file.read()
- module_util_data = to_bytes(_slurp(mu_path))
else:
# Collection util, load the package data based on the util import.
-
- submodules = m.split(".")
- if m.startswith('.'):
- fqn_submodules = fqn.split('.')
+ submodules = name.split(".")
+ if name.startswith('.'):
+ fqn_submodules = (module_fqn or "").split('.')
for submodule in submodules:
if submodule:
break
@@ -190,56 +225,70 @@ class PSModuleDepFinder(object):
submodules = fqn_submodules + [s for s in submodules if s]
- n_package_name = to_native('.'.join(submodules[:-1]), errors='surrogate_or_strict')
- n_resource_name = to_native(submodules[-1] + ext, errors='surrogate_or_strict')
+ util_package = '.'.join(submodules[:-1])
+ util_resource_name = f"{submodules[-1]}{ext}"
+ util_fqn = f"{util_package}.{submodules[-1]}"
+ util_name = f"{util_package}.{util_resource_name}"
+
+ if util_name in self._util_deps:
+ return util_name
try:
- module_util = import_module(n_package_name)
- pkg_data = pkgutil.get_data(n_package_name, n_resource_name)
- if pkg_data is None:
+ module_util = import_module(util_package)
+ util_code = pkgutil.get_data(util_package, util_resource_name)
+ if util_code is None:
raise ImportError("No package data found")
-
- module_util_data = to_bytes(pkg_data, errors='surrogate_or_strict')
- util_fqn = to_text("%s.%s " % (n_package_name, submodules[-1]), errors='surrogate_or_strict')
+ util_data = util_code
# Get the path of the util which is required for coverage collection.
resource_paths = list(module_util.__path__)
if len(resource_paths) != 1:
# This should never happen with a collection but we are just being defensive about it.
- raise AnsibleError("Internal error: Referenced module_util package '%s' contains 0 or multiple "
- "import locations when we only expect 1." % n_package_name)
- mu_path = os.path.join(resource_paths[0], n_resource_name)
+ raise AnsibleError(f"Internal error: Referenced module_util package '{util_package}' contains 0 "
+ "or multiple import locations when we only expect 1.")
+
+ util_path = os.path.join(resource_paths[0], util_resource_name)
except (ImportError, OSError) as err:
if getattr(err, "errno", errno.ENOENT) == errno.ENOENT:
if optional:
- return
+ return None
- raise AnsibleError('Could not find collection imported module support code for \'%s\''
- % to_native(m))
+ raise AnsibleError(f"Could not find collection imported module support code for '{name}'")
else:
raise
- util_info = {
- 'data': module_util_data,
- 'path': to_text(mu_path),
- }
- if ext == ".psm1":
- self.ps_modules[m] = util_info
- else:
- if wrapper:
- self.cs_utils_wrapper[m] = util_info
- else:
- self.cs_utils_module[m] = util_info
- self.scan_module(module_util_data, fqn=util_fqn, wrapper=wrapper, powershell=(ext == ".psm1"))
+ # This must be set before scan_module is called to avoid problems
+ # with recursive dependencies.
+ self.scripts[util_name] = _ScriptInfo(
+ content=util_data,
+ path=util_path,
+ )
+
+ # It is important this is set before calling scan_module to ensure
+ # recursive dependencies don't result in an infinite loop.
+ dependencies = self._util_deps[util_name] = set()
+
+ util_deps = self.scan_module(util_data, fqn=util_fqn, powershell=(ext == ".psm1"))
+ dependencies.update(util_deps)
+ for dep in dependencies:
+ if dep_list := self._util_deps.get(dep):
+ dependencies.update(dep_list)
+
+ if ext == ".cs":
+ # Any C# code requires the AddType.psm1 module to load.
+ dependencies.add("Ansible.ModuleUtils.AddType.psm1")
+ self._scan_module_util("Ansible.ModuleUtils.AddType", ".psm1", None, False)
- def _parse_version_match(self, match, attribute):
+ return util_name
+
+ def _parse_version_match(self, match: re.Match, attribute: str) -> None:
new_version = to_text(match.group(1)).rstrip()
# PowerShell cannot cast a string of "1" to Version, it must have at
# least the major.minor for it to be valid so we append 0
if match.group(2) is None:
- new_version = "%s.0" % new_version
+ new_version = f"{new_version}.0"
existing_version = getattr(self, attribute, None)
if existing_version is None:
@@ -250,151 +299,307 @@ class PSModuleDepFinder(object):
setattr(self, attribute, new_version)
-def _slurp(path):
- if not os.path.exists(path):
- raise AnsibleError("imported module support code does not exist at %s"
- % os.path.abspath(path))
- with open(path, 'rb') as fd:
- data = fd.read()
- return data
-
-
-def _strip_comments(source):
- # Strip comments and blank lines from the wrapper
- buf = []
- start_block = False
- for line in source.splitlines():
- l = line.strip()
-
- if start_block and l.endswith(b'#>'):
- start_block = False
- continue
- elif start_block:
- continue
- elif l.startswith(b'<#'):
- start_block = True
- continue
- elif not l or l.startswith(b'#'):
- continue
-
- buf.append(line)
- return b'\n'.join(buf)
-
-
-def _create_powershell_wrapper(b_module_data, module_path, module_args,
- environment, async_timeout, become,
- become_method, become_user, become_password,
- become_flags, substyle, task_vars, module_fqn):
- # creates the manifest/wrapper used in PowerShell/C# modules to enable
- # things like become and async - this is also called in action/script.py
-
- # FUTURE: add process_wrapper.ps1 to run module_wrapper in a new process
- # if running under a persistent connection and substyle is C# so we
- # don't have type conflicts
+def _bootstrap_powershell_script(
+ name: str,
+ parameters: dict[str, t.Any] | None = None,
+ *,
+ has_input: bool = False,
+) -> tuple[str, bytes]:
+ """Build bootstrap wrapper for specified script.
+
+ Builds the bootstrap wrapper and input needed to run the specified executor
+ PowerShell script specified.
+
+ :param name: The name of the PowerShell script to run.
+ :param parameters: The parameters to pass to the script.
+    :param has_input: Whether the script will be provided with input data.
+ :return: The bootstrap wrapper and input to provide to it.
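+
+    A hypothetical example for one of the bundled scripts::
+
+        wrapper, stdin = _bootstrap_powershell_script(
+            'powershell_mkdtemp.ps1',
+            {'Directory': '%TEMP%', 'Name': 'ansible-tmp'},
+        )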
+ """
+ exec_manifest = _ExecManifest()
+
+ script = _get_powershell_script(name)
+ exec_manifest.scripts[name] = _ScriptInfo(
+ content=script,
+ path=name,
+ )
+
+ exec_manifest.actions.append(
+ _ManifestAction(
+ name=name,
+ params=parameters or {},
+ )
+ )
+
+ if hashlist := _get_powershell_signed_hashlist():
+ exec_manifest.signed_hashlist.append(hashlist.path)
+ exec_manifest.scripts[hashlist.path] = hashlist
+
+ bootstrap_wrapper = _get_powershell_script("bootstrap_wrapper.ps1")
+ bootstrap_input = _get_bootstrap_input(exec_manifest)
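+    # A trailing empty delimiter tells bootstrap_wrapper.ps1 that extra input
+    # (for example streamed file data) will follow the two manifests.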
+ if has_input:
+ bootstrap_input += b"\n\0\0\0\0\n"
+
+ return bootstrap_wrapper.decode(), bootstrap_input
+
+
+def _get_powershell_script(
+ name: str,
+) -> bytes:
+ """Get the requested PowerShell script.
+
+    Gets the script stored in the ansible.executor.powershell package.
+
+ :param name: The name of the PowerShell script to retrieve.
+ :return: The contents of the requested PowerShell script as a byte string.
+ """
+ package_name = 'ansible.executor.powershell'
+
+ code = pkgutil.get_data(package_name, name)
+ if code is None:
+ raise AnsibleFileNotFound(f"Could not find powershell script '{package_name}.{name}'")
+
+ try:
+ sig_data = pkgutil.get_data(package_name, f"{name}.authenticode")
+ except FileNotFoundError:
+ sig_data = None
+
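+    # Authenticode signature blocks are line-ending sensitive, so the detached
+    # signature is normalized to CRLF before being appended to the script.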
+ if sig_data:
+ code = code + b"\r\n" + b"\r\n".join(sig_data.splitlines()) + b"\r\n"
+
+ return code
+
+
+def _create_powershell_wrapper(
+ *,
+ name: str,
+ module_data: bytes,
+ module_path: str,
+ module_args: dict[t.Any, t.Any],
+ environment: dict[str, str],
+ async_timeout: int,
+ become_plugin: BecomeBase | None,
+ substyle: t.Literal["powershell", "script"],
+ task_vars: dict[str, t.Any],
+ profile: str,
+) -> bytes:
+ """Creates module or script wrapper for PowerShell.
+
+ Creates the input data to provide to bootstrap_wrapper.ps1 when running a
+ PowerShell module or script.
+
+ :param name: The fully qualified name of the module or script filename (without extension).
+ :param module_data: The data of the module or script.
+ :param module_path: The path of the module or script.
+ :param module_args: The arguments to pass to the module or script.
+ :param environment: The environment variables to set when running the module or script.
+ :param async_timeout: The timeout to use for async execution or 0 for no async.
+ :param become_plugin: The become plugin to use for privilege escalation or None for no become.
+ :param substyle: The substyle of the module or script to run [powershell or script].
+    :param task_vars: The task variables used on the task.
+    :param profile: The module serialization profile used when encoding the module args.
+
+ :return: The input data for bootstrap_wrapper.ps1 as a byte string.
+ """
+
+ actions: list[_ManifestAction] = []
finder = PSModuleDepFinder()
- if substyle != 'script':
- # don't scan the module for util dependencies and other Ansible related
- # flags if the substyle is 'script' which is set by action/script
- finder.scan_module(b_module_data, fqn=module_fqn, powershell=(substyle == "powershell"))
-
- module_wrapper = "module_%s_wrapper" % substyle
- exec_manifest = dict(
- module_entry=to_text(base64.b64encode(b_module_data)),
- powershell_modules=dict(),
- csharp_utils=dict(),
- csharp_utils_module=list(), # csharp_utils only required by a module
- module_args=module_args,
- actions=[module_wrapper],
- environment=environment,
- encoded_output=False,
+ finder.scan_exec_script('module_wrapper.ps1')
+
+ ext = os.path.splitext(module_path)[1]
+ name_with_ext = f"{name}{ext}"
+ finder.scripts[name_with_ext] = _ScriptInfo(
+ content=module_data,
+ path=module_path,
)
- finder.scan_exec_script(module_wrapper)
+
+ module_params: dict[str, t.Any] = {
+ 'Script': name_with_ext,
+ 'Environment': environment,
+ }
+ if substyle != 'script':
+ module_deps = finder.scan_module(
+ module_data,
+ fqn=name,
+ powershell=True,
+ )
+ cs_deps = []
+ ps_deps = []
+ for dep in module_deps:
+ if dep.endswith('.cs'):
+ cs_deps.append(dep)
+ else:
+ ps_deps.append(dep)
+
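+        # complex_args is set as a global variable in the module Runspace so
+        # the module can read its arguments; module_wrapper.ps1 applies each
+        # entry with Set-Variable.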
+ module_params |= {
+ 'Variables': [
+ {
+ 'Name': 'complex_args',
+ 'Value': _prepare_module_args(module_args, profile),
+ 'Scope': 'Global',
+ },
+ ],
+ 'CSharpModules': cs_deps,
+ 'PowerShellModules': ps_deps,
+ 'ForModule': True,
+ }
+
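+    # A become wrapper is needed when the task requested become or when the
+    # module itself declared it during scanning (finder.become).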
+ if become_plugin or finder.become:
+ become_script = 'become_wrapper.ps1'
+ become_params: dict[str, t.Any] = {
+ 'BecomeUser': 'SYSTEM',
+ }
+ become_secure_params: dict[str, t.Any] = {}
+
+ if become_plugin:
+ if not isinstance(become_plugin, RunasBecomeModule):
+ msg = f"Become plugin {become_plugin.name} is not supported by the Windows exec wrapper. Make sure to set the become method to runas."
+ raise AnsibleError(msg)
+
+ become_script, become_params, become_secure_params = become_plugin._build_powershell_wrapper_action()
+
+ finder.scan_exec_script('exec_wrapper.ps1')
+ finder.scan_exec_script(become_script)
+ actions.append(
+ _ManifestAction(
+ name=become_script,
+ params=become_params,
+ secure_params=become_secure_params,
+ )
+ )
if async_timeout > 0:
- finder.scan_exec_script('exec_wrapper')
- finder.scan_exec_script('async_watchdog')
- finder.scan_exec_script('async_wrapper')
-
- exec_manifest["actions"].insert(0, 'async_watchdog')
- exec_manifest["actions"].insert(0, 'async_wrapper')
- exec_manifest["async_jid"] = f'j{secrets.randbelow(999999999999)}'
- exec_manifest["async_timeout_sec"] = async_timeout
- exec_manifest["async_startup_timeout"] = C.config.get_config_value("WIN_ASYNC_STARTUP_TIMEOUT", variables=task_vars)
-
- if become and resource_from_fqcr(become_method) == 'runas': # runas and namespace.collection.runas
- finder.scan_exec_script('exec_wrapper')
- finder.scan_exec_script('become_wrapper')
-
- exec_manifest["actions"].insert(0, 'become_wrapper')
- exec_manifest["become_user"] = become_user
- exec_manifest["become_password"] = become_password
- exec_manifest['become_flags'] = become_flags
-
- exec_manifest['min_ps_version'] = finder.ps_version
- exec_manifest['min_os_version'] = finder.os_version
- if finder.become and 'become_wrapper' not in exec_manifest['actions']:
- finder.scan_exec_script('exec_wrapper')
- finder.scan_exec_script('become_wrapper')
-
- exec_manifest['actions'].insert(0, 'become_wrapper')
- exec_manifest['become_user'] = 'SYSTEM'
- exec_manifest['become_password'] = None
- exec_manifest['become_flags'] = None
-
- coverage_manifest = dict(
- module_path=module_path,
- module_util_paths=dict(),
- output=None,
- )
+ finder.scan_exec_script('bootstrap_wrapper.ps1')
+ finder.scan_exec_script('exec_wrapper.ps1')
+
+ async_dir = environment.get('ANSIBLE_ASYNC_DIR', None)
+ if not async_dir:
+ raise AnsibleError("The environment variable 'ANSIBLE_ASYNC_DIR' is not set.")
+
+ finder.scan_exec_script('async_wrapper.ps1')
+ actions.append(
+ _ManifestAction(
+ name='async_wrapper.ps1',
+ params={
+ 'AsyncDir': async_dir,
+ 'AsyncJid': f'j{secrets.randbelow(999999999999)}',
+ 'StartupTimeout': C.config.get_config_value("WIN_ASYNC_STARTUP_TIMEOUT", variables=task_vars),
+ },
+ )
+ )
+
+ finder.scan_exec_script('async_watchdog.ps1')
+ actions.append(
+ _ManifestAction(
+ name='async_watchdog.ps1',
+ params={
+ 'Timeout': async_timeout,
+ },
+ )
+ )
+
coverage_output = C.config.get_config_value('COVERAGE_REMOTE_OUTPUT', variables=task_vars)
if coverage_output and substyle == 'powershell':
- finder.scan_exec_script('coverage_wrapper')
- coverage_manifest['output'] = coverage_output
-
- coverage_enabled = C.config.get_config_value('COVERAGE_REMOTE_PATHS', variables=task_vars)
- coverage_manifest['path_filter'] = coverage_enabled
-
- # make sure Ansible.ModuleUtils.AddType is added if any C# utils are used
- if len(finder.cs_utils_wrapper) > 0 or len(finder.cs_utils_module) > 0:
- finder._add_module(b"Ansible.ModuleUtils.AddType", ".psm1", None, False,
- wrapper=False)
-
- # exec_wrapper is only required to be part of the payload if using
- # become or async, to save on payload space we check if exec_wrapper has
- # already been added, and remove it manually if it hasn't later
- exec_required = "exec_wrapper" in finder.exec_scripts.keys()
- finder.scan_exec_script("exec_wrapper")
- # must contain an empty newline so it runs the begin/process/end block
- finder.exec_scripts["exec_wrapper"] += b"\n\n"
-
- exec_wrapper = finder.exec_scripts["exec_wrapper"]
- if not exec_required:
- finder.exec_scripts.pop("exec_wrapper")
-
- for name, data in finder.exec_scripts.items():
- b64_data = to_text(base64.b64encode(data))
- exec_manifest[name] = b64_data
-
- for name, data in finder.ps_modules.items():
- b64_data = to_text(base64.b64encode(data['data']))
- exec_manifest['powershell_modules'][name] = b64_data
- coverage_manifest['module_util_paths'][name] = data['path']
-
- cs_utils = {}
- for cs_util in [finder.cs_utils_wrapper, finder.cs_utils_module]:
- for name, data in cs_util.items():
- cs_utils[name] = data['data']
-
- for name, data in cs_utils.items():
- b64_data = to_text(base64.b64encode(data))
- exec_manifest['csharp_utils'][name] = b64_data
- exec_manifest['csharp_utils_module'] = list(finder.cs_utils_module.keys())
-
- # To save on the data we are sending across we only add the coverage info if coverage is being run
- if 'coverage_wrapper' in exec_manifest:
- exec_manifest['coverage'] = coverage_manifest
-
- b_json = to_bytes(json.dumps(exec_manifest))
- # delimit the payload JSON from the wrapper to keep sensitive contents out of scriptblocks (which can be logged)
- b_data = exec_wrapper + b'\0\0\0\0' + b_json
- return b_data
+ path_filter = C.config.get_config_value('COVERAGE_REMOTE_PATHS', variables=task_vars)
+
+ finder.scan_exec_script('coverage_wrapper.ps1')
+ actions.append(
+ _ManifestAction(
+ name='coverage_wrapper.ps1',
+ params={
+ 'ModuleName': name_with_ext,
+ 'OutputPath': coverage_output,
+ 'PathFilter': path_filter,
+ },
+ )
+ )
+
+ actions.append(
+ _ManifestAction(
+ name='module_wrapper.ps1',
+ params=module_params,
+ ),
+ )
+
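+    # Prefer the tmpdir already created for this task, falling back to the
+    # configured remote_tmp value if no tmpdir was set.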
+ temp_path: str | None = None
+ for temp_key in ['_ansible_tmpdir', '_ansible_remote_tmp']:
+ if temp_value := module_args.get(temp_key, None):
+ temp_path = temp_value
+ break
+
+ exec_manifest = _ExecManifest(
+ scripts=finder.scripts,
+ actions=actions,
+ signed_hashlist=list(finder.signed_hashlist),
+ )
+
+ return _get_bootstrap_input(
+ exec_manifest,
+ min_os_version=finder.os_version,
+ min_ps_version=finder.ps_version,
+ temp_path=temp_path,
+ )
+
+
+def _get_bootstrap_input(
+ manifest: _ExecManifest,
+ min_os_version: str | None = None,
+ min_ps_version: str | None = None,
+ temp_path: str | None = None,
+) -> bytes:
+ """Gets the input for bootstrap_wrapper.ps1
+
+ Gets the input needed to send to bootstrap_wrapper.ps1 to run code through
+ exec_wrapper.ps1.
+
+ :param manifest: The exec wrapper manifest of scripts and actions to run.
+ :param min_os_version: The minimum OS version required to run the scripts.
+ :param min_ps_version: The minimum PowerShell version required to run the scripts.
+ :param temp_path: The temporary path to use for the scripts if needed.
+ :return: The input for bootstrap_wrapper.ps1 as a byte string.
+ """
+ bootstrap_manifest = {
+ 'name': 'exec_wrapper',
+ 'script': _get_powershell_script("exec_wrapper.ps1").decode(),
+ 'params': {
+ 'MinOSVersion': min_os_version,
+ 'MinPSVersion': min_ps_version,
+ 'TempPath': temp_path,
+ },
+ }
+
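+    # bootstrap_wrapper.ps1 splits its stdin on the NUL delimiter; the first
+    # JSON document configures exec_wrapper.ps1 and the second is its input.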
+ bootstrap_input = json.dumps(bootstrap_manifest, ensure_ascii=True)
+ exec_input = json.dumps(dataclasses.asdict(manifest))
+ return f"{bootstrap_input}\n\0\0\0\0\n{exec_input}".encode()
+
+
+def _prepare_module_args(module_args: dict[str, t.Any], profile: str) -> dict[str, t.Any]:
+ """
+ Serialize the module args with the specified profile and deserialize them with the Python built-in JSON decoder.
+ This is used to facilitate serializing module args with a different encoder (profile) than is used for the manifest.
+ """
+ encoder = get_module_encoder(profile, Direction.CONTROLLER_TO_MODULE)
+
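+    # Encode with the profile-specific encoder, then decode with the stock
+    # JSON decoder so the manifest serializer only sees plain Python objects.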
+ return json.loads(json.dumps(module_args, cls=encoder))
+
+
+def _get_powershell_signed_hashlist(
+ collection: str | None = None,
+) -> _ScriptInfo | None:
+ """Gets the signed hashlist script stored in either the Ansible package or for
+ the collection specified.
+
+ :param collection: The collection namespace to get the signed hashlist for or None for the builtin.
+ :return: The _ScriptInfo payload of the signed hashlist script if found, None if not.
+ """
+ resource = 'ansible.config' if collection is None else f"{collection}.meta"
+ signature_file = 'powershell_signatures.psd1'
+
+ try:
+ sig_data = pkgutil.get_data(resource, signature_file)
+ except FileNotFoundError:
+ sig_data = None
+
+ if sig_data:
+ resource_path = f"{resource}.{signature_file}"
+ return _ScriptInfo(content=sig_data, path=resource_path)
+
+ return None
diff --git a/lib/ansible/executor/powershell/module_powershell_wrapper.ps1 b/lib/ansible/executor/powershell/module_powershell_wrapper.ps1
deleted file mode 100644
index f79dd6fbc86..00000000000
--- a/lib/ansible/executor/powershell/module_powershell_wrapper.ps1
+++ /dev/null
@@ -1,86 +0,0 @@
-# (c) 2018 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-param(
- [Parameter(Mandatory = $true)][System.Collections.IDictionary]$Payload
-)
-
-#AnsibleRequires -Wrapper module_wrapper
-
-$ErrorActionPreference = "Stop"
-
-Write-AnsibleLog "INFO - starting module_powershell_wrapper" "module_powershell_wrapper"
-
-$module_name = $Payload.module_args["_ansible_module_name"]
-Write-AnsibleLog "INFO - building module payload for '$module_name'" "module_powershell_wrapper"
-
-# compile any C# module utils passed in from the controller, Add-CSharpType is
-# automatically added to the payload manifest if any csharp util is set
-$csharp_utils = [System.Collections.ArrayList]@()
-foreach ($csharp_util in $Payload.csharp_utils_module) {
- Write-AnsibleLog "INFO - adding $csharp_util to list of C# references to compile" "module_powershell_wrapper"
- $util_code = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.csharp_utils[$csharp_util]))
- $csharp_utils.Add($util_code) > $null
-}
-if ($csharp_utils.Count -gt 0) {
- $add_type_b64 = $Payload.powershell_modules["Ansible.ModuleUtils.AddType"]
- $add_type = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($add_type_b64))
- New-Module -Name Ansible.ModuleUtils.AddType -ScriptBlock ([ScriptBlock]::Create($add_type)) | Import-Module > $null
-
- # add any C# references so the module does not have to do so
- $new_tmp = [System.Environment]::ExpandEnvironmentVariables($Payload.module_args["_ansible_remote_tmp"])
-
- # We use a fake module object to capture warnings
- $fake_module = [PSCustomObject]@{
- Tmpdir = $new_tmp
- Verbosity = 3
- }
- $warning_func = New-Object -TypeName System.Management.Automation.PSScriptMethod -ArgumentList Warn, {
- param($message)
- $Payload.module_args._ansible_exec_wrapper_warnings.Add($message)
- }
- $fake_module.PSObject.Members.Add($warning_func)
- Add-CSharpType -References $csharp_utils -AnsibleModule $fake_module
-}
-
-if ($Payload.ContainsKey("coverage") -and $null -ne $host.Runspace -and $null -ne $host.Runspace.Debugger) {
- $entrypoint = $payload.coverage_wrapper
-
- $params = @{
- Payload = $Payload
- }
-}
-else {
- # get the common module_wrapper code and invoke that to run the module
- $module = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.module_entry))
- $variables = [System.Collections.ArrayList]@(@{ Name = "complex_args"; Value = $Payload.module_args; Scope = "Global" })
- $entrypoint = $Payload.module_wrapper
-
- $params = @{
- Scripts = @($script:common_functions, $module)
- Variables = $variables
- Environment = $Payload.environment
- Modules = $Payload.powershell_modules
- ModuleName = $module_name
- }
-}
-
-$entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($entrypoint))
-$entrypoint = [ScriptBlock]::Create($entrypoint)
-
-try {
- &$entrypoint @params
-}
-catch {
- # failed to invoke the PowerShell module, capture the exception and
- # output a pretty error for Ansible to parse
- $result = @{
- msg = "Failed to invoke PowerShell module: $($_.Exception.Message)"
- failed = $true
- exception = (Format-AnsibleException -ErrorRecord $_)
- }
- Write-Output -InputObject (ConvertTo-Json -InputObject $result -Depth 99 -Compress)
- $host.SetShouldExit(1)
-}
-
-Write-AnsibleLog "INFO - ending module_powershell_wrapper" "module_powershell_wrapper"
diff --git a/lib/ansible/executor/powershell/module_script_wrapper.ps1 b/lib/ansible/executor/powershell/module_script_wrapper.ps1
deleted file mode 100644
index dd8420fb77d..00000000000
--- a/lib/ansible/executor/powershell/module_script_wrapper.ps1
+++ /dev/null
@@ -1,22 +0,0 @@
-# (c) 2018 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-param(
- [Parameter(Mandatory = $true)][System.Collections.IDictionary]$Payload
-)
-
-#AnsibleRequires -Wrapper module_wrapper
-
-$ErrorActionPreference = "Stop"
-
-Write-AnsibleLog "INFO - starting module_script_wrapper" "module_script_wrapper"
-
-$script = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.module_entry))
-
-# get the common module_wrapper code and invoke that to run the module
-$entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($payload.module_wrapper))
-$entrypoint = [ScriptBlock]::Create($entrypoint)
-
-&$entrypoint -Scripts $script -Environment $Payload.environment -ModuleName "script"
-
-Write-AnsibleLog "INFO - ending module_script_wrapper" "module_script_wrapper"
diff --git a/lib/ansible/executor/powershell/module_wrapper.ps1 b/lib/ansible/executor/powershell/module_wrapper.ps1
index 1cfaf3ceae1..253cff29c7d 100644
--- a/lib/ansible/executor/powershell/module_wrapper.ps1
+++ b/lib/ansible/executor/powershell/module_wrapper.ps1
@@ -1,229 +1,238 @@
-# (c) 2018 Ansible Project
+# (c) 2025 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-<#
-.SYNOPSIS
-Invokes an Ansible module in a new Runspace. This cmdlet will output the
-module's output and write any errors to the error stream of the current
-host.
-
-.PARAMETER Scripts
-[Object[]] String or ScriptBlocks to execute.
-
-.PARAMETER Variables
-[System.Collections.ArrayList] The variables to set in the new Pipeline.
-Each value is a hashtable that contains the parameters to use with
-Set-Variable;
- Name: the name of the variable to set
- Value: the value of the variable to set
- Scope: the scope of the variable
-
-.PARAMETER Environment
-[System.Collections.IDictionary] A Dictionary of environment key/values to
-set in the new Pipeline.
-
-.PARAMETER Modules
-[System.Collections.IDictionary] A Dictionary of PowerShell modules to
-import into the new Pipeline. The key is the name of the module and the
-value is a base64 string of the module util code.
-
-.PARAMETER ModuleName
-[String] The name of the module that is being executed.
-
-.PARAMETER Breakpoints
-A list of line breakpoints to add to the runspace debugger. This is used to
-track module and module_utils coverage.
-#>
+using namespace System.Collections
+using namespace System.IO
+using namespace System.Management.Automation
+using namespace System.Management.Automation.Language
+using namespace System.Management.Automation.Security
+using namespace System.Text
+
+[CmdletBinding()]
param(
- [Object[]]$Scripts,
- [System.Collections.ArrayList][AllowEmptyCollection()]$Variables,
- [System.Collections.IDictionary]$Environment,
- [System.Collections.IDictionary]$Modules,
- [String]$ModuleName,
- [System.Management.Automation.LineBreakpoint[]]$Breakpoints = @()
+ [Parameter(Mandatory)]
+ [string]
+ $Script,
+
+ [Parameter()]
+ [IDictionary[]]
+ [AllowEmptyCollection()]
+ $Variables = @(),
+
+ [Parameter()]
+ [IDictionary]
+ $Environment,
+
+ [Parameter()]
+ [AllowEmptyCollection()]
+ [string[]]
+ $CSharpModules,
+
+ [Parameter()]
+ [AllowEmptyCollection()]
+ [string[]]
+ $PowerShellModules,
+
+ [Parameter()]
+ [LineBreakpoint[]]
+ $Breakpoints,
+
+ [Parameter()]
+ [switch]
+ $ForModule
)
-Write-AnsibleLog "INFO - creating new PowerShell pipeline for $ModuleName" "module_wrapper"
+Function Write-AnsibleErrorDetail {
+ [CmdletBinding()]
+ param (
+ [Parameter(Mandatory)]
+ [System.Management.Automation.ErrorRecord]
+ $ErrorRecord,
+
+ [Parameter()]
+ [switch]
+ $ForModule
+ )
+
+ # Be more defensive when trying to find the InnerException in case it isn't
+ # set. This shouldn't ever be the case but if it is then it makes it more
+ # difficult to track down the problem.
+ if ($ErrorRecord.Exception.InnerException.ErrorRecord) {
+ $ErrorRecord = $ErrorRecord.Exception.InnerException.ErrorRecord
+ }
+
+ $exception = @(
+ "$ErrorRecord"
+
+        # stderr from sub processes has this error id; we don't want to format those errors
+        # like a normal PowerShell error record.
+ if ($ErrorRecord.FullyQualifiedErrorId -notin @('NativeCommandError', 'NativeCommandErrorMessage')) {
+ "$($ErrorRecord.InvocationInfo.PositionMessage)"
+ "+ CategoryInfo : $($ErrorRecord.CategoryInfo)"
+ "+ FullyQualifiedErrorId : $($ErrorRecord.FullyQualifiedErrorId)"
+ ""
+ "ScriptStackTrace:"
+ "$($ErrorRecord.ScriptStackTrace)"
+
+ if ($ErrorRecord.Exception.StackTrace) {
+ "$($ErrorRecord.Exception.StackTrace)"
+ }
+ }
+ ) -join ([Environment]::NewLine)
+
+ if ($ForModule) {
+ @{
+ failed = $true
+ msg = "Unhandled exception while executing module: $ErrorRecord"
+ exception = $exception
+ } | ConvertTo-Json -Compress
+ }
+ else {
+ $host.UI.WriteErrorLine($exception)
+ }
+}
+
$ps = [PowerShell]::Create()
-# do not set ErrorActionPreference for script
-if ($ModuleName -ne "script") {
+if ($ForModule) {
$ps.Runspace.SessionStateProxy.SetVariable("ErrorActionPreference", "Stop")
}
-
-# force input encoding to preamble-free UTF8 so PS sub-processes (eg,
-# Start-Job) don't blow up. This is only required for WinRM, a PSRP
-# runspace doesn't have a host console and this will bomb out
-if ($host.Name -eq "ConsoleHost") {
- Write-AnsibleLog "INFO - setting console input encoding to UTF8 for $ModuleName" "module_wrapper"
- $ps.AddScript('[Console]::InputEncoding = New-Object Text.UTF8Encoding $false').AddStatement() > $null
+else {
+ # For script files we want to ensure we load it as UTF-8. We don't set this
+ # for modules as they are loaded from memory whereas a script is loaded
+    # from disk as part of the script being run rather than by us.
+ Set-WinPSDefaultFileEncoding
}
-# set the variables
foreach ($variable in $Variables) {
- Write-AnsibleLog "INFO - setting variable '$($variable.Name)' for $ModuleName" "module_wrapper"
- $ps.AddCommand("Set-Variable").AddParameters($variable).AddStatement() > $null
-}
-
-# set the environment vars
-if ($Environment) {
- # Escaping quotes can be problematic, instead just pass the string to the runspace and set it directly.
- Write-AnsibleLog "INFO - setting environment vars for $ModuleName" "module_wrapper"
- $ps.Runspace.SessionStateProxy.SetVariable("_AnsibleEnvironment", $Environment)
- $ps.AddScript(@'
-foreach ($env_kv in $_AnsibleEnvironment.GetEnumerator()) {
- [System.Environment]::SetEnvironmentVariable($env_kv.Key, $env_kv.Value)
-}
-'@).AddStatement() > $null
+ $null = $ps.AddCommand("Set-Variable").AddParameters($variable).AddStatement()
}
-# import the PS modules
-if ($Modules) {
- foreach ($module in $Modules.GetEnumerator()) {
- Write-AnsibleLog "INFO - create module util '$($module.Key)' for $ModuleName" "module_wrapper"
- $module_name = $module.Key
- $module_code = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($module.Value))
- $ps.AddCommand("New-Module").AddParameters(@{Name = $module_name; ScriptBlock = [ScriptBlock]::Create($module_code) }) > $null
- $ps.AddCommand("Import-Module").AddParameter("WarningAction", "SilentlyContinue") > $null
- $ps.AddCommand("Out-Null").AddStatement() > $null
- }
+# env vars are process wide so we can just set them here.
+foreach ($env in $Environment.GetEnumerator()) {
+ [Environment]::SetEnvironmentVariable($env.Key, $env.Value)
}
-# redefine Write-Host to dump to output instead of failing
-# lots of scripts still use it
-$ps.AddScript('Function Write-Host($msg) { Write-Output -InputObject $msg }').AddStatement() > $null
+# Redefine Write-Host to dump to output instead of failing, lots of scripts
+# still use it.
+$null = $ps.AddScript('Function Write-Host($msg) { Write-Output -InputObject $msg }').AddStatement()
-# add the scripts and run
-foreach ($script in $Scripts) {
- $ps.AddScript($script).AddStatement() > $null
-}
+$scriptInfo = Get-AnsibleScript -Name $Script
+if ($scriptInfo.ShouldConstrain) {
+    # Fail if there are any module utils; in the future we may allow unsigned
+    # PowerShell utils in CLM but for now we don't.
+ if ($PowerShellModules -or $CSharpModules) {
+ throw "Cannot run untrusted PowerShell script '$Script' in ConstrainedLanguage mode with module util imports."
+ }
-if ($Breakpoints.Count -gt 0) {
- Write-AnsibleLog "INFO - adding breakpoint to runspace that will run the modules" "module_wrapper"
- if ($PSVersionTable.PSVersion.Major -eq 3) {
- # The SetBreakpoints method was only added in PowerShell v4+. We need to rely on a private method to
- # achieve the same functionality in this older PowerShell version. This should be removed once we drop
- # support for PowerShell v3.
- $set_method = $ps.Runspace.Debugger.GetType().GetMethod(
- 'AddLineBreakpoint', [System.Reflection.BindingFlags]'Instance, NonPublic'
- )
- foreach ($b in $Breakpoints) {
- $set_method.Invoke($ps.Runspace.Debugger, [Object[]]@(, $b)) > $null
+ # If the module is marked as needing to be constrained then we set the
+ # language mode to ConstrainedLanguage so that when parsed inside the
+    # Runspace it will run in CLM. We need to run it from a filepath because
+    # in CLM we cannot call the methods needed to create the ScriptBlock and we
+ # need to be in CLM to downgrade the language mode.
+ $null = $ps.AddScript('$ExecutionContext.SessionState.LanguageMode = "ConstrainedLanguage"').AddStatement()
+ $scriptPath = New-TempAnsibleFile -FileName $Script -Content $scriptInfo.Script
+ $null = $ps.AddCommand($scriptPath, $false).AddStatement()
+}
+else {
+ if ($PowerShellModules) {
+ foreach ($utilName in $PowerShellModules) {
+ $utilInfo = Get-AnsibleScript -Name $utilName
+ if ($utilInfo.ShouldConstrain) {
+ throw "PowerShell module util '$utilName' is not trusted and cannot be loaded."
+ }
+
+ $null = $ps.AddScript(@'
+param ($Name, $Script)
+
+$moduleName = [System.IO.Path]::GetFileNameWithoutExtension($Name)
+$sbk = [System.Management.Automation.Language.Parser]::ParseInput(
+ $Script,
+ $Name,
+ [ref]$null,
+ [ref]$null).GetScriptBlock()
+
+New-Module -Name $moduleName -ScriptBlock $sbk |
+ Import-Module -WarningAction SilentlyContinue -Scope Global
+'@, $true)
+ $null = $ps.AddParameters(
+ @{
+ Name = $utilName
+ Script = $utilInfo.Script
+ }
+ ).AddStatement()
}
}
- else {
- $ps.Runspace.Debugger.SetBreakpoints($Breakpoints)
- }
-}
-Write-AnsibleLog "INFO - start module exec with Invoke() - $ModuleName" "module_wrapper"
+ if ($CSharpModules) {
+ # C# utils are process wide so just load them here.
+ Import-CSharpUtil -Name $CSharpModules
+ }
-# temporarily override the stdout stream and create our own in a StringBuilder
-# we use this to ensure there's always an Out pipe and that we capture the
-# output for things like async or psrp
-$orig_out = [System.Console]::Out
-$sb = New-Object -TypeName System.Text.StringBuilder
-$new_out = New-Object -TypeName System.IO.StringWriter -ArgumentList $sb
+ # We invoke it through a command with useLocalScope $false to
+    # ensure the code runs with its own $script: scope. It also
+ # cleans up the StackTrace on errors by not showing the stub
+ # execution line and starts immediately at the module "cmd".
+ $null = $ps.AddScript(@'
+${function:} = [System.Management.Automation.Language.Parser]::ParseInput(
+ $args[0],
+ $args[1],
+ [ref]$null,
+ [ref]$null).GetScriptBlock()
+'@).AddArgument($scriptInfo.Script).AddArgument($Script).AddStatement()
+ $null = $ps.AddCommand('', $false).AddStatement()
+}
+
+if ($Breakpoints) {
+ $ps.Runspace.Debugger.SetBreakpoints($Breakpoints)
+}
+
+# Temporarily override the stdout stream and create our own in a StringBuilder.
+# We use this to ensure there's always an Out pipe and that we capture the
+# output for things like async or psrp.
+$origOut = [Console]::Out
+$sb = [StringBuilder]::new()
try {
- [System.Console]::SetOut($new_out)
- $module_output = $ps.Invoke()
+ $newOut = [StringWriter]::new($sb)
+ [Console]::SetOut($newOut)
+
+ $modOut = @($ps.Invoke())
}
catch {
- # uncaught exception while executing module, present a prettier error for
- # Ansible to parse
- $error_params = @{
- Message = "Unhandled exception while executing module"
- ErrorRecord = $_
- }
-
- # Be more defensive when trying to find the InnerException in case it isn't
- # set. This shouldn't ever be the case but if it is then it makes it more
- # difficult to track down the problem.
- if ($_.Exception.PSObject.Properties.Name -contains "InnerException") {
- $inner_exception = $_.Exception.InnerException
- if ($inner_exception.PSObject.Properties.Name -contains "ErrorRecord") {
- $error_params.ErrorRecord = $inner_exception.ErrorRecord
- }
+ Write-AnsibleErrorDetail -ErrorRecord $_ -ForModule:$ForModule
+ if ($ForModule) {
+ $host.SetShouldExit(1)
+ return
}
-
- Write-AnsibleError @error_params
- $host.SetShouldExit(1)
- return
}
finally {
- [System.Console]::SetOut($orig_out)
- $new_out.Dispose()
-}
-
-# other types of errors may not throw an exception in Invoke but rather just
-# set the pipeline state to failed
-if ($ps.InvocationStateInfo.State -eq "Failed" -and $ModuleName -ne "script") {
- $reason = $ps.InvocationStateInfo.Reason
- $error_params = @{
- Message = "Unhandled exception while executing module"
+ if ($newOut) {
+ [Console]::SetOut($origOut)
+ $newOut.Dispose()
}
-
- # The error record should always be set on the reason but this does not
- # always happen on Server 2008 R2 for some reason (probably memory hotfix).
- # Be defensive when trying to get the error record and fall back to other
- # options.
- if ($null -eq $reason) {
- $error_params.Message += ": Unknown error"
- }
- elseif ($reason.PSObject.Properties.Name -contains "ErrorRecord") {
- $error_params.ErrorRecord = $reason.ErrorRecord
- }
- else {
- $error_params.Message += ": $($reason.ToString())"
- }
-
- Write-AnsibleError @error_params
- $host.SetShouldExit(1)
- return
}
-Write-AnsibleLog "INFO - module exec ended $ModuleName" "module_wrapper"
$stdout = $sb.ToString()
if ($stdout) {
- Write-Output -InputObject $stdout
+ $stdout
}
-if ($module_output.Count -gt 0) {
- # do not output if empty collection
- Write-AnsibleLog "INFO - using the output stream for module output - $ModuleName" "module_wrapper"
- Write-Output -InputObject ($module_output -join "`r`n")
+if ($modOut.Count) {
+ $modOut -join "`r`n"
}
-# we attempt to get the return code from the LASTEXITCODE variable
-# this is set explicitly in newer style variables when calling
-# ExitJson and FailJson. If set we set the current hosts' exit code
-# to that same value
+# Attempt to set the return code from the LASTEXITCODE variable. This is set
+# explicitly in newer style modules when calling ExitJson and FailJson.
$rc = $ps.Runspace.SessionStateProxy.GetVariable("LASTEXITCODE")
if ($null -ne $rc) {
- Write-AnsibleLog "INFO - got an rc of $rc from $ModuleName exec" "module_wrapper"
$host.SetShouldExit($rc)
}
-# PS3 doesn't properly set HadErrors in many cases, inspect the error stream as a fallback
-# with the trap handler that's now in place, this should only write to the output if
-# $ErrorActionPreference != "Stop", that's ok because this is sent to the stderr output
-# for a user to manually debug if something went horribly wrong
-if (
- $ps.Streams.Error.Count -and
- ($ps.HadErrors -or $PSVersionTable.PSVersion.Major -lt 4)
-) {
- Write-AnsibleLog "WARN - module had errors, outputting error info $ModuleName" "module_wrapper"
- # if the rc wasn't explicitly set, we return an exit code of 1
- if ($null -eq $rc) {
- $host.SetShouldExit(1)
- }
-
- # output each error to the error stream of the current pipeline
- foreach ($err in $ps.Streams.Error) {
- $error_msg = Format-AnsibleException -ErrorRecord $err
-
- # need to use the current hosts's UI class as we may not have
- # a console to write the stderr to, e.g. psrp
- Write-AnsibleLog "WARN - error msg for for $($ModuleName):`r`n$error_msg" "module_wrapper"
- $host.UI.WriteErrorLine($error_msg)
+foreach ($err in $ps.Streams.Error) {
+ Write-AnsibleErrorDetail -ErrorRecord $err -ForModule:$ForModule
+ if ($ForModule) {
+ if ($null -eq $rc) {
+ $host.SetShouldExit(1)
+ }
+ return
}
}
diff --git a/lib/ansible/executor/powershell/powershell_expand_user.ps1 b/lib/ansible/executor/powershell/powershell_expand_user.ps1
new file mode 100644
index 00000000000..ad9b749240b
--- /dev/null
+++ b/lib/ansible/executor/powershell/powershell_expand_user.ps1
@@ -0,0 +1,20 @@
+# (c) 2025 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+[CmdletBinding()]
+param (
+ [Parameter(Mandatory)]
+ [string]
+ $Path
+)
+
+$userProfile = [Environment]::GetFolderPath([Environment+SpecialFolder]::UserProfile)
+if ($Path -eq '~') {
+ $userProfile
+}
+elseif ($Path.StartsWith('~\')) {
+ Join-Path -Path $userProfile -ChildPath $Path.Substring(2)
+}
+else {
+ $Path
+}
diff --git a/lib/ansible/executor/powershell/powershell_mkdtemp.ps1 b/lib/ansible/executor/powershell/powershell_mkdtemp.ps1
new file mode 100644
index 00000000000..14749f94ef1
--- /dev/null
+++ b/lib/ansible/executor/powershell/powershell_mkdtemp.ps1
@@ -0,0 +1,17 @@
+# (c) 2025 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+[CmdletBinding()]
+param (
+ [Parameter(Mandatory)]
+ [string]
+ $Directory,
+
+ [Parameter(Mandatory)]
+ [string]
+ $Name
+)
+
+$path = [Environment]::ExpandEnvironmentVariables($Directory)
+$tmp = New-Item -Path $path -Name $Name -ItemType Directory
+$tmp.FullName
diff --git a/lib/ansible/executor/powershell/psrp_fetch_file.ps1 b/lib/ansible/executor/powershell/psrp_fetch_file.ps1
new file mode 100644
index 00000000000..f061affae01
--- /dev/null
+++ b/lib/ansible/executor/powershell/psrp_fetch_file.ps1
@@ -0,0 +1,41 @@
+# (c) 2025 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+using namespace System.IO
+
+[CmdletBinding()]
+param(
+ [Parameter(Mandatory)]
+ [string]
+ $Path,
+
+ [Parameter(Mandatory)]
+ [int]
+ $BufferSize
+)
+
+if (Test-Path -LiteralPath $Path -PathType Leaf) {
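+    # The first output line is a marker telling the controller whether the
+    # path is a file or a directory; base64 chunks follow for files.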
+ "[FILE]"
+
+ $fs = [FileStream]::new(
+ $path,
+ [FileMode]::Open,
+ [FileAccess]::Read,
+ [FileShare]::Read)
+
+ try {
+ $buffer = [byte[]]::new($BufferSize)
+ while ($read = $fs.Read($buffer, 0, $buffer.Length)) {
+ [Convert]::ToBase64String($buffer, 0, $read)
+ }
+ }
+ finally {
+ $fs.Dispose()
+ }
+}
+elseif (Test-Path -LiteralPath $Path -PathType Container) {
+ "[DIR]"
+}
+else {
+ Write-Error -Message "$Path does not exist"
+}
diff --git a/lib/ansible/executor/powershell/psrp_put_file.ps1 b/lib/ansible/executor/powershell/psrp_put_file.ps1
new file mode 100644
index 00000000000..e91eaed404c
--- /dev/null
+++ b/lib/ansible/executor/powershell/psrp_put_file.ps1
@@ -0,0 +1,122 @@
+# (c) 2025 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+using namespace System.IO
+using namespace System.Reflection
+using namespace System.Security.Cryptography
+
+[CmdletBinding()]
+param (
+ [Parameter(Mandatory = $true)]
+ [string]
+ $Path,
+
+ [Parameter(Mandatory, ValueFromPipeline)]
+ [AllowEmptyString()]
+ [string]
+ $InputObject
+)
+
+begin {
+ $fd = [File]::Create($Path)
+ $algo = [SHA1]::Create()
+ $bytes = @()
+
+ $bindingFlags = [BindingFlags]'NonPublic, Instance'
+ Function Get-Property {
+ <#
+ .SYNOPSIS
+ Gets the private/internal property specified of the object passed in.
+ #>
+ Param (
+ [Parameter(Mandatory = $true, ValueFromPipeline = $true)]
+ [Object]
+ $Object,
+
+ [Parameter(Mandatory = $true, Position = 1)]
+ [String]
+ $Name
+ )
+
+ process {
+ $Object.GetType().GetProperty($Name, $bindingFlags).GetValue($Object, $null)
+ }
+ }
+
+ Function Set-Property {
+ <#
+ .SYNOPSIS
+ Sets the private/internal property specified on the object passed in.
+ #>
+ Param (
+ [Parameter(Mandatory = $true, ValueFromPipeline = $true)]
+ [Object]
+ $Object,
+
+ [Parameter(Mandatory = $true, Position = 1)]
+ [String]
+ $Name,
+
+ [Parameter(Mandatory = $true, Position = 2)]
+ [AllowNull()]
+ [Object]
+ $Value
+ )
+
+ process {
+ $Object.GetType().GetProperty($Name, $bindingFlags).SetValue($Object, $Value, $null)
+ }
+ }
+
+ Function Get-Field {
+ <#
+ .SYNOPSIS
+ Gets the private/internal field specified of the object passed in.
+ #>
+ Param (
+ [Parameter(Mandatory = $true, ValueFromPipeline = $true)]
+ [Object]
+ $Object,
+
+ [Parameter(Mandatory = $true, Position = 1)]
+ [String]
+ $Name
+ )
+
+ process {
+ $Object.GetType().GetField($Name, $bindingFlags).GetValue($Object)
+ }
+ }
+
+    # MaximumAllowedMemory is required to be set so we can send input data that exceeds the limit on a PS
+ # Runspace. We use reflection to access/set this property as it is not accessible publicly. This is not ideal
+ # but works on all PowerShell versions I've tested with. We originally used WinRS to send the raw bytes to the
+ # host but this falls flat if someone is using a custom PS configuration name so this is a workaround. This
+ # isn't required for smaller files so if it fails we ignore the error and hope it wasn't needed.
+ # https://github.com/PowerShell/PowerShell/blob/c8e72d1e664b1ee04a14f226adf655cced24e5f0/src/System.Management.Automation/engine/serialization.cs#L325
+ try {
+ $Host | Get-Property 'ExternalHost' |
+ Get-Field '_transportManager' |
+ Get-Property 'Fragmentor' |
+ Get-Property 'DeserializationContext' |
+ Set-Property 'MaximumAllowedMemory' $null
+ }
+ catch {
+ # Satisfy pslint, we purposefully ignore this error as it is not critical it works.
+ $null = $null
+ }
+}
+process {
+ if ($InputObject) {
+ $bytes = [Convert]::FromBase64String($InputObject)
+ $algo.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) > $null
+ $fd.Write($bytes, 0, $bytes.Length)
+ }
+}
+end {
+ $fd.Close()
+
+ $algo.TransformFinalBlock($bytes, 0, 0) > $null
+ $hash = [BitConverter]::ToString($algo.Hash).Replace('-', '').ToLowerInvariant()
+ "{`"sha1`":`"$hash`"}"
+}
diff --git a/lib/ansible/executor/powershell/winrm_fetch_file.ps1 b/lib/ansible/executor/powershell/winrm_fetch_file.ps1
new file mode 100644
index 00000000000..596d1a33b68
--- /dev/null
+++ b/lib/ansible/executor/powershell/winrm_fetch_file.ps1
@@ -0,0 +1,46 @@
+# (c) 2025 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+using namespace System.IO
+
+[CmdletBinding()]
+param (
+ [Parameter(Mandatory)]
+ [string]
+ $Path,
+
+ [Parameter(Mandatory)]
+ [int]
+ $BufferSize,
+
+ [Parameter(Mandatory)]
+ [long]
+ $Offset
+)
+
+if (Test-Path -LiteralPath $Path -PathType Leaf) {
+ $stream = [FileStream]::new(
+ $Path,
+ [FileMode]::Open,
+ [FileAccess]::Read,
+ [FileShare]::ReadWrite)
+
+ try {
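+        # Each invocation returns at most one chunk; the controller advances
+        # $Offset between calls to fetch the file incrementally.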
+ $null = $stream.Seek($Offset, [SeekOrigin]::Begin)
+ $buffer = [byte[]]::new($BufferSize)
+ $read = $stream.Read($buffer, 0, $buffer.Length)
+ if ($read) {
+ [Convert]::ToBase64String($buffer, 0, $read)
+ }
+ }
+ finally {
+ $stream.Dispose()
+ }
+}
+elseif (Test-Path -LiteralPath $Path -PathType Container) {
+ "[DIR]"
+}
+else {
+ $host.UI.WriteErrorLine("$Path does not exist")
+ exit 1
+}
diff --git a/lib/ansible/executor/powershell/winrm_put_file.ps1 b/lib/ansible/executor/powershell/winrm_put_file.ps1
new file mode 100644
index 00000000000..873f40b55ea
--- /dev/null
+++ b/lib/ansible/executor/powershell/winrm_put_file.ps1
@@ -0,0 +1,36 @@
+# (c) 2025 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+using namespace System.IO
+using namespace System.Security.Cryptography
+
+[CmdletBinding()]
+param (
+ [Parameter(Mandatory)]
+ [string]
+ $Path,
+
+ [Parameter(ValueFromPipeline)]
+ [string]
+ $InputObject
+)
+
+begin {
+ $fd = [File]::Create($Path)
+ $sha1 = [SHA1]::Create()
+    $bytes = @() # initialize for the empty file case
+}
+
+process {
+ $bytes = [Convert]::FromBase64String($InputObject)
+ $null = $sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0)
+ $fd.Write($bytes, 0, $bytes.Length)
+}
+
+end {
+ $fd.Dispose()
+ $null = $sha1.TransformFinalBlock($bytes, 0, 0)
+ $hash = [BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant()
+
+ '{{"sha1":"{0}"}}' -f $hash
+}
diff --git a/lib/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py
index f5e7b979f42..47f18686186 100644
--- a/lib/ansible/executor/process/worker.py
+++ b/lib/ansible/executor/process/worker.py
@@ -17,18 +17,32 @@
from __future__ import annotations
+import io
import os
+import signal
import sys
+import textwrap
import traceback
+import types
+import typing as t
-from jinja2.exceptions import TemplateNotFound
from multiprocessing.queues import Queue
-from ansible.errors import AnsibleConnectionFailure, AnsibleError
+from ansible._internal import _task
+from ansible._internal._errors import _error_utils
+from ansible.errors import AnsibleError
from ansible.executor.task_executor import TaskExecutor
+from ansible.executor.task_queue_manager import FinalQueue, STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO
+from ansible.executor.task_result import _RawTaskResult
+from ansible.inventory.host import Host
from ansible.module_utils.common.text.converters import to_text
+from ansible.parsing.dataloader import DataLoader
+from ansible.playbook.task import Task
+from ansible.playbook.play_context import PlayContext
+from ansible.utils.context_objects import CLIArgs
from ansible.utils.display import Display
from ansible.utils.multiprocessing import context as multiprocessing_context
+from ansible.vars.manager import VariableManager
__all__ = ['WorkerProcess']
@@ -53,7 +67,20 @@ class WorkerProcess(multiprocessing_context.Process): # type: ignore[name-defin
for reading later.
"""
- def __init__(self, final_q, task_vars, host, task, play_context, loader, variable_manager, shared_loader_obj, worker_id):
+ def __init__(
+ self,
+ *,
+ final_q: FinalQueue,
+ task_vars: dict,
+ host: Host,
+ task: Task,
+ play_context: PlayContext,
+ loader: DataLoader,
+ variable_manager: VariableManager,
+ shared_loader_obj: types.SimpleNamespace,
+ worker_id: int,
+ cliargs: CLIArgs
+ ) -> None:
super(WorkerProcess, self).__init__()
# takes a task queue manager as the sole param:
@@ -73,24 +100,16 @@ class WorkerProcess(multiprocessing_context.Process): # type: ignore[name-defin
self.worker_queue = WorkerQueue(ctx=multiprocessing_context)
self.worker_id = worker_id
- def _save_stdin(self):
- self._new_stdin = None
- try:
- if sys.stdin.isatty() and sys.stdin.fileno() is not None:
- try:
- self._new_stdin = os.fdopen(os.dup(sys.stdin.fileno()))
- except OSError:
- # couldn't dupe stdin, most likely because it's
- # not a valid file descriptor
- pass
- except (AttributeError, ValueError):
- # couldn't get stdin's fileno
- pass
+ self._cliargs = cliargs
- if self._new_stdin is None:
- self._new_stdin = open(os.devnull)
+ def _term(self, signum, frame) -> None:
+ """
+        Terminate the process group created by calling setsid when
+        a termination signal is received by the fork.
+ """
+ os.killpg(self.pid, signum)
- def start(self):
+ def start(self) -> None:
"""
multiprocessing.Process replaces the worker's stdin with a new file
but we wish to preserve it if it is connected to a terminal.
@@ -99,15 +118,16 @@ class WorkerProcess(multiprocessing_context.Process): # type: ignore[name-defin
make sure it is closed in the parent when start() completes.
"""
- self._save_stdin()
# FUTURE: this lock can be removed once a more generalized pre-fork thread pause is in place
with display._lock:
- try:
- return super(WorkerProcess, self).start()
- finally:
- self._new_stdin.close()
-
- def _hard_exit(self, e):
+ super(WorkerProcess, self).start()
+        # Since setsid is called later, if the worker is terminated
+        # it won't terminate the new process group;
+        # register a handler to propagate the signal
+ signal.signal(signal.SIGTERM, self._term)
+ signal.signal(signal.SIGINT, self._term)
+
+ def _hard_exit(self, e: str) -> t.NoReturn:
"""
There is no safe exception to return to higher level code that does not
risk an innocent try/except finding itself executing in the wrong
@@ -118,14 +138,43 @@ class WorkerProcess(multiprocessing_context.Process): # type: ignore[name-defin
try:
display.debug(u"WORKER HARD EXIT: %s" % to_text(e))
except BaseException:
- # If the cause of the fault is IOError being generated by stdio,
- # attempting to log a debug message may trigger another IOError.
+ # If the cause of the fault is OSError being generated by stdio,
+ # attempting to log a debug message may trigger another OSError.
# Try printing once then give up.
pass
os._exit(1)
- def run(self):
+ def _detach(self) -> None:
+ """
+ The intent here is to detach the child process from the inherited stdio fds,
+ including /dev/tty. Children should use Display instead of direct interactions
+ with stdio fds.
+ """
+ try:
+ os.setsid()
+ # Create new fds for stdin/stdout/stderr, but also capture python uses of sys.stdout/stderr
+ for fds, mode in (
+ ((STDIN_FILENO,), os.O_RDWR | os.O_NONBLOCK),
+ ((STDOUT_FILENO, STDERR_FILENO), os.O_WRONLY),
+ ):
+ stdio = os.open(os.devnull, mode)
+ for fd in fds:
+ os.dup2(stdio, fd)
+ os.close(stdio)
+ sys.stdout = io.StringIO()
+ sys.stderr = io.StringIO()
+ sys.stdin = os.fdopen(STDIN_FILENO, 'r', closefd=False)
+ # Close stdin so we don't get hanging workers
+ # We use sys.stdin.close() for places where sys.stdin is used,
+ # to give better errors, and to prevent fd 0 reuse
+ sys.stdin.close()
+ except Exception as e:
+ display.debug(f'Could not detach from stdio: {traceback.format_exc()}')
+ display.error(f'Could not detach from stdio: {e}')
+ os._exit(1)
+
+ def run(self) -> None:
"""
Wrap _run() to ensure no possibility an errant exception can cause
control to return to the StrategyBase task loop, or any other code
@@ -135,124 +184,65 @@ class WorkerProcess(multiprocessing_context.Process): # type: ignore[name-defin
a try/except added in far-away code can cause a crashed child process
to suddenly assume the role and prior state of its parent.
"""
+ # Set the queue on Display so calls to Display.display are proxied over the queue
+ display.set_queue(self._final_q)
+ self._detach()
try:
- return self._run()
- except BaseException as e:
- self._hard_exit(e)
- finally:
- # This is a hack, pure and simple, to work around a potential deadlock
- # in ``multiprocessing.Process`` when flushing stdout/stderr during process
- # shutdown.
- #
- # We should no longer have a problem with ``Display``, as it now proxies over
- # the queue from a fork. However, to avoid any issues with plugins that may
- # be doing their own printing, this has been kept.
- #
- # This happens at the very end to avoid that deadlock, by simply side
- # stepping it. This should not be treated as a long term fix.
- #
- # TODO: Evaluate migrating away from the ``fork`` multiprocessing start method.
- sys.stdout = sys.stderr = open(os.devnull, 'w')
-
- def _run(self):
+ with _task.TaskContext(self._task):
+ return self._run()
+ except BaseException:
+ self._hard_exit(traceback.format_exc())
+
+ def _run(self) -> None:
"""
Called when the process is started. Pushes the result onto the
results queue. We also remove the host from the blocked hosts list, to
signify that they are ready for their next task.
"""
- # import cProfile, pstats, StringIO
- # pr = cProfile.Profile()
- # pr.enable()
-
- # Set the queue on Display so calls to Display.display are proxied over the queue
- display.set_queue(self._final_q)
-
global current_worker
+
current_worker = self
- try:
- # execute the task and build a TaskResult from the result
- display.debug("running TaskExecutor() for %s/%s" % (self._host, self._task))
- executor_result = TaskExecutor(
- self._host,
- self._task,
- self._task_vars,
- self._play_context,
- self._new_stdin,
- self._loader,
- self._shared_loader_obj,
- self._final_q,
- self._variable_manager,
- ).run()
-
- display.debug("done running TaskExecutor() for %s/%s [%s]" % (self._host, self._task, self._task._uuid))
- self._host.vars = dict()
- self._host.groups = []
-
- # put the result on the result queue
- display.debug("sending task result for task %s" % self._task._uuid)
- try:
- self._final_q.send_task_result(
- self._host.name,
- self._task._uuid,
- executor_result,
- task_fields=self._task.dump_attrs(),
- )
- except Exception as e:
- display.debug(f'failed to send task result ({e}), sending surrogate result')
- self._final_q.send_task_result(
- self._host.name,
- self._task._uuid,
- # Overriding the task result, to represent the failure
- {
- 'failed': True,
- 'msg': f'{e}',
- 'exception': traceback.format_exc(),
- },
- # The failure pickling may have been caused by the task attrs, omit for safety
- {},
+ executor_result = TaskExecutor(
+ self._host,
+ self._task,
+ self._task_vars,
+ self._play_context,
+ self._loader,
+ self._shared_loader_obj,
+ self._final_q,
+ self._variable_manager,
+ ).run()
+
+ self._host.vars = dict()
+ self._host.groups = []
+
+ for name, stdio in (('stdout', sys.stdout), ('stderr', sys.stderr)):
+ if data := stdio.getvalue(): # type: ignore[union-attr]
+ display.warning(
+ (
+ f'WorkerProcess for [{self._host}/{self._task}] errantly sent data directly to {name} instead of using Display:\n'
+ f'{textwrap.indent(data[:256], " ")}\n'
+ ),
+ formatted=True
)
- display.debug("done sending task result for task %s" % self._task._uuid)
-
- except AnsibleConnectionFailure:
- self._host.vars = dict()
- self._host.groups = []
- self._final_q.send_task_result(
- self._host.name,
- self._task._uuid,
- dict(unreachable=True),
- task_fields=self._task.dump_attrs(),
- )
- except Exception as e:
- if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound):
- try:
- self._host.vars = dict()
- self._host.groups = []
- self._final_q.send_task_result(
- self._host.name,
- self._task._uuid,
- dict(failed=True, exception=to_text(traceback.format_exc()), stdout=''),
- task_fields=self._task.dump_attrs(),
- )
- except Exception:
- display.debug(u"WORKER EXCEPTION: %s" % to_text(e))
- display.debug(u"WORKER TRACEBACK: %s" % to_text(traceback.format_exc()))
- finally:
- self._clean_up()
-
- display.debug("WORKER PROCESS EXITING")
-
- # pr.disable()
- # s = StringIO.StringIO()
- # sortby = 'time'
- # ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
- # ps.print_stats()
- # with open('worker_%06d.stats' % os.getpid(), 'w') as f:
- # f.write(s.getvalue())
-
- def _clean_up(self):
- # NOTE: see note in init about forks
- # ensure we cleanup all temp files for this worker
- self._loader.cleanup_all_tmp_files()
+ try:
+ self._final_q.send_task_result(_RawTaskResult(
+ host=self._host,
+ task=self._task,
+ return_data=executor_result,
+ task_fields=self._task.dump_attrs(),
+ ))
+ except Exception as ex:
+ try:
+ raise AnsibleError("Task result omitted due to queue send failure.") from ex
+ except Exception as ex_wrapper:
+ self._final_q.send_task_result(_RawTaskResult(
+ host=self._host,
+ task=self._task,
+ # ignore the real task result and don't allow result object contribution from the exception (in case the pickling error was related)
+ return_data=_error_utils.result_dict_from_exception(ex_wrapper),
+ task_fields={}, # The failure pickling may have been caused by the task attrs, omit for safety
+ ))
diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py
index ebef9cbfd15..60c6b392cbc 100644
--- a/lib/ansible/executor/task_executor.py
+++ b/lib/ansible/executor/task_executor.py
@@ -7,32 +7,42 @@ import os
import time
import json
import pathlib
-import signal
import subprocess
import sys
+
import traceback
+import typing as t
from ansible import constants as C
from ansible.cli import scripts
-from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure, AnsibleActionFail, AnsibleActionSkip
-from ansible.executor.task_result import TaskResult
-from ansible.executor.module_common import get_action_args_with_defaults
+from ansible.errors import (
+ AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleTaskError,
+ AnsibleValueOmittedError,
+)
+from ansible.executor.task_result import _RawTaskResult
+from ansible._internal._datatag import _utils
+from ansible.module_utils._internal import _messages
+from ansible.module_utils.datatag import native_type_name, deprecator_from_collection_name
+from ansible._internal._datatag._tags import TrustedAsTemplate
from ansible.module_utils.parsing.convert_bool import boolean
-from ansible.module_utils.six import binary_type
from ansible.module_utils.common.text.converters import to_text, to_native
from ansible.module_utils.connection import write_to_stream
from ansible.module_utils.six import string_types
-from ansible.playbook.conditional import Conditional
from ansible.playbook.task import Task
from ansible.plugins import get_plugin_class
from ansible.plugins.loader import become_loader, cliconf_loader, connection_loader, httpapi_loader, netconf_loader, terminal_loader
+from ansible._internal._templating._jinja_plugins import _invoke_lookup, _DirectCall
+from ansible._internal._templating._engine import TemplateEngine
from ansible.template import Templar
from ansible.utils.collection_loader import AnsibleCollectionConfig
-from ansible.utils.listify import listify_lookup_plugin_terms
-from ansible.utils.unsafe_proxy import to_unsafe_text, wrap_var
-from ansible.vars.clean import namespace_facts, clean_facts
-from ansible.utils.display import Display
+from ansible.utils.display import Display, _DeferredWarningContext
from ansible.utils.vars import combine_vars
+from ansible.vars.clean import namespace_facts, clean_facts
+from ansible.vars.manager import _deprecate_top_level_fact
+from ansible._internal._errors import _captured, _task_timeout, _error_utils
+
+if t.TYPE_CHECKING:
+ from ansible.executor.task_queue_manager import FinalQueue
display = Display()
@@ -42,47 +52,6 @@ RETURN_VARS = [x for x in C.MAGIC_VARIABLE_MAPPING.items() if 'become' not in x
__all__ = ['TaskExecutor']
-class TaskTimeoutError(BaseException):
- def __init__(self, message="", frame=None):
-
- if frame is not None:
- orig = frame
- root = pathlib.Path(__file__).parent
- while not pathlib.Path(frame.f_code.co_filename).is_relative_to(root):
- frame = frame.f_back
-
- self.frame = 'Interrupted at %s called from %s' % (orig, frame)
-
- super(TaskTimeoutError, self).__init__(message)
-
-
-def task_timeout(signum, frame):
- raise TaskTimeoutError(frame=frame)
-
-
-def remove_omit(task_args, omit_token):
- """
- Remove args with a value equal to the ``omit_token`` recursively
- to align with now having suboptions in the argument_spec
- """
-
- if not isinstance(task_args, dict):
- return task_args
-
- new_args = {}
- for i in task_args.items():
- if i[1] == omit_token:
- continue
- elif isinstance(i[1], dict):
- new_args[i[0]] = remove_omit(i[1], omit_token)
- elif isinstance(i[1], list):
- new_args[i[0]] = [remove_omit(v, omit_token) for v in i[1]]
- else:
- new_args[i[0]] = i[1]
-
- return new_args
-
-
class TaskExecutor:
"""
@@ -92,18 +61,18 @@ class TaskExecutor:
class.
"""
- def __init__(self, host, task, job_vars, play_context, new_stdin, loader, shared_loader_obj, final_q, variable_manager):
+ def __init__(self, host, task: Task, job_vars, play_context, loader, shared_loader_obj, final_q: FinalQueue, variable_manager):
self._host = host
self._task = task
self._job_vars = job_vars
self._play_context = play_context
- self._new_stdin = new_stdin
self._loader = loader
self._shared_loader_obj = shared_loader_obj
self._connection = None
self._final_q = final_q
self._variable_manager = variable_manager
self._loop_eval_error = None
+ self._task_templar = TemplateEngine(loader=self._loader, variables=self._job_vars)
self._task.squash()
@@ -135,10 +104,14 @@ class TaskExecutor:
# loop through the item results and set the global changed/failed/skipped result flags based on any item.
res['skipped'] = True
for item in item_results:
+ if item.get('_ansible_no_log'):
+ res.update(_ansible_no_log=True) # ensure no_log processing recognizes at least one item needs to be censored
+
if 'changed' in item and item['changed'] and not res.get('changed'):
res['changed'] = True
if res['skipped'] and ('skipped' not in item or ('skipped' in item and not item['skipped'])):
res['skipped'] = False
+ # FIXME: normalize `failed` to a bool, warn if the action/module used non-bool
if 'failed' in item and item['failed']:
item_ignore = item.pop('_ansible_ignore_errors')
if not res.get('failed'):
@@ -150,6 +123,7 @@ class TaskExecutor:
if 'unreachable' in item and item['unreachable']:
item_ignore_unreachable = item.pop('_ansible_ignore_unreachable')
if not res.get('unreachable'):
+ res['unreachable'] = True
self._task.ignore_unreachable = item_ignore_unreachable
elif self._task.ignore_unreachable and not item_ignore_unreachable:
self._task.ignore_unreachable = item_ignore_unreachable
@@ -164,6 +138,7 @@ class TaskExecutor:
res[array] = res[array] + item[array]
del item[array]
+ # FIXME: normalize `failed` to a bool, warn if the action/module used non-bool
if not res.get('failed', False):
res['msg'] = 'All items completed'
if res['skipped']:
@@ -172,43 +147,23 @@ class TaskExecutor:
res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
else:
display.debug("calling self._execute()")
- res = self._execute()
+ res = self._execute(self._task_templar, self._job_vars)
display.debug("_execute() done")
# make sure changed is set in the result, if it's not present
if 'changed' not in res:
res['changed'] = False
- def _clean_res(res, errors='surrogate_or_strict'):
- if isinstance(res, binary_type):
- return to_unsafe_text(res, errors=errors)
- elif isinstance(res, dict):
- for k in res:
- try:
- res[k] = _clean_res(res[k], errors=errors)
- except UnicodeError:
- if k == 'diff':
- # If this is a diff, substitute a replacement character if the value
- # is undecodable as utf8. (Fix #21804)
- display.warning("We were unable to decode all characters in the module return data."
- " Replaced some in an effort to return as much as possible")
- res[k] = _clean_res(res[k], errors='surrogate_then_replace')
- else:
- raise
- elif isinstance(res, list):
- for idx, item in enumerate(res):
- res[idx] = _clean_res(item, errors=errors)
- return res
-
- display.debug("dumping result to json")
- res = _clean_res(res)
- display.debug("done dumping result, returning")
return res
- except AnsibleError as e:
- return dict(failed=True, msg=wrap_var(to_text(e, nonstring='simplerepr')), _ansible_no_log=self._play_context.no_log)
- except Exception as e:
- return dict(failed=True, msg=wrap_var('Unexpected failure during module execution: %s' % (to_native(e, nonstring='simplerepr'))),
- exception=to_text(traceback.format_exc()), stdout='', _ansible_no_log=self._play_context.no_log)
+ except Exception as ex:
+ result = _error_utils.result_dict_from_exception(ex)
+
+ self._task.update_result_no_log(self._task_templar, result)
+
+ if not isinstance(ex, AnsibleError):
+ result.update(msg=f'Unexpected failure during task execution: {result["msg"]}')
+
+ return result
finally:
try:
self._connection.close()
@@ -217,7 +172,7 @@ class TaskExecutor:
except Exception as e:
display.debug(u"error closing connection: %s" % to_text(e))
- def _get_loop_items(self):
+ def _get_loop_items(self) -> list[t.Any] | None:
"""
Loads a lookup plugin to handle the with_* portion of a task (if specified),
and returns the items result.
@@ -230,49 +185,51 @@ class TaskExecutor:
if self._loader.get_basedir() not in self._job_vars['ansible_search_path']:
self._job_vars['ansible_search_path'].append(self._loader.get_basedir())
- templar = Templar(loader=self._loader, variables=self._job_vars)
items = None
if self._task.loop_with:
- if self._task.loop_with in self._shared_loader_obj.lookup_loader:
-
- # TODO: hardcoded so it fails for non first_found lookups, but this should be generalized for those that don't do their own templating
- # lookup prop/attribute?
- fail = bool(self._task.loop_with != 'first_found')
- loop_terms = listify_lookup_plugin_terms(terms=self._task.loop, templar=templar, fail_on_undefined=fail, convert_bare=False)
-
- # get lookup
- mylookup = self._shared_loader_obj.lookup_loader.get(self._task.loop_with, loader=self._loader, templar=templar)
-
- # give lookup task 'context' for subdir (mostly needed for first_found)
- for subdir in ['template', 'var', 'file']: # TODO: move this to constants?
- if subdir in self._task.action:
- break
- setattr(mylookup, '_subdir', subdir + 's')
+ templar = self._task_templar
+ terms = self._task.loop
+
+ if isinstance(terms, str):
+ terms = templar.resolve_to_container(_utils.str_problematic_strip(terms))
+
+ if not isinstance(terms, list):
+ terms = [terms]
+
+ @_DirectCall.mark
+ def invoke_lookup() -> t.Any:
+ """Scope-capturing wrapper around _invoke_lookup to avoid functools.partial obscuring its usage from type-checking tools."""
+ return _invoke_lookup(
+ plugin_name=self._task.loop_with,
+ lookup_terms=terms,
+ lookup_kwargs=dict(wantlist=True),
+ invoked_as_with=True,
+ )
- # run lookup
- items = wrap_var(mylookup.run(terms=loop_terms, variables=self._job_vars, wantlist=True))
- else:
- raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop_with)
+ # Smuggle a special wrapped lookup invocation in as a local variable for its exclusive use when being evaluated as `with_(lookup)`.
+ # This value will not be visible to other users of this templar or its `available_variables`.
+ items = templar.evaluate_expression(expression=TrustedAsTemplate().tag("invoke_lookup()"), local_variables=dict(invoke_lookup=invoke_lookup))
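+            # Editorial note (illustrative, not part of the original change): for `with_items: "{{ my_list }}"`,
+            # `terms` is the resolved list and the `invoke_lookup()` expression dispatches to the `items`
+            # lookup with `wantlist=True`, producing the loop items.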
elif self._task.loop is not None:
- items = templar.template(self._task.loop)
+ items = self._task_templar.template(self._task.loop)
+
if not isinstance(items, list):
raise AnsibleError(
- "Invalid data passed to 'loop', it requires a list, got this instead: %s."
- " Hint: If you passed a list/dict of just one element,"
- " try adding wantlist=True to your lookup invocation or use q/query instead of lookup." % items
+ f"The `loop` value must resolve to a 'list', not {native_type_name(items)!r}.",
+ help_text="Provide a list of items/templates, or a template resolving to a list.",
+ obj=self._task.loop,
)
return items
- def _run_loop(self, items):
+ def _run_loop(self, items: list[t.Any]) -> list[dict[str, t.Any]]:
"""
Runs the task with the loop items specified and collates the result
into an array named 'results' which is inserted into the final result
along with the item for which the loop ran.
"""
task_vars = self._job_vars
- templar = Templar(loader=self._loader, variables=task_vars)
+ templar = TemplateEngine(loader=self._loader, variables=task_vars)
self._task.loop_control.post_validate(templar=templar)
@@ -281,17 +238,20 @@ class TaskExecutor:
loop_pause = self._task.loop_control.pause
extended = self._task.loop_control.extended
extended_allitems = self._task.loop_control.extended_allitems
+
# ensure we always have a label
- label = self._task.loop_control.label or '{{' + loop_var + '}}'
+ label = self._task.loop_control.label or templar.variable_name_as_template(loop_var)
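+        # e.g. with the default loop_var `item`, this falls back to a "{{ item }}"-style label template (editorial note)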
if loop_var in task_vars:
- display.warning(u"%s: The loop variable '%s' is already in use. "
- u"You should set the `loop_var` value in the `loop_control` option for the task"
- u" to something else to avoid variable collisions and unexpected behavior." % (self._task, loop_var))
+ display.warning(
+ msg=f"The loop variable {loop_var!r} is already in use.",
+ help_text="You should set the `loop_var` value in the `loop_control` option for the task "
+ "to something else to avoid variable collisions and unexpected behavior.",
+ obj=loop_var,
+ )
ran_once = False
task_fields = None
- no_log = False
items_len = len(items)
results = []
for item_index, item in enumerate(items):
@@ -331,7 +291,7 @@ class TaskExecutor:
ran_once = True
try:
- tmp_task = self._task.copy(exclude_parent=True, exclude_tasks=True)
+ tmp_task: Task = self._task.copy(exclude_parent=True, exclude_tasks=True)
tmp_task._parent = self._task._parent
tmp_play_context = self._play_context.copy()
except AnsibleParserError as e:
@@ -340,9 +300,11 @@ class TaskExecutor:
# now we swap the internal task and play context with their copies,
# execute, and swap them back so we can do the next iteration cleanly
+ # NB: this swap-a-dee-doo confuses some type checkers about the type of tmp_task/self._task
(self._task, tmp_task) = (tmp_task, self._task)
(self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
- res = self._execute(variables=task_vars)
+
+ res = self._execute(templar=templar, variables=task_vars)
if self._task.register:
# Ensure per loop iteration results are registered in case `_execute()`
@@ -354,9 +316,6 @@ class TaskExecutor:
(self._task, tmp_task) = (tmp_task, self._task)
(self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
- # update 'general no_log' based on specific no_log
- no_log = no_log or tmp_task.no_log
-
# now update the result with the item info, and append the result
# to the list of results
res[loop_var] = item
@@ -384,13 +343,14 @@ class TaskExecutor:
if self._connection and not isinstance(self._connection, string_types):
task_fields['connection'] = getattr(self._connection, 'ansible_name')
- tr = TaskResult(
- self._host.name,
- self._task._uuid,
- res,
+ tr = _RawTaskResult(
+ host=self._host,
+ task=self._task,
+ return_data=res,
task_fields=task_fields,
)
+ # FIXME: normalize `failed` to a bool, warn if the action/module used non-bool
if tr.is_failed() or tr.is_unreachable():
self._final_q.send_callback('v2_runner_item_on_failed', tr)
elif tr.is_skipped():
@@ -405,11 +365,14 @@ class TaskExecutor:
# break loop if break_when conditions are met
if self._task.loop_control and self._task.loop_control.break_when:
- cond = Conditional(loader=self._loader)
- cond.when = self._task.loop_control.get_validated_value(
- 'break_when', self._task.loop_control.fattributes.get('break_when'), self._task.loop_control.break_when, templar
+ break_when = self._task.loop_control.get_validated_value(
+ 'break_when',
+ self._task.loop_control.fattributes.get('break_when'),
+ self._task.loop_control.break_when,
+ templar,
)
- if cond.evaluate_conditional(templar, task_vars):
+
+ if self._task._resolve_conditional(break_when, task_vars):
# delete loop vars before exiting loop
del task_vars[loop_var]
break
@@ -431,7 +394,6 @@ class TaskExecutor:
if var in task_vars and var not in self._job_vars:
del task_vars[var]
- self._task.no_log = no_log
# NOTE: run_once cannot contain loop vars because it's templated earlier also
# This is saving the post-validated field from the last loop so the strategy can use the templated value post task execution
self._task.run_once = task_fields.get('run_once')
@@ -447,22 +409,50 @@ class TaskExecutor:
# At the point this is executed it is safe to mutate self._task,
# since `self._task` is either a copy referred to by `tmp_task` in `_run_loop`
# or just a singular non-looped task
- if delegated_host_name:
- self._task.delegate_to = delegated_host_name
- variables.update(delegated_vars)
- def _execute(self, variables=None):
+ self._task.delegate_to = delegated_host_name # always override, since a templated result could be an omit (-> None)
+ variables.update(delegated_vars)
+
+ def _execute(self, templar: TemplateEngine, variables: dict[str, t.Any]) -> dict[str, t.Any]:
+ result: dict[str, t.Any]
+
+ with _DeferredWarningContext(variables=variables) as warning_ctx:
+ try:
+ # DTFIX-FUTURE: improve error handling to prioritize the earliest exception, turning the remaining ones into warnings
+ result = self._execute_internal(templar, variables)
+ self._apply_task_result_compat(result, warning_ctx)
+ _captured.AnsibleActionCapturedError.maybe_raise_on_result(result)
+ except (Exception, _task_timeout.TaskTimeoutError) as ex: # TaskTimeoutError is BaseException
+ try:
+ raise AnsibleTaskError(obj=self._task.get_ds()) from ex
+ except AnsibleTaskError as atex:
+ result = _error_utils.result_dict_from_exception(atex, accept_result_contribution=True)
+ result.setdefault('changed', False)
+
+ self._task.update_result_no_log(templar, result)
+
+ # The warnings/deprecations in the result have already been captured in the _DeferredWarningContext by _apply_task_result_compat.
+ # The captured warnings/deprecations are a superset of the ones from the result, and may have been converted from a dict to a dataclass.
+ # These are then used to supersede the entries in the result.
+
+ result.pop('warnings', None)
+ result.pop('deprecations', None)
+
+ if warnings := warning_ctx.get_warnings():
+ result.update(warnings=warnings)
+
+ if deprecation_warnings := warning_ctx.get_deprecation_warnings():
+ result.update(deprecations=deprecation_warnings)
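+
+        # Editorial note: e.g. a module returning {'warnings': ['beware']} has that entry captured by
+        # _apply_task_result_compat into the deferred context, then re-emitted above as a WarningSummary,
+        # so the returned result always carries the normalized warning/deprecation objects.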
+
+ return result
+
+ def _execute_internal(self, templar: TemplateEngine, variables: dict[str, t.Any]) -> dict[str, t.Any]:
"""
The primary workhorse of the executor system, this runs the task
on the specified host (which may be the delegated_to host) and handles
the retry/until and block rescue/always execution
"""
- if variables is None:
- variables = self._job_vars
-
- templar = Templar(loader=self._loader, variables=variables)
-
self._calculate_delegate_to(templar, variables)
context_validation_error = None
@@ -497,18 +487,13 @@ class TaskExecutor:
# skipping this task during the conditional evaluation step
context_validation_error = e
- no_log = self._play_context.no_log
-
# Evaluate the conditional (if any) for this task, which we do before running
# the final task post-validation. We do this before the post validation due to
# the fact that the conditional may specify that the task be skipped due to a
# variable not being present which would otherwise cause validation to fail
try:
- conditional_result, false_condition = self._task.evaluate_conditional_with_result(templar, tempvars)
- if not conditional_result:
- display.debug("when evaluation is False, skipping this task")
- return dict(changed=False, skipped=True, skip_reason='Conditional result was False',
- false_condition=false_condition, _ansible_no_log=no_log)
+ if not self._task._resolve_conditional(self._task.when, tempvars, result_context=(rc := t.cast(dict[str, t.Any], {}))):
+ return dict(changed=False, skipped=True, skip_reason='Conditional result was False') | rc
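+                # Editorial note: `rc` carries extra context from the conditional evaluation into the skip
+                # result (e.g. the false condition, previously returned explicitly as `false_condition`).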
except AnsibleError as e:
# loop error takes precedence
if self._loop_eval_error is not None:
@@ -524,22 +509,27 @@ class TaskExecutor:
# if we ran into an error while setting up the PlayContext, raise it now, unless is known issue with delegation
# and undefined vars (correct values are in cvars later on and connection plugins, if still error, blows up there)
+
+ # DTFIX-FUTURE: this should probably be declaratively handled in post_validate (or better, get rid of play_context)
if context_validation_error is not None:
raiseit = True
if self._task.delegate_to:
- if isinstance(context_validation_error, AnsibleUndefinedVariable):
- raiseit = False
- elif isinstance(context_validation_error, AnsibleParserError):
+ if isinstance(context_validation_error, AnsibleParserError):
# parser error, might be cause by undef too
- orig_exc = getattr(context_validation_error, 'orig_exc', None)
- if isinstance(orig_exc, AnsibleUndefinedVariable):
+ if isinstance(context_validation_error.__cause__, AnsibleUndefinedVariable):
raiseit = False
+ elif isinstance(context_validation_error, AnsibleUndefinedVariable):
+ # DTFIX-FUTURE: should not be possible to hit this now (all are AnsibleFieldAttributeError)?
+ raiseit = False
if raiseit:
raise context_validation_error # pylint: disable=raising-bad-type
# set templar to use temp variables until loop is evaluated
templar.available_variables = tempvars
+ # Now we do final validation on the task, which sets all fields to their final values.
+ self._task.post_validate(templar=templar)
+
# if this task is a TaskInclude, we just return now with a success code so the
# main thread can expand the task list for the given host
if self._task.action in C._ACTION_INCLUDE_TASKS:
@@ -548,7 +538,6 @@ class TaskExecutor:
if not include_file:
return dict(failed=True, msg="No include file was specified to the include")
- include_file = templar.template(include_file)
return dict(include=include_file, include_args=include_args)
# if this task is a IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host
@@ -556,32 +545,9 @@ class TaskExecutor:
include_args = self._task.args.copy()
return dict(include_args=include_args)
- # Now we do final validation on the task, which sets all fields to their final values.
- try:
- self._task.post_validate(templar=templar)
- except AnsibleError:
- raise
- except Exception:
- return dict(changed=False, failed=True, _ansible_no_log=no_log, exception=to_text(traceback.format_exc()))
- if '_variable_params' in self._task.args:
- variable_params = self._task.args.pop('_variable_params')
- if isinstance(variable_params, dict):
- if C.INJECT_FACTS_AS_VARS:
- display.warning("Using a variable for a task's 'args' is unsafe in some situations "
- "(see https://docs.ansible.com/ansible/devel/reference_appendices/faq.html#argsplat-unsafe)")
- variable_params.update(self._task.args)
- self._task.args = variable_params
- else:
- # if we didn't get a dict, it means there's garbage remaining after k=v parsing, just give up
- # see https://github.com/ansible/ansible/issues/79862
- raise AnsibleError(f"invalid or malformed argument: '{variable_params}'")
-
- # update no_log to task value, now that we have it templated
- no_log = self._task.no_log
-
# free tempvars up, not used anymore, cvars and vars_copy should be mainly used after this point
# updating the original 'variables' at the end
- tempvars = {}
+ del tempvars
# setup cvars copy, used for all connection related templating
if self._task.delegate_to:
@@ -633,23 +599,7 @@ class TaskExecutor:
cvars['ansible_python_interpreter'] = sys.executable
# get handler
- self._handler, module_context = self._get_action_handler_with_module_context(templar=templar)
-
- if module_context is not None:
- module_defaults_fqcn = module_context.resolved_fqcn
- else:
- module_defaults_fqcn = self._task.resolved_action
-
- # Apply default params for action/module, if present
- self._task.args = get_action_args_with_defaults(
- module_defaults_fqcn, self._task.args, self._task.module_defaults, templar,
- action_groups=self._task._parent._play._action_groups
- )
-
- # And filter out any fields which were set to default(omit), and got the omit token value
- omit_token = variables.get('omit')
- if omit_token is not None:
- self._task.args = remove_omit(self._task.args, omit_token)
+ self._handler, _module_context = self._get_action_handler_with_module_context(templar=templar)
retries = 1 # includes the default actual run + retries set by user/default
if self._task.retries is not None:
@@ -666,30 +616,12 @@ class TaskExecutor:
for attempt in range(1, retries + 1):
display.debug("running the handler")
try:
- if self._task.timeout:
- old_sig = signal.signal(signal.SIGALRM, task_timeout)
- signal.alarm(self._task.timeout)
- result = self._handler.run(task_vars=vars_copy)
- except (AnsibleActionFail, AnsibleActionSkip) as e:
- return e.result
- except AnsibleConnectionFailure as e:
- return dict(unreachable=True, msg=to_text(e))
- except TaskTimeoutError as e:
- msg = 'The %s action failed to execute in the expected time frame (%d) and was terminated' % (self._task.action, self._task.timeout)
- return dict(failed=True, msg=msg, timedout={'frame': e.frame, 'period': self._task.timeout})
+ with _task_timeout.TaskTimeoutError.alarm_timeout(self._task.timeout):
+ result = self._handler.run(task_vars=vars_copy)
finally:
- if self._task.timeout:
- signal.alarm(0)
- old_sig = signal.signal(signal.SIGALRM, old_sig)
self._handler.cleanup()
display.debug("handler run complete")
- # preserve no log
- result["_ansible_no_log"] = no_log
-
- if self._task.action not in C._ACTION_WITH_CLEAN_FACTS:
- result = wrap_var(result)
-
# update the local copy of vars with the registered value, if specified,
# or any facts which may have been generated by the module execution
if self._task.register:
@@ -701,37 +633,23 @@ class TaskExecutor:
if result.get('failed'):
self._final_q.send_callback(
'v2_runner_on_async_failed',
- TaskResult(self._host.name,
- self._task._uuid,
- result,
- task_fields=self._task.dump_attrs()))
+ _RawTaskResult(
+ host=self._host,
+ task=self._task,
+ return_data=result,
+ task_fields=self._task.dump_attrs(),
+ ),
+ )
else:
self._final_q.send_callback(
'v2_runner_on_async_ok',
- TaskResult(self._host.name,
- self._task._uuid,
- result,
- task_fields=self._task.dump_attrs()))
-
- # ensure no log is preserved
- result["_ansible_no_log"] = no_log
-
- # helper methods for use below in evaluating changed/failed_when
- def _evaluate_changed_when_result(result):
- if self._task.changed_when is not None and self._task.changed_when:
- cond = Conditional(loader=self._loader)
- cond.when = self._task.changed_when
- result['changed'] = cond.evaluate_conditional(templar, vars_copy)
-
- def _evaluate_failed_when_result(result):
- if self._task.failed_when:
- cond = Conditional(loader=self._loader)
- cond.when = self._task.failed_when
- failed_when_result = cond.evaluate_conditional(templar, vars_copy)
- result['failed_when_result'] = result['failed'] = failed_when_result
- else:
- failed_when_result = False
- return failed_when_result
+ _RawTaskResult(
+ host=self._host,
+ task=self._task,
+ return_data=result,
+ task_fields=self._task.dump_attrs(),
+ ),
+ )
if 'ansible_facts' in result and self._task.action not in C._ACTION_DEBUG:
if self._task.action in C._ACTION_WITH_CLEAN_FACTS:
@@ -744,10 +662,11 @@ class TaskExecutor:
vars_copy.update(result['ansible_facts'])
else:
# TODO: cleaning of facts should eventually become part of taskresults instead of vars
- af = wrap_var(result['ansible_facts'])
+ af = result['ansible_facts']
vars_copy['ansible_facts'] = combine_vars(vars_copy.get('ansible_facts', {}), namespace_facts(af))
if C.INJECT_FACTS_AS_VARS:
- vars_copy.update(clean_facts(af))
+ cleaned_toplevel = {k: _deprecate_top_level_fact(v) for k, v in clean_facts(af).items()}
+ vars_copy.update(cleaned_toplevel)
# set the failed property if it was missing.
if 'failed' not in result:
@@ -765,9 +684,6 @@ class TaskExecutor:
if 'changed' not in result:
result['changed'] = False
- if self._task.action not in C._ACTION_WITH_CLEAN_FACTS:
- result = wrap_var(result)
-
# re-update the local copy of vars with the registered value, if specified,
# or any facts which may have been generated by the module execution
# This gives changed/failed_when access to additional recently modified
@@ -780,18 +696,30 @@ class TaskExecutor:
if 'skipped' not in result:
condname = 'changed'
+ # DTFIX-FUTURE: error normalization has not yet occurred; this means that the expressions used for until/failed_when/changed_when/break_when
+ # and when (for loops on the second and later iterations) cannot see the normalized error shapes. This, and the current impl of the expression
+ # handling here causes a number of problems:
+ # * any error in one of the post-task exec expressions is silently ignored and detail lost (eg: `failed_when: syntax ERROR @$123`)
+ # * they cannot reliably access error/warning details, since many of those details are inaccessible until the error normalization occurs
+ # * error normalization includes `msg` if present, and supplies `unknown error` if not; this leads to screwy results on True failed_when if
+            #   `msg` is present, eg: `{debug: {}, failed_when: True}` -> "Task failed: Action failed: Hello world!"
+ # * detail about failed_when is lost; any error details from the task could potentially be grafted in/preserved if error normalization was done
+
try:
- _evaluate_changed_when_result(result)
+ if self._task.changed_when is not None and self._task.changed_when:
+ result['changed'] = self._task._resolve_conditional(self._task.changed_when, vars_copy)
+
condname = 'failed'
- _evaluate_failed_when_result(result)
+
+ if self._task.failed_when:
+ result['failed_when_result'] = result['failed'] = self._task._resolve_conditional(self._task.failed_when, vars_copy)
+
except AnsibleError as e:
result['failed'] = True
result['%s_when_result' % condname] = to_text(e)
if retries > 1:
- cond = Conditional(loader=self._loader)
- cond.when = self._task.until or [not result['failed']]
- if cond.evaluate_conditional(templar, vars_copy):
+ if self._task._resolve_conditional(self._task.until or [not result['failed']], vars_copy):
break
else:
# no conditional check, or it failed, so sleep for the specified time
@@ -801,12 +729,12 @@ class TaskExecutor:
display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
self._final_q.send_callback(
'v2_runner_retry',
- TaskResult(
- self._host.name,
- self._task._uuid,
- result,
+ _RawTaskResult(
+ host=self._host,
+ task=self._task,
+ return_data=result,
task_fields=self._task.dump_attrs()
- )
+ ),
)
time.sleep(delay)
self._handler = self._get_action_handler(templar=templar)
@@ -816,9 +744,6 @@ class TaskExecutor:
result['attempts'] = retries - 1
result['failed'] = True
- if self._task.action not in C._ACTION_WITH_CLEAN_FACTS:
- result = wrap_var(result)
-
# do the final update of the local variables here, for both registered
# values and any facts which may have been created
if self._task.register:
@@ -829,10 +754,12 @@ class TaskExecutor:
variables.update(result['ansible_facts'])
else:
# TODO: cleaning of facts should eventually become part of taskresults instead of vars
- af = wrap_var(result['ansible_facts'])
+ af = result['ansible_facts']
variables['ansible_facts'] = combine_vars(variables.get('ansible_facts', {}), namespace_facts(af))
if C.INJECT_FACTS_AS_VARS:
- variables.update(clean_facts(af))
+ # DTFIX-FUTURE: why is this happening twice, esp since we're post-fork and these will be discarded?
+ cleaned_toplevel = {k: _deprecate_top_level_fact(v) for k, v in clean_facts(af).items()}
+ variables.update(cleaned_toplevel)
# save the notification target in the result, if it was specified, as
# this task may be running in a loop in which case the notification
@@ -857,6 +784,49 @@ class TaskExecutor:
display.debug("attempt loop complete, returning result")
return result
+ @staticmethod
+ def _apply_task_result_compat(result: dict[str, t.Any], warning_ctx: _DeferredWarningContext) -> None:
+ """Apply backward-compatibility mutations to the supplied task result."""
+ if warnings := result.get('warnings'):
+ if isinstance(warnings, list):
+ for warning in warnings:
+ if not isinstance(warning, _messages.WarningSummary):
+                        # translate non-WarningSummary messages
+ warning = _messages.WarningSummary(
+ event=_messages.Event(
+ msg=str(warning),
+ ),
+ )
+
+ warning_ctx.capture(warning)
+ else:
+ display.warning(f"Task result `warnings` was {type(warnings)} instead of {list}.")
+
+ if deprecations := result.get('deprecations'):
+ if isinstance(deprecations, list):
+ for deprecation in deprecations:
+ if not isinstance(deprecation, _messages.DeprecationSummary):
+ # translate non-DeprecationSummary message dicts
+ try:
+ if (collection_name := deprecation.pop('collection_name', ...)) is not ...:
+ # deprecated: description='enable the deprecation message for collection_name' core_version='2.23'
+ # CAUTION: This deprecation cannot be enabled until the replacement (deprecator) has been documented, and the schema finalized.
+ # self.deprecated('The `collection_name` key in the `deprecations` dictionary is deprecated.', version='2.27')
+ deprecation.update(deprecator=deprecator_from_collection_name(collection_name))
+
+ deprecation = _messages.DeprecationSummary(
+ event=_messages.Event(
+ msg=deprecation.pop('msg'),
+ ),
+ **deprecation,
+ )
+ except Exception as ex:
+ display.error_as_warning("Task result `deprecations` contained an invalid item.", exception=ex)
+
+ warning_ctx.capture(deprecation)
+ else:
+ display.warning(f"Task result `deprecations` was {type(deprecations)} instead of {list}.")
+
def _poll_async_result(self, result, templar, task_vars=None):
"""
Polls for the specified JID to be complete
@@ -890,7 +860,7 @@ class TaskExecutor:
connection=self._connection,
play_context=self._play_context,
loader=self._loader,
- templar=templar,
+ templar=Templar._from_template_engine(templar),
shared_loader_obj=self._shared_loader_obj,
)
@@ -902,12 +872,12 @@ class TaskExecutor:
async_result = async_handler.run(task_vars=task_vars)
# We do not bail out of the loop in cases where the failure
# is associated with a parsing error. The async_runner can
- # have issues which result in a half-written/unparseable result
+ # have issues which result in a half-written/unparsable result
# file on disk, which manifests to the user as a timeout happening
# before it's time to timeout.
- if (int(async_result.get('finished', 0)) == 1 or
- ('failed' in async_result and async_result.get('_ansible_parsed', False)) or
- 'skipped' in async_result):
+ if (async_result.get('finished', False) or
+ (async_result.get('failed', False) and async_result.get('_ansible_parsed', False)) or
+ async_result.get('skipped', False)):
break
except Exception as e:
# Connections can raise exceptions during polling (eg, network bounce, reboot); these should be non-fatal.
@@ -928,19 +898,19 @@ class TaskExecutor:
time_left -= self._task.poll
self._final_q.send_callback(
'v2_runner_on_async_poll',
- TaskResult(
- self._host.name,
- async_task._uuid,
- async_result,
+ _RawTaskResult(
+ host=self._host,
+ task=async_task,
+ return_data=async_result,
task_fields=async_task.dump_attrs(),
),
)
- if int(async_result.get('finished', 0)) != 1:
+ if not async_result.get('finished', False):
if async_result.get('_ansible_parsed'):
return dict(failed=True, msg="async task did not complete within the requested time - %ss" % self._task.async_val, async_result=async_result)
else:
- return dict(failed=True, msg="async task produced unparseable results", async_result=async_result)
+ return dict(failed=True, msg="async task produced unparsable results", async_result=async_result)
else:
# If the async task finished, automatically cleanup the temporary
# status file left behind.
@@ -960,7 +930,7 @@ class TaskExecutor:
connection=self._connection,
play_context=self._play_context,
loader=self._loader,
- templar=templar,
+ templar=Templar._from_template_engine(templar),
shared_loader_obj=self._shared_loader_obj,
)
cleanup_handler.run(task_vars=task_vars)
@@ -991,7 +961,7 @@ class TaskExecutor:
connection, plugin_load_context = self._shared_loader_obj.connection_loader.get_with_context(
conn_type,
self._play_context,
- self._new_stdin,
+ new_stdin=None, # No longer used, kept for backwards compat for plugins that explicitly accept this as an arg
task_uuid=self._task._uuid,
ansible_playbook_pid=to_text(os.getppid())
)
@@ -1057,7 +1027,11 @@ class TaskExecutor:
options = {}
for k in option_vars:
if k in variables:
- options[k] = templar.template(variables[k])
+ try:
+ options[k] = templar.template(variables[k])
+ except AnsibleValueOmittedError:
+ pass
+
# TODO move to task method?
plugin.set_options(task_keys=task_keys, var_options=options)
@@ -1072,18 +1046,6 @@ class TaskExecutor:
option_vars = C.config.get_plugin_vars('connection', self._connection._load_name)
varnames.extend(option_vars)
- # create dict of 'templated vars'
- options = {'_extras': {}}
- for k in option_vars:
- if k in variables:
- options[k] = templar.template(variables[k])
-
- # add extras if plugin supports them
- if getattr(self._connection, 'allow_extras', False):
- for k in variables:
- if k.startswith('ansible_%s_' % self._connection.extras_prefix) and k not in options:
- options['_extras'][k] = templar.template(variables[k])
-
task_keys = self._task.dump_attrs()
# The task_keys 'timeout' attr is the task's timeout, not the connection timeout.
@@ -1101,7 +1063,8 @@ class TaskExecutor:
del task_keys['retries']
# set options with 'templated vars' specific to this plugin and dependent ones
- self._connection.set_options(task_keys=task_keys, var_options=options)
+ var_options = self._connection._resolve_option_variables(variables, templar)
+ self._connection.set_options(task_keys=task_keys, var_options=var_options)
varnames.extend(self._set_plugin_options('shell', variables, templar, task_keys))
if self._connection.become is not None:
@@ -1139,7 +1102,7 @@ class TaskExecutor:
"""
return self._get_action_handler_with_module_context(templar)[0]
- def _get_action_handler_with_module_context(self, templar):
+ def _get_action_handler_with_module_context(self, templar: TemplateEngine):
"""
         Returns the correct action plugin to handle the requested task action and the module context
"""
@@ -1166,7 +1129,7 @@ class TaskExecutor:
# let action plugin override module, fallback to 'normal' action plugin otherwise
elif self._shared_loader_obj.action_loader.has_plugin(self._task.action, collection_list=collections):
handler_name = self._task.action
- elif all((module_prefix in C.NETWORK_GROUP_MODULES, self._shared_loader_obj.action_loader.has_plugin(network_action, collection_list=collections))):
+ elif module_prefix in C.NETWORK_GROUP_MODULES and self._shared_loader_obj.action_loader.has_plugin(network_action, collection_list=collections):
handler_name = network_action
display.vvvv("Using network group action {handler} for {action}".format(handler=handler_name,
action=self._task.action),
@@ -1201,7 +1164,7 @@ class TaskExecutor:
connection=self._connection,
play_context=self._play_context,
loader=self._loader,
- templar=templar,
+ templar=Templar._from_template_engine(templar),
shared_loader_obj=self._shared_loader_obj,
collection_list=collections
)
diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py
index 75f8a698612..c02f2b3a4f9 100644
--- a/lib/ansible/executor/task_queue_manager.py
+++ b/lib/ansible/executor/task_queue_manager.py
@@ -17,6 +17,7 @@
from __future__ import annotations
+import dataclasses
import os
import sys
import tempfile
@@ -27,35 +28,43 @@ import multiprocessing.queues
from ansible import constants as C
from ansible import context
-from ansible.errors import AnsibleError
+from ansible.errors import AnsibleError, ExitCode, AnsibleCallbackError
+from ansible._internal._errors._handler import ErrorHandler
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.stats import AggregateStats
-from ansible.executor.task_result import TaskResult
-from ansible.module_utils.six import string_types
-from ansible.module_utils.common.text.converters import to_text, to_native
+from ansible.executor.task_result import _RawTaskResult, _WireTaskResult
+from ansible.inventory.data import InventoryData
+from ansible.module_utils.common.text.converters import to_native
+from ansible.parsing.dataloader import DataLoader
from ansible.playbook.play_context import PlayContext
from ansible.playbook.task import Task
from ansible.plugins.loader import callback_loader, strategy_loader, module_loader
from ansible.plugins.callback import CallbackBase
-from ansible.template import Templar
+from ansible._internal._templating._engine import TemplateEngine
from ansible.vars.hostvars import HostVars
-from ansible.vars.reserved import warn_if_reserved
+from ansible.vars.manager import VariableManager
from ansible.utils.display import Display
from ansible.utils.lock import lock_decorator
from ansible.utils.multiprocessing import context as multiprocessing_context
-from dataclasses import dataclass
+if t.TYPE_CHECKING:
+ from ansible.executor.process.worker import WorkerProcess
__all__ = ['TaskQueueManager']
+STDIN_FILENO = 0
+STDOUT_FILENO = 1
+STDERR_FILENO = 2
+
display = Display()
+_T = t.TypeVar('_T')
+
+@dataclasses.dataclass(frozen=True, kw_only=True, slots=True)
class CallbackSend:
- def __init__(self, method_name, *args, **kwargs):
- self.method_name = method_name
- self.args = args
- self.kwargs = kwargs
+ method_name: str
+ wire_task_result: _WireTaskResult
class DisplaySend:
@@ -65,7 +74,7 @@ class DisplaySend:
self.kwargs = kwargs
-@dataclass
+@dataclasses.dataclass
class PromptSend:
worker_id: int
prompt: str
@@ -80,19 +89,11 @@ class FinalQueue(multiprocessing.queues.SimpleQueue):
kwargs['ctx'] = multiprocessing_context
super().__init__(*args, **kwargs)
- def send_callback(self, method_name, *args, **kwargs):
- self.put(
- CallbackSend(method_name, *args, **kwargs),
- )
+ def send_callback(self, method_name: str, task_result: _RawTaskResult) -> None:
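+        """Send a callback event to the main process, converting the raw result to its picklable wire form."""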
+ self.put(CallbackSend(method_name=method_name, wire_task_result=task_result.as_wire_task_result()))
- def send_task_result(self, *args, **kwargs):
- if isinstance(args[0], TaskResult):
- tr = args[0]
- else:
- tr = TaskResult(*args, **kwargs)
- self.put(
- tr,
- )
+ def send_task_result(self, task_result: _RawTaskResult) -> None:
+ self.put(task_result.as_wire_task_result())
def send_display(self, method, *args, **kwargs):
self.put(
@@ -122,27 +123,37 @@ class TaskQueueManager:
which dispatches the Play's tasks to hosts.
"""
- RUN_OK = 0
- RUN_ERROR = 1
- RUN_FAILED_HOSTS = 2
- RUN_UNREACHABLE_HOSTS = 4
- RUN_FAILED_BREAK_PLAY = 8
- RUN_UNKNOWN_ERROR = 255
-
- def __init__(self, inventory, variable_manager, loader, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False, forks=None):
-
+ RUN_OK = ExitCode.SUCCESS
+ RUN_ERROR = ExitCode.GENERIC_ERROR
+ RUN_FAILED_HOSTS = ExitCode.HOST_FAILED
+ RUN_UNREACHABLE_HOSTS = ExitCode.HOST_UNREACHABLE
+ RUN_FAILED_BREAK_PLAY = 8 # never leaves PlaybookExecutor.run
+ RUN_UNKNOWN_ERROR = 255 # never leaves PlaybookExecutor.run, intentionally includes the bit value for 8
+
+ _callback_dispatch_error_handler = ErrorHandler.from_config('_CALLBACK_DISPATCH_ERROR_BEHAVIOR')
+
+ def __init__(
+ self,
+ inventory: InventoryData,
+ variable_manager: VariableManager,
+ loader: DataLoader,
+ passwords: dict[str, str | None],
+ stdout_callback_name: str | None = None,
+ run_additional_callbacks: bool = True,
+ run_tree: bool = False,
+ forks: int | None = None,
+ ) -> None:
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self._stats = AggregateStats()
self.passwords = passwords
- self._stdout_callback = stdout_callback
+ self._stdout_callback_name: str | None = stdout_callback_name or C.DEFAULT_STDOUT_CALLBACK
self._run_additional_callbacks = run_additional_callbacks
self._run_tree = run_tree
self._forks = forks or 5
- self._callbacks_loaded = False
- self._callback_plugins = []
+ self._callback_plugins: list[CallbackBase] = []
self._start_at_done = False
# make sure any module paths (if specified) are added to the module_loader
@@ -155,25 +166,29 @@ class TaskQueueManager:
self._terminated = False
# dictionaries to keep track of failed/unreachable hosts
- self._failed_hosts = dict()
- self._unreachable_hosts = dict()
+ self._failed_hosts: dict[str, t.Literal[True]] = dict()
+ self._unreachable_hosts: dict[str, t.Literal[True]] = dict()
try:
self._final_q = FinalQueue()
except OSError as e:
raise AnsibleError("Unable to use multiprocessing, this is normally caused by lack of access to /dev/shm: %s" % to_native(e))
+ try:
+ # Done in tqm, and not display, because this is only needed for commands that execute tasks
+ for fd in (STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO):
+ os.set_inheritable(fd, False)
+ except Exception as ex:
+            display.warning(f"failed to set stdio as non-inheritable: {ex}")
+
self._callback_lock = threading.Lock()
# A temporary file (opened pre-fork) used by connection
# plugins for inter-process locking.
self._connection_lockfile = tempfile.TemporaryFile()
- def _initialize_processes(self, num):
- self._workers = []
-
- for i in range(num):
- self._workers.append(None)
+ def _initialize_processes(self, num: int) -> None:
+ self._workers: list[WorkerProcess | None] = [None] * num
def load_callbacks(self):
"""
@@ -182,44 +197,40 @@ class TaskQueueManager:
only one such callback plugin will be loaded.
"""
- if self._callbacks_loaded:
+ if self._callback_plugins:
return
- stdout_callback_loaded = False
- if self._stdout_callback is None:
- self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK
+ if not self._stdout_callback_name:
+ raise AnsibleError("No stdout callback name provided.")
- if isinstance(self._stdout_callback, CallbackBase):
- stdout_callback_loaded = True
- elif isinstance(self._stdout_callback, string_types):
- if self._stdout_callback not in callback_loader:
- raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
- else:
- self._stdout_callback = callback_loader.get(self._stdout_callback)
- self._stdout_callback.set_options()
- stdout_callback_loaded = True
- else:
- raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin")
+ stdout_callback = callback_loader.get(self._stdout_callback_name)
+
+ if not stdout_callback:
+ raise AnsibleError(f"Could not load {self._stdout_callback_name!r} callback plugin.")
+
+ stdout_callback._init_callback_methods()
+ stdout_callback.set_options()
+
+ self._callback_plugins.append(stdout_callback)
# get all configured loadable callbacks (adjacent, builtin)
- callback_list = list(callback_loader.all(class_only=True))
+ plugin_types = {plugin_type.ansible_name: plugin_type for plugin_type in callback_loader.all(class_only=True)}
# add enabled callbacks that refer to collections, which might not appear in normal listing
for c in C.CALLBACKS_ENABLED:
# load all, as collection ones might be using short/redirected names and not a fqcn
plugin = callback_loader.get(c, class_only=True)
- # TODO: check if this skip is redundant, loader should handle bad file/plugin cases already
if plugin:
# avoids incorrect and dupes possible due to collections
- if plugin not in callback_list:
- callback_list.append(plugin)
+ plugin_types.setdefault(plugin.ansible_name, plugin)
else:
display.warning("Skipping callback plugin '%s', unable to load" % c)
- # for each callback in the list see if we should add it to 'active callbacks' used in the play
- for callback_plugin in callback_list:
+ plugin_types.pop(stdout_callback.ansible_name, None)
+ # for each callback in the list see if we should add it to 'active callbacks' used in the play
+ for callback_plugin in plugin_types.values():
callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', '')
callback_needs_enabled = getattr(callback_plugin, 'CALLBACK_NEEDS_ENABLED', getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False))
@@ -235,10 +246,8 @@ class TaskQueueManager:
display.vvvvv("Attempting to use '%s' callback." % (callback_name))
if callback_type == 'stdout':
# we only allow one callback of type 'stdout' to be loaded,
- if callback_name != self._stdout_callback or stdout_callback_loaded:
- display.vv("Skipping callback '%s', as we already have a stdout callback." % (callback_name))
- continue
- stdout_callback_loaded = True
+ display.vv("Skipping callback '%s', as we already have a stdout callback." % (callback_name))
+ continue
elif callback_name == 'tree' and self._run_tree:
# TODO: remove special case for tree, which is an adhoc cli option --tree
pass
@@ -253,21 +262,16 @@ class TaskQueueManager:
# avoid bad plugin not returning an object, only needed cause we do class_only load and bypass loader checks,
# really a bug in the plugin itself which we ignore as callback errors are not supposed to be fatal.
if callback_obj:
- # skip initializing if we already did the work for the same plugin (even with diff names)
- if callback_obj not in self._callback_plugins:
- callback_obj.set_options()
- self._callback_plugins.append(callback_obj)
- else:
- display.vv("Skipping callback '%s', already loaded as '%s'." % (callback_plugin, callback_name))
+ callback_obj._init_callback_methods()
+ callback_obj.set_options()
+ self._callback_plugins.append(callback_obj)
else:
display.warning("Skipping callback '%s', as it does not create a valid plugin instance." % callback_name)
continue
- except Exception as e:
- display.warning("Skipping callback '%s', unable to load due to: %s" % (callback_name, to_native(e)))
+ except Exception as ex:
+                display.error_as_warning(f"Failed to load callback plugin {callback_name!r}.", exception=ex)
continue
- self._callbacks_loaded = True
-
def run(self, play):
"""
Iterates over the roles/tasks in a play, using the given (or default)
@@ -277,12 +281,10 @@ class TaskQueueManager:
are done with the current task).
"""
- if not self._callbacks_loaded:
- self.load_callbacks()
+ self.load_callbacks()
all_vars = self._variable_manager.get_vars(play=play)
- templar = Templar(loader=self._loader, variables=all_vars)
- warn_if_reserved(all_vars, templar.environment.globals.keys())
+ templar = TemplateEngine(loader=self._loader, variables=all_vars)
new_play = play.copy()
new_play.post_validate(templar)
@@ -295,13 +297,9 @@ class TaskQueueManager:
)
play_context = PlayContext(new_play, self.passwords, self._connection_lockfile.fileno())
- if (self._stdout_callback and
- hasattr(self._stdout_callback, 'set_play_context')):
- self._stdout_callback.set_play_context(play_context)
for callback_plugin in self._callback_plugins:
- if hasattr(callback_plugin, 'set_play_context'):
- callback_plugin.set_play_context(play_context)
+ callback_plugin.set_play_context(play_context)
self.send_callback('v2_playbook_on_play_start', new_play)
@@ -385,25 +383,25 @@ class TaskQueueManager:
except AttributeError:
pass
- def clear_failed_hosts(self):
+ def clear_failed_hosts(self) -> None:
self._failed_hosts = dict()
- def get_inventory(self):
+ def get_inventory(self) -> InventoryData:
return self._inventory
- def get_variable_manager(self):
+ def get_variable_manager(self) -> VariableManager:
return self._variable_manager
- def get_loader(self):
+ def get_loader(self) -> DataLoader:
return self._loader
def get_workers(self):
return self._workers[:]
- def terminate(self):
+ def terminate(self) -> None:
self._terminated = True
- def has_dead_workers(self):
+ def has_dead_workers(self) -> bool:
         # [<WorkerProcess(WorkerProcess-2, stopped[SIGKILL])>,
         #  <WorkerProcess(WorkerProcess-2, stopped[SIGKILL])>]
@@ -414,56 +412,54 @@ class TaskQueueManager:
defunct = True
return defunct
+ @staticmethod
+ def _first_arg_of_type(value_type: t.Type[_T], args: t.Sequence) -> _T | None:
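+        """Return the first of `args` that is an instance of `value_type`, or None (e.g. plucking the Task from callback args)."""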
+ return next((arg for arg in args if isinstance(arg, value_type)), None)
+
@lock_decorator(attr='_callback_lock')
def send_callback(self, method_name, *args, **kwargs):
- for callback_plugin in [self._stdout_callback] + self._callback_plugins:
+ # We always send events to stdout callback first, rest should follow config order
+ for callback_plugin in self._callback_plugins:
# a plugin that set self.disabled to True will not be called
# see osx_say.py example for such a plugin
- if getattr(callback_plugin, 'disabled', False):
+ if callback_plugin.disabled:
continue
# a plugin can opt in to implicit tasks (such as meta). It does this
# by declaring self.wants_implicit_tasks = True.
- wants_implicit_tasks = getattr(callback_plugin, 'wants_implicit_tasks', False)
+ if not callback_plugin.wants_implicit_tasks and (task_arg := self._first_arg_of_type(Task, args)) and task_arg.implicit:
+ continue
- # try to find v2 method, fallback to v1 method, ignore callback if no method found
methods = []
- for possible in [method_name, 'v2_on_any']:
- gotit = getattr(callback_plugin, possible, None)
- if gotit is None:
- gotit = getattr(callback_plugin, possible.removeprefix('v2_'), None)
- if gotit is not None:
- methods.append(gotit)
-
- # send clean copies
- new_args = []
-
- # If we end up being given an implicit task, we'll set this flag in
- # the loop below. If the plugin doesn't care about those, then we
- # check and continue to the next iteration of the outer loop.
- is_implicit_task = False
-
- for arg in args:
- # FIXME: add play/task cleaners
- if isinstance(arg, TaskResult):
- new_args.append(arg.clean_copy())
- # elif isinstance(arg, Play):
- # elif isinstance(arg, Task):
- else:
- new_args.append(arg)
- if isinstance(arg, Task) and arg.implicit:
- is_implicit_task = True
+ if method_name in callback_plugin._implemented_callback_methods:
+ methods.append(getattr(callback_plugin, method_name))
- if is_implicit_task and not wants_implicit_tasks:
- continue
+ if 'v2_on_any' in callback_plugin._implemented_callback_methods:
+ methods.append(getattr(callback_plugin, 'v2_on_any'))
for method in methods:
- try:
- method(*new_args, **kwargs)
- except Exception as e:
- # TODO: add config toggle to make this fatal or not?
- display.warning(u"Failure using method (%s) in callback plugin (%s): %s" % (to_text(method_name), to_text(callback_plugin), to_text(e)))
- from traceback import format_tb
- from sys import exc_info
- display.vvv('Callback Exception: \n' + ' '.join(format_tb(exc_info()[2])))
+ # send clean copies
+ new_args = []
+
+ for arg in args:
+ # FIXME: add play/task cleaners
+ if isinstance(arg, _RawTaskResult):
+ copied_tr = arg.as_callback_task_result()
+ new_args.append(copied_tr)
+ # this state hack requires that no callback ever accepts > 1 TaskResult object
+ callback_plugin._current_task_result = copied_tr
+ else:
+ new_args.append(arg)
+
+ with self._callback_dispatch_error_handler.handle(AnsibleCallbackError):
+ try:
+ method(*new_args, **kwargs)
+ except AssertionError:
+ # Using an `assert` in integration tests is useful.
+ # Production code should never use `assert` or raise `AssertionError`.
+ raise
+ except Exception as ex:
+ raise AnsibleCallbackError(f"Callback dispatch {method_name!r} failed for plugin {callback_plugin._load_name!r}.") from ex
+
+ callback_plugin._current_task_result = None # clear temporary instance storage hack
diff --git a/lib/ansible/executor/task_result.py b/lib/ansible/executor/task_result.py
index 06e9af72e3c..b973a6a2796 100644
--- a/lib/ansible/executor/task_result.py
+++ b/lib/ansible/executor/task_result.py
@@ -4,78 +4,148 @@
from __future__ import annotations
-from ansible import constants as C
-from ansible.parsing.dataloader import DataLoader
+import collections.abc as _c
+import dataclasses
+import functools
+import typing as t
+
+from ansible import constants
+from ansible.utils import vars as _vars
from ansible.vars.clean import module_response_deepcopy, strip_internal_keys
+from ansible.module_utils._internal import _messages
+from ansible._internal import _collection_proxy
+
+if t.TYPE_CHECKING:
+ from ansible.inventory.host import Host
+ from ansible.playbook.task import Task
_IGNORE = ('failed', 'skipped')
-_PRESERVE = ('attempts', 'changed', 'retries')
-_SUB_PRESERVE = {'_ansible_delegated_vars': ('ansible_host', 'ansible_port', 'ansible_user', 'ansible_connection')}
+_PRESERVE = {'attempts', 'changed', 'retries', '_ansible_no_log', 'exception', 'warnings', 'deprecations'}
+_SUB_PRESERVE = {'_ansible_delegated_vars': {'ansible_host', 'ansible_port', 'ansible_user', 'ansible_connection'}}
# stuff callbacks need
CLEAN_EXCEPTIONS = (
'_ansible_verbose_always', # for debug and other actions, to always expand data (pretty jsonification)
'_ansible_item_label', # to know actual 'item' variable
- '_ansible_no_log', # jic we didnt clean up well enough, DON'T LOG
+ '_ansible_no_log', # jic we didn't clean up well enough, DON'T LOG
     '_ansible_verbose_override',  # controls display of ansible_facts, gathering would be very noisy with -v otherwise
)
-class TaskResult:
+@t.final
+@dataclasses.dataclass(frozen=True, kw_only=True, slots=True)
+class _WireTaskResult:
+ """A thin version of `_RawTaskResult` which can be sent over the worker queue."""
+
+ host_name: str
+ task_uuid: str
+ return_data: _c.MutableMapping[str, object]
+ task_fields: _c.Mapping[str, object]
+
+
+class _BaseTaskResult:
"""
This class is responsible for interpreting the resulting data
from an executed task, and provides helper methods for determining
the result of a given task.
"""
- def __init__(self, host, task, return_data, task_fields=None):
- self._host = host
- self._task = task
+ def __init__(self, host: Host, task: Task, return_data: _c.MutableMapping[str, t.Any], task_fields: _c.Mapping[str, t.Any]) -> None:
+ self.__host = host
+ self.__task = task
+ self._return_data = return_data # FIXME: this should be immutable, but strategy result processing mutates it in some corner cases
+ self.__task_fields = task_fields
- if isinstance(return_data, dict):
- self._result = return_data.copy()
- else:
- self._result = DataLoader().load(return_data)
+ @property
+ def host(self) -> Host:
+ """The host associated with this result."""
+ return self.__host
- if task_fields is None:
- self._task_fields = dict()
- else:
- self._task_fields = task_fields
+ @property
+ def _host(self) -> Host:
+ """Use the `host` property when supporting only ansible-core 2.19 or later."""
+ # deprecated: description='Deprecate `_host` in favor of `host`' core_version='2.23'
+ return self.__host
+
+ @property
+ def task(self) -> Task:
+ """The task associated with this result."""
+ return self.__task
+
+ @property
+ def _task(self) -> Task:
+ """Use the `task` property when supporting only ansible-core 2.19 or later."""
+ # deprecated: description='Deprecate `_task` in favor of `task`' core_version='2.23'
+ return self.__task
+
+ @property
+ def task_fields(self) -> _c.Mapping[str, t.Any]:
+ """The task fields associated with this result."""
+ return self.__task_fields
+
+ @property
+ def _task_fields(self) -> _c.Mapping[str, t.Any]:
+ """Use the `task_fields` property when supporting only ansible-core 2.19 or later."""
+        # deprecated: description='Deprecate `_task_fields` in favor of `task_fields`' core_version='2.23'
+ return self.__task_fields
+
+ @property
+ def exception(self) -> _messages.ErrorSummary | None:
+ """The error from this task result, if any."""
+ return self._return_data.get('exception')
+
+ @property
+ def warnings(self) -> _c.Sequence[_messages.WarningSummary]:
+ """The warnings for this task, if any."""
+ return _collection_proxy.SequenceProxy(self._return_data.get('warnings') or [])
@property
- def task_name(self):
- return self._task_fields.get('name', None) or self._task.get_name()
+ def deprecations(self) -> _c.Sequence[_messages.DeprecationSummary]:
+ """The deprecation warnings for this task, if any."""
+ return _collection_proxy.SequenceProxy(self._return_data.get('deprecations') or [])
+
+ @property
+ def _loop_results(self) -> list[_c.MutableMapping[str, t.Any]]:
+ """Return a list of loop results. If no loop results are present, an empty list is returned."""
+ results = self._return_data.get('results')
+
+ if not isinstance(results, list):
+ return []
- def is_changed(self):
+ return results
+
+ @property
+ def task_name(self) -> str:
+ return str(self.task_fields.get('name', '')) or self.task.get_name()
+
+ def is_changed(self) -> bool:
return self._check_key('changed')
- def is_skipped(self):
- # loop results
- if 'results' in self._result:
- results = self._result['results']
+ def is_skipped(self) -> bool:
+ if self._loop_results:
# Loop tasks are only considered skipped if all items were skipped.
# some squashed results (eg, dnf) are not dicts and can't be skipped individually
- if results and all(isinstance(res, dict) and res.get('skipped', False) for res in results):
+ if all(isinstance(loop_res, dict) and loop_res.get('skipped', False) for loop_res in self._loop_results):
return True
# regular tasks and squashed non-dict results
- return self._result.get('skipped', False)
+ return bool(self._return_data.get('skipped', False))
- def is_failed(self):
- if 'failed_when_result' in self._result or \
- 'results' in self._result and True in [True for x in self._result['results'] if 'failed_when_result' in x]:
+ def is_failed(self) -> bool:
+ if 'failed_when_result' in self._return_data or any(isinstance(loop_res, dict) and 'failed_when_result' in loop_res for loop_res in self._loop_results):
return self._check_key('failed_when_result')
- else:
- return self._check_key('failed')
- def is_unreachable(self):
+ return self._check_key('failed')
+
+ def is_unreachable(self) -> bool:
return self._check_key('unreachable')
- def needs_debugger(self, globally_enabled=False):
- _debugger = self._task_fields.get('debugger')
- _ignore_errors = C.TASK_DEBUGGER_IGNORE_ERRORS and self._task_fields.get('ignore_errors')
+ def needs_debugger(self, globally_enabled: bool = False) -> bool:
+ _debugger = self.task_fields.get('debugger')
+ _ignore_errors = constants.TASK_DEBUGGER_IGNORE_ERRORS and self.task_fields.get('ignore_errors')
ret = False
+
if globally_enabled and ((self.is_failed() and not _ignore_errors) or self.is_unreachable()):
ret = True
@@ -92,62 +162,95 @@ class TaskResult:
return ret
- def _check_key(self, key):
- """get a specific key from the result or its items"""
+ def _check_key(self, key: str) -> bool:
+ """Fetch a specific named boolean value from the result; if missing, a logical OR of the value from nested loop results; False for non-loop results."""
+ if (value := self._return_data.get(key, ...)) is not ...:
+ return bool(value)
- if isinstance(self._result, dict) and key in self._result:
- return self._result.get(key, False)
- else:
- flag = False
- for res in self._result.get('results', []):
- if isinstance(res, dict):
- flag |= res.get(key, False)
- return flag
+ return any(isinstance(result, dict) and result.get(key) for result in self._loop_results)
- def clean_copy(self):
- """ returns 'clean' taskresult object """
+@t.final
+class _RawTaskResult(_BaseTaskResult):
+ def as_wire_task_result(self) -> _WireTaskResult:
+ """Return a `_WireTaskResult` from this instance."""
+ return _WireTaskResult(
+ host_name=self.host.name,
+ task_uuid=self.task._uuid,
+ return_data=self._return_data,
+ task_fields=self.task_fields,
+ )
- # FIXME: clean task_fields, _task and _host copies
- result = TaskResult(self._host, self._task, {}, self._task_fields)
+ def as_callback_task_result(self) -> CallbackTaskResult:
+ """Return a `CallbackTaskResult` from this instance."""
+ ignore: tuple[str, ...]
# statuses are already reflected on the event type
- if result._task and result._task.action in C._ACTION_DEBUG:
+ if self.task and self.task.action in constants._ACTION_DEBUG:
# debug is verbose by default to display vars, no need to add invocation
ignore = _IGNORE + ('invocation',)
else:
ignore = _IGNORE
- subset = {}
+ subset: dict[str, dict[str, object]] = {}
+
# preserve subset for later
- for sub in _SUB_PRESERVE:
- if sub in self._result:
- subset[sub] = {}
- for key in _SUB_PRESERVE[sub]:
- if key in self._result[sub]:
- subset[sub][key] = self._result[sub][key]
-
- if isinstance(self._task.no_log, bool) and self._task.no_log or self._result.get('_ansible_no_log', False):
- x = {"censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result"}
-
- # preserve full
- for preserve in _PRESERVE:
- if preserve in self._result:
- x[preserve] = self._result[preserve]
-
- result._result = x
- elif self._result:
- result._result = module_response_deepcopy(self._result)
-
- # actually remove
- for remove_key in ignore:
- if remove_key in result._result:
- del result._result[remove_key]
+ for sub, sub_keys in _SUB_PRESERVE.items():
+ sub_data = self._return_data.get(sub)
+
+ if isinstance(sub_data, dict):
+ subset[sub] = {key: value for key, value in sub_data.items() if key in sub_keys}
+
+ # DTFIX-FUTURE: is checking no_log here redundant now that we use _ansible_no_log everywhere?
+ if isinstance(self.task.no_log, bool) and self.task.no_log or self._return_data.get('_ansible_no_log'):
+ censored_result = censor_result(self._return_data)
+
+ if self._loop_results:
+ # maintain shape for loop results so callback behavior recognizes a loop was performed
+ censored_result.update(results=[
+ censor_result(loop_res) if isinstance(loop_res, dict) and loop_res.get('_ansible_no_log') else loop_res for loop_res in self._loop_results
+ ])
+
+ return_data = censored_result
+ elif self._return_data:
+ return_data = {k: v for k, v in module_response_deepcopy(self._return_data).items() if k not in ignore}
# remove almost ALL internal keys, keep ones relevant to callback
- strip_internal_keys(result._result, exceptions=CLEAN_EXCEPTIONS)
+ strip_internal_keys(return_data, exceptions=CLEAN_EXCEPTIONS)
+ else:
+ return_data = {}
# keep subset
- result._result.update(subset)
+ return_data.update(subset)
+
+ return CallbackTaskResult(self.host, self.task, return_data, self.task_fields)
+
+
+@t.final
+class CallbackTaskResult(_BaseTaskResult):
+ """Public contract of TaskResult """
+
+ @property
+ def _result(self) -> _c.MutableMapping[str, t.Any]:
+ """Use the `result` property when supporting only ansible-core 2.19 or later."""
+ # deprecated: description='Deprecate `_result` in favor of `result`' core_version='2.23'
+ return self.result
+
+ @functools.cached_property
+ def result(self) -> _c.MutableMapping[str, t.Any]:
+ """
+ Returns a cached copy of the task result dictionary for consumption by callbacks.
+ Internal custom types are transformed to native Python types to facilitate access and serialization.
+ """
+ return t.cast(_c.MutableMapping[str, t.Any], _vars.transform_to_native_types(self._return_data))
+
+
+TaskResult = CallbackTaskResult
+"""Compatibility name for the pre-2.19 callback-shaped TaskResult passed to callbacks."""
+
+
+def censor_result(result: _c.Mapping[str, t.Any]) -> dict[str, t.Any]:
+ censored_result = {key: value for key in _PRESERVE if (value := result.get(key, ...)) is not ...}
+ censored_result.update(censored="the output has been hidden due to the fact that 'no_log: true' was specified for this result")
- return result
+ return censored_result
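For context on the censoring behavior introduced above: `censor_result` keeps only a small whitelist of keys and replaces everything else with a notice. A minimal runnable sketch, where the `_PRESERVE` contents are an assumption standing in for the module-level whitelist:

```python
# Sketch of the censor_result behavior; _PRESERVE here is a placeholder.
_PRESERVE = ('attempts', 'changed', 'retries')


def censor_result(result):
    censored = {key: value for key in _PRESERVE if (value := result.get(key, ...)) is not ...}
    censored.update(censored="the output has been hidden due to the fact that 'no_log: true' was specified for this result")
    return censored


print(censor_result({'changed': True, 'stdout': 'secret token', 'attempts': 2}))
# {'attempts': 2, 'changed': True, 'censored': "the output has been hidden ..."}
```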
diff --git a/lib/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py
index 6765b087b35..0a737c46ffb 100644
--- a/lib/ansible/galaxy/api.py
+++ b/lib/ansible/galaxy/api.py
@@ -57,13 +57,13 @@ def should_retry_error(exception):
if isinstance(exception, GalaxyError) and exception.http_code in RETRY_HTTP_ERROR_CODES:
return True
- if isinstance(exception, AnsibleError) and (orig_exc := getattr(exception, 'orig_exc', None)):
+ if isinstance(exception, AnsibleError) and (cause := exception.__cause__):
# URLError is often a proxy for an underlying error, handle wrapped exceptions
- if isinstance(orig_exc, URLError):
- orig_exc = orig_exc.reason
+ if isinstance(cause, URLError):
+ cause = cause.reason
# Handle common URL related errors
- if isinstance(orig_exc, (TimeoutError, BadStatusLine, IncompleteRead)):
+ if isinstance(cause, (TimeoutError, BadStatusLine, IncompleteRead)):
return True
return False
@@ -92,7 +92,7 @@ def g_connect(versions):
try:
data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg, cache=True)
except (AnsibleError, GalaxyError, ValueError, KeyError) as err:
- # Either the URL doesnt exist, or other error. Or the URL exists, but isn't a galaxy API
+ # Either the URL doesn't exist, or other error. Or the URL exists, but isn't a galaxy API
# root (not JSON, no 'available_versions') so try appending '/api/'
if n_url.endswith('/api') or n_url.endswith('/api/'):
raise
@@ -138,7 +138,7 @@ def g_connect(versions):
'The v2 Ansible Galaxy API is deprecated and no longer supported. '
'Ensure that you have configured the ansible-galaxy CLI to utilize an '
'updated and supported version of Ansible Galaxy.',
- version='2.20'
+ version='2.20',
)
return method(self, *args, **kwargs)
@@ -337,10 +337,7 @@ class GalaxyAPI:
if not isinstance(other_galaxy_api, self.__class__):
return NotImplemented
- return (
- self._priority > other_galaxy_api._priority or
- self.name < self.name
- )
+ return self._priority > other_galaxy_api._priority
@property # type: ignore[misc] # https://github.com/python/mypy/issues/1362
@g_connect(['v1', 'v2', 'v3'])
@@ -408,11 +405,8 @@ class GalaxyAPI:
method=method, timeout=self._server_timeout, http_agent=user_agent(), follow_redirects='safe')
except HTTPError as e:
raise GalaxyError(e, error_context_msg)
- except Exception as e:
- raise AnsibleError(
- "Unknown error when attempting to call Galaxy at '%s': %s" % (url, to_native(e)),
- orig_exc=e
- )
+ except Exception as ex:
+ raise AnsibleError(f"Unknown error when attempting to call Galaxy at {url!r}.") from ex
resp_data = to_text(resp.read(), errors='surrogate_or_strict')
try:
@@ -471,8 +465,8 @@ class GalaxyAPI:
resp = open_url(url, data=args, validate_certs=self.validate_certs, method="POST", http_agent=user_agent(), timeout=self._server_timeout)
except HTTPError as e:
raise GalaxyError(e, 'Attempting to authenticate to galaxy')
- except Exception as e:
- raise AnsibleError('Unable to authenticate to galaxy: %s' % to_native(e), orig_exc=e)
+ except Exception as ex:
+ raise AnsibleError('Unable to authenticate to galaxy.') from ex
data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
return data
@@ -817,8 +811,17 @@ class GalaxyAPI:
signatures = data.get('signatures') or []
+ download_url_info = urlparse(data['download_url'])
+ if not download_url_info.scheme and not download_url_info.path.startswith('/'):
+ # galaxy does a lot of redirects, with much more complex pathing than we use
+        # within this codebase; without updating _call_galaxy to be able to return
+ # the final URL, we can't reliably build a relative URL.
+ raise AnsibleError(f'Invalid non absolute download_url: {data["download_url"]}')
+
+ download_url = urljoin(self.api_server, data['download_url'])
+
return CollectionVersionMetadata(data['namespace']['name'], data['collection']['name'], data['version'],
- data['download_url'], data['artifact']['sha256'],
+ download_url, data['artifact']['sha256'],
data['metadata']['dependencies'], data['href'], signatures)
@g_connect(['v2', 'v3'])
@@ -874,7 +877,7 @@ class GalaxyAPI:
except GalaxyError as err:
if err.http_code != 404:
raise
- # v3 doesn't raise a 404 so we need to mimick the empty response from APIs that do.
+ # v3 doesn't raise a 404 so we need to mimic the empty response from APIs that do.
return []
if 'data' in data:
@@ -896,12 +899,10 @@ class GalaxyAPI:
if not next_link:
break
elif relative_link:
- # TODO: This assumes the pagination result is relative to the root server. Will need to be verified
- # with someone who knows the AH API.
-
- # Remove the query string from the versions_url to use the next_link's query
- versions_url = urljoin(versions_url, urlparse(versions_url).path)
- next_link = versions_url.replace(versions_url_info.path, next_link)
+ next_link_info = urlparse(next_link)
+ if not next_link_info.scheme and not next_link_info.path.startswith('/'):
+ raise AnsibleError(f'Invalid non absolute pagination link: {next_link}')
+ next_link = urljoin(self.api_server, next_link)
data = self._call_galaxy(to_native(next_link, errors='surrogate_or_strict'),
error_context_msg=error_context_msg, cache=True, cache_key=cache_key)
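The two new validation checks above share one rationale: `urljoin` resolves a server-relative path (leading `/`) or an absolute URL predictably against `api_server`, but a bare relative path would resolve against whatever path the previous request happened to use. A sketch of the accepted and rejected shapes (the server URL is a placeholder):

```python
from urllib.parse import urljoin, urlparse

api_server = 'https://galaxy.example.com/api/'  # placeholder server


def resolve_link(link):
    info = urlparse(link)
    if not info.scheme and not info.path.startswith('/'):
        raise ValueError(f'Invalid non absolute link: {link}')
    return urljoin(api_server, link)


print(resolve_link('/api/v3/collections/?page=2'))    # server-relative: resolved against api_server
print(resolve_link('https://cdn.example.com/c.tar'))  # absolute: returned unchanged
resolve_link('v3/collections/?page=2')                # bare relative: ValueError
```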
diff --git a/lib/ansible/galaxy/collection/__init__.py b/lib/ansible/galaxy/collection/__init__.py
index 829f7aa19d2..3bc0f27ee57 100644
--- a/lib/ansible/galaxy/collection/__init__.py
+++ b/lib/ansible/galaxy/collection/__init__.py
@@ -5,7 +5,6 @@
from __future__ import annotations
-import errno
import fnmatch
import functools
import glob
@@ -31,6 +30,7 @@ from dataclasses import dataclass
from hashlib import sha256
from io import BytesIO
from importlib.metadata import distribution
+from importlib.resources import files
from itertools import chain
try:
@@ -85,7 +85,6 @@ if t.TYPE_CHECKING:
FilesManifestType = t.Dict[t.Literal['files', 'format'], t.Union[t.List[FileManifestEntryType], int]]
import ansible.constants as C
-from ansible.compat.importlib_resources import files
from ansible.errors import AnsibleError
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.collection.concrete_artifact_manager import (
@@ -201,9 +200,9 @@ class CollectionSignatureError(Exception):
# FUTURE: expose actual verify result details for a collection on this object, maybe reimplement as dataclass on py3.8+
class CollectionVerifyResult:
- def __init__(self, collection_name): # type: (str) -> None
- self.collection_name = collection_name # type: str
- self.success = True # type: bool
+ def __init__(self, collection_name: str) -> None:
+ self.collection_name = collection_name
+ self.success = True
def verify_local_collection(local_collection, remote_collection, artifacts_manager):
@@ -1433,9 +1432,6 @@ def find_existing_collections(path_filter, artifacts_manager, namespace_filter=N
:param path: Collection dirs layout search path.
:param artifacts_manager: Artifacts manager.
"""
- if files is None:
- raise AnsibleError('importlib_resources is not installed and is required')
-
if path_filter and not is_sequence(path_filter):
path_filter = [path_filter]
if namespace_filter and not is_sequence(namespace_filter):
@@ -1692,11 +1688,7 @@ def _extract_tar_dir(tar, dirname, b_dest):
b_dir_path = os.path.join(b_dest, to_bytes(dirname, errors='surrogate_or_strict'))
b_parent_path = os.path.dirname(b_dir_path)
- try:
- os.makedirs(b_parent_path, mode=S_IRWXU_RXG_RXO)
- except OSError as e:
- if e.errno != errno.EEXIST:
- raise
+ os.makedirs(b_parent_path, mode=S_IRWXU_RXG_RXO, exist_ok=True)
if tar_member.type == tarfile.SYMTYPE:
b_link_path = to_bytes(tar_member.linkname, errors='surrogate_or_strict')
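The `_extract_tar_dir` cleanup above relies on `exist_ok=True` subsuming the old errno.EEXIST dance; other OSErrors (e.g. permissions) still propagate. A two-line illustration, with `0o755` standing in for the module's `S_IRWXU_RXG_RXO` constant:

```python
import os
import tempfile

parent = os.path.join(tempfile.mkdtemp(), 'a', 'b')
os.makedirs(parent, mode=0o755, exist_ok=True)  # creates the whole tree
os.makedirs(parent, mode=0o755, exist_ok=True)  # no-op instead of raising EEXIST
```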
diff --git a/lib/ansible/galaxy/collection/concrete_artifact_manager.py b/lib/ansible/galaxy/collection/concrete_artifact_manager.py
index fb807766f5c..1659bc46b49 100644
--- a/lib/ansible/galaxy/collection/concrete_artifact_manager.py
+++ b/lib/ansible/galaxy/collection/concrete_artifact_manager.py
@@ -449,9 +449,9 @@ def _extract_collection_from_git(repo_url, coll_ver, b_path):
except subprocess.CalledProcessError as proc_err:
raise AnsibleError( # should probably be LookupError
'Failed to switch a cloned Git repo `{repo_url!s}` '
- 'to the requested revision `{commitish!s}`.'.
+ 'to the requested revision `{revision!s}`.'.
format(
- commitish=to_native(version),
+ revision=to_native(version),
repo_url=to_native(git_url),
),
) from proc_err
@@ -485,16 +485,13 @@ def _download_file(url, b_path, expected_hash, validate_certs, token=None, timeo
display.display("Downloading %s to %s" % (url, to_text(b_tarball_dir)))
# NOTE: Galaxy redirects downloads to S3 which rejects the request
# NOTE: if an Authorization header is attached so don't redirect it
- try:
- resp = open_url(
- to_native(url, errors='surrogate_or_strict'),
- validate_certs=validate_certs,
- headers=None if token is None else token.headers(),
- unredirected_headers=['Authorization'], http_agent=user_agent(),
- timeout=timeout
- )
- except Exception as err:
- raise AnsibleError(to_native(err), orig_exc=err)
+ resp = open_url(
+ to_native(url, errors='surrogate_or_strict'),
+ validate_certs=validate_certs,
+ headers=None if token is None else token.headers(),
+ unredirected_headers=['Authorization'], http_agent=user_agent(),
+ timeout=timeout
+ )
with open(b_file_path, 'wb') as download_file: # type: t.BinaryIO
actual_hash = _consume_file(resp, write_to=download_file)
@@ -659,14 +656,8 @@ def _get_json_from_installed_dir(
try:
with open(b_json_filepath, 'rb') as manifest_fd:
b_json_text = manifest_fd.read()
- except (IOError, OSError):
- raise LookupError(
- "The collection {manifest!s} path '{path!s}' does not exist.".
- format(
- manifest=filename,
- path=to_native(b_json_filepath),
- )
- )
+ except OSError as ex:
+ raise LookupError(f"The collection {filename!r} path {to_text(b_json_filepath)!r} does not exist.") from ex
manifest_txt = to_text(b_json_text, errors='surrogate_or_strict')
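The `orig_exc` attribute removed from `AnsibleError` call sites in this patch is replaced by standard exception chaining: `raise ... from ex` populates `__cause__`, which `should_retry_error` in api.py now inspects. A self-contained sketch of that pattern (`AnsibleError` is a stand-in class here, not the real import):

```python
from urllib.error import URLError


class AnsibleError(Exception):  # stand-in for ansible.errors.AnsibleError
    pass


def should_retry(exception):
    if isinstance(exception, AnsibleError) and (cause := exception.__cause__):
        if isinstance(cause, URLError):
            cause = cause.reason  # URLError often wraps the real error
        return isinstance(cause, TimeoutError)
    return False


try:
    try:
        raise URLError(TimeoutError('timed out'))
    except URLError as ex:
        raise AnsibleError('Unknown error when attempting to call Galaxy.') from ex
except AnsibleError as err:
    print(should_retry(err))  # True: __cause__ chains to a TimeoutError
```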
diff --git a/lib/ansible/galaxy/dependency_resolution/dataclasses.py b/lib/ansible/galaxy/dependency_resolution/dataclasses.py
index ea4c875adb4..9877efdfc38 100644
--- a/lib/ansible/galaxy/dependency_resolution/dataclasses.py
+++ b/lib/ansible/galaxy/dependency_resolution/dataclasses.py
@@ -7,6 +7,7 @@
from __future__ import annotations
import os
+import pathlib
import typing as t
from collections import namedtuple
@@ -25,6 +26,8 @@ if t.TYPE_CHECKING:
'_ComputedReqKindsMixin',
)
+import ansible
+import ansible.release
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.galaxy.api import GalaxyAPI
@@ -39,6 +42,7 @@ _ALLOW_CONCRETE_POINTER_IN_SOURCE = False # NOTE: This is a feature flag
_GALAXY_YAML = b'galaxy.yml'
_MANIFEST_JSON = b'MANIFEST.json'
_SOURCE_METADATA_FILE = b'GALAXY.yml'
+_ANSIBLE_PACKAGE_PATH = pathlib.Path(ansible.__file__).parent
display = Display()
@@ -224,6 +228,13 @@ class _ComputedReqKindsMixin:
if dir_path.endswith(to_bytes(os.path.sep)):
dir_path = dir_path.rstrip(to_bytes(os.path.sep))
if not _is_collection_dir(dir_path):
+ dir_pathlib = pathlib.Path(to_text(dir_path))
+
+ # special handling for bundled collections without manifests, e.g., ansible._protomatter
+ if dir_pathlib.is_relative_to(_ANSIBLE_PACKAGE_PATH):
+ req_name = f'{dir_pathlib.parent.name}.{dir_pathlib.name}'
+ return cls(req_name, ansible.release.__version__, dir_path, 'dir', None)
+
display.warning(
u"Collection at '{path!s}' does not have a {manifest_json!s} "
u'file, nor has it {galaxy_yml!s}: cannot detect version.'.
@@ -578,10 +589,9 @@ class _ComputedReqKindsMixin:
See https://github.com/ansible/ansible/pull/81606 for extra context.
"""
- version_string = self.ver[0]
- return version_string.isdigit() or not (
- version_string == '*' or
- version_string.startswith(('<', '>', '!='))
+ version_spec_start_char = self.ver[0]
+ return version_spec_start_char.isdigit() or not (
+ version_spec_start_char.startswith(('<', '>', '!', '*'))
)
@property
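The bundled-collection special case above hinges on `pathlib.Path.is_relative_to` and on deriving the requirement name from the last two path components. A sketch under the assumption of a manifest-less collection shipped inside the `ansible` package, such as the `ansible._protomatter` example from the comment:

```python
import pathlib

import ansible

_ANSIBLE_PACKAGE_PATH = pathlib.Path(ansible.__file__).parent

candidate = _ANSIBLE_PACKAGE_PATH / '_protomatter'  # hypothetical bundled collection dir

if candidate.is_relative_to(_ANSIBLE_PACKAGE_PATH):
    req_name = f'{candidate.parent.name}.{candidate.name}'
    print(req_name)  # ansible._protomatter
```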
diff --git a/lib/ansible/galaxy/dependency_resolution/providers.py b/lib/ansible/galaxy/dependency_resolution/providers.py
index 7578cae785c..5f602b8242c 100644
--- a/lib/ansible/galaxy/dependency_resolution/providers.py
+++ b/lib/ansible/galaxy/dependency_resolution/providers.py
@@ -39,7 +39,7 @@ except ImportError:
# TODO: add python requirements to ansible-test's ansible-core distribution info and remove the hardcoded lowerbound/upperbound fallback
RESOLVELIB_LOWERBOUND = SemanticVersion("0.5.3")
-RESOLVELIB_UPPERBOUND = SemanticVersion("1.1.0")
+RESOLVELIB_UPPERBOUND = SemanticVersion("2.0.0")
RESOLVELIB_VERSION = SemanticVersion.from_loose_version(LooseVersion(resolvelib_version))
@@ -148,7 +148,7 @@ class CollectionDependencyProviderBase(AbstractProvider):
:param resolutions: Mapping of identifier, candidate pairs.
- :param candidates: Possible candidates for the identifer.
+ :param candidates: Possible candidates for the identifier.
Mapping of identifier, list of candidate pairs.
:param information: Requirement information of each package.
@@ -158,7 +158,7 @@ class CollectionDependencyProviderBase(AbstractProvider):
:param backtrack_causes: Sequence of requirement information that were
the requirements that caused the resolver to most recently backtrack.
- The preference could depend on a various of issues, including
+        The preference could depend on various issues, including
(not necessarily in this order):
* Is this package pinned in the current resolution result?
@@ -404,7 +404,7 @@ class CollectionDependencyProviderBase(AbstractProvider):
:param requirement: A requirement that produced the `candidate`.
- :param candidate: A pinned candidate supposedly matchine the \
+ :param candidate: A pinned candidate supposedly matching the \
`requirement` specifier. It is guaranteed to \
have been generated from the `requirement`.
diff --git a/lib/ansible/galaxy/dependency_resolution/reporters.py b/lib/ansible/galaxy/dependency_resolution/reporters.py
index a9da75a8674..69c4444036b 100644
--- a/lib/ansible/galaxy/dependency_resolution/reporters.py
+++ b/lib/ansible/galaxy/dependency_resolution/reporters.py
@@ -5,12 +5,46 @@
from __future__ import annotations
+from collections import defaultdict
+
try:
from resolvelib import BaseReporter
except ImportError:
class BaseReporter: # type: ignore[no-redef]
pass
+try:
+ from resolvelib.resolvers import Criterion
+except ImportError:
+ class Criterion: # type: ignore[no-redef]
+ pass
+
+from ansible.utils.display import Display
+from .dataclasses import Candidate, Requirement
+
+
+display = Display()
+
+
+_CLI_APP_NAME = 'ansible-galaxy'
+_MESSAGES_AT_REJECT_COUNT = {
+ 1: (
+ f'{_CLI_APP_NAME} is looking at multiple versions of {{fqcn}} to '
+ 'determine which version is compatible with other '
+ 'requirements. This could take a while.'
+ ),
+ 8: (
+ f'{_CLI_APP_NAME} is looking at multiple versions of {{fqcn}} to '
+ 'determine which version is compatible with other '
+ 'requirements. This could take a while.'
+ ),
+ 13: (
+ 'This is taking longer than usual. You might need to provide '
+ 'the dependency resolver with stricter constraints to reduce '
+ 'runtime. If you want to abort this run, press Ctrl + C.'
+ ),
+}
+
class CollectionDependencyReporter(BaseReporter):
"""A dependency reporter for Ansible Collections.
@@ -18,3 +52,50 @@ class CollectionDependencyReporter(BaseReporter):
This is a proxy class allowing us to abstract away importing resolvelib
outside of the `ansible.galaxy.dependency_resolution` Python package.
"""
+
+ def __init__(self) -> None:
+ """Initialize the collection rejection counter."""
+ super().__init__()
+
+ self.reject_count_by_fqcn: defaultdict[str, int] = defaultdict(int)
+
+ def _maybe_log_rejection_message(self, candidate: Candidate) -> bool:
+ """Print out rejection messages on pre-defined limit hits."""
+ # Inspired by https://github.com/pypa/pip/commit/9731131
+ self.reject_count_by_fqcn[candidate.fqcn] += 1
+
+ collection_rejections_count = self.reject_count_by_fqcn[candidate.fqcn]
+
+ if collection_rejections_count not in _MESSAGES_AT_REJECT_COUNT:
+ return False
+
+ collection_rejection_message = _MESSAGES_AT_REJECT_COUNT[
+ collection_rejections_count
+ ]
+ display.display(collection_rejection_message.format(fqcn=candidate.fqcn))
+
+ return True
+
+ def rejecting_candidate( # resolvelib >= 0.9.0
+ self,
+ criterion: Criterion[Candidate, Requirement],
+ candidate: Candidate,
+ ) -> None:
+ """Augment rejection messages with conflict details."""
+ if not self._maybe_log_rejection_message(candidate):
+ return
+
+ msg = 'Will try a different candidate, due to conflict:'
+ for req_info in criterion.information:
+ req, parent = req_info.requirement, req_info.parent
+ msg += '\n '
+ if parent:
+ msg += f'{parent !s} depends on '
+ else:
+ msg += 'The user requested '
+ msg += str(req)
+ display.v(msg)
+
+ def backtracking(self, candidate: Candidate) -> None: # resolvelib < 0.9.0
+ """Print out rejection messages on pre-defined limit hits."""
+ self._maybe_log_rejection_message(candidate)
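The reporter's thresholded messaging (counts 1, 8 and 13, mirroring the pip commit referenced above) can be seen in isolation with a stripped-down counter; the message text below is placeholder:

```python
from collections import defaultdict

MESSAGES_AT_REJECT_COUNT = {
    1: 'looking at multiple versions of {fqcn}...',  # placeholder text
    8: 'still looking at versions of {fqcn}...',
    13: 'this is taking longer than usual for {fqcn}...',
}
reject_count_by_fqcn = defaultdict(int)


def maybe_log_rejection(fqcn):
    reject_count_by_fqcn[fqcn] += 1
    if (count := reject_count_by_fqcn[fqcn]) not in MESSAGES_AT_REJECT_COUNT:
        return False
    print(MESSAGES_AT_REJECT_COUNT[count].format(fqcn=fqcn))
    return True


for _ in range(15):
    maybe_log_rejection('community.general')  # prints at the 1st, 8th and 13th rejection
```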
diff --git a/lib/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py
index 9ee7f3b9054..66bfd3ab6dd 100644
--- a/lib/ansible/galaxy/role.py
+++ b/lib/ansible/galaxy/role.py
@@ -438,8 +438,8 @@ class GalaxyRole(object):
if not (self.src and os.path.isfile(self.src)):
try:
os.unlink(tmp_file)
- except (OSError, IOError) as e:
- display.warning(u"Unable to remove tmp file (%s): %s" % (tmp_file, to_text(e)))
+ except OSError as ex:
+ display.error_as_warning(f"Unable to remove tmp file {tmp_file!r}.", exception=ex)
return True
return False
diff --git a/lib/ansible/galaxy/token.py b/lib/ansible/galaxy/token.py
index 9b82ad6c62c..0f9f3ee19df 100644
--- a/lib/ansible/galaxy/token.py
+++ b/lib/ansible/galaxy/token.py
@@ -26,6 +26,7 @@ import os
import time
from stat import S_IRUSR, S_IWUSR
from urllib.error import HTTPError
+from urllib.parse import urlencode
from ansible import constants as C
from ansible.galaxy.api import GalaxyError
@@ -47,7 +48,7 @@ class KeycloakToken(object):
token_type = 'Bearer'
- def __init__(self, access_token=None, auth_url=None, validate_certs=True, client_id=None):
+ def __init__(self, access_token=None, auth_url=None, validate_certs=True, client_id=None, client_secret=None):
self.access_token = access_token
self.auth_url = auth_url
self._token = None
@@ -55,11 +56,26 @@ class KeycloakToken(object):
self.client_id = client_id
if self.client_id is None:
self.client_id = 'cloud-services'
+ self.client_secret = client_secret
self._expiration = None
def _form_payload(self):
- return 'grant_type=refresh_token&client_id=%s&refresh_token=%s' % (self.client_id,
- self.access_token)
+ payload = {
+ 'client_id': self.client_id,
+ }
+ if self.client_secret:
+ payload['client_secret'] = self.client_secret
+ payload['scope'] = 'api.console'
+ payload['grant_type'] = 'client_credentials'
+            if self.access_token not in (None, NoTokenSentinel):
+                display.warning(
+                    'Found both a client_secret and access_token for galaxy authentication, ignoring access_token'
+                )
+ else:
+ payload['refresh_token'] = self.access_token
+ payload['grant_type'] = 'refresh_token'
+
+ return urlencode(payload)
def get(self):
if self._expiration and time.time() >= self._expiration:
@@ -68,16 +84,9 @@ class KeycloakToken(object):
if self._token:
return self._token
- # - build a request to POST to auth_url
- # - body is form encoded
- # - 'refresh_token' is the offline token stored in ansible.cfg
- # - 'grant_type' is 'refresh_token'
- # - 'client_id' is 'cloud-services'
- # - should probably be based on the contents of the
- # offline_ticket's JWT payload 'aud' (audience)
- # or 'azp' (Authorized party - the party to which the ID Token was issued)
payload = self._form_payload()
+ display.vvv(f'Authenticating via {self.auth_url}')
try:
resp = open_url(to_native(self.auth_url),
data=payload,
@@ -86,15 +95,18 @@ class KeycloakToken(object):
http_agent=user_agent())
except HTTPError as e:
raise GalaxyError(e, 'Unable to get access token')
+ display.vvv('Authentication successful')
data = json.load(resp)
# So that we have a buffer, expire the token in ~2/3 the given value
expires_in = data['expires_in'] // 3 * 2
self._expiration = time.time() + expires_in
+ display.vvv(f'Authentication token expires in {expires_in} seconds')
- # - extract 'access_token'
self._token = data.get('access_token')
+ if token_type := data.get('token_type'):
+ self.token_type = token_type
return self._token
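The reworked `_form_payload` now switches between two OAuth grant types. A standalone sketch of the two payload shapes it produces (token values are placeholders):

```python
from urllib.parse import urlencode


def form_payload(client_id, access_token=None, client_secret=None):
    payload = {'client_id': client_id}
    if client_secret:
        payload['client_secret'] = client_secret
        payload['scope'] = 'api.console'
        payload['grant_type'] = 'client_credentials'
    else:
        payload['refresh_token'] = access_token
        payload['grant_type'] = 'refresh_token'
    return urlencode(payload)


print(form_payload('cloud-services', access_token='offline-token'))
# client_id=cloud-services&refresh_token=offline-token&grant_type=refresh_token
print(form_payload('cloud-services', client_secret='s3cret'))
# client_id=cloud-services&client_secret=s3cret&scope=api.console&grant_type=client_credentials
```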
diff --git a/lib/ansible/inventory/data.py b/lib/ansible/inventory/data.py
index 691ad5bed42..f879baa4016 100644
--- a/lib/ansible/inventory/data.py
+++ b/lib/ansible/inventory/data.py
@@ -19,64 +19,49 @@
from __future__ import annotations
import sys
+import typing as t
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.inventory.group import Group
from ansible.inventory.host import Host
-from ansible.module_utils.six import string_types
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars
from ansible.utils.path import basedir
+from . import helpers # this is left as a module import to facilitate easier unit test patching
+
+
display = Display()
-class InventoryData(object):
+class InventoryData:
"""
Holds inventory data (host and group objects).
- Using it's methods should guarantee expected relationships and data.
+ Using its methods should guarantee expected relationships and data.
"""
- def __init__(self):
+ def __init__(self) -> None:
- self.groups = {}
- self.hosts = {}
+ self.groups: dict[str, Group] = {}
+ self.hosts: dict[str, Host] = {}
# provides 'groups' magic var, host object has group_names
- self._groups_dict_cache = {}
+ self._groups_dict_cache: dict[str, list[str]] = {}
# current localhost, implicit or explicit
- self.localhost = None
+ self.localhost: Host | None = None
- self.current_source = None
- self.processed_sources = []
+ self.current_source: str | None = None
+ self.processed_sources: list[str] = []
# Always create the 'all' and 'ungrouped' groups,
for group in ('all', 'ungrouped'):
self.add_group(group)
- self.add_child('all', 'ungrouped')
- def serialize(self):
- self._groups_dict_cache = None
- data = {
- 'groups': self.groups,
- 'hosts': self.hosts,
- 'local': self.localhost,
- 'source': self.current_source,
- 'processed_sources': self.processed_sources
- }
- return data
-
- def deserialize(self, data):
- self._groups_dict_cache = {}
- self.hosts = data.get('hosts')
- self.groups = data.get('groups')
- self.localhost = data.get('local')
- self.current_source = data.get('source')
- self.processed_sources = data.get('processed_sources')
+ self.add_child('all', 'ungrouped')
- def _create_implicit_localhost(self, pattern):
+ def _create_implicit_localhost(self, pattern: str) -> Host:
if self.localhost:
new_host = self.localhost
@@ -100,8 +85,8 @@ class InventoryData(object):
return new_host
- def reconcile_inventory(self):
- """ Ensure inventory basic rules, run after updates """
+ def reconcile_inventory(self) -> None:
+ """Ensure inventory basic rules, run after updates."""
display.debug('Reconcile groups and hosts in inventory.')
self.current_source = None
@@ -125,7 +110,7 @@ class InventoryData(object):
if self.groups['ungrouped'] in mygroups:
# clear ungrouped of any incorrectly stored by parser
- if set(mygroups).difference(set([self.groups['all'], self.groups['ungrouped']])):
+ if set(mygroups).difference({self.groups['all'], self.groups['ungrouped']}):
self.groups['ungrouped'].remove_host(host)
elif not host.implicit:
@@ -144,8 +129,10 @@ class InventoryData(object):
self._groups_dict_cache = {}
- def get_host(self, hostname):
- """ fetch host object using name deal with implicit localhost """
+ def get_host(self, hostname: str) -> Host | None:
+ """Fetch host object using name deal with implicit localhost."""
+
+ hostname = helpers.remove_trust(hostname)
matching_host = self.hosts.get(hostname, None)
@@ -156,19 +143,19 @@ class InventoryData(object):
return matching_host
- def add_group(self, group):
- """ adds a group to inventory if not there already, returns named actually used """
+ def add_group(self, group: str) -> str:
+ """Adds a group to inventory if not there already, returns named actually used."""
if group:
- if not isinstance(group, string_types):
+ if not isinstance(group, str):
raise AnsibleError("Invalid group name supplied, expected a string but got %s for %s" % (type(group), group))
if group not in self.groups:
g = Group(group)
- if g.name not in self.groups:
- self.groups[g.name] = g
+ group = g.name # the group object may have sanitized the group name; use whatever it has
+ if group not in self.groups:
+ self.groups[group] = g
self._groups_dict_cache = {}
display.debug("Added group %s to inventory" % group)
- group = g.name
else:
display.debug("group %s already in inventory" % group)
else:
@@ -176,22 +163,24 @@ class InventoryData(object):
return group
- def remove_group(self, group):
+ def remove_group(self, group: Group) -> None:
- if group in self.groups:
- del self.groups[group]
- display.debug("Removed group %s from inventory" % group)
+ if group.name in self.groups:
+ del self.groups[group.name]
+ display.debug("Removed group %s from inventory" % group.name)
self._groups_dict_cache = {}
for host in self.hosts:
h = self.hosts[host]
h.remove_group(group)
- def add_host(self, host, group=None, port=None):
- """ adds a host to inventory and possibly a group if not there already """
+ def add_host(self, host: str, group: str | None = None, port: int | str | None = None) -> str:
+ """Adds a host to inventory and possibly a group if not there already."""
+
+ host = helpers.remove_trust(host)
if host:
- if not isinstance(host, string_types):
+ if not isinstance(host, str):
raise AnsibleError("Invalid host name supplied, expected a string but got %s for %s" % (type(host), host))
# TODO: add to_safe_host_name
@@ -211,7 +200,7 @@ class InventoryData(object):
else:
self.set_variable(host, 'inventory_file', None)
self.set_variable(host, 'inventory_dir', None)
- display.debug("Added host %s to inventory" % (host))
+ display.debug("Added host %s to inventory" % host)
# set default localhost from inventory to avoid creating an implicit one. Last localhost defined 'wins'.
if host in C.LOCALHOST:
@@ -232,7 +221,7 @@ class InventoryData(object):
return host
- def remove_host(self, host):
+ def remove_host(self, host: Host) -> None:
if host.name in self.hosts:
del self.hosts[host.name]
@@ -241,8 +230,10 @@ class InventoryData(object):
g = self.groups[group]
g.remove_host(host)
- def set_variable(self, entity, varname, value):
- """ sets a variable for an inventory object """
+ def set_variable(self, entity: str, varname: str, value: t.Any) -> None:
+ """Sets a variable for an inventory object."""
+
+ inv_object: Host | Group
if entity in self.groups:
inv_object = self.groups[entity]
@@ -254,9 +245,8 @@ class InventoryData(object):
inv_object.set_variable(varname, value)
display.debug('set %s for %s' % (varname, entity))
- def add_child(self, group, child):
- """ Add host or group to group """
- added = False
+ def add_child(self, group: str, child: str) -> bool:
+ """Add host or group to group."""
if group in self.groups:
g = self.groups[group]
if child in self.groups:
@@ -271,12 +261,12 @@ class InventoryData(object):
raise AnsibleError("%s is not a known group" % group)
return added
- def get_groups_dict(self):
+ def get_groups_dict(self) -> dict[str, list[str]]:
"""
We merge a 'magic' var 'groups' with group name keys and hostname list values into every host variable set. Cache for speed.
"""
if not self._groups_dict_cache:
- for (group_name, group) in self.groups.items():
+ for group_name, group in self.groups.items():
self._groups_dict_cache[group_name] = [h.name for h in group.get_hosts()]
return self._groups_dict_cache
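For reference, `get_groups_dict` lazily builds the `groups` magic variable, a mapping of group name to host names, and reuses it until membership changes clear `_groups_dict_cache`. A toy model of that caching (the Group/Host stand-ins are simplified):

```python
class Host:
    def __init__(self, name):
        self.name = name


class Group:
    def __init__(self, name, hosts):
        self.name = name
        self._hosts = hosts

    def get_hosts(self):
        return self._hosts


groups = {'webservers': Group('webservers', [Host('web1'), Host('web2')])}
cache = {}

if not cache:  # rebuilt lazily; invalidated whenever hosts or groups change
    for group_name, group in groups.items():
        cache[group_name] = [h.name for h in group.get_hosts()]

print(cache)  # {'webservers': ['web1', 'web2']}
```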
diff --git a/lib/ansible/inventory/group.py b/lib/ansible/inventory/group.py
index 335f60127c3..0cf97db4dc6 100644
--- a/lib/ansible/inventory/group.py
+++ b/lib/ansible/inventory/group.py
@@ -16,6 +16,8 @@
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
from __future__ import annotations
+import typing as t
+
from collections.abc import Mapping, MutableMapping
from enum import Enum
from itertools import chain
@@ -24,10 +26,15 @@ from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.utils.display import Display
-from ansible.utils.vars import combine_vars
+from ansible.utils.vars import combine_vars, validate_variable_name
+
+from . import helpers # this is left as a module import to facilitate easier unit test patching
display = Display()
+if t.TYPE_CHECKING:
+ from .host import Host
+
def to_safe_group_name(name, replacer="_", force=False, silent=False):
# Converts 'bad' characters in a string to underscores (or provided replacer) so they can be used as Ansible hosts or groups
@@ -59,22 +66,23 @@ class InventoryObjectType(Enum):
class Group:
- """ a group of ansible hosts """
+ """A group of ansible hosts."""
base_type = InventoryObjectType.GROUP
# __slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
- def __init__(self, name=None):
+ def __init__(self, name: str) -> None:
+ name = helpers.remove_trust(name)
- self.depth = 0
- self.name = to_safe_group_name(name)
- self.hosts = []
- self._hosts = None
- self.vars = {}
- self.child_groups = []
- self.parent_groups = []
- self._hosts_cache = None
- self.priority = 1
+ self.depth: int = 0
+ self.name: str = to_safe_group_name(name)
+ self.hosts: list[Host] = []
+ self._hosts: set[str] | None = None
+ self.vars: dict[str, t.Any] = {}
+ self.child_groups: list[Group] = []
+ self.parent_groups: list[Group] = []
+ self._hosts_cache: list[Host] | None = None
+ self.priority: int = 1
def __repr__(self):
return self.get_name()
@@ -82,44 +90,7 @@ class Group:
def __str__(self):
return self.get_name()
- def __getstate__(self):
- return self.serialize()
-
- def __setstate__(self, data):
- return self.deserialize(data)
-
- def serialize(self):
- parent_groups = []
- for parent in self.parent_groups:
- parent_groups.append(parent.serialize())
-
- self._hosts = None
-
- result = dict(
- name=self.name,
- vars=self.vars.copy(),
- parent_groups=parent_groups,
- depth=self.depth,
- hosts=self.hosts,
- )
-
- return result
-
- def deserialize(self, data):
- self.__init__() # used by __setstate__ to deserialize in place # pylint: disable=unnecessary-dunder-call
- self.name = data.get('name')
- self.vars = data.get('vars', dict())
- self.depth = data.get('depth', 0)
- self.hosts = data.get('hosts', [])
- self._hosts = None
-
- parent_groups = data.get('parent_groups', [])
- for parent_data in parent_groups:
- g = Group()
- g.deserialize(parent_data)
- self.parent_groups.append(g)
-
- def _walk_relationship(self, rel, include_self=False, preserve_ordering=False):
+ def _walk_relationship(self, rel, include_self=False, preserve_ordering=False) -> set[Group] | list[Group]:
"""
Given `rel` that is an iterable property of Group,
        constituting a directed acyclic graph among all groups,
@@ -133,12 +104,12 @@ class Group:
F
Called on F, returns set of (A, B, C, D, E)
"""
- seen = set([])
+ seen: set[Group] = set([])
unprocessed = set(getattr(self, rel))
if include_self:
unprocessed.add(self)
if preserve_ordering:
- ordered = [self] if include_self else []
+ ordered: list[Group] = [self] if include_self else []
ordered.extend(getattr(self, rel))
while unprocessed:
@@ -158,22 +129,22 @@ class Group:
return ordered
return seen
- def get_ancestors(self):
- return self._walk_relationship('parent_groups')
+ def get_ancestors(self) -> set[Group]:
+ return t.cast(set, self._walk_relationship('parent_groups'))
- def get_descendants(self, **kwargs):
+ def get_descendants(self, **kwargs) -> set[Group] | list[Group]:
return self._walk_relationship('child_groups', **kwargs)
@property
- def host_names(self):
+ def host_names(self) -> set[str]:
if self._hosts is None:
- self._hosts = set(self.hosts)
+ self._hosts = {h.name for h in self.hosts}
return self._hosts
- def get_name(self):
+ def get_name(self) -> str:
return self.name
- def add_child_group(self, group):
+ def add_child_group(self, group: Group) -> bool:
added = False
if self == group:
raise Exception("can't add group to itself")
@@ -208,7 +179,7 @@ class Group:
self.clear_hosts_cache()
return added
- def _check_children_depth(self):
+ def _check_children_depth(self) -> None:
depth = self.depth
start_depth = self.depth # self.depth could change over loop
@@ -227,7 +198,7 @@ class Group:
if depth - start_depth > len(seen):
raise AnsibleError("The group named '%s' has a recursive dependency loop." % to_native(self.name))
- def add_host(self, host):
+ def add_host(self, host: Host) -> bool:
added = False
if host.name not in self.host_names:
self.hosts.append(host)
@@ -237,7 +208,7 @@ class Group:
added = True
return added
- def remove_host(self, host):
+ def remove_host(self, host: Host) -> bool:
removed = False
if host.name in self.host_names:
self.hosts.remove(host)
@@ -247,7 +218,13 @@ class Group:
removed = True
return removed
- def set_variable(self, key, value):
+ def set_variable(self, key: str, value: t.Any) -> None:
+ key = helpers.remove_trust(key)
+
+ try:
+ validate_variable_name(key)
+ except AnsibleError as ex:
+ Display().deprecated(msg=f'Accepting inventory variable with invalid name {key!r}.', version='2.23', help_text=ex._help_text, obj=ex.obj)
if key == 'ansible_group_priority':
self.set_priority(int(value))
@@ -257,36 +234,36 @@ class Group:
else:
self.vars[key] = value
- def clear_hosts_cache(self):
+ def clear_hosts_cache(self) -> None:
self._hosts_cache = None
for g in self.get_ancestors():
g._hosts_cache = None
- def get_hosts(self):
+ def get_hosts(self) -> list[Host]:
if self._hosts_cache is None:
self._hosts_cache = self._get_hosts()
return self._hosts_cache
- def _get_hosts(self):
+ def _get_hosts(self) -> list[Host]:
- hosts = []
- seen = {}
+ hosts: list[Host] = []
+ seen: set[Host] = set()
for kid in self.get_descendants(include_self=True, preserve_ordering=True):
kid_hosts = kid.hosts
for kk in kid_hosts:
if kk not in seen:
- seen[kk] = 1
+ seen.add(kk)
if self.name == 'all' and kk.implicit:
continue
hosts.append(kk)
return hosts
- def get_vars(self):
+ def get_vars(self) -> dict[str, t.Any]:
return self.vars.copy()
- def set_priority(self, priority):
+ def set_priority(self, priority: int | str) -> None:
try:
self.priority = int(priority)
except TypeError:
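`_walk_relationship`, typed above, is a breadth-first closure over the group DAG. A standalone sketch of the unordered variant, using a minimal `G` stand-in class:

```python
def walk(start, rel):
    """Collect the transitive closure of the `rel` attribute (e.g. 'parent_groups')."""
    seen = set()
    unprocessed = set(getattr(start, rel))
    while unprocessed:
        seen.update(unprocessed)
        unprocessed = {g for node in unprocessed for g in getattr(node, rel)} - seen
    return seen


class G:  # minimal stand-in for Group
    def __init__(self, name):
        self.name = name
        self.parent_groups = []

    def __repr__(self):
        return self.name


a, b, c = G('a'), G('b'), G('c')
b.parent_groups = [a]
c.parent_groups = [b]
print(walk(c, 'parent_groups'))  # {b, a}: all ancestors of c
```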
diff --git a/lib/ansible/inventory/helpers.py b/lib/ansible/inventory/helpers.py
index 8293f905266..43baac96c9b 100644
--- a/lib/ansible/inventory/helpers.py
+++ b/lib/ansible/inventory/helpers.py
@@ -18,6 +18,7 @@
#############################################
from __future__ import annotations
+from ansible._internal._datatag._tags import TrustedAsTemplate
from ansible.utils.vars import combine_vars
@@ -37,3 +38,11 @@ def get_group_vars(groups):
results = combine_vars(results, group.get_vars())
return results
+
+
+def remove_trust(value: str) -> str:
+ """
+ Remove trust from strings which should not be trusted.
+    This exists to centralize the untagging call, which facilitates patching it out in unit tests.
+ """
+ return TrustedAsTemplate.untag(value)
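Because callers import the module (`from . import helpers`) and resolve `helpers.remove_trust` at call time, a single patch point covers `Host`, `Group` and `InventoryData` in tests. A hedged sketch of what such a unit-test patch could look like:

```python
from unittest import mock

# One patch covers every call site, since lookups go through the helpers module.
with mock.patch('ansible.inventory.helpers.remove_trust', side_effect=lambda value: value):
    ...  # inventory objects built here keep any TrustedAsTemplate tagging
```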
diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py
index fafa9520928..45992648834 100644
--- a/lib/ansible/inventory/host.py
+++ b/lib/ansible/inventory/host.py
@@ -17,28 +17,28 @@
from __future__ import annotations
+import collections.abc as c
+import typing as t
+
from collections.abc import Mapping, MutableMapping
+from ansible.errors import AnsibleError
from ansible.inventory.group import Group, InventoryObjectType
from ansible.parsing.utils.addresses import patterns
-from ansible.utils.vars import combine_vars, get_unique_id
+from ansible.utils.display import Display
+from ansible.utils.vars import combine_vars, get_unique_id, validate_variable_name
+from . import helpers # this is left as a module import to facilitate easier unit test patching
__all__ = ['Host']
class Host:
- """ a single ansible host """
+ """A single ansible host."""
base_type = InventoryObjectType.HOST
# __slots__ = [ 'name', 'vars', 'groups' ]
- def __getstate__(self):
- return self.serialize()
-
- def __setstate__(self, data):
- return self.deserialize(data)
-
def __eq__(self, other):
if not isinstance(other, Host):
return False
@@ -56,55 +56,28 @@ class Host:
def __repr__(self):
return self.get_name()
- def serialize(self):
- groups = []
- for group in self.groups:
- groups.append(group.serialize())
-
- return dict(
- name=self.name,
- vars=self.vars.copy(),
- address=self.address,
- uuid=self._uuid,
- groups=groups,
- implicit=self.implicit,
- )
-
- def deserialize(self, data):
- self.__init__(gen_uuid=False) # used by __setstate__ to deserialize in place # pylint: disable=unnecessary-dunder-call
-
- self.name = data.get('name')
- self.vars = data.get('vars', dict())
- self.address = data.get('address', '')
- self._uuid = data.get('uuid', None)
- self.implicit = data.get('implicit', False)
-
- groups = data.get('groups', [])
- for group_data in groups:
- g = Group()
- g.deserialize(group_data)
- self.groups.append(g)
+ def __init__(self, name: str, port: int | str | None = None, gen_uuid: bool = True) -> None:
+ name = helpers.remove_trust(name)
- def __init__(self, name=None, port=None, gen_uuid=True):
+ self.vars: dict[str, t.Any] = {}
+ self.groups: list[Group] = []
+ self._uuid: str | None = None
- self.vars = {}
- self.groups = []
- self._uuid = None
-
- self.name = name
- self.address = name
+ self.name: str = name
+ self.address: str = name
if port:
self.set_variable('ansible_port', int(port))
if gen_uuid:
self._uuid = get_unique_id()
- self.implicit = False
- def get_name(self):
+ self.implicit: bool = False
+
+ def get_name(self) -> str:
return self.name
- def populate_ancestors(self, additions=None):
+ def populate_ancestors(self, additions: c.Iterable[Group] | None = None) -> None:
# populate ancestors
if additions is None:
for group in self.groups:
@@ -114,7 +87,7 @@ class Host:
if group not in self.groups:
self.groups.append(group)
- def add_group(self, group):
+ def add_group(self, group: Group) -> bool:
added = False
# populate ancestors first
for oldg in group.get_ancestors():
@@ -127,7 +100,7 @@ class Host:
added = True
return added
- def remove_group(self, group):
+ def remove_group(self, group: Group) -> bool:
removed = False
if group in self.groups:
self.groups.remove(group)
@@ -143,18 +116,28 @@ class Host:
self.remove_group(oldg)
return removed
- def set_variable(self, key, value):
+ def set_variable(self, key: str, value: t.Any) -> None:
+ key = helpers.remove_trust(key)
+
+ try:
+ validate_variable_name(key)
+ except AnsibleError as ex:
+ Display().deprecated(msg=f'Accepting inventory variable with invalid name {key!r}.', version='2.23', help_text=ex._help_text, obj=ex.obj)
+
if key in self.vars and isinstance(self.vars[key], MutableMapping) and isinstance(value, Mapping):
self.vars = combine_vars(self.vars, {key: value})
else:
self.vars[key] = value
- def get_groups(self):
+ def get_groups(self) -> list[Group]:
return self.groups
- def get_magic_vars(self):
- results = {}
- results['inventory_hostname'] = self.name
+ def get_magic_vars(self) -> dict[str, t.Any]:
+ results: dict[str, t.Any] = dict(
+ inventory_hostname=self.name,
+ )
+
+ # FUTURE: these values should be dynamically calculated on access ala the rest of magic vars
if patterns['ipv4'].match(self.name) or patterns['ipv6'].match(self.name):
results['inventory_hostname_short'] = self.name
else:
@@ -164,5 +147,5 @@ class Host:
return results
- def get_vars(self):
+ def get_vars(self) -> dict[str, t.Any]:
return combine_vars(self.vars, self.get_magic_vars())
diff --git a/lib/ansible/inventory/manager.py b/lib/ansible/inventory/manager.py
index ba6397f1787..e6183ccd095 100644
--- a/lib/ansible/inventory/manager.py
+++ b/lib/ansible/inventory/manager.py
@@ -19,28 +19,34 @@
from __future__ import annotations
import fnmatch
+import functools
import os
-import sys
import re
import itertools
-import traceback
+import typing as t
from operator import attrgetter
from random import shuffle
from ansible import constants as C
-from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
+from ansible._internal import _json, _wrapt
+from ansible._internal._json import EncryptedStringBehavior
+from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.inventory.data import InventoryData
from ansible.module_utils.six import string_types
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins.loader import inventory_loader
+from ansible._internal._datatag._tags import Origin
from ansible.utils.helpers import deduplicate_list
from ansible.utils.path import unfrackpath
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars
from ansible.vars.plugins import get_vars_from_inventory_sources
+if t.TYPE_CHECKING:
+ from ansible.plugins.inventory import BaseInventoryPlugin
+
display = Display()
IGNORED_ALWAYS = [br"^\.", b"^host_vars$", b"^group_vars$", b"^vars_plugins$"]
@@ -196,12 +202,12 @@ class InventoryManager(object):
def get_host(self, hostname):
return self._inventory.get_host(hostname)
- def _fetch_inventory_plugins(self):
+ def _fetch_inventory_plugins(self) -> list[BaseInventoryPlugin]:
""" sets up loaded inventory plugins for usage """
display.vvvv('setting up inventory plugins')
- plugins = []
+ plugins: list[BaseInventoryPlugin] = []
for name in C.INVENTORY_ENABLED:
plugin = inventory_loader.get(name)
if plugin:
@@ -276,7 +282,6 @@ class InventoryManager(object):
# try source with each plugin
for plugin in self._fetch_inventory_plugins():
-
plugin_name = to_text(getattr(plugin, '_load_name', getattr(plugin, '_original_path', '')))
display.debug(u'Attempting to use plugin %s (%s)' % (plugin_name, plugin._original_path))
@@ -287,9 +292,14 @@ class InventoryManager(object):
plugin_wants = False
if plugin_wants:
+ # have this tag ready to apply to errors or output; str-ify source since it is often tagged by the CLI
+                origin = Origin(description=f'<inventory source {str(source)}>')
try:
- # FIXME in case plugin fails 1/2 way we have partial inventory
- plugin.parse(self._inventory, self._loader, source, cache=cache)
+ inventory_wrapper = _InventoryDataWrapper(self._inventory, target_plugin=plugin, origin=origin)
+
+ # FUTURE: now that we have a wrapper around inventory, we can have it use ChainMaps to preview the in-progress inventory,
+ # but be able to roll back partial inventory failures by discarding the outermost layer
+ plugin.parse(inventory_wrapper, self._loader, source, cache=cache)
try:
plugin.update_cache_if_changed()
except AttributeError:
@@ -298,14 +308,17 @@ class InventoryManager(object):
parsed = True
display.vvv('Parsed %s inventory source with %s plugin' % (source, plugin_name))
break
- except AnsibleParserError as e:
- display.debug('%s was not parsable by %s' % (source, plugin_name))
- tb = ''.join(traceback.format_tb(sys.exc_info()[2]))
- failures.append({'src': source, 'plugin': plugin_name, 'exc': e, 'tb': tb})
- except Exception as e:
- display.debug('%s failed while attempting to parse %s' % (plugin_name, source))
- tb = ''.join(traceback.format_tb(sys.exc_info()[2]))
- failures.append({'src': source, 'plugin': plugin_name, 'exc': AnsibleError(e), 'tb': tb})
+ except AnsibleError as ex:
+ if not ex.obj:
+ ex.obj = origin
+ failures.append({'src': source, 'plugin': plugin_name, 'exc': ex})
+ except Exception as ex:
+ # DTFIX-FUTURE: fix this error handling to correctly deal with messaging
+ try:
+ # omit line number to prevent contextual display of script or possibly sensitive info
+ raise AnsibleError(str(ex), obj=origin) from ex
+ except AnsibleError as ex:
+ failures.append({'src': source, 'plugin': plugin_name, 'exc': ex})
else:
display.vvv("%s declined parsing %s as it did not pass its verify_file() method" % (plugin_name, source))
@@ -319,9 +332,8 @@ class InventoryManager(object):
if failures:
# only if no plugin processed files should we show errors.
for fail in failures:
- display.warning(u'\n* Failed to parse %s with %s plugin: %s' % (to_text(fail['src']), fail['plugin'], to_text(fail['exc'])))
- if 'tb' in fail:
- display.vvv(to_text(fail['tb']))
+ # `obj` should always be set
+ display.error_as_warning(msg=f'Failed to parse inventory with {fail["plugin"]!r} plugin.', exception=fail['exc'])
# final error/warning on inventory source failure
if C.INVENTORY_ANY_UNPARSED_IS_FAILED:
@@ -749,3 +761,36 @@ class InventoryManager(object):
self.reconcile_inventory()
result_item['changed'] = changed
+
+
+class _InventoryDataWrapper(_wrapt.ObjectProxy):
+ """
+ Proxy wrapper around InventoryData.
+ Allows `set_variable` calls to automatically apply template trust for plugins that don't know how.
+ """
+
+ # declared as class attrs to signal to ObjectProxy that we want them stored on the proxy, not the wrapped value
+ _target_plugin = None
+ _default_origin = None
+
+ def __init__(self, referent: InventoryData, target_plugin: BaseInventoryPlugin, origin: Origin) -> None:
+ super().__init__(referent)
+ self._target_plugin = target_plugin
+ # fallback origin to ensure that vars are tagged with at least the file they came from
+ self._default_origin = origin
+
+ @functools.cached_property
+ def _inspector(self) -> _json.AnsibleVariableVisitor:
+ """
+ Inventory plugins can delegate to other plugins (e.g. `auto`).
+ This hack defers sampling the target plugin's `trusted_by_default` attr until `set_variable` is called, typically inside `parse`.
+ Trust is then optionally applied based on the plugin's declared intent via `trusted_by_default`.
+ """
+ return _json.AnsibleVariableVisitor(
+ trusted_as_template=self._target_plugin.trusted_by_default,
+ origin=self._default_origin,
+ encrypted_string_behavior=EncryptedStringBehavior.PRESERVE,
+ )
+
+ def set_variable(self, entity: str, varname: str, value: t.Any) -> None:
+ self.__wrapped__.set_variable(entity, varname, self._inspector.visit(value))
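`_InventoryDataWrapper` leans on the vendored wrapt `ObjectProxy`: conceptually it delegates everything except `set_variable`, which transforms values on the way in. A hand-rolled stand-in showing the same interception pattern (all names below are illustrative, not the real API):

```python
class InventoryProxy:
    """Delegates every attribute to the wrapped object except set_variable."""

    def __init__(self, wrapped, transform):
        object.__setattr__(self, '_wrapped', wrapped)
        object.__setattr__(self, '_transform', transform)

    def __getattr__(self, name):  # only called for attributes not found on the proxy
        return getattr(self._wrapped, name)

    def set_variable(self, entity, varname, value):
        self._wrapped.set_variable(entity, varname, self._transform(value))


class FakeInventory:
    def __init__(self):
        self.vars = {}

    def set_variable(self, entity, varname, value):
        self.vars[(entity, varname)] = value


inv = InventoryProxy(FakeInventory(), transform=str.upper)  # upper-casing stands in for trust tagging
inv.set_variable('web1', 'greeting', 'hello')
print(inv.vars)  # {('web1', 'greeting'): 'HELLO'}
```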
diff --git a/lib/ansible/keyword_desc.yml b/lib/ansible/keyword_desc.yml
index 4aea8234b61..39d703a3a3f 100644
--- a/lib/ansible/keyword_desc.yml
+++ b/lib/ansible/keyword_desc.yml
@@ -13,7 +13,7 @@ become_method: Which method of privilege escalation to use (such as sudo or su).
become_user: "User that you 'become' after using privilege escalation. The remote/login user must have permissions to become this user."
block: List of tasks in a block.
changed_when: "Conditional expression that overrides the task's normal 'changed' status."
-check_mode: A boolean that controls if a task is executed in 'check' mode. See :ref:`check_mode_dry`.
+check_mode: A boolean that controls if a task is run normally or avoids changes to the target and tries to report what it would have done (check mode/dry run). See :ref:`check_mode_dry`.
collections: |
List of collection namespaces to search for modules, plugins, and roles. See :ref:`collections_using_playbook`
diff --git a/lib/ansible/module_utils/_internal/__init__.py b/lib/ansible/module_utils/_internal/__init__.py
index e69de29bb2d..edf61c0260f 100644
--- a/lib/ansible/module_utils/_internal/__init__.py
+++ b/lib/ansible/module_utils/_internal/__init__.py
@@ -0,0 +1,55 @@
+from __future__ import annotations
+
+import collections.abc as c
+
+import typing as t
+
+if t.TYPE_CHECKING:
+ from ansible.module_utils.compat.typing import TypeGuard
+
+
+INTERMEDIATE_MAPPING_TYPES = (c.Mapping,)
+"""
+Mapping types which are supported for recursion and runtime usage, such as in serialization and templating.
+These will be converted to a simple Python `dict` before serialization or storage as a variable.
+"""
+
+INTERMEDIATE_ITERABLE_TYPES = (tuple, set, frozenset, c.Sequence)
+"""
+Iterable types which are supported for recursion and runtime usage, such as in serialization and templating.
+These will be converted to a simple Python `list` before serialization or storage as a variable.
+CAUTION: Scalar types which are sequences should be excluded when using this.
+"""
+
+ITERABLE_SCALARS_NOT_TO_ITERATE = (str, bytes)
+"""Scalars which are also iterable, and should thus be excluded from iterable checks."""
+
+
+def is_intermediate_mapping(value: object) -> TypeGuard[c.Mapping]:
+ """Returns `True` if `value` is a type supported for projection to a Python `dict`, otherwise returns `False`."""
+ return isinstance(value, INTERMEDIATE_MAPPING_TYPES)
+
+
+def is_intermediate_iterable(value: object) -> TypeGuard[c.Iterable]:
+ """Returns `True` if `value` is a type supported for projection to a Python `list`, otherwise returns `False`."""
+ return isinstance(value, INTERMEDIATE_ITERABLE_TYPES) and not isinstance(value, ITERABLE_SCALARS_NOT_TO_ITERATE)
+
+
+is_controller: bool = False
+"""Set to True automatically when this module is imported into an Ansible controller context."""
+
+
+def get_controller_serialize_map() -> dict[type, t.Callable]:
+ """
+ Called to augment serialization maps.
+ This implementation is replaced with the one from ansible._internal in controller contexts.
+ """
+ return {}
+
+
+def import_controller_module(_module_name: str, /) -> t.Any:
+ """
+ Called to conditionally import the named module in a controller context, otherwise returns `None`.
+ This implementation is replaced with the one from ansible._internal in controller contexts.
+ """
+ return None
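The projection helpers above classify values ahead of serialization or variable storage; note how `str` is an iterable `Sequence` yet deliberately excluded. A quick demonstration:

```python
import collections.abc as c

INTERMEDIATE_ITERABLE_TYPES = (tuple, set, frozenset, c.Sequence)
ITERABLE_SCALARS_NOT_TO_ITERATE = (str, bytes)


def is_intermediate_iterable(value):
    return isinstance(value, INTERMEDIATE_ITERABLE_TYPES) and not isinstance(value, ITERABLE_SCALARS_NOT_TO_ITERATE)


print(is_intermediate_iterable((1, 2)))      # True: will be projected to a list
print(is_intermediate_iterable({'a', 'b'}))  # True: sets are converted too
print(is_intermediate_iterable('abc'))       # False: str is an iterable scalar
```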
diff --git a/lib/ansible/module_utils/_internal/_ambient_context.py b/lib/ansible/module_utils/_internal/_ambient_context.py
new file mode 100644
index 00000000000..dde9a8c046c
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_ambient_context.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2024 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import annotations
+
+import contextlib
+import contextvars
+
+# deprecated: description='typing.Self exists in Python 3.11+' python_version='3.10'
+from ..compat import typing as t
+
+
+class AmbientContextBase:
+ """
+ An abstract base context manager that, once entered, will be accessible via its `current` classmethod to any code in the same
+ `contextvars` context (e.g. same thread/coroutine), until it is exited.
+ """
+
+ __slots__ = ('_contextvar_token',)
+
+ # DTFIX-FUTURE: subclasses need to be able to opt-in to blocking nested contexts of the same type (basically optional per-callstack singleton behavior)
+ # DTFIX-FUTURE: this class should enforce strict nesting of contexts; overlapping context lifetimes leads to incredibly difficult to
+ # debug situations with undefined behavior, so it should fail fast.
+ # DTFIX-FUTURE: make frozen=True dataclass subclasses work (fix the mutability of the contextvar instance)
+
+ _contextvar: t.ClassVar[contextvars.ContextVar] # pylint: disable=declare-non-slot # pylint bug, see https://github.com/pylint-dev/pylint/issues/9950
+ _contextvar_token: contextvars.Token
+
+ def __init_subclass__(cls, **kwargs) -> None:
+ cls._contextvar = contextvars.ContextVar(cls.__name__)
+
+ @classmethod
+ def when(cls, condition: bool, /, *args, **kwargs) -> t.Self | contextlib.nullcontext:
+ """Return an instance of the context if `condition` is `True`, otherwise return a `nullcontext` instance."""
+ return cls(*args, **kwargs) if condition else contextlib.nullcontext()
+
+ @classmethod
+ def current(cls, optional: bool = False) -> t.Self | None:
+ """
+ Return the currently active context value for the current thread or coroutine.
+ Raises ReferenceError if a context is not active, unless `optional` is `True`.
+ """
+ try:
+ return cls._contextvar.get()
+ except LookupError:
+ if optional:
+ return None
+
+ raise ReferenceError(f"A required {cls.__name__} context is not active.") from None
+
+ def __enter__(self) -> t.Self:
+ # DTFIX-FUTURE: actively block multiple entry
+ self._contextvar_token = self.__class__._contextvar.set(self)
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb) -> None:
+ self.__class__._contextvar.reset(self._contextvar_token)
+ del self._contextvar_token
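
Usage sketch (illustrative, not part of the diff; `RequestContext` is a hypothetical subclass):

    class RequestContext(AmbientContextBase):
        def __init__(self, user: str) -> None:
            self.user = user

    with RequestContext('admin'):
        assert RequestContext.current().user == 'admin'  # visible to any code in this thread/coroutine

    assert RequestContext.current(optional=True) is None  # inactive after exit; without optional=True this raises ReferenceError
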
diff --git a/lib/ansible/module_utils/_internal/_ansiballz/__init__.py b/lib/ansible/module_utils/_internal/_ansiballz/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/lib/ansible/module_utils/_internal/_ansiballz/_extensions/__init__.py b/lib/ansible/module_utils/_internal/_ansiballz/_extensions/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/lib/ansible/module_utils/_internal/_ansiballz/_extensions/_coverage.py b/lib/ansible/module_utils/_internal/_ansiballz/_extensions/_coverage.py
new file mode 100644
index 00000000000..7563494464d
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_ansiballz/_extensions/_coverage.py
@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+import atexit
+import dataclasses
+import importlib.util
+import os
+import sys
+
+import typing as t
+
+
+@dataclasses.dataclass(frozen=True)
+class Options:
+ """Code coverage options."""
+
+ config: str
+ output: str | None
+
+
+def run(args: dict[str, t.Any]) -> None: # pragma: nocover
+ """Bootstrap `coverage` for the current Ansible module invocation."""
+ options = Options(**args)
+
+ if options.output:
+ # Enable code coverage analysis of the module.
+ # This feature is for internal testing and may change without notice.
+ python_version_string = '.'.join(str(v) for v in sys.version_info[:2])
+ os.environ['COVERAGE_FILE'] = f'{options.output}=python-{python_version_string}=coverage'
+
+ import coverage
+
+ cov = coverage.Coverage(config_file=options.config)
+
+ def atexit_coverage() -> None:
+ cov.stop()
+ cov.save()
+
+ atexit.register(atexit_coverage)
+
+ cov.start()
+ else:
+ # Verify coverage is available without importing it.
+ # This will detect when a module would fail with coverage enabled with minimal overhead.
+ if importlib.util.find_spec('coverage') is None:
+ raise RuntimeError('Could not find the `coverage` Python module.')
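
Usage sketch (illustrative, not part of the diff; paths are hypothetical). The AnsiballZ wrapper passes the deserialized `Options` fields as a dict:

    # Enable coverage collection for this module invocation:
    run({'config': '/tmp/coveragerc', 'output': '/tmp/results/coverage'})

    # Without an output target, only verify that `coverage` is importable:
    run({'config': '/tmp/coveragerc', 'output': None})
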
diff --git a/lib/ansible/module_utils/_internal/_ansiballz/_extensions/_pydevd.py b/lib/ansible/module_utils/_internal/_ansiballz/_extensions/_pydevd.py
new file mode 100644
index 00000000000..eec234d9d10
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_ansiballz/_extensions/_pydevd.py
@@ -0,0 +1,62 @@
+"""
+Remote debugging support for AnsiballZ modules.
+
+To use with PyCharm:
+
+1) Choose an available port for PyCharm to listen on (e.g. 5678).
+2) Create a Python Debug Server using that port.
+3) Start the Python Debug Server.
+4) Ensure the correct version of `pydevd-pycharm` is installed for the interpreter(s) which will run the code being debugged.
+5) Configure Ansible with the `_ANSIBALLZ_DEBUGGER_CONFIG` option.
+ See `Options` below for the structure of the debugger configuration.
+ Example configuration using an environment variable:
+ export _ANSIBLE_ANSIBALLZ_DEBUGGER_CONFIG='{"module": "pydevd_pycharm", "settrace": {"host": "localhost", "port": 5678, "suspend": false}}'
+6) Set any desired breakpoints.
+7) Run Ansible commands.
+
+A similar process should work for other pydevd-based debuggers, such as Visual Studio Code, but they have not been tested.
+"""
+
+from __future__ import annotations
+
+import dataclasses
+import importlib
+import json
+import os
+import pathlib
+
+import typing as t
+
+
+@dataclasses.dataclass(frozen=True)
+class Options:
+ """Debugger options for pydevd and its derivatives."""
+
+ module: str = 'pydevd'
+ """The Python module which will be imported and which provides the `settrace` method."""
+ settrace: dict[str, object] = dataclasses.field(default_factory=dict)
+ """The options to pass to the `{module}.settrace` method."""
+ source_mapping: dict[str, str] = dataclasses.field(default_factory=dict)
+ """
+ A mapping of source paths to provide to pydevd.
+ This setting is used internally by AnsiballZ and is not required unless Ansible CLI commands are run from a different system than your IDE.
+ In that scenario, use this setting instead of configuring source mapping in your IDE.
+ The key is a path known to the IDE.
+ The value is the same path as known to the Ansible CLI.
+ Both file paths and directories are supported.
+ """
+
+
+def run(args: dict[str, t.Any]) -> None: # pragma: nocover
+ """Enable remote debugging."""
+
+ options = Options(**args)
+ temp_dir = pathlib.Path(__file__).parent.parent.parent.parent.parent.parent
+ path_mapping = [[key, str(temp_dir / value)] for key, value in options.source_mapping.items()]
+
+ os.environ['PATHS_FROM_ECLIPSE_TO_PYTHON'] = json.dumps(path_mapping)
+
+ debugging_module = importlib.import_module(options.module)
+ debugging_module.settrace(**options.settrace)
+
+ pass # when suspend is True, execution pauses here -- it's also a convenient place to put a breakpoint
diff --git a/lib/ansible/module_utils/_internal/_ansiballz/_loader.py b/lib/ansible/module_utils/_internal/_ansiballz/_loader.py
new file mode 100644
index 00000000000..478cbe6c4bf
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_ansiballz/_loader.py
@@ -0,0 +1,81 @@
+# Copyright (c) 2024 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+"""Support code for exclusive use by the AnsiballZ wrapper."""
+
+from __future__ import annotations
+
+import importlib
+import json
+import runpy
+import sys
+import typing as t
+
+from ansible.module_utils import basic
+from ansible.module_utils._internal import _errors, _traceback, _messages, _ansiballz
+from ansible.module_utils.common.json import get_module_encoder, Direction
+
+
+def run_module(
+ *,
+ json_params: bytes,
+ profile: str,
+ module_fqn: str,
+ modlib_path: str,
+ extensions: dict[str, dict[str, object]],
+ init_globals: dict[str, t.Any] | None = None,
+) -> None: # pragma: nocover
+ """Used internally by the AnsiballZ wrapper to run an Ansible module."""
+ try:
+ for extension, args in extensions.items():
+ # importing _ansiballz instead of _extensions avoids an unnecessary import when extensions are not in use
+ extension_module = importlib.import_module(f'{_ansiballz.__name__}._extensions.{extension}')
+ extension_module.run(args)
+
+ _run_module(
+ json_params=json_params,
+ profile=profile,
+ module_fqn=module_fqn,
+ modlib_path=modlib_path,
+ init_globals=init_globals,
+ )
+ except Exception as ex: # not BaseException, since modules are expected to raise SystemExit
+ _handle_exception(ex, profile)
+
+
+def _run_module(
+ *,
+ json_params: bytes,
+ profile: str,
+ module_fqn: str,
+ modlib_path: str,
+ init_globals: dict[str, t.Any] | None = None,
+) -> None:
+ """Used internally by `_run_module` to run an Ansible module after coverage has been enabled (if applicable)."""
+ basic._ANSIBLE_ARGS = json_params
+ basic._ANSIBLE_PROFILE = profile
+
+ init_globals = init_globals or {}
+ init_globals.update(_module_fqn=module_fqn, _modlib_path=modlib_path)
+
+ # Run the module. By importing it as '__main__', it executes as a script.
+ runpy.run_module(mod_name=module_fqn, init_globals=init_globals, run_name='__main__', alter_sys=True)
+
+ # An Ansible module must print its own results and exit. If execution reaches this point, that did not happen.
+ raise RuntimeError('New-style module did not handle its own exit.')
+
+
+def _handle_exception(exception: BaseException, profile: str) -> t.NoReturn:
+ """Handle the given exception."""
+ result = dict(
+ failed=True,
+ exception=_messages.ErrorSummary(
+ event=_errors.EventFactory.from_exception(exception, _traceback.is_traceback_enabled(_traceback.TracebackEvent.ERROR)),
+ ),
+ )
+
+ encoder = get_module_encoder(profile, Direction.MODULE_TO_CONTROLLER)
+
+ print(json.dumps(result, cls=encoder)) # pylint: disable=ansible-bad-function
+
+ sys.exit(1) # pylint: disable=ansible-bad-function
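
Usage sketch (illustrative, not part of the diff; argument values are hypothetical). The `extensions` mapping keys name modules under `_extensions`, and the values are their option dicts:

    run_module(
        json_params=b'{"ANSIBLE_MODULE_ARGS": {}}',
        profile='legacy',
        module_fqn='ansible.modules.ping',
        modlib_path='/tmp/ansible_payload',
        extensions={'_coverage': {'config': '/tmp/coveragerc', 'output': None}},
    )
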
diff --git a/lib/ansible/module_utils/_internal/_ansiballz/_respawn.py b/lib/ansible/module_utils/_internal/_ansiballz/_respawn.py
new file mode 100644
index 00000000000..05c1257fa6a
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_ansiballz/_respawn.py
@@ -0,0 +1,32 @@
+from __future__ import annotations
+
+import inspect
+import sys
+
+from ... import basic
+from . import _respawn_wrapper
+
+
+def create_payload() -> str:
+ """Create and return an AnsiballZ payload for respawning a module."""
+ main = sys.modules['__main__']
+ code = inspect.getsource(_respawn_wrapper)
+
+ args = dict(
+ module_fqn=main._module_fqn,
+ modlib_path=main._modlib_path,
+ profile=basic._ANSIBLE_PROFILE,
+ json_params=basic._ANSIBLE_ARGS,
+ )
+
+ args_string = '\n'.join(f'{key}={value!r},' for key, value in args.items())
+
+ wrapper = f"""{code}
+
+if __name__ == "__main__":
+ _respawn_main(
+{args_string}
+)
+"""
+
+ return wrapper
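
For reference (illustrative, not part of the diff; argument values are hypothetical), the returned payload is plain source text: the full `_respawn_wrapper` module followed by a `__main__` guard, roughly:

    # ... source of _respawn_wrapper.py ...
    if __name__ == "__main__":
        _respawn_main(
    module_fqn='ansible.modules.ping',
    modlib_path='/tmp/ansible_payload',
    profile='legacy',
    json_params=b'...',
    )

The argument lines are emitted without indentation; this is still valid Python because they sit inside the open parentheses.
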
diff --git a/lib/ansible/module_utils/_internal/_ansiballz/_respawn_wrapper.py b/lib/ansible/module_utils/_internal/_ansiballz/_respawn_wrapper.py
new file mode 100644
index 00000000000..2bd03074c75
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_ansiballz/_respawn_wrapper.py
@@ -0,0 +1,23 @@
+from __future__ import annotations
+
+
+def _respawn_main(
+ json_params: bytes,
+ profile: str,
+ module_fqn: str,
+ modlib_path: str,
+) -> None:
+ import sys
+
+ sys.path.insert(0, modlib_path)
+
+ from ansible.module_utils._internal._ansiballz import _loader
+
+ _loader.run_module(
+ json_params=json_params,
+ profile=profile,
+ module_fqn=module_fqn,
+ modlib_path=modlib_path,
+ extensions={},
+ init_globals=dict(_respawned=True),
+ )
diff --git a/lib/ansible/module_utils/_internal/_concurrent/_daemon_threading.py b/lib/ansible/module_utils/_internal/_concurrent/_daemon_threading.py
index 0b32a062fed..3a29b981100 100644
--- a/lib/ansible/module_utils/_internal/_concurrent/_daemon_threading.py
+++ b/lib/ansible/module_utils/_internal/_concurrent/_daemon_threading.py
@@ -1,4 +1,5 @@
"""Proxy stdlib threading module that only supports non-joinable daemon threads."""
+
# NB: all new local module attrs are _ prefixed to ensure an identical public attribute surface area to the module we're proxying
from __future__ import annotations as _annotations
diff --git a/lib/ansible/module_utils/_internal/_dataclass_validation.py b/lib/ansible/module_utils/_internal/_dataclass_validation.py
new file mode 100644
index 00000000000..dcd6472347c
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_dataclass_validation.py
@@ -0,0 +1,217 @@
+# Copyright (c) 2024 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+"""Code generation of __post_init__ methods for efficient dataclass field type checking at runtime."""
+
+from __future__ import annotations
+
+import atexit
+import functools
+import itertools
+import shutil
+import tempfile
+import types
+import typing as t
+
+_write_generated_code_to_disk = False
+
+# deprecated: description='types.UnionType is available in Python 3.10' python_version='3.9'
+try:
+ _union_type: type | None = types.UnionType # type: ignore[attr-defined]
+ _union_types: tuple = (t.Union, types.UnionType) # type: ignore[attr-defined]
+except AttributeError:
+ _union_type = None # type: ignore[assignment]
+ _union_types = (t.Union,) # type: ignore[assignment]
+
+
+def inject_post_init_validation(cls: type, allow_subclasses=False) -> None:
+ """Inject a __post_init__ field validation method on the given dataclass. An existing __post_init__ attribute must already exist."""
+ # DTFIX-FUTURE: when cls must have a __post_init__, enforcing it as a no-op would be nice, but is tricky on slotted dataclasses due to double-creation
+ post_validate_name = '_post_validate'
+ method_name = '__post_init__'
+ exec_globals: dict[str, t.Any] = {}
+ known_types: dict[type, str] = {}
+ lines: list[str] = []
+ field_type_hints = t.get_type_hints(cls)
+ indent = 1
+
+ def append_line(line: str) -> None:
+ """Append a line to the generated source at the current indentation level."""
+ lines.append((' ' * indent * 4) + line)
+
+ def register_type(target_type: type) -> str:
+ """Register the target type and return the local name."""
+ target_name = f'{target_type.__module__.replace(".", "_")}_{target_type.__name__}'
+
+ known_types[target_type] = target_name
+ exec_globals[target_name] = target_type
+
+ return target_name
+
+ def validate_value(target_name: str, target_ref: str, target_type: type) -> None:
+ """Generate code to validate the specified value."""
+ nonlocal indent
+
+ origin_type = t.get_origin(target_type)
+
+ if origin_type is t.ClassVar:
+ return # ignore annotations which are not fields, indicated by the t.ClassVar annotation
+
+ allowed_types = _get_allowed_types(target_type)
+
+ # check value
+
+ if origin_type is t.Literal:
+ # DTFIX-FUTURE: support optional literals
+
+ values = t.get_args(target_type)
+
+ append_line(f"""if {target_ref} not in {values}:""")
+ append_line(f""" raise ValueError(rf"{target_name} must be one of {values} instead of {{{target_ref}!r}}")""")
+
+ allowed_refs = [register_type(allowed_type) for allowed_type in allowed_types]
+ allowed_names = [repr(allowed_type) for allowed_type in allowed_types]
+
+ if allow_subclasses:
+ if len(allowed_refs) == 1:
+ append_line(f"""if not isinstance({target_ref}, {allowed_refs[0]}):""")
+ else:
+ append_line(f"""if not isinstance({target_ref}, ({', '.join(allowed_refs)})):""")
+ else:
+ if len(allowed_refs) == 1:
+ append_line(f"""if type({target_ref}) is not {allowed_refs[0]}:""")
+ else:
+ append_line(f"""if type({target_ref}) not in ({', '.join(allowed_refs)}):""")
+
+ append_line(f""" raise TypeError(f"{target_name} must be {' or '.join(allowed_names)} instead of {{type({target_ref})}}")""")
+
+ # check elements (for containers)
+
+ if target_ref.startswith('self.'):
+ local_ref = target_ref[5:]
+ else:
+ local_ref = target_ref
+
+ if tuple in allowed_types:
+ tuple_type = _extract_type(target_type, tuple)
+
+ idx_ref = f'{local_ref}_idx'
+ item_ref = f'{local_ref}_item'
+ item_name = f'{target_name}[{{{idx_ref}!r}}]'
+ item_type, _ellipsis = t.get_args(tuple_type)
+
+ if _ellipsis is not ...:
+ raise ValueError(f"{cls} tuple fields must be a tuple of a single element type")
+
+ append_line(f"""if isinstance({target_ref}, {known_types[tuple]}):""")
+ append_line(f""" for {idx_ref}, {item_ref} in enumerate({target_ref}):""")
+
+ indent += 2
+ validate_value(target_name=item_name, target_ref=item_ref, target_type=item_type)
+ indent -= 2
+
+ if list in allowed_types:
+ list_type = _extract_type(target_type, list)
+
+ idx_ref = f'{local_ref}_idx'
+ item_ref = f'{local_ref}_item'
+ item_name = f'{target_name}[{{{idx_ref}!r}}]'
+ (item_type,) = t.get_args(list_type)
+
+ append_line(f"""if isinstance({target_ref}, {known_types[list]}):""")
+ append_line(f""" for {idx_ref}, {item_ref} in enumerate({target_ref}):""")
+
+ indent += 2
+ validate_value(target_name=item_name, target_ref=item_ref, target_type=item_type)
+ indent -= 2
+
+ if dict in allowed_types:
+ dict_type = _extract_type(target_type, dict)
+
+ key_ref, value_ref = f'{local_ref}_key', f'{local_ref}_value'
+ key_type, value_type = t.get_args(dict_type)
+ key_name, value_name = f'{target_name!r} key {{{key_ref}!r}}', f'{target_name}[{{{key_ref}!r}}]'
+
+ append_line(f"""if isinstance({target_ref}, {known_types[dict]}):""")
+ append_line(f""" for {key_ref}, {value_ref} in {target_ref}.items():""")
+
+ indent += 2
+ validate_value(target_name=key_name, target_ref=key_ref, target_type=key_type)
+ validate_value(target_name=value_name, target_ref=value_ref, target_type=value_type)
+ indent -= 2
+
+ for field_name in cls.__annotations__:
+ validate_value(target_name=f'{{type(self).__name__}}.{field_name}', target_ref=f'self.{field_name}', target_type=field_type_hints[field_name])
+
+ if hasattr(cls, post_validate_name):
+ append_line(f"self.{post_validate_name}()")
+
+ if not lines:
+ return # nothing to validate (empty dataclass)
+
+ if '__init__' in cls.__dict__ and not hasattr(cls, method_name):
+ raise ValueError(f"{cls} must have a {method_name!r} method to override when invoked after the '__init__' method is created")
+
+ if any(hasattr(parent, method_name) for parent in cls.__mro__[1:]):
+ lines.insert(0, f' super({register_type(cls)}, self).{method_name}()')
+
+ lines.insert(0, f'def {method_name}(self):')
+
+ source = '\n'.join(lines) + '\n'
+
+ if _write_generated_code_to_disk:
+ tmp = tempfile.NamedTemporaryFile(mode='w+t', suffix=f'-{cls.__module__}.{cls.__name__}.py', delete=False, dir=_get_temporary_directory())
+
+ tmp.write(source)
+ tmp.flush()
+
+ filename = tmp.name
+ else:
+ filename = f' generated for {cls}'
+
+ code = compile(source, filename, 'exec')
+
+ exec(code, exec_globals)
+ setattr(cls, method_name, exec_globals[method_name])
+
+
+@functools.lru_cache(maxsize=1)
+def _get_temporary_directory() -> str:
+ """Create a temporary directory and return its full path. The directory will be deleted when the process exits."""
+ temp_dir = tempfile.mkdtemp()
+
+ atexit.register(lambda: shutil.rmtree(temp_dir))
+
+ return temp_dir
+
+
+def _get_allowed_types(target_type: type) -> tuple[type, ...]:
+ """Return a tuple of types usable in instance checks for the given target_type."""
+ origin_type = t.get_origin(target_type)
+
+ if origin_type in _union_types:
+ allowed_types = tuple(set(itertools.chain.from_iterable(_get_allowed_types(arg) for arg in t.get_args(target_type))))
+ elif origin_type is t.Literal:
+ allowed_types = (str,) # DTFIX-FUTURE: support non-str literal types
+ elif origin_type:
+ allowed_types = (origin_type,)
+ else:
+ allowed_types = (target_type,)
+
+ return allowed_types
+
+
+def _extract_type(target_type: type, of_type: type) -> type:
+ """Return `of_type` from `target_type`, where `target_type` may be a union."""
+ origin_type = t.get_origin(target_type)
+
+ if origin_type is of_type: # pylint: disable=unidiomatic-typecheck
+ return target_type
+
+ if origin_type is t.Union or (_union_type and isinstance(target_type, _union_type)):
+ args = t.get_args(target_type)
+ extracted_types = [arg for arg in args if type(arg) is of_type or t.get_origin(arg) is of_type] # pylint: disable=unidiomatic-typecheck
+ (extracted_type,) = extracted_types
+ return extracted_type
+
+ raise NotImplementedError(f'{target_type} is not supported')
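
Usage sketch (illustrative, not part of the diff). The class must define a `__post_init__` placeholder before the dataclass machinery runs, so the generated validator has a hook to replace:

    import dataclasses

    @dataclasses.dataclass
    class Point:
        x: int
        y: int

        def __post_init__(self) -> None:  # placeholder; replaced by the generated validator
            pass

    inject_post_init_validation(Point)

    Point(x=1, y=2)       # passes validation
    Point(x=1, y='nope')  # raises TypeError: Point.y must be <class 'int'> instead of <class 'str'>
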
diff --git a/lib/ansible/module_utils/_internal/_datatag/__init__.py b/lib/ansible/module_utils/_internal/_datatag/__init__.py
new file mode 100644
index 00000000000..479a0278d0a
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_datatag/__init__.py
@@ -0,0 +1,961 @@
+from __future__ import annotations
+
+import abc
+import collections.abc as c
+import copy
+import dataclasses
+import datetime
+import enum
+import inspect
+import sys
+
+from itertools import chain
+
+# deprecated: description='typing.Self exists in Python 3.11+' python_version='3.10'
+from ansible.module_utils.compat import typing as t
+
+from ansible.module_utils._internal import _dataclass_validation
+from ansible.module_utils._internal._patches import _sys_intern_patch, _socket_patch
+
+_sys_intern_patch.SysInternPatch.patch()
+_socket_patch.GetAddrInfoPatch.patch() # DTFIX-FUTURE: consider replacing this with a socket import shim that installs the patch
+
+if sys.version_info >= (3, 10):
+ # Using slots for reduced memory usage and improved performance.
+ _tag_dataclass_kwargs = dict(frozen=True, repr=False, kw_only=True, slots=True)
+else:
+ # deprecated: description='always use dataclass slots and keyword-only args' python_version='3.9'
+ _tag_dataclass_kwargs = dict(frozen=True, repr=False)
+
+_T = t.TypeVar('_T')
+_TAnsibleSerializable = t.TypeVar('_TAnsibleSerializable', bound='AnsibleSerializable')
+_TAnsibleDatatagBase = t.TypeVar('_TAnsibleDatatagBase', bound='AnsibleDatatagBase')
+_TAnsibleTaggedObject = t.TypeVar('_TAnsibleTaggedObject', bound='AnsibleTaggedObject')
+
+_NO_INSTANCE_STORAGE = t.cast(t.Tuple[str], tuple())
+_ANSIBLE_TAGGED_OBJECT_SLOTS = tuple(('_ansible_tags_mapping',))
+
+# shared empty frozenset for default values
+_empty_frozenset: t.FrozenSet = frozenset()
+
+# Technical Notes
+#
+# Tagged values compare (and thus hash) the same as their base types, so a value that differs only by its tags will appear identical to non-tag-aware code.
+# This will affect storage and update of tagged values in dictionary keys, sets, etc. While tagged values can be used as keys in hashable collections,
+# updating a key usually requires removal and re-addition.
+
+
+class AnsibleTagHelper:
+ """Utility methods for working with Ansible data tags."""
+
+ @staticmethod
+ def untag(value: _T, *tag_types: t.Type[AnsibleDatatagBase]) -> _T:
+ """
+ If tags matching any of `tag_types` are present on `value`, return a copy with those tags removed.
+ If no `tag_types` are specified and the object has tags, return a copy with all tags removed.
+ Otherwise, the original `value` is returned.
+ """
+ tag_set = AnsibleTagHelper.tags(value)
+
+ if not tag_set:
+ return value
+
+ if tag_types:
+ tags_mapping = _AnsibleTagsMapping((type(tag), tag) for tag in tag_set if type(tag) not in tag_types) # pylint: disable=unidiomatic-typecheck
+
+ if len(tags_mapping) == len(tag_set):
+ return value # if no tags were removed, return the original instance
+ else:
+ tags_mapping = None
+
+ if not tags_mapping:
+ if t.cast(AnsibleTaggedObject, value)._empty_tags_as_native:
+ return t.cast(AnsibleTaggedObject, value)._native_copy()
+
+ tags_mapping = _EMPTY_INTERNAL_TAGS_MAPPING
+
+ tagged_type = AnsibleTaggedObject._get_tagged_type(type(value))
+
+ return t.cast(_T, tagged_type._instance_factory(value, tags_mapping))
+
+ @staticmethod
+ def tags(value: t.Any) -> t.FrozenSet[AnsibleDatatagBase]:
+ tags = _try_get_internal_tags_mapping(value)
+
+ if tags is _EMPTY_INTERNAL_TAGS_MAPPING:
+ return _empty_frozenset
+
+ return frozenset(tags.values())
+
+ @staticmethod
+ def tag_types(value: t.Any) -> t.FrozenSet[t.Type[AnsibleDatatagBase]]:
+ tags = _try_get_internal_tags_mapping(value)
+
+ if tags is _EMPTY_INTERNAL_TAGS_MAPPING:
+ return _empty_frozenset
+
+ return frozenset(tags)
+
+ @staticmethod
+ def base_type(type_or_value: t.Any, /) -> type:
+ """Return the friendly type of the given type or value. If the type is an AnsibleTaggedObject, the native type will be used."""
+ if isinstance(type_or_value, type):
+ the_type = type_or_value
+ else:
+ the_type = type(type_or_value)
+
+ if issubclass(the_type, AnsibleTaggedObject):
+ the_type = type_or_value._native_type
+
+ # DTFIX-FUTURE: provide a knob to optionally report the real type for debugging purposes
+ return the_type
+
+ @staticmethod
+ def as_native_type(value: _T) -> _T:
+ """
+ Returns an untagged native data type matching the input value, or the original input if the value was not a tagged type.
+ Containers are not recursively processed.
+ """
+ if isinstance(value, AnsibleTaggedObject):
+ value = value._native_copy()
+
+ return value
+
+ @staticmethod
+ @t.overload
+ def tag_copy(src: t.Any, value: _T) -> _T: ... # pragma: nocover
+
+ @staticmethod
+ @t.overload
+ def tag_copy(src: t.Any, value: t.Any, *, value_type: type[_T]) -> _T: ... # pragma: nocover
+
+ @staticmethod
+ @t.overload
+ def tag_copy(src: t.Any, value: _T, *, value_type: None = None) -> _T: ... # pragma: nocover
+
+ @staticmethod
+ def tag_copy(src: t.Any, value: _T, *, value_type: t.Optional[type] = None) -> _T:
+ """Return a copy of `value`, with tags copied from `src`, overwriting any existing tags of the same types."""
+ src_tags = AnsibleTagHelper.tags(src)
+ value_tags = [(tag, tag._get_tag_to_propagate(src, value, value_type=value_type)) for tag in src_tags]
+ tags = [tag[1] for tag in value_tags if tag[1] is not None]
+ tag_types_to_remove = [type(tag[0]) for tag in value_tags if tag[1] is None]
+
+ if tag_types_to_remove:
+ value = AnsibleTagHelper.untag(value, *tag_types_to_remove)
+
+ return AnsibleTagHelper.tag(value, tags, value_type=value_type)
+
+ @staticmethod
+ @t.overload
+ def tag(value: _T, tags: t.Union[AnsibleDatatagBase, t.Iterable[AnsibleDatatagBase]]) -> _T: ... # pragma: nocover
+
+ @staticmethod
+ @t.overload
+ def tag(value: t.Any, tags: t.Union[AnsibleDatatagBase, t.Iterable[AnsibleDatatagBase]], *, value_type: type[_T]) -> _T: ... # pragma: nocover
+
+ @staticmethod
+ @t.overload
+ def tag(value: _T, tags: t.Union[AnsibleDatatagBase, t.Iterable[AnsibleDatatagBase]], *, value_type: None = None) -> _T: ... # pragma: nocover
+
+ @staticmethod
+ def tag(value: _T, tags: t.Union[AnsibleDatatagBase, t.Iterable[AnsibleDatatagBase]], *, value_type: t.Optional[type] = None) -> _T:
+ """
+ Return a copy of `value`, with `tags` applied, overwriting any existing tags of the same types.
+ If `value` is an ignored type, or `tags` is empty, the original `value` will be returned.
+ If `value` is not taggable, a `NotTaggableError` exception will be raised.
+ If `value_type` was given, that type will be returned instead.
+ """
+ if value_type is None:
+ value_type_specified = False
+ value_type = type(value)
+ else:
+ value_type_specified = True
+
+ # if no tags to apply, just return what we got
+ # NB: this only works because the untaggable types are singletons (and thus direct type comparison works)
+ if not tags or value_type in _untaggable_types:
+ if value_type_specified:
+ return value_type(value)
+
+ return value
+
+ tag_list: list[AnsibleDatatagBase]
+
+ # noinspection PyProtectedMember
+ if type(tags) in _known_tag_types:
+ tag_list = [tags] # type: ignore[list-item]
+ else:
+ tag_list = list(tags) # type: ignore[arg-type]
+
+ for idx, tag in enumerate(tag_list):
+ # noinspection PyProtectedMember
+ if type(tag) not in _known_tag_types:
+ # noinspection PyProtectedMember
+ raise TypeError(f'tags[{idx}] of type {type(tag)} is not one of {_known_tag_types}')
+
+ existing_internal_tags_mapping = _try_get_internal_tags_mapping(value)
+
+ if existing_internal_tags_mapping is not _EMPTY_INTERNAL_TAGS_MAPPING:
+ # include the existing tags first so new tags of the same type will overwrite
+ tag_list = list(chain(existing_internal_tags_mapping.values(), tag_list))
+
+ tags_mapping = _AnsibleTagsMapping((type(tag), tag) for tag in tag_list)
+ tagged_type = AnsibleTaggedObject._get_tagged_type(value_type)
+
+ return t.cast(_T, tagged_type._instance_factory(value, tags_mapping))
+
+ @staticmethod
+ def try_tag(value: _T, tags: t.Union[AnsibleDatatagBase, t.Iterable[AnsibleDatatagBase]]) -> _T:
+ """
+ Return a copy of `value`, with `tags` applied, overwriting any existing tags of the same types.
+ If `value` is not taggable or `tags` is empty, the original `value` will be returned.
+ """
+ try:
+ return AnsibleTagHelper.tag(value, tags)
+ except NotTaggableError:
+ return value
+
+
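
Usage sketch (illustrative, not part of the diff; the `Deprecated` tag type is added later in this diff):

    from ansible.module_utils._internal._datatag import AnsibleTagHelper
    from ansible.module_utils._internal._datatag._tags import Deprecated

    tagged = AnsibleTagHelper.tag('value', Deprecated(msg='old'))  # returns an _AnsibleTaggedStr
    assert tagged == 'value'                                       # compares equal to the plain str
    assert Deprecated in AnsibleTagHelper.tag_types(tagged)

    plain = AnsibleTagHelper.untag(tagged)                         # no tags remain -> reverts to native str
    assert type(plain) is str
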
+class AnsibleSerializable:
+ __slots__ = _NO_INSTANCE_STORAGE
+
+ _known_type_map: t.ClassVar[t.Dict[str, t.Type['AnsibleSerializable']]] = {}
+ _TYPE_KEY: t.ClassVar[str] = '__ansible_type'
+
+ _type_key: t.ClassVar[str]
+
+ def __init_subclass__(cls, **kwargs) -> None:
+ # this is needed to call __init__subclass__ on mixins for derived types
+ super().__init_subclass__(**kwargs)
+
+ cls._type_key = cls.__name__
+
+ # DTFIX-FUTURE: is there a better way to exclude non-abstract types which are base classes?
+ if not inspect.isabstract(cls) and not cls.__name__.endswith('Base') and cls.__name__ != 'AnsibleTaggedObject':
+ AnsibleSerializable._known_type_map[cls._type_key] = cls
+
+ @classmethod
+ @abc.abstractmethod
+ def _from_dict(cls: t.Type[_TAnsibleSerializable], d: t.Dict[str, t.Any]) -> object:
+ """Return an instance of this type, created from the given dictionary."""
+
+ @abc.abstractmethod
+ def _as_dict(self) -> t.Dict[str, t.Any]:
+ """
+ Return a serialized version of this instance as a dictionary.
+ This operation is *NOT* recursive - the returned dictionary may still include custom types.
+ It is the responsibility of the caller to handle recursion of the returned dict.
+ """
+
+ def _serialize(self) -> t.Dict[str, t.Any]:
+ value = self._as_dict()
+ value.update({AnsibleSerializable._TYPE_KEY: self._type_key})
+
+ return value
+
+ @staticmethod
+ def _deserialize(data: t.Dict[str, t.Any]) -> object:
+ """Deserialize an object from the supplied data dict, which will be mutated if it contains a type key."""
+ type_name = data.pop(AnsibleSerializable._TYPE_KEY, ...) # common usage assumes `data` is an intermediate dict provided by a deserializer
+
+ if type_name is ...:
+ return None
+
+ type_value = AnsibleSerializable._known_type_map.get(type_name)
+
+ if not type_value:
+ raise ValueError(f'An unknown {AnsibleSerializable._TYPE_KEY!r} value {type_name!r} was encountered during deserialization.')
+
+ return type_value._from_dict(data)
+
+ def _repr(self, name: str) -> str:
+ args = self._as_dict()
+ arg_string = ', '.join((f'{k}={v!r}' for k, v in args.items()))
+ return f'{name}({arg_string})'
+
+
+class AnsibleSerializableEnum(AnsibleSerializable, enum.Enum):
+ """Base class for serializable enumerations."""
+
+ def _as_dict(self) -> t.Dict[str, t.Any]:
+ return dict(value=self.value)
+
+ @classmethod
+ def _from_dict(cls, d: t.Dict[str, t.Any]) -> t.Self:
+ return cls(d['value'].lower())
+
+ def __str__(self) -> str:
+ return self.value
+
+ def __repr__(self) -> str:
+ return f'<{self.__class__.__name__}.{self.name}>'
+
+ @staticmethod
+ def _generate_next_value_(name, start, count, last_values):
+ return name.lower()
+
+
+class AnsibleSerializableWrapper(AnsibleSerializable, t.Generic[_T], metaclass=abc.ABCMeta):
+ __slots__ = ('_value',)
+
+ _wrapped_types: t.ClassVar[dict[type, type[AnsibleSerializable]]] = {}
+ _wrapped_type: t.ClassVar[type] = type(None)
+
+ def __init__(self, value: _T) -> None:
+ self._value: _T = value
+
+ def __init_subclass__(cls, **kwargs):
+ super().__init_subclass__(**kwargs)
+
+ cls._wrapped_type = t.get_args(cls.__orig_bases__[0])[0]
+ cls._wrapped_types[cls._wrapped_type] = cls
+
+
+class AnsibleSerializableDate(AnsibleSerializableWrapper[datetime.date]):
+ __slots__ = _NO_INSTANCE_STORAGE
+
+ @classmethod
+ def _from_dict(cls: t.Type[_TAnsibleSerializable], d: t.Dict[str, t.Any]) -> datetime.date:
+ return datetime.date.fromisoformat(d['iso8601'])
+
+ def _as_dict(self) -> t.Dict[str, t.Any]:
+ return dict(
+ iso8601=self._value.isoformat(),
+ )
+
+
+class AnsibleSerializableTime(AnsibleSerializableWrapper[datetime.time]):
+ __slots__ = _NO_INSTANCE_STORAGE
+
+ @classmethod
+ def _from_dict(cls: t.Type[_TAnsibleSerializable], d: t.Dict[str, t.Any]) -> datetime.time:
+ value = datetime.time.fromisoformat(d['iso8601'])
+        value = value.replace(fold=d['fold'])
+
+ return value
+
+ def _as_dict(self) -> t.Dict[str, t.Any]:
+ return dict(
+ iso8601=self._value.isoformat(),
+ fold=self._value.fold,
+ )
+
+
+class AnsibleSerializableDateTime(AnsibleSerializableWrapper[datetime.datetime]):
+ __slots__ = _NO_INSTANCE_STORAGE
+
+ @classmethod
+ def _from_dict(cls: t.Type[_TAnsibleSerializable], d: t.Dict[str, t.Any]) -> datetime.datetime:
+ value = datetime.datetime.fromisoformat(d['iso8601'])
+        value = value.replace(fold=d['fold'])
+
+ return value
+
+ def _as_dict(self) -> t.Dict[str, t.Any]:
+ return dict(
+ iso8601=self._value.isoformat(),
+ fold=self._value.fold,
+ )
+
+
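
Round-trip sketch (illustrative, not part of the diff). The `fold` field is carried separately because ISO 8601 cannot represent it:

    import datetime

    dt = datetime.datetime(2024, 1, 1, 12, 30, fold=1)
    data = AnsibleSerializableDateTime(dt)._serialize()
    # data == {'iso8601': '2024-01-01T12:30:00', 'fold': 1, '__ansible_type': 'AnsibleSerializableDateTime'}
    assert AnsibleSerializable._deserialize(data) == dt
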
+@dataclasses.dataclass(**_tag_dataclass_kwargs)
+class AnsibleSerializableDataclass(AnsibleSerializable, metaclass=abc.ABCMeta):
+ _validation_allow_subclasses = True
+ _validation_auto_enabled = True
+
+ def _as_dict(self) -> t.Dict[str, t.Any]:
+ # omit None values when None is the field default
+ # DTFIX-FUTURE: this implementation means we can never change the default on fields which have None for their default
+ # other defaults can be changed -- but there's no way to override this behavior either way for other default types
+ # it's a trip hazard to have the default logic here, rather than per field (or not at all)
+ # consider either removing the filtering or requiring it to be explicitly set per field using dataclass metadata
+ fields = ((field, getattr(self, field.name)) for field in dataclasses.fields(self))
+ return {field.name: value for field, value in fields if value is not None or field.default is not None}
+
+ @classmethod
+ def _from_dict(cls, d: t.Dict[str, t.Any]) -> t.Self:
+ # DTFIX-FUTURE: optimize this to avoid the dataclasses fields metadata and get_origin stuff at runtime
+ type_hints = t.get_type_hints(cls)
+ mutated_dict: dict[str, t.Any] | None = None
+
+ for field in dataclasses.fields(cls):
+ if t.get_origin(type_hints[field.name]) is tuple: # NOTE: only supports bare tuples, not optional or inside a union
+ if type(field_value := d.get(field.name)) is list: # pylint: disable=unidiomatic-typecheck
+ if mutated_dict is None:
+ mutated_dict = d.copy()
+
+ mutated_dict[field.name] = tuple(field_value)
+
+ return cls(**(mutated_dict or d))
+
+ def __init_subclass__(cls, **kwargs) -> None:
+ super(AnsibleSerializableDataclass, cls).__init_subclass__(**kwargs) # cannot use super() without arguments when using slots
+
+ if cls._validation_auto_enabled:
+ try:
+ _dataclass_validation.inject_post_init_validation(cls, cls._validation_allow_subclasses) # code gen a real __post_init__ method
+ except Exception as ex:
+ raise Exception(f'Validation code generation failed on {cls}.') from ex
+
+
+class Tripwire:
+ """Marker mixin for types that should raise an error when encountered."""
+
+ __slots__ = _NO_INSTANCE_STORAGE
+
+ def trip(self) -> t.NoReturn:
+ """Derived types should implement a failure behavior."""
+ raise NotImplementedError()
+
+
+@dataclasses.dataclass(**_tag_dataclass_kwargs)
+class AnsibleDatatagBase(AnsibleSerializableDataclass, metaclass=abc.ABCMeta):
+ """
+ Base class for data tagging tag types.
+ New tag types need to be considered very carefully; e.g.: which serialization/runtime contexts they're allowed in, fallback behavior, propagation.
+ """
+
+ _validation_allow_subclasses = False
+
+ def __init_subclass__(cls, **kwargs) -> None:
+ # NOTE: This method is called twice when the datatag type is a dataclass.
+ super(AnsibleDatatagBase, cls).__init_subclass__(**kwargs) # cannot use super() without arguments when using slots
+
+ # DTFIX-FUTURE: "freeze" this after module init has completed to discourage custom external tag subclasses
+
+ # DTFIX-FUTURE: is there a better way to exclude non-abstract types which are base classes?
+ if not inspect.isabstract(cls) and not cls.__name__.endswith('Base'):
+ existing = _known_tag_type_map.get(cls.__name__)
+
+ if existing:
+ # When the datatag type is a dataclass, the first instance will be the non-dataclass type.
+ # It must be removed from the known tag types before adding the dataclass version.
+ _known_tag_types.remove(existing)
+
+ _known_tag_type_map[cls.__name__] = cls
+ _known_tag_types.add(cls)
+
+ @classmethod
+ def is_tagged_on(cls, value: t.Any) -> bool:
+ return cls in _try_get_internal_tags_mapping(value)
+
+ @classmethod
+ def first_tagged_on(cls, *values: t.Any) -> t.Any | None:
+ """Return the first value which is tagged with this type, or None if no match is found."""
+ for value in values:
+ if cls.is_tagged_on(value):
+ return value
+
+ return None
+
+ @classmethod
+ def get_tag(cls, value: t.Any) -> t.Optional[t.Self]:
+ return _try_get_internal_tags_mapping(value).get(cls)
+
+ @classmethod
+ def get_required_tag(cls, value: t.Any) -> t.Self:
+ if (tag := cls.get_tag(value)) is None:
+ # DTFIX-FUTURE: we really should have a way to use AnsibleError with obj in module_utils when it's controller-side
+ raise ValueError(f'The type {type(value).__name__!r} is not tagged with {cls.__name__!r}.')
+
+ return tag
+
+ @classmethod
+ def untag(cls, value: _T) -> _T:
+ """
+ If this tag type is present on `value`, return a copy with that tag removed.
+ Otherwise, the original `value` is returned.
+ """
+ return AnsibleTagHelper.untag(value, cls)
+
+ def tag(self, value: _T) -> _T:
+ """
+ Return a copy of `value` with this tag applied, overwriting any existing tag of the same type.
+ If `value` is an ignored type, the original `value` will be returned.
+ If `value` is not taggable, a `NotTaggableError` exception will be raised.
+ """
+ return AnsibleTagHelper.tag(value, self)
+
+ def try_tag(self, value: _T) -> _T:
+ """
+ Return a copy of `value` with this tag applied, overwriting any existing tag of the same type.
+ If `value` is not taggable, the original `value` will be returned.
+ """
+ return AnsibleTagHelper.try_tag(value, self)
+
+ def _get_tag_to_propagate(self, src: t.Any, value: object, *, value_type: t.Optional[type] = None) -> t.Self | None:
+ """
+ Called by `AnsibleTagHelper.tag_copy` during tag propagation.
+ Returns an instance of this tag appropriate for propagation to `value`, or `None` if the tag should not be propagated.
+ Derived implementations may consult the arguments relayed from `tag_copy` to determine if and how the tag should be propagated.
+ """
+ return self
+
+ def __repr__(self) -> str:
+ return AnsibleSerializable._repr(self, self.__class__.__name__)
+
+
+# used by the datatag Ansible/Jinja test plugin to find tags by name
+_known_tag_type_map: t.Dict[str, t.Type[AnsibleDatatagBase]] = {}
+_known_tag_types: t.Set[t.Type[AnsibleDatatagBase]] = set()
+
+if sys.version_info >= (3, 9):
+ # Include the key and value types in the type hints on Python 3.9 and later.
+ # Earlier versions do not support subscriptable dict.
+ # deprecated: description='always use subscriptable dict' python_version='3.8'
+ class _AnsibleTagsMapping(dict[type[_TAnsibleDatatagBase], _TAnsibleDatatagBase]):
+ __slots__ = _NO_INSTANCE_STORAGE
+
+else:
+
+ class _AnsibleTagsMapping(dict):
+ __slots__ = _NO_INSTANCE_STORAGE
+
+
+class _EmptyROInternalTagsMapping(dict):
+ """
+ Optimizes empty tag mapping by using a shared singleton read-only dict.
+ Since mappingproxy is not pickle-able and causes other problems, we had to roll our own.
+ """
+
+ def __new__(cls):
+ try:
+ # noinspection PyUnresolvedReferences
+ return cls._instance
+ except AttributeError:
+ cls._instance = dict.__new__(cls)
+
+ # noinspection PyUnresolvedReferences
+ return cls._instance
+
+ def __setitem__(self, key, value):
+ raise NotImplementedError()
+
+ def setdefault(self, __key, __default=None):
+ raise NotImplementedError()
+
+ def update(self, __m, **kwargs):
+ raise NotImplementedError()
+
+
+_EMPTY_INTERNAL_TAGS_MAPPING = t.cast(_AnsibleTagsMapping, _EmptyROInternalTagsMapping())
+"""
+An empty read-only mapping of tags.
+Also used as a sentinel to cheaply determine that a type is not tagged by using a reference equality check.
+"""
+
+
+class CollectionWithMro(c.Collection, t.Protocol):
+ """Used to represent a Collection with __mro__ in a TypeGuard for tools that don't include __mro__ in Collection."""
+
+ __mro__: tuple[type, ...]
+
+
+def is_non_scalar_collection_type(value: type) -> t.TypeGuard[type[CollectionWithMro]]:
+ """Returns True if the value is a non-scalar collection type, otherwise returns False."""
+ return issubclass(value, c.Collection) and not issubclass(value, str) and not issubclass(value, bytes)
+
+
+def _try_get_internal_tags_mapping(value: t.Any) -> _AnsibleTagsMapping:
+ """Return the internal tag mapping of the given value, or a sentinel value if it is not tagged."""
+ # noinspection PyBroadException
+ try:
+ # noinspection PyProtectedMember
+ tags = value._ansible_tags_mapping
+ except Exception:
+ # try/except is a cheap way to determine if this is a tagged object without using isinstance
+ # handling Exception accounts for types that may raise something other than AttributeError
+ return _EMPTY_INTERNAL_TAGS_MAPPING
+
+ # handle cases where the instance always returns something, such as Marker or MagicMock
+ if type(tags) is not _AnsibleTagsMapping: # pylint: disable=unidiomatic-typecheck
+ return _EMPTY_INTERNAL_TAGS_MAPPING
+
+ return tags
+
+
+class NotTaggableError(TypeError):
+ def __init__(self, value):
+        super().__init__(f'{value} is not taggable')
+
+
+@dataclasses.dataclass(**_tag_dataclass_kwargs)
+class AnsibleSingletonTagBase(AnsibleDatatagBase):
+ def __new__(cls):
+ try:
+ # noinspection PyUnresolvedReferences
+ return cls._instance
+ except AttributeError:
+ cls._instance = AnsibleDatatagBase.__new__(cls)
+
+ # noinspection PyUnresolvedReferences
+ return cls._instance
+
+ def _as_dict(self) -> t.Dict[str, t.Any]:
+ return {}
+
+
+class AnsibleTaggedObject(AnsibleSerializable):
+ __slots__ = _NO_INSTANCE_STORAGE
+
+ _native_type: t.ClassVar[type]
+ _item_source: t.ClassVar[t.Optional[t.Callable]] = None
+
+ _tagged_type_map: t.ClassVar[t.Dict[type, t.Type['AnsibleTaggedObject']]] = {}
+ _tagged_collection_types: t.ClassVar[t.Set[t.Type[c.Collection]]] = set()
+ _collection_types: t.ClassVar[t.Set[t.Type[c.Collection]]] = set()
+
+ _empty_tags_as_native: t.ClassVar[bool] = True # by default, untag will revert to the native type when no tags remain
+ _subclasses_native_type: t.ClassVar[bool] = True # by default, tagged types are assumed to subclass the type they augment
+
+ _ansible_tags_mapping: _AnsibleTagsMapping | _EmptyROInternalTagsMapping = _EMPTY_INTERNAL_TAGS_MAPPING
+ """
+ Efficient internal storage of tags, indexed by tag type.
+ Contains no more than one instance of each tag type.
+ This is defined as a class attribute to support type hinting and documentation.
+ It is overwritten with an instance attribute during instance creation.
+ The instance attribute slot is provided by the derived type.
+ """
+
+ def __init_subclass__(cls, **kwargs) -> None:
+ super().__init_subclass__(**kwargs)
+
+ try:
+ init_class = cls._init_class # type: ignore[attr-defined]
+ except AttributeError:
+ pass
+ else:
+ init_class()
+
+ if not cls._subclasses_native_type:
+ return # NOTE: When not subclassing a native type, the derived type must set cls._native_type itself and cls._empty_tags_as_native to False.
+
+ try:
+ # Subclasses of tagged types will already have a native type set and won't need to detect it.
+ # Special types which do not subclass a native type can also have their native type already set.
+ # Automatic item source selection is only implemented for types that don't set _native_type.
+ cls._native_type
+ except AttributeError:
+ # Direct subclasses of native types won't have cls._native_type set, so detect the native type.
+ cls._native_type = cls.__bases__[0]
+
+ # Detect the item source if not already set.
+ if cls._item_source is None and is_non_scalar_collection_type(cls._native_type):
+ cls._item_source = cls._native_type.__iter__ # type: ignore[attr-defined]
+
+ # Use a collection specific factory for types with item sources.
+ if cls._item_source:
+ cls._instance_factory = cls._instance_factory_collection # type: ignore[method-assign]
+
+ new_type_direct_subclass = cls.__mro__[1]
+
+ conflicting_impl = AnsibleTaggedObject._tagged_type_map.get(new_type_direct_subclass)
+
+ if conflicting_impl:
+ raise TypeError(f'Cannot define type {cls.__name__!r} since {conflicting_impl.__name__!r} already extends {new_type_direct_subclass.__name__!r}.')
+
+ AnsibleTaggedObject._tagged_type_map[new_type_direct_subclass] = cls
+
+ if is_non_scalar_collection_type(cls):
+ AnsibleTaggedObject._tagged_collection_types.add(cls)
+ AnsibleTaggedObject._collection_types.update({cls, new_type_direct_subclass})
+
+ def _native_copy(self) -> t.Any:
+ """
+ Returns a copy of the current instance as its native Python type.
+ Any dynamic access behaviors that apply to this instance will be used during creation of the copy.
+ In the case of a container type, this is a shallow copy.
+ Recursive calls to native_copy are the responsibility of the caller.
+ """
+ return self._native_type(self) # pylint: disable=abstract-class-instantiated
+
+ @classmethod
+ def _instance_factory(cls, value: t.Any, tags_mapping: _AnsibleTagsMapping) -> t.Self:
+ # There's no way to indicate cls is callable with a single arg without defining a useless __init__.
+ instance = cls(value) # type: ignore[call-arg]
+ instance._ansible_tags_mapping = tags_mapping
+
+ return instance
+
+ @staticmethod
+ def _get_tagged_type(value_type: type) -> type[AnsibleTaggedObject]:
+ tagged_type: t.Optional[type[AnsibleTaggedObject]]
+
+ if issubclass(value_type, AnsibleTaggedObject):
+ tagged_type = value_type
+ else:
+ tagged_type = AnsibleTaggedObject._tagged_type_map.get(value_type)
+
+ if not tagged_type:
+ raise NotTaggableError(value_type)
+
+ return tagged_type
+
+ def _as_dict(self) -> t.Dict[str, t.Any]:
+ return dict(
+ value=self._native_copy(),
+ tags=list(self._ansible_tags_mapping.values()),
+ )
+
+ @classmethod
+ def _from_dict(cls: t.Type[_TAnsibleTaggedObject], d: t.Dict[str, t.Any]) -> _TAnsibleTaggedObject:
+ return AnsibleTagHelper.tag(**d)
+
+ @classmethod
+ def _instance_factory_collection(
+ cls,
+ value: t.Any,
+ tags_mapping: _AnsibleTagsMapping,
+ ) -> t.Self:
+ if type(value) in AnsibleTaggedObject._collection_types:
+ # use the underlying iterator to avoid access/iteration side effects (e.g. templating/wrapping on Lazy subclasses)
+ instance = cls(cls._item_source(value)) # type: ignore[call-arg,misc]
+ else:
+ # this is used when the value is a generator
+ instance = cls(value) # type: ignore[call-arg]
+
+ instance._ansible_tags_mapping = tags_mapping
+
+ return instance
+
+ def _copy_collection(self) -> AnsibleTaggedObject:
+ """
+ Return a shallow copy of this instance, which must be a collection.
+ This uses the underlying iterator to avoid access/iteration side effects (e.g. templating/wrapping on Lazy subclasses).
+ """
+ return AnsibleTagHelper.tag_copy(self, type(self)._item_source(self), value_type=type(self)) # type: ignore[misc]
+
+ @classmethod
+ def _new(cls, value: t.Any, *args, **kwargs) -> t.Self:
+ if type(value) is _AnsibleTagsMapping: # pylint: disable=unidiomatic-typecheck
+ self = cls._native_type.__new__(cls, *args, **kwargs)
+ self._ansible_tags_mapping = value
+ return self
+
+ return cls._native_type.__new__(cls, value, *args, **kwargs)
+
+ def _reduce(self, reduced: t.Union[str, tuple[t.Any, ...]]) -> tuple:
+ if type(reduced) is not tuple: # pylint: disable=unidiomatic-typecheck
+ raise TypeError()
+
+ updated: list[t.Any] = list(reduced)
+ updated[1] = (self._ansible_tags_mapping,) + updated[1]
+
+ return tuple(updated)
+
+
+class _AnsibleTaggedStr(str, AnsibleTaggedObject):
+ __slots__ = _ANSIBLE_TAGGED_OBJECT_SLOTS
+
+
+class _AnsibleTaggedBytes(bytes, AnsibleTaggedObject):
+ # nonempty __slots__ not supported for subtype of 'bytes'
+ pass
+
+
+class _AnsibleTaggedInt(int, AnsibleTaggedObject):
+ # nonempty __slots__ not supported for subtype of 'int'
+ pass
+
+
+class _AnsibleTaggedFloat(float, AnsibleTaggedObject):
+ __slots__ = _ANSIBLE_TAGGED_OBJECT_SLOTS
+
+
+class _AnsibleTaggedDateTime(datetime.datetime, AnsibleTaggedObject):
+ __slots__ = _ANSIBLE_TAGGED_OBJECT_SLOTS
+
+ @classmethod
+ def _instance_factory(cls, value: datetime.datetime, tags_mapping: _AnsibleTagsMapping) -> _AnsibleTaggedDateTime:
+ instance = cls(
+ year=value.year,
+ month=value.month,
+ day=value.day,
+ hour=value.hour,
+ minute=value.minute,
+ second=value.second,
+ microsecond=value.microsecond,
+ tzinfo=value.tzinfo,
+ fold=value.fold,
+ )
+
+ instance._ansible_tags_mapping = tags_mapping
+
+ return instance
+
+ def _native_copy(self) -> datetime.datetime:
+ return datetime.datetime(
+ year=self.year,
+ month=self.month,
+ day=self.day,
+ hour=self.hour,
+ minute=self.minute,
+ second=self.second,
+ microsecond=self.microsecond,
+ tzinfo=self.tzinfo,
+ fold=self.fold,
+ )
+
+ def __new__(cls, year, *args, **kwargs):
+ return super()._new(year, *args, **kwargs)
+
+ def __reduce_ex__(self, protocol: t.SupportsIndex) -> tuple:
+ return super()._reduce(super().__reduce_ex__(protocol))
+
+ def __repr__(self) -> str:
+ return self._native_copy().__repr__()
+
+
+class _AnsibleTaggedDate(datetime.date, AnsibleTaggedObject):
+ __slots__ = _ANSIBLE_TAGGED_OBJECT_SLOTS
+
+ @classmethod
+ def _instance_factory(cls, value: datetime.date, tags_mapping: _AnsibleTagsMapping) -> _AnsibleTaggedDate:
+ instance = cls(
+ year=value.year,
+ month=value.month,
+ day=value.day,
+ )
+
+ instance._ansible_tags_mapping = tags_mapping
+
+ return instance
+
+ def _native_copy(self) -> datetime.date:
+ return datetime.date(
+ year=self.year,
+ month=self.month,
+ day=self.day,
+ )
+
+ def __new__(cls, year, *args, **kwargs):
+ return super()._new(year, *args, **kwargs)
+
+ def __reduce__(self) -> tuple:
+ return super()._reduce(super().__reduce__())
+
+ def __repr__(self) -> str:
+ return self._native_copy().__repr__()
+
+
+class _AnsibleTaggedTime(datetime.time, AnsibleTaggedObject):
+ __slots__ = _ANSIBLE_TAGGED_OBJECT_SLOTS
+
+ @classmethod
+ def _instance_factory(cls, value: datetime.time, tags_mapping: _AnsibleTagsMapping) -> _AnsibleTaggedTime:
+ instance = cls(
+ hour=value.hour,
+ minute=value.minute,
+ second=value.second,
+ microsecond=value.microsecond,
+ tzinfo=value.tzinfo,
+ fold=value.fold,
+ )
+
+ instance._ansible_tags_mapping = tags_mapping
+
+ return instance
+
+ def _native_copy(self) -> datetime.time:
+ return datetime.time(
+ hour=self.hour,
+ minute=self.minute,
+ second=self.second,
+ microsecond=self.microsecond,
+ tzinfo=self.tzinfo,
+ fold=self.fold,
+ )
+
+ def __new__(cls, hour, *args, **kwargs):
+ return super()._new(hour, *args, **kwargs)
+
+ def __reduce_ex__(self, protocol: t.SupportsIndex) -> tuple:
+ return super()._reduce(super().__reduce_ex__(protocol))
+
+ def __repr__(self) -> str:
+ return self._native_copy().__repr__()
+
+
+class _AnsibleTaggedDict(dict, AnsibleTaggedObject):
+ __slots__ = _ANSIBLE_TAGGED_OBJECT_SLOTS
+
+ _item_source: t.ClassVar[t.Optional[t.Callable]] = dict.items
+
+ def __copy__(self):
+ return super()._copy_collection()
+
+ def copy(self) -> _AnsibleTaggedDict:
+ return copy.copy(self)
+
+ # NB: Tags are intentionally not preserved for operator methods that return a new instance. In-place operators ignore tags from the `other` instance.
+ # Propagation of tags in these cases is left to the caller, based on needs specific to their use case.
+
+
+class _AnsibleTaggedList(list, AnsibleTaggedObject):
+ __slots__ = _ANSIBLE_TAGGED_OBJECT_SLOTS
+
+ def __copy__(self):
+ return super()._copy_collection()
+
+ def copy(self) -> _AnsibleTaggedList:
+ return copy.copy(self)
+
+ # NB: Tags are intentionally not preserved for operator methods that return a new instance. In-place operators ignore tags from the `other` instance.
+ # Propagation of tags in these cases is left to the caller, based on needs specific to their use case.
+
+
+class _AnsibleTaggedSet(set, AnsibleTaggedObject):
+ __slots__ = _ANSIBLE_TAGGED_OBJECT_SLOTS
+
+ def __copy__(self):
+ return super()._copy_collection()
+
+ def copy(self):
+ return copy.copy(self)
+
+ def __init__(self, value=None, *args, **kwargs):
+ if type(value) is _AnsibleTagsMapping: # pylint: disable=unidiomatic-typecheck
+ super().__init__(*args, **kwargs)
+ else:
+ super().__init__(value, *args, **kwargs)
+
+ def __new__(cls, value=None, *args, **kwargs):
+ return super()._new(value, *args, **kwargs)
+
+ def __reduce_ex__(self, protocol: t.SupportsIndex) -> tuple:
+ return super()._reduce(super().__reduce_ex__(protocol))
+
+ def __str__(self) -> str:
+ return self._native_copy().__str__()
+
+ def __repr__(self) -> str:
+ return self._native_copy().__repr__()
+
+
+class _AnsibleTaggedTuple(tuple, AnsibleTaggedObject):
+ # nonempty __slots__ not supported for subtype of 'tuple'
+
+ def __copy__(self):
+ return super()._copy_collection()
+
+
+_untaggable_types = {type(None), bool}
+"""
+Attempts to apply tags to values of these types will be silently ignored.
+While we could proxy or subclass builtin singletons, they're idiomatically compared with "is" reference equality, which we can't customize.
+This set gets augmented with additional types when some controller-only types are imported.
+"""
+
+# noinspection PyProtectedMember
+_ANSIBLE_ALLOWED_VAR_TYPES = frozenset({type(None), bool}) | set(AnsibleTaggedObject._tagged_type_map) | set(AnsibleTaggedObject._tagged_type_map.values())
+"""These are the exact types supported by Ansible's variable storage."""
+
+_ANSIBLE_ALLOWED_NON_SCALAR_COLLECTION_VAR_TYPES = frozenset(item for item in _ANSIBLE_ALLOWED_VAR_TYPES if is_non_scalar_collection_type(item))
+"""These are the exact non-scalar collection types supported by Ansible's variable storage."""
+
+_ANSIBLE_ALLOWED_MAPPING_VAR_TYPES = frozenset(item for item in _ANSIBLE_ALLOWED_VAR_TYPES if issubclass(item, c.Mapping))
+"""These are the exact mapping types supported by Ansible's variable storage."""
+
+_ANSIBLE_ALLOWED_SCALAR_VAR_TYPES = _ANSIBLE_ALLOWED_VAR_TYPES - _ANSIBLE_ALLOWED_NON_SCALAR_COLLECTION_VAR_TYPES
+"""These are the exact scalar types supported by Ansible's variable storage."""
diff --git a/lib/ansible/module_utils/_internal/_datatag/_tags.py b/lib/ansible/module_utils/_internal/_datatag/_tags.py
new file mode 100644
index 00000000000..011aeed46c5
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_datatag/_tags.py
@@ -0,0 +1,16 @@
+from __future__ import annotations
+
+import dataclasses
+import typing as t
+
+from ansible.module_utils._internal import _datatag, _messages
+
+
+@dataclasses.dataclass(**_datatag._tag_dataclass_kwargs)
+class Deprecated(_datatag.AnsibleDatatagBase):
+ msg: str
+ help_text: t.Optional[str] = None
+ date: t.Optional[str] = None
+ version: t.Optional[str] = None
+ deprecator: t.Optional[_messages.PluginInfo] = None
+ formatted_traceback: t.Optional[str] = None
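
Usage sketch (illustrative, not part of the diff; the message and version are hypothetical):

    value = Deprecated(msg='use `new_option` instead', version='2.23').tag('legacy value')
    tag = Deprecated.get_required_tag(value)
    assert tag.version == '2.23'
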
diff --git a/lib/ansible/module_utils/_internal/_debugging.py b/lib/ansible/module_utils/_internal/_debugging.py
new file mode 100644
index 00000000000..6fb390ccd62
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_debugging.py
@@ -0,0 +1,31 @@
+from __future__ import annotations
+
+import argparse
+import pathlib
+import sys
+
+
+def load_params() -> tuple[bytes, str]:
+ """Load module arguments and profile when debugging an Ansible module."""
+ parser = argparse.ArgumentParser(description="Directly invoke an Ansible module for debugging.")
+ parser.add_argument('args', nargs='?', help='module args JSON (file path or inline string)')
+ parser.add_argument('--profile', default='legacy', help='profile for JSON decoding/encoding of args/response')
+
+ parsed_args = parser.parse_args()
+
+ args: str | None = parsed_args.args
+ profile: str = parsed_args.profile
+
+ if args:
+ if (args_path := pathlib.Path(args)).is_file():
+ buffer = args_path.read_bytes()
+ else:
+ buffer = args.encode(errors='surrogateescape')
+ else:
+ if sys.stdin.isatty():
+ sys.stderr.write('Waiting for Ansible module JSON on STDIN...\n')
+ sys.stderr.flush()
+
+ buffer = sys.stdin.buffer.read()
+
+ return buffer, profile
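
Usage sketch (illustrative, not part of the diff) of debugging a module directly; args may be passed inline, as a file path, or on stdin:

    import sys

    sys.argv = ['ping.py', '{"ANSIBLE_MODULE_ARGS": {}}', '--profile', 'legacy']
    buffer, profile = load_params()
    assert profile == 'legacy' and buffer.startswith(b'{')
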
diff --git a/lib/ansible/module_utils/_internal/_deprecator.py b/lib/ansible/module_utils/_internal/_deprecator.py
new file mode 100644
index 00000000000..69788bd26c4
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_deprecator.py
@@ -0,0 +1,157 @@
+from __future__ import annotations
+
+import re
+import pathlib
+import sys
+import typing as t
+
+from ansible.module_utils._internal import _stack, _messages, _validation, _plugin_info
+
+
+def deprecator_from_collection_name(collection_name: str | None) -> _messages.PluginInfo | None:
+ """Returns an instance with the special `collection` type to refer to a non-plugin or ambiguous caller within a collection."""
+ # CAUTION: This function is exposed in public API as ansible.module_utils.datatag.deprecator_from_collection_name.
+
+ if not collection_name:
+ return None
+
+ _validation.validate_collection_name(collection_name)
+
+ return _messages.PluginInfo(
+ resolved_name=collection_name,
+ type=None,
+ )
+
+
+def get_best_deprecator(*, deprecator: _messages.PluginInfo | None = None, collection_name: str | None = None) -> _messages.PluginInfo:
+ """Return the best-available `PluginInfo` for the caller of this method."""
+ _skip_stackwalk = True
+
+ if deprecator and collection_name:
+ raise ValueError('Specify only one of `deprecator` or `collection_name`.')
+
+ return deprecator or deprecator_from_collection_name(collection_name) or get_caller_plugin_info() or INDETERMINATE_DEPRECATOR
+
+
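
Precedence sketch (illustrative, not part of the diff): an explicit `deprecator` wins, then `collection_name`, then the stack-derived caller, then the indeterminate fallback:

    info = get_best_deprecator(collection_name='ns.coll')
    assert info.resolved_name == 'ns.coll' and info.type is None

    get_best_deprecator(deprecator=info, collection_name='ns.coll')  # raises ValueError: only one may be given
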
+def get_caller_plugin_info() -> _messages.PluginInfo | None:
+ """Try to get `PluginInfo` for the caller of this method, ignoring marked infrastructure stack frames."""
+ _skip_stackwalk = True
+
+ if frame_info := _stack.caller_frame():
+ return _path_as_plugininfo(frame_info.filename)
+
+ return None # pragma: nocover
+
+
+def _path_as_plugininfo(path: str) -> _messages.PluginInfo | None:
+ """Return a `PluginInfo` instance if the provided `path` refers to a plugin."""
+ return _path_as_core_plugininfo(path) or _path_as_collection_plugininfo(path)
+
+
+def _path_as_core_plugininfo(path: str) -> _messages.PluginInfo | None:
+ """Return a `PluginInfo` instance if the provided `path` refers to a core plugin."""
+ try:
+ relpath = str(pathlib.Path(path).relative_to(_ANSIBLE_MODULE_BASE_PATH))
+ except ValueError:
+ return None # not ansible-core
+
+ namespace = 'ansible.builtin'
+
+    if match := re.match(r'plugins/(?P<plugin_type>\w+)/(?P<plugin_name>\w+)', relpath):
+ plugin_name = match.group("plugin_name")
+ plugin_type = _plugin_info.normalize_plugin_type(match.group("plugin_type"))
+
+ if plugin_type not in _DEPRECATOR_PLUGIN_TYPES:
+ # The plugin type isn't a known deprecator type, so we have to assume the caller is intermediate code.
+ # We have no way of knowing if the intermediate code is deprecating its own feature, or acting on behalf of another plugin.
+ # Callers in this case need to identify the deprecating plugin name, otherwise only ansible-core will be reported.
+            # Reporting ansible-core is never wrong; it may just be missing an additional detail (the plugin name) in the "on behalf of" case.
+ return ANSIBLE_CORE_DEPRECATOR
+
+ if plugin_name == '__init__':
+ # The plugin type is known, but the caller isn't a specific plugin -- instead, it's core plugin infrastructure (the base class).
+ return _messages.PluginInfo(resolved_name=namespace, type=plugin_type)
+    elif match := re.match(r'modules/(?P<module_name>\w+)', relpath):
+ # AnsiballZ Python package for core modules
+ plugin_name = match.group("module_name")
+ plugin_type = _messages.PluginType.MODULE
+    elif match := re.match(r'legacy/(?P<module_name>\w+)', relpath):
+ # AnsiballZ Python package for non-core library/role modules
+ namespace = 'ansible.legacy'
+
+ plugin_name = match.group("module_name")
+ plugin_type = _messages.PluginType.MODULE
+ else:
+ return ANSIBLE_CORE_DEPRECATOR # non-plugin core path, safe to use ansible-core for the same reason as the non-deprecator plugin type case above
+
+ name = f'{namespace}.{plugin_name}'
+
+ return _messages.PluginInfo(resolved_name=name, type=plugin_type)
+
+
+def _path_as_collection_plugininfo(path: str) -> _messages.PluginInfo | None:
+ """Return a `PluginInfo` instance if the provided `path` refers to a collection plugin."""
+    if not (match := re.search(r'/ansible_collections/(?P<ns>\w+)/(?P<coll>\w+)/plugins/(?P<plugin_type>\w+)/(?P<plugin_name>\w+)', path)):
+ return None
+
+ plugin_type = _plugin_info.normalize_plugin_type(match.group('plugin_type'))
+
+ if plugin_type in _AMBIGUOUS_DEPRECATOR_PLUGIN_TYPES:
+ # We're able to detect the namespace, collection and plugin type -- but we have no way to identify the plugin name currently.
+ # To keep things simple we'll fall back to just identifying the namespace and collection.
+ # In the future we could improve the detection and/or make it easier for a caller to identify the plugin name.
+ return deprecator_from_collection_name('.'.join((match.group('ns'), match.group('coll'))))
+
+ if plugin_type not in _DEPRECATOR_PLUGIN_TYPES:
+ # The plugin type isn't a known deprecator type, so we have to assume the caller is intermediate code.
+ # We have no way of knowing if the intermediate code is deprecating its own feature, or acting on behalf of another plugin.
+ # Callers in this case need to identify the deprecator to avoid ambiguity, since it could be the same collection or another collection.
+ return INDETERMINATE_DEPRECATOR
+
+ name = '.'.join((match.group('ns'), match.group('coll'), match.group('plugin_name')))
+
+ # DTFIX-FUTURE: deprecations from __init__ will be incorrectly attributed to a plugin of that name
+
+ return _messages.PluginInfo(resolved_name=name, type=plugin_type)
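+
+# Illustrative path-to-PluginInfo mappings (hypothetical paths):
+#
+#   .../ansible/plugins/action/copy.py                     -> PluginInfo('ansible.builtin.copy', ACTION)
+#   .../ansible/plugins/filter/core.py                     -> ANSIBLE_CORE_DEPRECATOR (filter basenames are ambiguous)
+#   .../ansible_collections/ns/coll/plugins/lookup/foo.py  -> PluginInfo('ns.coll.foo', LOOKUP)
+#   .../ansible_collections/ns/coll/plugins/filter/bar.py  -> PluginInfo('ns.coll', None) (plugin name unknown)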
+
+
+_ANSIBLE_MODULE_BASE_PATH: t.Final = pathlib.Path(sys.modules['ansible'].__file__).parent
+"""Runtime-detected base path of the `ansible` Python package to distinguish between Ansible-owned and external code."""
+
+ANSIBLE_CORE_DEPRECATOR: t.Final = deprecator_from_collection_name('ansible.builtin')
+"""Singleton `PluginInfo` instance for ansible-core callers where the plugin can/should not be identified in messages."""
+
+INDETERMINATE_DEPRECATOR: t.Final = _messages.PluginInfo(resolved_name=None, type=None)
+"""Singleton `PluginInfo` instance for indeterminate deprecator."""
+
+_DEPRECATOR_PLUGIN_TYPES: t.Final = frozenset(
+ {
+ _messages.PluginType.ACTION,
+ _messages.PluginType.BECOME,
+ _messages.PluginType.CACHE,
+ _messages.PluginType.CALLBACK,
+ _messages.PluginType.CLICONF,
+ _messages.PluginType.CONNECTION,
+ # DOC_FRAGMENTS - no code execution
+ # FILTER - basename inadequate to identify plugin
+ _messages.PluginType.HTTPAPI,
+ _messages.PluginType.INVENTORY,
+ _messages.PluginType.LOOKUP,
+ _messages.PluginType.MODULE, # only for collections
+ _messages.PluginType.NETCONF,
+ _messages.PluginType.SHELL,
+ _messages.PluginType.STRATEGY,
+ _messages.PluginType.TERMINAL,
+ # TEST - basename inadequate to identify plugin
+ _messages.PluginType.VARS,
+ }
+)
+"""Plugin types which are valid for identifying a deprecator for deprecation purposes."""
+
+_AMBIGUOUS_DEPRECATOR_PLUGIN_TYPES: t.Final = frozenset(
+ {
+ _messages.PluginType.FILTER,
+ _messages.PluginType.TEST,
+ }
+)
+"""Plugin types for which basename cannot be used to identify the plugin name."""
diff --git a/lib/ansible/module_utils/_internal/_errors.py b/lib/ansible/module_utils/_internal/_errors.py
new file mode 100644
index 00000000000..48a5dcadde1
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_errors.py
@@ -0,0 +1,101 @@
+# Copyright (c) 2024 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+"""Internal error handling logic for targets. Not for use on the controller."""
+
+from __future__ import annotations as _annotations
+
+import traceback as _sys_traceback
+import typing as _t
+
+from . import _messages
+
+MSG_REASON_DIRECT_CAUSE: _t.Final[str] = '<<< caused by >>>'
+MSG_REASON_HANDLING_CAUSE: _t.Final[str] = '<<< while handling >>>'
+
+TRACEBACK_REASON_EXCEPTION_DIRECT_WARNING: _t.Final[str] = 'The above exception was the direct cause of the following warning:'
+
+
+class EventFactory:
+ """Factory for creating `Event` instances from `BaseException` instances on targets."""
+
+ _MAX_DEPTH = 10
+ """Maximum exception chain depth. Exceptions beyond this depth will be omitted."""
+
+ @classmethod
+ def from_exception(cls, exception: BaseException, include_traceback: bool) -> _messages.Event:
+ return cls(include_traceback)._convert_exception(exception)
+
+ def __init__(self, include_traceback: bool) -> None:
+ self._include_traceback = include_traceback
+ self._depth = 0
+
+ def _convert_exception(self, exception: BaseException) -> _messages.Event:
+ if self._depth > self._MAX_DEPTH:
+ return _messages.Event(
+ msg="Maximum depth exceeded, omitting further events.",
+ )
+
+ self._depth += 1
+
+ try:
+ return _messages.Event(
+ msg=self._get_msg(exception),
+ formatted_traceback=self._get_formatted_traceback(exception),
+ formatted_source_context=self._get_formatted_source_context(exception),
+ help_text=self._get_help_text(exception),
+ chain=self._get_chain(exception),
+ events=self._get_events(exception),
+ )
+ finally:
+ self._depth -= 1
+
+ def _get_msg(self, exception: BaseException) -> str | None:
+ return str(exception).strip()
+
+ def _get_formatted_traceback(self, exception: BaseException) -> str | None:
+ if self._include_traceback:
+ return ''.join(_sys_traceback.format_exception(type(exception), exception, exception.__traceback__, chain=False))
+
+ return None
+
+ def _get_formatted_source_context(self, exception: BaseException) -> str | None:
+ return None
+
+ def _get_help_text(self, exception: BaseException) -> str | None:
+ return None
+
+ def _get_chain(self, exception: BaseException) -> _messages.EventChain | None:
+ if cause := self._get_cause(exception):
+ return _messages.EventChain(
+ msg_reason=MSG_REASON_DIRECT_CAUSE,
+ traceback_reason='The above exception was the direct cause of the following exception:',
+ event=self._convert_exception(cause),
+ follow=self._follow_cause(exception),
+ )
+
+ if context := self._get_context(exception):
+ return _messages.EventChain(
+ msg_reason=MSG_REASON_HANDLING_CAUSE,
+ traceback_reason='During handling of the above exception, another exception occurred:',
+ event=self._convert_exception(context),
+ follow=False,
+ )
+
+ return None
+
+ def _follow_cause(self, exception: BaseException) -> bool:
+ return True
+
+ def _get_cause(self, exception: BaseException) -> BaseException | None:
+ return exception.__cause__
+
+ def _get_context(self, exception: BaseException) -> BaseException | None:
+ if exception.__suppress_context__:
+ return None
+
+ return exception.__context__
+
+ def _get_events(self, exception: BaseException) -> tuple[_messages.Event, ...] | None:
+ # deprecated: description='move BaseExceptionGroup support here from ControllerEventFactory' python_version='3.10'
+ return None
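+
+# Illustrative use with a chained exception:
+#
+#   try:
+#       try:
+#           raise ValueError('bad value')
+#       except ValueError as ex:
+#           raise RuntimeError('task failed') from ex
+#   except RuntimeError as ex:
+#       event = EventFactory.from_exception(ex, include_traceback=False)
+#
+#   assert event.msg == 'task failed'
+#   assert event.chain.msg_reason == MSG_REASON_DIRECT_CAUSE
+#   assert event.chain.event.msg == 'bad value'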
diff --git a/lib/ansible/module_utils/_internal/_event_utils.py b/lib/ansible/module_utils/_internal/_event_utils.py
new file mode 100644
index 00000000000..fba88691872
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_event_utils.py
@@ -0,0 +1,61 @@
+from __future__ import annotations as _annotations
+
+import typing as _t
+
+from ansible.module_utils._internal import _text_utils, _messages
+
+
+def deduplicate_message_parts(message_parts: list[str]) -> str:
+ """Format the given list of messages into a brief message, while deduplicating repeated elements."""
+ message_parts = list(reversed(message_parts))
+
+ message = message_parts.pop(0)
+
+ for message_part in message_parts:
+ # avoid duplicate messages where the cause was already concatenated to the exception message
+ if message_part.endswith(message):
+ message = message_part
+ else:
+ message = _text_utils.concat_message(message_part, message)
+
+ return message
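+
+# Illustrative behavior (parts ordered outermost first):
+#
+#   deduplicate_message_parts(['task failed', 'disk full'])
+#   # -> 'task failed: disk full'
+#   deduplicate_message_parts(['task failed: disk full', 'disk full'])
+#   # -> 'task failed: disk full' (duplicated cause collapsed)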
+
+
+def format_event_brief_message(event: _messages.Event) -> str:
+ """
+ Format an event into a brief message.
+ Help text, contextual information and sub-events will be omitted.
+ """
+ message_parts: list[str] = []
+
+ while True:
+ message_parts.append(event.msg)
+
+ if not event.chain or not event.chain.follow:
+ break
+
+ event = event.chain.event
+
+ return deduplicate_message_parts(message_parts)
+
+
+def deprecation_as_dict(deprecation: _messages.DeprecationSummary) -> _t.Dict[str, _t.Any]:
+ """Returns a dictionary representation of the deprecation object in the format exposed to playbooks."""
+ from ansible.module_utils._internal._deprecator import INDETERMINATE_DEPRECATOR # circular import from messages
+
+ if deprecation.deprecator and deprecation.deprecator != INDETERMINATE_DEPRECATOR:
+ collection_name = '.'.join(deprecation.deprecator.resolved_name.split('.')[:2])
+ else:
+ collection_name = None
+
+ result = dict(
+ msg=format_event_brief_message(deprecation.event),
+ collection_name=collection_name,
+ )
+
+ if deprecation.date:
+ result.update(date=deprecation.date)
+ else:
+ result.update(version=deprecation.version)
+
+ return result
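+
+# Illustrative result shape for a hypothetical deprecation:
+#
+#   {'msg': 'option foo is deprecated', 'collection_name': 'ansible.builtin', 'version': '2.23'}
+#
+# When the deprecation is date-based, 'date' appears instead of 'version'.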
diff --git a/lib/ansible/module_utils/_internal/_json/__init__.py b/lib/ansible/module_utils/_internal/_json/__init__.py
new file mode 100644
index 00000000000..d04c7a243e7
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_json/__init__.py
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+import importlib
+import importlib.util
+import types
+
+import typing as t
+
+from ansible.module_utils._internal._json._profiles import AnsibleProfileJSONEncoder, AnsibleProfileJSONDecoder, _JSONSerializationProfile
+from ansible.module_utils import _internal
+
+_T = t.TypeVar('_T', AnsibleProfileJSONEncoder, AnsibleProfileJSONDecoder)
+
+
+def get_encoder_decoder(profile: str | types.ModuleType, return_type: type[_T]) -> type[_T]:
+ class_name = 'Encoder' if return_type is AnsibleProfileJSONEncoder else 'Decoder'
+
+ return getattr(get_serialization_module(profile), class_name)
+
+
+def get_module_serialization_profile_name(name: str, controller_to_module: bool) -> str:
+ if controller_to_module:
+ name = f'module_{name}_c2m'
+ else:
+ name = f'module_{name}_m2c'
+
+ return name
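+
+# e.g. get_module_serialization_profile_name('legacy', controller_to_module=True) -> 'module_legacy_c2m'
+#      get_module_serialization_profile_name('legacy', controller_to_module=False) -> 'module_legacy_m2c'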
+
+
+def get_module_serialization_profile_module_name(name: str, controller_to_module: bool) -> str:
+ return get_serialization_module_name(get_module_serialization_profile_name(name, controller_to_module))
+
+
+def get_serialization_profile(name: str | types.ModuleType) -> _JSONSerializationProfile:
+ return getattr(get_serialization_module(name), '_Profile')
+
+
+def get_serialization_module(name: str | types.ModuleType) -> types.ModuleType:
+ return importlib.import_module(get_serialization_module_name(name))
+
+
+def get_serialization_module_name(name: str | types.ModuleType) -> str:
+ if isinstance(name, str):
+ if '.' in name:
+ return name # name is already fully qualified
+
+ target_name = f'{__name__}._profiles._{name}'
+ elif isinstance(name, types.ModuleType):
+ return name.__name__
+ else:
+ raise TypeError(f'Name is {type(name)} instead of {str} or {types.ModuleType}.')
+
+ if importlib.util.find_spec(target_name):
+ return target_name
+
+ # the value of is_controller can change after import; always pick it up from the module
+ if _internal.is_controller:
+ controller_name = f'ansible._internal._json._profiles._{name}'
+
+ if importlib.util.find_spec(controller_name):
+ return controller_name
+
+ raise ValueError(f'Unknown profile name {name!r}.')
diff --git a/lib/ansible/module_utils/_internal/_json/_legacy_encoder.py b/lib/ansible/module_utils/_internal/_json/_legacy_encoder.py
new file mode 100644
index 00000000000..2e4e940c708
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_json/_legacy_encoder.py
@@ -0,0 +1,26 @@
+from __future__ import annotations
+
+from ansible.module_utils._internal._json import _profiles
+from ansible.module_utils._internal._json._profiles import _tagless
+
+
+class LegacyTargetJSONEncoder(_tagless.Encoder):
+ """Compatibility wrapper over `legacy` profile JSON encoder to support trust stripping and vault value plaintext conversion."""
+
+ def __init__(self, preprocess_unsafe: bool = False, vault_to_text: bool = False, _decode_bytes: bool = False, **kwargs) -> None:
+ self._decode_bytes = _decode_bytes
+
+ # NOTE: The preprocess_unsafe and vault_to_text arguments are features of LegacyControllerJSONEncoder.
+ # They are implemented here to allow callers to pass them without raising an error, but they have no effect.
+
+ super().__init__(**kwargs)
+
+ def default(self, o: object) -> object:
+ if self._decode_bytes:
+ if type(o) is _profiles._WrappedValue: # pylint: disable=unidiomatic-typecheck
+ o = o.wrapped
+
+ if isinstance(o, bytes):
+ return o.decode(errors='surrogateescape') # backward compatibility with `ansible.module_utils.basic.jsonify`
+
+ return super().default(o)
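+
+# Illustrative use (assumes `import json`), mirroring the byte handling of `ansible.module_utils.basic.jsonify`:
+#
+#   json.dumps({'out': b'data'}, cls=LegacyTargetJSONEncoder, _decode_bytes=True)
+#   # -> '{"out": "data"}' -- bytes are decoded with surrogateescape instead of raising TypeError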
diff --git a/lib/ansible/module_utils/_internal/_json/_profiles/__init__.py b/lib/ansible/module_utils/_internal/_json/_profiles/__init__.py
new file mode 100644
index 00000000000..25163175b4c
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_json/_profiles/__init__.py
@@ -0,0 +1,428 @@
+from __future__ import annotations
+
+import datetime
+import functools
+import json
+import typing as t
+
+from ansible.module_utils import _internal
+from ansible.module_utils._internal import _messages
+from ansible.module_utils._internal._datatag import (
+ AnsibleSerializable,
+ AnsibleSerializableWrapper,
+ AnsibleTaggedObject,
+ Tripwire,
+ _AnsibleTaggedBytes,
+ _AnsibleTaggedDate,
+ _AnsibleTaggedDateTime,
+ _AnsibleTaggedDict,
+ _AnsibleTaggedFloat,
+ _AnsibleTaggedInt,
+ _AnsibleTaggedList,
+ _AnsibleTaggedSet,
+ _AnsibleTaggedStr,
+ _AnsibleTaggedTime,
+ _AnsibleTaggedTuple,
+ AnsibleTagHelper,
+ _tags,
+)
+
+# transformations to "final" JSON representations can only use:
+# str, float, int, bool, None, dict, list
+# NOT SUPPORTED: tuple, set -- the representation of these in JSON varies by profile (can raise an error, may be converted to list, etc.)
+# This means that any special handling required on JSON types that are not wrapped/tagged must be done in a pre-pass before serialization.
+# The final type map cannot contain any JSON types other than tuple or set.
+
+
+_NoneType: t.Final[type] = type(None)
+
+_json_subclassable_scalar_types: t.Final[tuple[type, ...]] = (str, float, int)
+"""Scalar types understood by JSONEncoder which can also be subclassed."""
+
+_json_scalar_types: t.Final[tuple[type, ...]] = (str, float, int, bool, _NoneType)
+"""Scalar types understood by JSONEncoder."""
+
+_json_container_types: t.Final[tuple[type, ...]] = (dict, list, tuple)
+"""Container types understood by JSONEncoder."""
+
+_json_types: t.Final[tuple[type, ...]] = _json_scalar_types + _json_container_types
+"""Types understood by JSONEncoder."""
+
+_intercept_containers = frozenset(
+ {
+ dict,
+ list,
+ tuple,
+ _AnsibleTaggedDict,
+ _AnsibleTaggedList,
+ _AnsibleTaggedTuple,
+ }
+)
+"""Container types to intercept in support of scalar interception."""
+
+_common_module_types: frozenset[type[AnsibleSerializable]] = frozenset(
+ {
+ _AnsibleTaggedBytes,
+ _AnsibleTaggedDate,
+ _AnsibleTaggedDateTime,
+ _AnsibleTaggedDict,
+ _AnsibleTaggedFloat,
+ _AnsibleTaggedInt,
+ _AnsibleTaggedList,
+ _AnsibleTaggedSet,
+ _AnsibleTaggedStr,
+ _AnsibleTaggedTime,
+ _AnsibleTaggedTuple,
+ }
+)
+"""
+Types that must be supported for all Ansible module serialization profiles.
+
+For module-to-controller, all types should support full fidelity serialization.
+This allows infrastructure and library code to use these features even when a module does not.
+
+For controller-to-module, type behavior is profile dependent.
+"""
+
+_common_module_response_types: frozenset[type[AnsibleSerializable]] = frozenset(
+ {
+ _messages.PluginInfo,
+ _messages.PluginType,
+ _messages.Event,
+ _messages.EventChain,
+ _messages.ErrorSummary,
+ _messages.WarningSummary,
+ _messages.DeprecationSummary,
+ _tags.Deprecated,
+ }
+)
+"""Types that must be supported for all Ansible module-to-controller serialization profiles."""
+
+_T_encoder = t.TypeVar('_T_encoder', bound="AnsibleProfileJSONEncoder")
+_T_decoder = t.TypeVar('_T_decoder', bound="AnsibleProfileJSONDecoder")
+
+
+class _JSONSerializationProfile(t.Generic[_T_encoder, _T_decoder]):
+ serialize_map: t.ClassVar[dict[type, t.Callable]]
+ """
+ Each concrete non-JSON type must be included in this mapping to support serialization.
+ Including a JSON type in the mapping allows for overriding or disabling of serialization of that type.
+ """
+
+ deserialize_map: t.ClassVar[dict[str, t.Callable]]
+ """A mapping of type keys to type dispatchers for deserialization."""
+
+ allowed_ansible_serializable_types: t.ClassVar[frozenset[type[AnsibleSerializable]]] = frozenset()
+    """Each concrete AnsibleSerializable-derived type must be included in this set to support serialization."""
+
+ _common_discard_tags: t.ClassVar[dict[type, t.Callable]]
+ """
+ Serialize map for tagged types to have their tags discarded.
+ This is generated by __init_subclass__ and should not be manually updated.
+ """
+
+ _allowed_type_keys: t.ClassVar[frozenset[str]]
+ """
+ The set of type keys allowed during deserialization.
+ This is generated by __init_subclass__ and should not be manually updated.
+ """
+
+ _unwrapped_json_types: t.ClassVar[frozenset[type]]
+ """
+ The set of types that do not need to be wrapped during serialization.
+ This is generated by __init_subclass__ and should not be manually updated.
+ """
+
+ profile_name: t.ClassVar[str]
+ """
+ The user-facing name of the profile, derived from the module name in which the profile resides.
+ Used to load the profile dynamically at runtime.
+ This is generated by __init_subclass__ and should not be manually updated.
+ """
+
+ encode_strings_as_utf8: t.ClassVar[bool] = False
+ r"""
+ When enabled, JSON encoding will result in UTF8 strings being emitted.
+    Otherwise, non-ASCII strings will be escaped with `\uXXXX` escape sequences.
+ """
+
+ @classmethod
+ def pre_serialize(cls, encoder: _T_encoder, o: t.Any) -> t.Any:
+ return o
+
+ @classmethod
+ def post_deserialize(cls, decoder: _T_decoder, o: t.Any) -> t.Any:
+ return o
+
+ @classmethod
+ def cannot_serialize_error(cls, target: t.Any, /) -> t.NoReturn:
+ raise TypeError(f'Object of type {type(target).__name__!r} is not JSON serializable by the {cls.profile_name!r} profile.')
+
+ @classmethod
+ def cannot_deserialize_error(cls, target_type_name: str, /) -> t.NoReturn:
+ raise TypeError(f'Object of type {target_type_name!r} is not JSON deserializable by the {cls.profile_name!r} profile.')
+
+ @classmethod
+ def unsupported_target_type_error(cls, target_type_name: str, _value: dict) -> t.NoReturn:
+ cls.cannot_deserialize_error(target_type_name)
+
+ @classmethod
+ def discard_tags(cls, value: AnsibleTaggedObject) -> object:
+ return value._native_copy()
+
+ @classmethod
+ def deserialize_serializable(cls, value: dict[str, t.Any]) -> object:
+ type_key = value[AnsibleSerializable._TYPE_KEY]
+
+ if type_key not in cls._allowed_type_keys:
+ cls.cannot_deserialize_error(type_key)
+
+ return AnsibleSerializable._deserialize(value)
+
+ @classmethod
+ def serialize_as_list(cls, value: t.Iterable) -> list:
+ # DTFIX-FUTURE: once we have separate control/data channels for module-to-controller (and back), warn about this conversion
+ return AnsibleTagHelper.tag_copy(value, (item for item in value), value_type=list)
+
+ @classmethod
+ def serialize_as_isoformat(cls, value: datetime.date | datetime.time | datetime.datetime) -> str:
+ return value.isoformat()
+
+ @classmethod
+ def serialize_serializable_object(cls, value: AnsibleSerializable) -> t.Any:
+ return value._serialize()
+
+ @classmethod
+ def post_init(cls) -> None:
+ pass
+
+ @classmethod
+ def maybe_wrap(cls, o: t.Any) -> t.Any:
+ if type(o) in cls._unwrapped_json_types:
+ return o
+
+ return _WrappedValue(o)
+
+ @classmethod
+ def handle_key(cls, k: t.Any) -> t.Any:
+ """Validation/conversion hook before a dict key is serialized. The default implementation only accepts str-typed keys."""
+ # NOTE: Since JSON requires string keys, there is no support for preserving tags on dictionary keys during serialization.
+
+ if not isinstance(k, str): # DTFIX-FUTURE: optimize this to use all known str-derived types in type map / allowed types
+ raise TypeError(f'Key of type {type(k).__name__!r} is not JSON serializable by the {cls.profile_name!r} profile.')
+
+ return k
+
+ @classmethod
+ def _handle_key_str_fallback(cls, k: t.Any) -> t.Any:
+ """Legacy implementations should use this key handler for backward compatibility with stdlib JSON key conversion quirks."""
+ # DTFIX-FUTURE: optimized exact-type table lookup first
+
+ if isinstance(k, str):
+ return k
+
+ if k is None or isinstance(k, (int, float)):
+ return json.dumps(k)
+
+ raise TypeError(f'Key of type {type(k).__name__!r} is not JSON serializable by the {cls.profile_name!r} profile.')
+
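+
+    # e.g. _handle_key_str_fallback(True) -> 'true', (None) -> 'null', (1.5) -> '1.5',
+    # matching the stdlib JSONEncoder coercion of non-str scalar keys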
+ @classmethod
+ def default(cls, o: t.Any) -> t.Any:
+ # Preserve the built-in JSON encoder support for subclasses of scalar types.
+
+ if isinstance(o, _json_subclassable_scalar_types):
+ return o
+
+ # Preserve the built-in JSON encoder support for subclasses of dict and list.
+ # Additionally, add universal support for mappings and sequences/sets by converting them to dict and list, respectively.
+
+ if _internal.is_intermediate_mapping(o):
+ return {cls.handle_key(k): cls.maybe_wrap(v) for k, v in o.items()}
+
+ if _internal.is_intermediate_iterable(o):
+ return [cls.maybe_wrap(v) for v in o]
+
+ return cls.last_chance(o)
+
+ @classmethod
+ def last_chance(cls, o: t.Any) -> t.Any:
+ if isinstance(o, Tripwire):
+ o.trip()
+
+ cls.cannot_serialize_error(o)
+
+ def __init_subclass__(cls, **kwargs) -> None:
+ cls.deserialize_map = {}
+ cls._common_discard_tags = {obj: cls.discard_tags for obj in _common_module_types if issubclass(obj, AnsibleTaggedObject)}
+
+ cls.post_init()
+
+ cls.profile_name = cls.__module__.rsplit('.', maxsplit=1)[-1].lstrip('_')
+
+ wrapper_types = set(obj for obj in cls.serialize_map.values() if isinstance(obj, type) and issubclass(obj, AnsibleSerializableWrapper))
+
+ cls.allowed_ansible_serializable_types |= wrapper_types
+
+        # no current need to preserve tags on controller-only types or custom behavior for anything in `allowed_ansible_serializable_types`
+ cls.serialize_map.update({obj: cls.serialize_serializable_object for obj in cls.allowed_ansible_serializable_types})
+ cls.serialize_map.update({obj: func for obj, func in _internal.get_controller_serialize_map().items() if obj not in cls.serialize_map})
+
+ cls.deserialize_map[AnsibleSerializable._TYPE_KEY] = cls.deserialize_serializable # always recognize tagged types
+
+ cls._allowed_type_keys = frozenset(obj._type_key for obj in cls.allowed_ansible_serializable_types)
+
+ cls._unwrapped_json_types = frozenset(
+ {obj for obj in cls.serialize_map if not issubclass(obj, _json_types)} # custom types that do not extend JSON-native types
+ | {obj for obj in _json_scalar_types if obj not in cls.serialize_map} # JSON-native scalars lacking custom handling
+ )
+
+
+class _WrappedValue:
+ __slots__ = ('wrapped',)
+
+ def __init__(self, wrapped: t.Any) -> None:
+ self.wrapped = wrapped
+
+
+class AnsibleProfileJSONEncoder(json.JSONEncoder):
+ """Profile based JSON encoder capable of handling Ansible internal types."""
+
+ _wrap_container_types = (list, set, tuple, dict)
+ _profile: type[_JSONSerializationProfile]
+
+ profile_name: str
+
+ def __init__(self, **kwargs):
+ self._wrap_types = self._wrap_container_types + (AnsibleSerializable,)
+
+ if self._profile.encode_strings_as_utf8:
+ kwargs.update(ensure_ascii=False)
+
+ super().__init__(**kwargs)
+
+ def __init_subclass__(cls, **kwargs) -> None:
+ cls.profile_name = cls._profile.profile_name
+
+ def encode(self, o):
+ o = self._profile.maybe_wrap(self._profile.pre_serialize(self, o))
+
+ return super().encode(o)
+
+ def default(self, o: t.Any) -> t.Any:
+ o_type = type(o)
+
+ if o_type is _WrappedValue: # pylint: disable=unidiomatic-typecheck
+ o = o.wrapped
+ o_type = type(o)
+
+ if mapped_callable := self._profile.serialize_map.get(o_type):
+ return self._profile.maybe_wrap(mapped_callable(o))
+
+ # This is our last chance to intercept the values in containers, so they must be wrapped here.
+ # Only containers natively understood by the built-in JSONEncoder are recognized, since any other container types must be present in serialize_map.
+
+ if o_type is dict: # pylint: disable=unidiomatic-typecheck
+ return {self._profile.handle_key(k): self._profile.maybe_wrap(v) for k, v in o.items()}
+
+ if o_type is list or o_type is tuple: # pylint: disable=unidiomatic-typecheck
+ return [self._profile.maybe_wrap(v) for v in o] # JSONEncoder converts tuple to a list, so just make it a list now
+
+ # Any value here is a type not explicitly handled by this encoder.
+ # The profile default handler is responsible for generating an error or converting the value to a supported type.
+
+ return self._profile.default(o)
+
+
+class AnsibleProfileJSONDecoder(json.JSONDecoder):
+ """Profile based JSON decoder capable of handling Ansible internal types."""
+
+ _profile: type[_JSONSerializationProfile]
+
+ profile_name: str
+
+ def __init__(self, **kwargs):
+ kwargs.update(object_hook=self.object_hook)
+
+ super().__init__(**kwargs)
+
+ def __init_subclass__(cls, **kwargs) -> None:
+ cls.profile_name = cls._profile.profile_name
+
+ def raw_decode(self, s: str, idx: int = 0) -> tuple[t.Any, int]:
+ obj, end = super().raw_decode(s, idx)
+
+ if _string_encoding_check_enabled():
+ try:
+ _recursively_check_string_encoding(obj)
+ except UnicodeEncodeError as ex:
+ raise _create_encoding_check_error() from ex
+
+ obj = self._profile.post_deserialize(self, obj)
+
+ return obj, end
+
+ def object_hook(self, pairs: dict[str, object]) -> object:
+ if _string_encoding_check_enabled():
+ try:
+ for key, value in pairs.items():
+ key.encode()
+ _recursively_check_string_encoding(value)
+ except UnicodeEncodeError as ex:
+ raise _create_encoding_check_error() from ex
+
+ for mapped_key, mapped_callable in self._profile.deserialize_map.items():
+ if mapped_key in pairs:
+ return mapped_callable(pairs)
+
+ return pairs
+
+
+_check_encoding_setting = 'MODULE_STRICT_UTF8_RESPONSE'
+r"""
+The setting to control whether strings are checked to verify they can be encoded as valid UTF8.
+This is currently only used during deserialization, to prevent string values from entering the controller which will later fail to be encoded as bytes.
+
+The encoding failure can occur when the string represents one of two kinds of values:
+1) It was created through decoding bytes with the `surrogateescape` error handler, and that handler is not being used when encoding.
+2) It represents an invalid UTF8 value, such as `"\ud8f3"` in a JSON payload. This cannot be encoded, even using the `surrogateescape` error handler.
+
+Although this becomes an error during deserialization, there are other opportunities for these values to become strings within Ansible.
+Future code changes should further restrict bytes to string conversions to eliminate use of `surrogateescape` where appropriate.
+Additional warnings at other boundaries may be needed to give users an opportunity to resolve the issues before they become errors.
+"""
+# DTFIX-FUTURE: add strict UTF8 string encoding checking to serialization profiles (to match the checks performed during deserialization)
+# DTFIX3: the surrogateescape note above isn't quite right; for encoding, use surrogatepass, which does work
+# DTFIX-FUTURE: this config setting should probably be deprecated
+
+
+def _create_encoding_check_error() -> Exception:
+ """
+ Return an AnsibleError for use when a UTF8 string encoding check has failed.
+ These checks are only performed in the controller context, but since this is module_utils code, dynamic loading of the `errors` module is required.
+ """
+ errors = _internal.import_controller_module('ansible.errors') # bypass AnsiballZ import scanning
+
+ return errors.AnsibleRuntimeError(
+ message='Refusing to deserialize an invalid UTF8 string value.',
+ help_text=f'This check can be disabled with the `{_check_encoding_setting}` setting.',
+ )
+
+
+@functools.lru_cache
+def _string_encoding_check_enabled() -> bool:
+ """Return True if JSON deserialization should verify strings can be encoded as valid UTF8."""
+ if constants := _internal.import_controller_module('ansible.constants'): # bypass AnsiballZ import scanning
+ return constants.config.get_config_value(_check_encoding_setting) # covers all profile-based deserializers, not just modules
+
+ return False
+
+
+def _recursively_check_string_encoding(value: t.Any) -> None:
+ """Recursively check the given object to ensure all strings can be encoded as valid UTF8."""
+ value_type = type(value)
+
+ if value_type is str:
+ value.encode()
+ elif value_type is list: # dict is handled by the JSON deserializer
+ for item in value:
+ _recursively_check_string_encoding(item)
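+
+# Illustrative failure mode (a lone surrogate cannot be encoded as UTF8):
+#
+#   _recursively_check_string_encoding('\ud8f3')        # raises UnicodeEncodeError
+#   _recursively_check_string_encoding(['ok', 'fine'])  # passes silently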
diff --git a/lib/ansible/module_utils/_internal/_json/_profiles/_fallback_to_str.py b/lib/ansible/module_utils/_internal/_json/_profiles/_fallback_to_str.py
new file mode 100644
index 00000000000..92b80ca0d31
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_json/_profiles/_fallback_to_str.py
@@ -0,0 +1,73 @@
+"""
+Lossy best-effort serialization for Ansible variables; used primarily for callback JSON display.
+Any type which is not supported by JSON will be converted to a string.
+The string representation of any type that is not native to JSON is subject to change and should not be considered stable.
+The decoder provides no special behavior.
+"""
+
+from __future__ import annotations as _annotations
+
+import datetime as _datetime
+import typing as _t
+
+from json import dumps as _dumps
+
+from ... import _datatag
+from .. import _profiles
+
+
+class _Profile(_profiles._JSONSerializationProfile["Encoder", "Decoder"]):
+ serialize_map: _t.ClassVar[dict[type, _t.Callable]]
+
+ @classmethod
+ def post_init(cls) -> None:
+ cls.serialize_map = {
+ bytes: cls.serialize_bytes_as_str,
+ set: cls.serialize_as_list,
+ tuple: cls.serialize_as_list,
+ _datetime.date: cls.serialize_as_isoformat,
+ _datetime.time: cls.serialize_as_isoformat,
+ _datetime.datetime: cls.serialize_as_isoformat,
+ _datatag._AnsibleTaggedDate: cls.discard_tags,
+ _datatag._AnsibleTaggedTime: cls.discard_tags,
+ _datatag._AnsibleTaggedDateTime: cls.discard_tags,
+ _datatag._AnsibleTaggedStr: cls.discard_tags,
+ _datatag._AnsibleTaggedInt: cls.discard_tags,
+ _datatag._AnsibleTaggedFloat: cls.discard_tags,
+ _datatag._AnsibleTaggedSet: cls.discard_tags,
+ _datatag._AnsibleTaggedList: cls.discard_tags,
+ _datatag._AnsibleTaggedTuple: cls.discard_tags,
+ _datatag._AnsibleTaggedDict: cls.discard_tags,
+ _datatag._AnsibleTaggedBytes: cls.discard_tags,
+ }
+
+ @classmethod
+ def serialize_bytes_as_str(cls, value: bytes) -> str:
+ return value.decode(errors='surrogateescape')
+
+ @classmethod
+ def handle_key(cls, k: _t.Any) -> _t.Any:
+ while mapped_callable := cls.serialize_map.get(type(k)):
+ k = mapped_callable(k)
+
+ k = cls.default(k)
+
+ if not isinstance(k, str):
+ k = _dumps(k, cls=Encoder)
+
+ return k
+
+ @classmethod
+ def last_chance(cls, o: _t.Any) -> _t.Any:
+ try:
+ return str(o)
+ except Exception as ex:
+ return str(ex)
+
+
+class Encoder(_profiles.AnsibleProfileJSONEncoder):
+ _profile = _Profile
+
+
+class Decoder(_profiles.AnsibleProfileJSONDecoder):
+ _profile = _Profile
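+
+# Illustrative use (assumes `import datetime, json`):
+#
+#   json.dumps({'when': datetime.date(2025, 1, 1), 'tags': {'a'}}, cls=Encoder)
+#   # -> '{"when": "2025-01-01", "tags": ["a"]}'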
diff --git a/lib/ansible/module_utils/_internal/_json/_profiles/_module_legacy_c2m.py b/lib/ansible/module_utils/_internal/_json/_profiles/_module_legacy_c2m.py
new file mode 100644
index 00000000000..3247d27a81c
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_json/_profiles/_module_legacy_c2m.py
@@ -0,0 +1,33 @@
+"""Legacy wire format for controller to module communication."""
+
+from __future__ import annotations as _annotations
+
+import datetime as _datetime
+
+from .. import _profiles
+
+
+class _Profile(_profiles._JSONSerializationProfile["Encoder", "Decoder"]):
+ @classmethod
+ def post_init(cls) -> None:
+ cls.serialize_map = {}
+ cls.serialize_map.update(cls._common_discard_tags)
+ cls.serialize_map.update(
+ {
+ set: cls.serialize_as_list, # legacy _json_encode_fallback behavior
+ tuple: cls.serialize_as_list, # JSONEncoder built-in behavior
+ _datetime.date: cls.serialize_as_isoformat,
+ _datetime.time: cls.serialize_as_isoformat, # always failed pre-2.18, so okay to include for consistency
+ _datetime.datetime: cls.serialize_as_isoformat,
+ }
+ )
+
+ cls.handle_key = cls._handle_key_str_fallback # type: ignore[method-assign] # legacy stdlib-compatible key behavior
+
+
+class Encoder(_profiles.AnsibleProfileJSONEncoder):
+ _profile = _Profile
+
+
+class Decoder(_profiles.AnsibleProfileJSONDecoder):
+ _profile = _Profile
diff --git a/lib/ansible/module_utils/_internal/_json/_profiles/_module_legacy_m2c.py b/lib/ansible/module_utils/_internal/_json/_profiles/_module_legacy_m2c.py
new file mode 100644
index 00000000000..3030f72ad8e
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_json/_profiles/_module_legacy_m2c.py
@@ -0,0 +1,37 @@
+"""Legacy wire format for module to controller communication."""
+
+from __future__ import annotations as _annotations
+
+import datetime as _datetime
+
+from .. import _profiles
+from ansible.module_utils.common.text.converters import to_text as _to_text
+
+
+class _Profile(_profiles._JSONSerializationProfile["Encoder", "Decoder"]):
+ @classmethod
+ def bytes_to_text(cls, value: bytes) -> str:
+ return _to_text(value, errors='surrogateescape')
+
+ @classmethod
+ def post_init(cls) -> None:
+ cls.allowed_ansible_serializable_types = _profiles._common_module_types | _profiles._common_module_response_types
+
+ cls.serialize_map = {
+ bytes: cls.bytes_to_text, # legacy behavior from jsonify and container_to_text
+ set: cls.serialize_as_list, # legacy _json_encode_fallback behavior
+ tuple: cls.serialize_as_list, # JSONEncoder built-in behavior
+ _datetime.date: cls.serialize_as_isoformat, # legacy parameters.py does this before serialization
+ _datetime.time: cls.serialize_as_isoformat, # always failed pre-2.18, so okay to include for consistency
+ _datetime.datetime: cls.serialize_as_isoformat, # legacy _json_encode_fallback behavior *and* legacy parameters.py does this before serialization
+ }
+
+ cls.handle_key = cls._handle_key_str_fallback # type: ignore[method-assign] # legacy stdlib-compatible key behavior
+
+
+class Encoder(_profiles.AnsibleProfileJSONEncoder):
+ _profile = _Profile
+
+
+class Decoder(_profiles.AnsibleProfileJSONDecoder):
+ _profile = _Profile
diff --git a/lib/ansible/module_utils/_internal/_json/_profiles/_module_modern_c2m.py b/lib/ansible/module_utils/_internal/_json/_profiles/_module_modern_c2m.py
new file mode 100644
index 00000000000..a1806b37c0b
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_json/_profiles/_module_modern_c2m.py
@@ -0,0 +1,35 @@
+"""Data tagging aware wire format for controller to module communication."""
+
+from __future__ import annotations as _annotations
+
+import datetime as _datetime
+
+from ... import _datatag
+from .. import _profiles
+
+
+class _Profile(_profiles._JSONSerializationProfile["Encoder", "Decoder"]):
+ encode_strings_as_utf8 = True
+
+ @classmethod
+ def post_init(cls) -> None:
+ cls.serialize_map = {}
+ cls.serialize_map.update(cls._common_discard_tags)
+ cls.serialize_map.update(
+ {
+ # The bytes type is not supported, use str instead (future module profiles may support a bytes wrapper distinct from `bytes`).
+ set: cls.serialize_as_list, # legacy _json_encode_fallback behavior
+ tuple: cls.serialize_as_list, # JSONEncoder built-in behavior
+ _datetime.date: _datatag.AnsibleSerializableDate,
+ _datetime.time: _datatag.AnsibleSerializableTime,
+ _datetime.datetime: _datatag.AnsibleSerializableDateTime,
+ }
+ )
+
+
+class Encoder(_profiles.AnsibleProfileJSONEncoder):
+ _profile = _Profile
+
+
+class Decoder(_profiles.AnsibleProfileJSONDecoder):
+ _profile = _Profile
diff --git a/lib/ansible/module_utils/_internal/_json/_profiles/_module_modern_m2c.py b/lib/ansible/module_utils/_internal/_json/_profiles/_module_modern_m2c.py
new file mode 100644
index 00000000000..a32d2c122b9
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_json/_profiles/_module_modern_m2c.py
@@ -0,0 +1,33 @@
+"""Data tagging aware wire format for module to controller communication."""
+
+from __future__ import annotations as _annotations
+
+import datetime as _datetime
+
+from ... import _datatag
+from .. import _profiles
+
+
+class _Profile(_profiles._JSONSerializationProfile["Encoder", "Decoder"]):
+ encode_strings_as_utf8 = True
+
+ @classmethod
+ def post_init(cls) -> None:
+ cls.allowed_ansible_serializable_types = _profiles._common_module_types | _profiles._common_module_response_types
+
+ cls.serialize_map = {
+ # The bytes type is not supported, use str instead (future module profiles may support a bytes wrapper distinct from `bytes`).
+ set: cls.serialize_as_list, # legacy _json_encode_fallback behavior
+ tuple: cls.serialize_as_list, # JSONEncoder built-in behavior
+ _datetime.date: _datatag.AnsibleSerializableDate,
+ _datetime.time: _datatag.AnsibleSerializableTime,
+ _datetime.datetime: _datatag.AnsibleSerializableDateTime,
+ }
+
+
+class Encoder(_profiles.AnsibleProfileJSONEncoder):
+ _profile = _Profile
+
+
+class Decoder(_profiles.AnsibleProfileJSONDecoder):
+ _profile = _Profile
diff --git a/lib/ansible/module_utils/_internal/_json/_profiles/_tagless.py b/lib/ansible/module_utils/_internal/_json/_profiles/_tagless.py
new file mode 100644
index 00000000000..29ff96c4173
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_json/_profiles/_tagless.py
@@ -0,0 +1,52 @@
+"""
+Lossy best-effort serialization for Ansible variables.
+Default profile for the `to_json` filter.
+Deserialization behavior is identical to JSONDecoder, except that known Ansible custom serialization markers will raise an error.
+"""
+
+from __future__ import annotations as _annotations
+
+import datetime as _datetime
+import functools as _functools
+
+from ... import _datatag
+from .. import _profiles
+
+
+class _Profile(_profiles._JSONSerializationProfile["Encoder", "Decoder"]):
+ @classmethod
+ def post_init(cls) -> None:
+ cls.serialize_map = {
+ # DTFIX5: support serialization of every type that is supported in the Ansible variable type system
+ set: cls.serialize_as_list,
+ tuple: cls.serialize_as_list,
+ _datetime.date: cls.serialize_as_isoformat,
+ _datetime.time: cls.serialize_as_isoformat,
+ _datetime.datetime: cls.serialize_as_isoformat,
+ # bytes intentionally omitted as they are not a supported variable type, they were not originally supported by the old AnsibleJSONEncoder
+ _datatag._AnsibleTaggedDate: cls.discard_tags,
+ _datatag._AnsibleTaggedTime: cls.discard_tags,
+ _datatag._AnsibleTaggedDateTime: cls.discard_tags,
+ _datatag._AnsibleTaggedStr: cls.discard_tags,
+ _datatag._AnsibleTaggedInt: cls.discard_tags,
+ _datatag._AnsibleTaggedFloat: cls.discard_tags,
+ _datatag._AnsibleTaggedSet: cls.discard_tags,
+ _datatag._AnsibleTaggedList: cls.discard_tags,
+ _datatag._AnsibleTaggedTuple: cls.discard_tags,
+ _datatag._AnsibleTaggedDict: cls.discard_tags,
+ }
+
+ cls.deserialize_map = {
+ '__ansible_unsafe': _functools.partial(cls.unsupported_target_type_error, '__ansible_unsafe'),
+ '__ansible_vault': _functools.partial(cls.unsupported_target_type_error, '__ansible_vault'),
+ }
+
+ cls.handle_key = cls._handle_key_str_fallback # type: ignore[method-assign] # legacy stdlib-compatible key behavior
+
+
+class Encoder(_profiles.AnsibleProfileJSONEncoder):
+ _profile = _Profile
+
+
+class Decoder(_profiles.AnsibleProfileJSONDecoder):
+ _profile = _Profile
diff --git a/lib/ansible/module_utils/_internal/_messages.py b/lib/ansible/module_utils/_internal/_messages.py
new file mode 100644
index 00000000000..c03fc687e4b
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_messages.py
@@ -0,0 +1,130 @@
+"""
+Message contract definitions for various target-side types.
+
+These types and the wire format they implement are currently considered provisional and subject to change without notice.
+A future release will remove the provisional status.
+"""
+
+from __future__ import annotations as _annotations
+
+import dataclasses as _dataclasses
+import enum as _enum
+import sys as _sys
+import typing as _t
+
+from ansible.module_utils._internal import _datatag, _dataclass_validation
+
+if _sys.version_info >= (3, 10):
+ # Using slots for reduced memory usage and improved performance.
+ _dataclass_kwargs = dict(frozen=True, kw_only=True, slots=True)
+else:
+ # deprecated: description='always use dataclass slots and keyword-only args' python_version='3.9'
+ _dataclass_kwargs = dict(frozen=True)
+
+
+class PluginType(_datatag.AnsibleSerializableEnum):
+ """Enum of Ansible plugin types."""
+
+ ACTION = _enum.auto()
+ BECOME = _enum.auto()
+ CACHE = _enum.auto()
+ CALLBACK = _enum.auto()
+ CLICONF = _enum.auto()
+ CONNECTION = _enum.auto()
+ DOC_FRAGMENTS = _enum.auto()
+ FILTER = _enum.auto()
+ HTTPAPI = _enum.auto()
+ INVENTORY = _enum.auto()
+ LOOKUP = _enum.auto()
+ MODULE = _enum.auto()
+ NETCONF = _enum.auto()
+ SHELL = _enum.auto()
+ STRATEGY = _enum.auto()
+ TERMINAL = _enum.auto()
+ TEST = _enum.auto()
+ VARS = _enum.auto()
+
+
+@_dataclasses.dataclass(**_dataclass_kwargs)
+class PluginInfo(_datatag.AnsibleSerializableDataclass):
+ """Information about a loaded plugin."""
+
+ resolved_name: _t.Optional[str]
+ """The resolved canonical plugin name; always fully-qualified for collection plugins."""
+
+ type: _t.Optional[PluginType]
+ """The plugin type."""
+
+
+@_dataclasses.dataclass(**_dataclass_kwargs)
+class EventChain(_datatag.AnsibleSerializableDataclass):
+ """A chain used to link one event to another."""
+
+ _validation_auto_enabled = False
+
+ def __post_init__(self): ... # required for deferred dataclass validation
+
+ msg_reason: str
+ traceback_reason: str
+ event: Event
+ follow: bool = True
+
+
+@_dataclasses.dataclass(**_dataclass_kwargs)
+class Event(_datatag.AnsibleSerializableDataclass):
+ """Base class for an error/warning/deprecation event with optional chain (from an exception __cause__ chain) and an optional traceback."""
+
+ _validation_auto_enabled = False
+
+ def __post_init__(self): ... # required for deferred dataclass validation
+
+ msg: str
+ formatted_source_context: _t.Optional[str] = None
+ formatted_traceback: _t.Optional[str] = None
+ help_text: _t.Optional[str] = None
+ chain: _t.Optional[EventChain] = None
+ events: _t.Optional[_t.Tuple[Event, ...]] = None
+
+
+_dataclass_validation.inject_post_init_validation(EventChain, EventChain._validation_allow_subclasses)
+_dataclass_validation.inject_post_init_validation(Event, Event._validation_allow_subclasses)
+
+
+@_dataclasses.dataclass(**_dataclass_kwargs)
+class SummaryBase(_datatag.AnsibleSerializableDataclass):
+ """Base class for an error/warning/deprecation summary with details (possibly derived from an exception __cause__ chain) and an optional traceback."""
+
+ event: Event
+
+
+@_dataclasses.dataclass(**_dataclass_kwargs)
+class ErrorSummary(SummaryBase):
+ """Error summary with details (possibly derived from an exception __cause__ chain) and an optional traceback."""
+
+
+@_dataclasses.dataclass(**_dataclass_kwargs)
+class WarningSummary(SummaryBase):
+ """Warning summary with details (possibly derived from an exception __cause__ chain) and an optional traceback."""
+
+
+@_dataclasses.dataclass(**_dataclass_kwargs)
+class DeprecationSummary(WarningSummary):
+ """Deprecation summary with details (possibly derived from an exception __cause__ chain) and an optional traceback."""
+
+ deprecator: _t.Optional[PluginInfo] = None
+ """
+ The identifier for the content which is being deprecated.
+ """
+
+ date: _t.Optional[str] = None
+ """
+ The date after which a new release of `deprecator` will remove the feature described by `msg`.
+ Ignored if `deprecator` is not provided.
+ """
+
+ version: _t.Optional[str] = None
+ """
+ The version of `deprecator` which will remove the feature described by `msg`.
+ Ignored if `deprecator` is not provided.
+ Ignored if `date` is provided.
+ """
diff --git a/lib/ansible/module_utils/_internal/_patches/__init__.py b/lib/ansible/module_utils/_internal/_patches/__init__.py
new file mode 100644
index 00000000000..7e08b04bff3
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_patches/__init__.py
@@ -0,0 +1,66 @@
+"""Infrastructure for patching callables with alternative implementations as needed based on patch-specific test criteria."""
+
+from __future__ import annotations
+
+import abc
+import typing as t
+
+
+@t.runtime_checkable
+class PatchedTarget(t.Protocol):
+ """Runtime-checkable protocol that allows identification of a patched function via `isinstance`."""
+
+ unpatched_implementation: t.Callable
+
+
+class CallablePatch(abc.ABC):
+ """Base class for patches that provides abstractions for validation of broken behavior, installation of patches, and validation of fixed behavior."""
+
+ target_container: t.ClassVar
+ """The module object containing the function to be patched."""
+
+ target_attribute: t.ClassVar[str]
+ """The attribute name on the target module to patch."""
+
+ unpatched_implementation: t.ClassVar[t.Callable]
+ """The unpatched implementation. Available only after the patch has been applied."""
+
+ @classmethod
+ @abc.abstractmethod
+ def is_patch_needed(cls) -> bool:
+ """Returns True if the patch is currently needed. Returns False if the original target does not need the patch or the patch has already been applied."""
+
+ @abc.abstractmethod
+ def __call__(self, *args, **kwargs) -> t.Any:
+ """Invoke the patched or original implementation, depending on whether the patch has been applied or not."""
+
+ @classmethod
+ def is_patched(cls) -> bool:
+ """Returns True if the patch has been applied, otherwise returns False."""
+ return isinstance(cls.get_current_implementation(), PatchedTarget) # using a protocol lets us be more resilient to module unload weirdness
+
+ @classmethod
+ def get_current_implementation(cls) -> t.Any:
+ """Get the current (possibly patched) implementation from the patch target container."""
+ return getattr(cls.target_container, cls.target_attribute)
+
+ @classmethod
+ def patch(cls) -> None:
+ """Idempotently apply this patch (if needed)."""
+ if cls.is_patched():
+ return
+
+ cls.unpatched_implementation = cls.get_current_implementation()
+
+ if not cls.is_patch_needed():
+ return
+
+ # __call__ requires an instance (otherwise it'll be __new__)
+ setattr(cls.target_container, cls.target_attribute, cls())
+
+ if not cls.is_patch_needed():
+ return
+
+ setattr(cls.target_container, cls.target_attribute, cls.unpatched_implementation)
+
+ raise RuntimeError(f"Validation of '{cls.target_container.__name__}.{cls.target_attribute}' failed after patching.")
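+
+# Typical lifecycle for a hypothetical patch subclass:
+#
+#   class MyPatch(CallablePatch):
+#       target_container = some_module
+#       target_attribute = 'some_function'
+#
+#       @classmethod
+#       def is_patch_needed(cls) -> bool: ...
+#
+#       def __call__(self, *args, **kwargs): ...
+#
+#   MyPatch.patch()  # no-op if unneeded or already applied; raises RuntimeError if validation fails after patching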
diff --git a/lib/ansible/module_utils/_internal/_patches/_dataclass_annotation_patch.py b/lib/ansible/module_utils/_internal/_patches/_dataclass_annotation_patch.py
new file mode 100644
index 00000000000..43da39abe82
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_patches/_dataclass_annotation_patch.py
@@ -0,0 +1,53 @@
+"""Patches for builtin `dataclasses` module."""
+
+from __future__ import annotations
+
+import dataclasses
+import sys
+import typing as t
+
+from . import CallablePatch
+
+# trigger the bug by exposing typing.ClassVar via a module reference that is not `typing`
+_ts = sys.modules[__name__]
+ClassVar = t.ClassVar
+
+
+class DataclassesIsTypePatch(CallablePatch):
+ """Patch broken ClassVar support in dataclasses when ClassVar is accessed via a module other than `typing`."""
+
+ target_container: t.ClassVar = dataclasses
+ target_attribute = '_is_type'
+
+ @classmethod
+ def is_patch_needed(cls) -> bool:
+ @dataclasses.dataclass
+ class CheckClassVar:
+ # this is the broken case requiring patching: ClassVar dot-referenced from a module that is not `typing` is treated as an instance field
+ # DTFIX-FUTURE: file/link CPython bug report, deprecate this patch if/when it's fixed in CPython
+ a_classvar: _ts.ClassVar[int] # type: ignore[name-defined]
+ a_field: int
+
+ return len(dataclasses.fields(CheckClassVar)) != 1
+
+ def __call__(self, annotation, cls, a_module, a_type, is_type_predicate) -> bool:
+ """
+ This is a patched copy of `_is_type` from dataclasses.py in Python 3.13.
+ It eliminates the redundant source module reference equality check for the ClassVar type that triggers the bug.
+ """
+ match = dataclasses._MODULE_IDENTIFIER_RE.match(annotation) # type: ignore[attr-defined]
+ if match:
+ ns = None
+ module_name = match.group(1)
+ if not module_name:
+ # No module name, assume the class's module did
+ # "from dataclasses import InitVar".
+ ns = sys.modules.get(cls.__module__).__dict__
+ else:
+ # Look up module_name in the class's module.
+ module = sys.modules.get(cls.__module__)
+ if module and module.__dict__.get(module_name): # this is the patched line; removed `is a_module`
+ ns = sys.modules.get(a_type.__module__).__dict__
+ if ns and is_type_predicate(ns.get(match.group(2)), a_module):
+ return True
+ return False
diff --git a/lib/ansible/module_utils/_internal/_patches/_socket_patch.py b/lib/ansible/module_utils/_internal/_patches/_socket_patch.py
new file mode 100644
index 00000000000..fd8c2b16f6d
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_patches/_socket_patch.py
@@ -0,0 +1,34 @@
+"""Patches for builtin socket module."""
+
+from __future__ import annotations
+
+import contextlib
+import socket
+import typing as t
+
+from . import CallablePatch
+
+
+class _CustomInt(int):
+ """Wrapper around `int` to test if subclasses are accepted."""
+
+
+class GetAddrInfoPatch(CallablePatch):
+ """Patch `socket.getaddrinfo` so that its `port` arg works with `int` subclasses."""
+
+ target_container: t.ClassVar = socket
+ target_attribute = 'getaddrinfo'
+
+ @classmethod
+ def is_patch_needed(cls) -> bool:
+ with contextlib.suppress(OSError):
+ socket.getaddrinfo('127.0.0.1', _CustomInt(22))
+ return False
+
+ return True
+
+ def __call__(self, host, port, *args, **kwargs) -> t.Any:
+ if type(port) is not int and isinstance(port, int): # pylint: disable=unidiomatic-typecheck
+ port = int(port)
+
+ return type(self).unpatched_implementation(host, port, *args, **kwargs)
diff --git a/lib/ansible/module_utils/_internal/_patches/_sys_intern_patch.py b/lib/ansible/module_utils/_internal/_patches/_sys_intern_patch.py
new file mode 100644
index 00000000000..1e785d608e2
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_patches/_sys_intern_patch.py
@@ -0,0 +1,34 @@
+"""Patches for the builtin `sys` module."""
+
+from __future__ import annotations
+
+import contextlib
+import sys
+import typing as t
+
+from . import CallablePatch
+
+
+class _CustomStr(str):
+ """Wrapper around `str` to test if subclasses are accepted."""
+
+
+class SysInternPatch(CallablePatch):
+ """Patch `sys.intern` so that subclasses of `str` are accepted."""
+
+ target_container: t.ClassVar = sys
+ target_attribute = 'intern'
+
+ @classmethod
+ def is_patch_needed(cls) -> bool:
+ with contextlib.suppress(TypeError):
+ sys.intern(_CustomStr("x"))
+ return False
+
+ return True
+
+ def __call__(self, value: str):
+ if type(value) is not str and isinstance(value, str): # pylint: disable=unidiomatic-typecheck
+ value = str(value)
+
+ return type(self).unpatched_implementation(value)
diff --git a/lib/ansible/module_utils/_internal/_plugin_info.py b/lib/ansible/module_utils/_internal/_plugin_info.py
new file mode 100644
index 00000000000..2efb636b7e1
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_plugin_info.py
@@ -0,0 +1,38 @@
+from __future__ import annotations
+
+import typing as t
+
+from . import _messages
+
+
+class HasPluginInfo(t.Protocol):
+ """Protocol to type-annotate and expose PluginLoader-set values."""
+
+ @property
+ def ansible_name(self) -> str | None:
+ """Fully resolved plugin name."""
+
+ @property
+ def plugin_type(self) -> str:
+ """Plugin type name."""
+
+
+def get_plugin_info(value: HasPluginInfo) -> _messages.PluginInfo:
+ """Utility method that returns a `PluginInfo` from an object implementing the `HasPluginInfo` protocol."""
+ return _messages.PluginInfo(
+ resolved_name=value.ansible_name,
+ type=normalize_plugin_type(value.plugin_type),
+ )
+
+
+def normalize_plugin_type(value: str) -> _messages.PluginType | None:
+    """Normalize value and return it as a PluginType, or None if the value does not match any known plugin type."""
+ value = value.lower()
+
+ if value == 'modules':
+ value = 'module'
+
+ try:
+ return _messages.PluginType(value)
+ except ValueError:
+ return None
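+
+# e.g. normalize_plugin_type('modules') -> PluginType.MODULE
+#      normalize_plugin_type('FILTER')  -> PluginType.FILTER
+#      normalize_plugin_type('bogus')   -> None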
diff --git a/lib/ansible/module_utils/_internal/_stack.py b/lib/ansible/module_utils/_internal/_stack.py
new file mode 100644
index 00000000000..00ca51d820b
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_stack.py
@@ -0,0 +1,22 @@
+from __future__ import annotations as _annotations
+
+import inspect as _inspect
+import typing as _t
+
+
+def caller_frame() -> _inspect.FrameInfo | None:
+ """Return the caller stack frame, skipping any marked with the `_skip_stackwalk` local."""
+ _skip_stackwalk = True
+
+ return next(iter_stack(), None)
+
+
+def iter_stack() -> _t.Generator[_inspect.FrameInfo]:
+ """Iterate over stack frames, skipping any marked with the `_skip_stackwalk` local."""
+ _skip_stackwalk = True
+
+ for frame_info in _inspect.stack():
+ if '_skip_stackwalk' in frame_info.frame.f_locals:
+ continue
+
+ yield frame_info
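+
+# Illustrative use: infrastructure code sets a `_skip_stackwalk` local so that
+# caller_frame() attributes activity to user code rather than to the helper itself:
+#
+#   def helper():
+#       _skip_stackwalk = True
+#       return caller_frame()  # reports helper()'s caller, not helper() itself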
diff --git a/lib/ansible/module_utils/_internal/_testing.py b/lib/ansible/module_utils/_internal/_testing.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/lib/ansible/module_utils/_internal/_text_utils.py b/lib/ansible/module_utils/_internal/_text_utils.py
new file mode 100644
index 00000000000..aefad6bdbf6
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_text_utils.py
@@ -0,0 +1,6 @@
+from __future__ import annotations as _annotations
+
+
+def concat_message(left: str, right: str) -> str:
+ """Normalize `left` by removing trailing punctuation and spaces before appending new punctuation and `right`."""
+ return f'{left.rstrip(". ")}: {right}'
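+
+# e.g. concat_message('Task failed.', 'disk full') -> 'Task failed: disk full'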
diff --git a/lib/ansible/module_utils/_internal/_traceback.py b/lib/ansible/module_utils/_internal/_traceback.py
new file mode 100644
index 00000000000..a5fdc45afe0
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_traceback.py
@@ -0,0 +1,92 @@
+# Copyright (c) 2024 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+"""Internal utility code for supporting traceback reporting."""
+
+from __future__ import annotations
+
+import enum
+import traceback
+
+from . import _stack
+
+
+class TracebackEvent(enum.Enum):
+    """The events for which tracebacks can be enabled."""
+
+    ERROR = enum.auto()
+    WARNING = enum.auto()
+    DEPRECATED = enum.auto()
+    DEPRECATED_VALUE = enum.auto()  # implies DEPRECATED
+
+
+def traceback_for() -> list[str]:
+    """Return a list of traceback event names (not enums) which are enabled."""
+    return [value.name.lower() for value in TracebackEvent if is_traceback_enabled(value)]
+
+
+def is_traceback_enabled(event: TracebackEvent) -> bool:
+    """Return True if tracebacks are enabled for the specified event, otherwise return False."""
+    return _is_traceback_enabled(event)
+
+
+def maybe_capture_traceback(msg: str, event: TracebackEvent) -> str | None:
+    """
+    Optionally capture a traceback for the current call stack, formatted as a string, if the specified traceback event is enabled.
+    Frames marked with the `_skip_stackwalk` local are omitted.
+    """
+    _skip_stackwalk = True
+
+    if not is_traceback_enabled(event):
+        return None
+
+    tb_lines = []
+
+    if frame_info := _stack.caller_frame():
+        # DTFIX-FUTURE: rewrite target-side tracebacks to point at controller-side paths?
+        tb_lines.append('Traceback (most recent call last):\n')
+        tb_lines.extend(traceback.format_stack(frame_info.frame))
+        tb_lines.append(f'Message: {msg}\n')
+    else:
+        tb_lines.append('(frame not found)\n')  # pragma: nocover
+
+    return ''.join(tb_lines)
+
+
+def maybe_extract_traceback(exception: BaseException, event: TracebackEvent) -> str | None:
+    """Optionally extract a formatted traceback from the given exception, if the specified traceback event is enabled."""
+
+    if not is_traceback_enabled(event):
+        return None
+
+    # deprecated: description='use the single-arg version of format_traceback' python_version='3.9'
+    tb_lines = traceback.format_exception(type(exception), exception, exception.__traceback__)
+
+    return ''.join(tb_lines)
+
+
+_module_tracebacks_enabled_events: frozenset[TracebackEvent] | None = None
+"""Cached enabled TracebackEvent values extracted from `_ansible_tracebacks_for` module arg."""
+
+
+def _is_module_traceback_enabled(event: TracebackEvent) -> bool:
+    """Module utility function to lazily load traceback config and determine if traceback collection is enabled for the specified event."""
+    global _module_tracebacks_enabled_events
+
+    if _module_tracebacks_enabled_events is None:
+        try:
+            # Suboptimal error handling, but since import order can matter, and this is a critical error path, better to fail silently
+            # than to mask the triggering error by issuing a new error/warning here.
+            from ..basic import _PARSED_MODULE_ARGS
+
+            _module_tracebacks_enabled_events = frozenset(
+                TracebackEvent[value.upper()] for value in _PARSED_MODULE_ARGS.get('_ansible_tracebacks_for', [])
+            )  # type: ignore[union-attr]
+        except BaseException:
+            return True  # if things failed early enough that we can't figure this out, assume we want a traceback for troubleshooting
+
+    return event in _module_tracebacks_enabled_events
+
+
+_is_traceback_enabled = _is_module_traceback_enabled
+"""Callable to determine if tracebacks are enabled. Overridden on the controller by display. Use `is_traceback_enabled` instead of calling this directly."""
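
Note: a sketch of the intended call pattern (the caller is hypothetical; whether a traceback string is produced depends entirely on which `TracebackEvent` values are enabled):

    def build_warning(msg: str) -> dict:
        # formatted_traceback is None when WARNING tracebacks are disabled
        return {
            'msg': msg,
            'formatted_traceback': maybe_capture_traceback(msg, TracebackEvent.WARNING),
        }
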
diff --git a/lib/ansible/module_utils/_internal/_validation.py b/lib/ansible/module_utils/_internal/_validation.py
new file mode 100644
index 00000000000..d6f65052fb1
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_validation.py
@@ -0,0 +1,14 @@
+from __future__ import annotations
+
+import keyword
+
+
+def validate_collection_name(collection_name: object, name: str = 'collection_name') -> None:
+    """Validate a collection name."""
+    if not isinstance(collection_name, str):
+        raise TypeError(f"{name} must be {str} instead of {type(collection_name)}")
+
+    parts = collection_name.split('.')
+
+    if len(parts) != 2 or not all(part.isidentifier() and not keyword.iskeyword(part) for part in parts):
+        raise ValueError(f"{name} must consist of two non-keyword identifiers separated by '.'")
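
Note: accepted and rejected shapes, for reference:

    validate_collection_name('community.general')  # ok: exactly namespace.name
    validate_collection_name('ns.import')          # ValueError: 'import' is a Python keyword
    validate_collection_name('ns.name.extra')      # ValueError: not exactly two parts
    validate_collection_name(b'ns.name')           # TypeError: not a str
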
diff --git a/lib/ansible/module_utils/api.py b/lib/ansible/module_utils/api.py
index 2415c38a839..f8023824ee3 100644
--- a/lib/ansible/module_utils/api.py
+++ b/lib/ansible/module_utils/api.py
@@ -31,8 +31,7 @@ import itertools
import secrets
import sys
import time
-
-import ansible.module_utils.compat.typing as t
+import typing as t
def rate_limit_argument_spec(spec=None):
diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
index d3420c0980c..b6104396ded 100644
--- a/lib/ansible/module_utils/basic.py
+++ b/lib/ansible/module_utils/basic.py
@@ -4,6 +4,7 @@
from __future__ import annotations
+import copy
import json
import sys
import typing as t
@@ -25,6 +26,7 @@ if sys.version_info < _PY_MIN:
import __main__
import atexit
+import dataclasses as _dataclasses
import errno
import grp
import fcntl
@@ -51,6 +53,8 @@ try:
except ImportError:
HAS_SYSLOG = False
+_UNSET = t.cast(t.Any, object())
+
try:
from systemd import journal, daemon as systemd_daemon
# Makes sure that systemd.journal has method sendv()
@@ -71,8 +75,12 @@ except ImportError:
# Python2 & 3 way to get NoneType
NoneType = type(None)
-from ._text import to_native, to_bytes, to_text
-from ansible.module_utils.common.text.converters import (
+from ._internal import _traceback, _errors, _debugging, _deprecator, _messages
+
+from .common.text.converters import (
+ to_native,
+ to_bytes,
+ to_text,
jsonify,
container_to_bytes as json_dict_unicode_to_bytes,
container_to_text as json_dict_bytes_to_unicode,
@@ -87,6 +95,8 @@ from ansible.module_utils.common.text.formatters import (
SIZE_RANGES,
)
+from ansible.module_utils.common import json as _common_json
+
import hashlib
@@ -111,6 +121,8 @@ def _get_available_hash_algorithms():
AVAILABLE_HASH_ALGORITHMS = _get_available_hash_algorithms()
+from ansible.module_utils.common import json as _json
+
from ansible.module_utils.six.moves.collections_abc import (
KeysView,
Mapping, MutableMapping,
@@ -152,8 +164,9 @@ from ansible.module_utils.common._utils import get_all_subclasses as _get_all_su
from ansible.module_utils.parsing.convert_bool import BOOLEANS, BOOLEANS_FALSE, BOOLEANS_TRUE, boolean
from ansible.module_utils.common.warnings import (
deprecate,
- get_deprecation_messages,
- get_warning_messages,
+ error_as_warning,
+ get_deprecations,
+ get_warnings,
warn,
)
@@ -169,7 +182,9 @@ imap = map
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
-_ANSIBLE_ARGS = None
+_ANSIBLE_ARGS: bytes | None = None
+_ANSIBLE_PROFILE: str | None = None
+_PARSED_MODULE_ARGS: dict[str, t.Any] | None = None
FILE_COMMON_ARGUMENTS = dict(
@@ -307,40 +322,31 @@ def _load_params():
to call this function and consume its outputs than to implement the logic
inside it as a copy in your own code.
"""
- global _ANSIBLE_ARGS
- if _ANSIBLE_ARGS is not None:
- buffer = _ANSIBLE_ARGS
- else:
- # debug overrides to read args from file or cmdline
+ global _ANSIBLE_ARGS, _ANSIBLE_PROFILE
- # Avoid tracebacks when locale is non-utf8
- # We control the args and we pass them as utf8
- if len(sys.argv) > 1:
- if os.path.isfile(sys.argv[1]):
- with open(sys.argv[1], 'rb') as fd:
- buffer = fd.read()
- else:
- buffer = sys.argv[1].encode('utf-8', errors='surrogateescape')
- # default case, read from stdin
- else:
- buffer = sys.stdin.buffer.read()
- _ANSIBLE_ARGS = buffer
+ if _ANSIBLE_ARGS is None:
+ _ANSIBLE_ARGS, _ANSIBLE_PROFILE = _debugging.load_params()
- try:
- params = json.loads(buffer.decode('utf-8'))
- except ValueError:
- # This helper is used too early for fail_json to work.
- print('\n{"msg": "Error: Module unable to decode stdin/parameters as valid JSON. Unable to parse what parameters were passed", "failed": true}')
- sys.exit(1)
+ buffer = _ANSIBLE_ARGS
+ profile = _ANSIBLE_PROFILE
+
+ if not profile:
+ raise Exception("No serialization profile was specified.")
try:
- return params['ANSIBLE_MODULE_ARGS']
- except KeyError:
- # This helper does not have access to fail_json so we have to print
- # json output on our own.
- print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in JSON data from stdin. Unable to figure out what parameters were passed", '
- '"failed": true}')
- sys.exit(1)
+ decoder = _json.get_module_decoder(profile, _json.Direction.CONTROLLER_TO_MODULE)
+ params = json.loads(buffer.decode(), cls=decoder)
+ except Exception as ex:
+ raise Exception("Failed to decode JSON module parameters.") from ex
+
+ if (ansible_module_args := params.get('ANSIBLE_MODULE_ARGS', _UNSET)) is _UNSET:
+ raise Exception("ANSIBLE_MODULE_ARGS not provided.")
+
+ global _PARSED_MODULE_ARGS
+
+ _PARSED_MODULE_ARGS = copy.deepcopy(ansible_module_args) # AnsibleModule mutates the returned dict, so a copy is needed
+
+ return ansible_module_args
def missing_required_lib(library, reason=None, url=None):
@@ -394,7 +400,6 @@ class AnsibleModule(object):
# run_command invocation
self.run_command_environ_update = {}
self._clean = {}
- self._string_conversion_action = ''
self.aliases = {}
self._legal_inputs = []
@@ -475,9 +480,11 @@ class AnsibleModule(object):
if basedir is not None and not os.path.exists(basedir):
try:
os.makedirs(basedir, mode=0o700)
- except (OSError, IOError) as e:
- self.warn("Unable to use %s as temporary directory, "
- "failing back to system: %s" % (basedir, to_native(e)))
+ except OSError as ex:
+ self.error_as_warning(
+ msg=f"Unable to use {basedir!r} as temporary directory, falling back to system default.",
+ exception=ex,
+ )
basedir = None
else:
self.warn("Module remote_tmp %s did not exist and was "
@@ -489,31 +496,71 @@ class AnsibleModule(object):
basefile = "ansible-moduletmp-%s-" % time.time()
try:
tmpdir = tempfile.mkdtemp(prefix=basefile, dir=basedir)
- except (OSError, IOError) as e:
- self.fail_json(
- msg="Failed to create remote module tmp path at dir %s "
- "with prefix %s: %s" % (basedir, basefile, to_native(e))
- )
+ except OSError as ex:
+ raise Exception(
+ f"Failed to create remote module tmp path at dir {basedir!r} "
+ f"with prefix {basefile!r}.",
+ ) from ex
if not self._keep_remote_files:
atexit.register(shutil.rmtree, tmpdir)
self._tmpdir = tmpdir
return self._tmpdir
- def warn(self, warning):
- warn(warning)
- self.log('[WARNING] %s' % warning)
-
- def deprecate(self, msg, version=None, date=None, collection_name=None):
- if version is not None and date is not None:
- raise AssertionError("implementation error -- version and date must not both be set")
- deprecate(msg, version=version, date=date, collection_name=collection_name)
- # For compatibility, we accept that neither version nor date is set,
- # and treat that the same as if version would not have been set
- if date is not None:
- self.log('[DEPRECATION WARNING] %s %s' % (msg, date))
- else:
- self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
+ def warn(
+ self,
+ warning: str,
+ *,
+ help_text: str | None = None,
+ ) -> None:
+ _skip_stackwalk = True
+
+ warn(
+ warning=warning,
+ help_text=help_text,
+ )
+
+ def error_as_warning(
+ self,
+ msg: str | None,
+ exception: BaseException,
+ *,
+ help_text: str | None = None,
+ ) -> None:
+ """Display an exception as a warning."""
+ _skip_stackwalk = True
+
+ error_as_warning(
+ msg=msg,
+ exception=exception,
+ help_text=help_text,
+ )
+
+ def deprecate(
+ self,
+ msg: str,
+ version: str | None = None,
+ date: str | None = None,
+ collection_name: str | None = None,
+ *,
+ deprecator: _messages.PluginInfo | None = None,
+ help_text: str | None = None,
+ ) -> None:
+ """
+ Record a deprecation warning to be returned with the module result.
+ Most callers do not need to provide `collection_name` or `deprecator`; when one is needed, provide only one of the two.
+ Specify `version` or `date`, but not both.
+ If `date` is a string, it must be in the form `YYYY-MM-DD`.
+ """
+ _skip_stackwalk = True
+
+ deprecate( # pylint: disable=ansible-deprecated-date-not-permitted,ansible-deprecated-unnecessary-collection-name
+ msg=msg,
+ version=version,
+ date=date,
+ deprecator=_deprecator.get_best_deprecator(deprecator=deprecator, collection_name=collection_name),
+ help_text=help_text,
+ )
def load_file_common_arguments(self, params, path=None):
"""
@@ -613,11 +660,8 @@ class AnsibleModule(object):
return context
try:
ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
- except OSError as e:
- if e.errno == errno.ENOENT:
- self.fail_json(path=path, msg='path %s does not exist' % path)
- else:
- self.fail_json(path=path, msg='failed to retrieve selinux context')
+ except OSError as ex:
+ self.fail_json(path=path, msg='Failed to retrieve selinux context.', exception=ex)
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
@@ -757,9 +801,9 @@ class AnsibleModule(object):
return True
try:
os.lchown(b_path, uid, -1)
- except (IOError, OSError) as e:
+ except OSError as ex:
path = to_text(b_path)
- self.fail_json(path=path, msg='chown failed: %s' % (to_text(e)))
+ self.fail_json(path=path, msg='chown failed', exception=ex)
changed = True
return changed
@@ -879,8 +923,7 @@ class AnsibleModule(object):
raise
except Exception as e:
path = to_text(b_path)
- self.fail_json(path=path, msg='chmod failed', details=to_native(e),
- exception=traceback.format_exc())
+ self.fail_json(path=path, msg='chmod failed', details=to_native(e))
path_stat = os.lstat(b_path)
new_mode = stat.S_IMODE(path_stat.st_mode)
@@ -928,8 +971,7 @@ class AnsibleModule(object):
if rc != 0 or err:
raise Exception("Error while setting attributes: %s" % (out + err))
except Exception as e:
- self.fail_json(path=to_text(b_path), msg='chattr failed',
- details=to_native(e), exception=traceback.format_exc())
+ self.fail_json(path=to_text(b_path), msg='chattr failed', details=to_native(e))
return changed
def get_file_attributes(self, path, include_version=True):
@@ -1174,8 +1216,7 @@ class AnsibleModule(object):
os.environ['LC_ALL'] = best_locale
os.environ['LC_MESSAGES'] = best_locale
except Exception as e:
- self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" %
- to_native(e), exception=traceback.format_exc())
+ self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % to_native(e))
def _set_internal_properties(self, argument_spec=None, module_parameters=None):
if argument_spec is None:
@@ -1225,7 +1266,6 @@ class AnsibleModule(object):
msg='Failed to log to syslog (%s). To proceed anyway, '
'disable syslog logging by setting no_target_syslog '
'to True in your Ansible config.' % to_native(e),
- exception=traceback.format_exc(),
msg_to_log=msg,
)
@@ -1289,7 +1329,7 @@ class AnsibleModule(object):
else:
journal.send(MESSAGE=u"%s %s" % (module, journal_msg),
**dict(journal_args))
- except IOError:
+ except OSError:
# fall back to syslog since logging to journal failed
self._log_to_syslog(journal_msg)
else:
@@ -1379,8 +1419,15 @@ class AnsibleModule(object):
self.fail_json(msg=to_native(e))
def jsonify(self, data):
+ # deprecated: description='deprecate AnsibleModule.jsonify()' core_version='2.23'
+ # deprecate(
+ # msg="The `AnsibleModule.jsonify` method is deprecated.",
+ # version="2.27",
+ # # help_text="", # DTFIX-FUTURE: fill in this help text
+ # )
+
try:
- return jsonify(data)
+ return json.dumps(data, cls=_common_json._get_legacy_encoder())
except UnicodeError as e:
self.fail_json(msg=to_text(e))
@@ -1396,6 +1443,7 @@ class AnsibleModule(object):
self.cleanup(path)
def _return_formatted(self, kwargs):
+ _skip_stackwalk = True
self.add_path_info(kwargs)
@@ -1403,30 +1451,58 @@ class AnsibleModule(object):
kwargs['invocation'] = {'module_args': self.params}
if 'warnings' in kwargs:
+ self.deprecate( # pylint: disable=ansible-deprecated-unnecessary-collection-name
+ msg='Passing `warnings` to `exit_json` or `fail_json` is deprecated.',
+ version='2.23',
+ help_text='Use `AnsibleModule.warn` instead.',
+ deprecator=_deprecator.ANSIBLE_CORE_DEPRECATOR,
+ )
+
if isinstance(kwargs['warnings'], list):
for w in kwargs['warnings']:
self.warn(w)
else:
self.warn(kwargs['warnings'])
- warnings = get_warning_messages()
+ warnings = get_warnings()
if warnings:
kwargs['warnings'] = warnings
if 'deprecations' in kwargs:
+ self.deprecate( # pylint: disable=ansible-deprecated-unnecessary-collection-name
+ msg='Passing `deprecations` to `exit_json` or `fail_json` is deprecated.',
+ version='2.23',
+ help_text='Use `AnsibleModule.deprecate` instead.',
+ deprecator=_deprecator.ANSIBLE_CORE_DEPRECATOR,
+ )
+
if isinstance(kwargs['deprecations'], list):
for d in kwargs['deprecations']:
- if isinstance(d, SEQUENCETYPE) and len(d) == 2:
- self.deprecate(d[0], version=d[1])
+ if isinstance(d, (KeysView, Sequence)) and len(d) == 2:
+ self.deprecate( # pylint: disable=ansible-deprecated-unnecessary-collection-name,ansible-invalid-deprecated-version
+ msg=d[0],
+ version=d[1],
+ deprecator=_deprecator.get_best_deprecator(),
+ )
elif isinstance(d, Mapping):
- self.deprecate(d['msg'], version=d.get('version'), date=d.get('date'),
- collection_name=d.get('collection_name'))
+ self.deprecate( # pylint: disable=ansible-deprecated-date-not-permitted,ansible-deprecated-unnecessary-collection-name
+ msg=d['msg'],
+ version=d.get('version'),
+ date=d.get('date'),
+ deprecator=_deprecator.get_best_deprecator(collection_name=d.get('collection_name')),
+ )
else:
- self.deprecate(d) # pylint: disable=ansible-deprecated-no-version
+ self.deprecate( # pylint: disable=ansible-deprecated-unnecessary-collection-name,ansible-deprecated-no-version
+ msg=d,
+ deprecator=_deprecator.get_best_deprecator(),
+ )
else:
- self.deprecate(kwargs['deprecations']) # pylint: disable=ansible-deprecated-no-version
+ self.deprecate( # pylint: disable=ansible-deprecated-unnecessary-collection-name,ansible-deprecated-no-version
+ msg=kwargs['deprecations'],
+ deprecator=_deprecator.get_best_deprecator(),
+ )
- deprecations = get_deprecation_messages()
+ deprecations = get_deprecations()
if deprecations:
kwargs['deprecations'] = deprecations
@@ -1439,28 +1515,78 @@ class AnsibleModule(object):
# return preserved
kwargs.update(preserved)
- print('\n%s' % self.jsonify(kwargs))
+ encoder = _json.get_module_encoder(_ANSIBLE_PROFILE, _json.Direction.MODULE_TO_CONTROLLER)
+ print('\n%s' % json.dumps(kwargs, cls=encoder))
def exit_json(self, **kwargs) -> t.NoReturn:
""" return from the module, without error """
+ _skip_stackwalk = True
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(0)
- def fail_json(self, msg, **kwargs) -> t.NoReturn:
- """ return from the module, with an error message """
+ def fail_json(self, msg: str, *, exception: BaseException | str | None = _UNSET, **kwargs) -> t.NoReturn:
+ """
+ Return from the module with an error message and optional exception/traceback detail.
+ A traceback will only be included in the result if error traceback capturing has been enabled.
+
+ When `exception` is an exception object, its message chain will be automatically combined with `msg` to create the final error message.
+ The message chain includes the exception's message as well as messages from any __cause__ exceptions.
+ The traceback from `exception` will be used for the formatted traceback.
+
+ When `exception` is a string, it will be used as the formatted traceback.
+
+ When `exception` is set to `None`, the current call stack will be used for the formatted traceback.
+
+ When `exception` is not specified, a formatted traceback will be retrieved from the current exception.
+ If no exception is pending, the current call stack will be used instead.
+ """
+ _skip_stackwalk = True
+
+ msg = str(msg) # coerce to str instead of raising an error due to an invalid type
- kwargs['failed'] = True
- kwargs['msg'] = msg
+ kwargs.update(
+ failed=True,
+ msg=msg,
+ )
+
+ if isinstance(exception, BaseException):
+ # Include a `_messages.Event` in the result.
+ # The `msg` is included in the chain to ensure it is not lost when looking only at `exception` from the result.
+
+ kwargs.update(
+ exception=_messages.ErrorSummary(
+ event=_messages.Event(
+ msg=msg,
+ formatted_traceback=_traceback.maybe_capture_traceback(msg, _traceback.TracebackEvent.ERROR),
+ chain=_messages.EventChain(
+ msg_reason=_errors.MSG_REASON_DIRECT_CAUSE,
+ traceback_reason="The above exception was the direct cause of the following error:",
+ event=_errors.EventFactory.from_exception(exception, _traceback.is_traceback_enabled(_traceback.TracebackEvent.ERROR)),
+ ),
+ ),
+ ),
+ )
+ elif _traceback.is_traceback_enabled(_traceback.TracebackEvent.ERROR):
+ # Include only a formatted traceback string in the result.
+ # The controller will combine this with `msg` to create an `_messages.ErrorSummary`.
+
+ formatted_traceback: str | None
+
+ if isinstance(exception, str):
+ formatted_traceback = exception
+ elif exception is _UNSET and (current_exception := t.cast(t.Optional[BaseException], sys.exc_info()[1])):
+ formatted_traceback = _traceback.maybe_extract_traceback(current_exception, _traceback.TracebackEvent.ERROR)
+ else:
+ formatted_traceback = _traceback.maybe_capture_traceback(msg, _traceback.TracebackEvent.ERROR)
- # Add traceback if debug or high verbosity and it is missing
- # NOTE: Badly named as exception, it really always has been a traceback
- if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3):
- kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2]))
+ if formatted_traceback:
+ kwargs.update(exception=formatted_traceback)
self.do_cleanup_files()
self._return_formatted(kwargs)
+
sys.exit(1)
def fail_on_missing_params(self, required_params=None):
@@ -1531,10 +1657,11 @@ class AnsibleModule(object):
ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
- try:
- self.preserved_copy(fn, backupdest)
- except (shutil.Error, IOError) as e:
- self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, to_native(e)))
+ if not self.check_mode:
+ try:
+ self.preserved_copy(fn, backupdest)
+ except (shutil.Error, IOError) as ex:
+ raise Exception(f'Could not make backup of {fn!r} to {backupdest!r}.') from ex
return backupdest
@@ -1608,29 +1735,25 @@ class AnsibleModule(object):
try:
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(b_src, b_dest)
- except (IOError, OSError) as e:
- if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
+ except OSError as ex:
+ if ex.errno in (errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY):
# only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied),
# 16 (device busy), and 26 (text file busy), which can occur on vagrant synced folders and other 'exotic' non-POSIX file systems
- self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, to_native(e)), exception=traceback.format_exc())
- else:
# Use bytes here. In the shippable CI, this fails with
# a UnicodeError with surrogateescape'd strings for an unknown
# reason (doesn't happen in a local Ubuntu16.04 VM)
b_dest_dir = os.path.dirname(b_dest)
b_suffix = os.path.basename(b_dest)
- error_msg = None
tmp_dest_name = None
try:
tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=b'.ansible_tmp', dir=b_dest_dir, suffix=b_suffix)
- except (OSError, IOError) as e:
- error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), to_native(e))
- finally:
- if error_msg:
- if unsafe_writes:
- self._unsafe_writes(b_src, b_dest)
- else:
- self.fail_json(msg=error_msg, exception=traceback.format_exc())
+ except OSError as ex:
+ if unsafe_writes:
+ self._unsafe_writes(b_src, b_dest)
+ else:
+ raise Exception(
+ f'The destination directory {os.path.dirname(dest)!r} is not writable by the current user.'
+ ) from ex
if tmp_dest_name:
b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
@@ -1659,24 +1782,27 @@ class AnsibleModule(object):
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
os.utime(b_tmp_dest_name, times=(time.time(), time.time()))
- except OSError as e:
- if e.errno != errno.EPERM:
+ except OSError as ex:
+ if ex.errno != errno.EPERM:
raise
try:
os.rename(b_tmp_dest_name, b_dest)
- except (shutil.Error, OSError, IOError) as e:
- if unsafe_writes and e.errno == errno.EBUSY:
+ except (shutil.Error, OSError) as ex:
+ if unsafe_writes and ex.errno == errno.EBUSY:
self._unsafe_writes(b_tmp_dest_name, b_dest)
else:
- self.fail_json(msg='Unable to make %s into to %s, failed final rename from %s: %s' %
- (src, dest, b_tmp_dest_name, to_native(e)), exception=traceback.format_exc())
- except (shutil.Error, OSError, IOError) as e:
+ raise Exception(
+ f'Unable to make {src!r} into {dest!r}, failed final rename from {to_text(b_tmp_dest_name)!r}.'
+ ) from ex
+ except (shutil.Error, OSError) as ex:
if unsafe_writes:
self._unsafe_writes(b_src, b_dest)
else:
- self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, to_native(e)), exception=traceback.format_exc())
+ raise Exception(f'Failed to replace {dest!r} with {src!r}.') from ex
finally:
self.cleanup(b_tmp_dest_name)
+ else:
+ raise Exception(f'Could not replace {dest!r} with {src!r}.') from ex
if creating:
# make sure the file has the correct permissions
@@ -1703,19 +1829,11 @@ class AnsibleModule(object):
# sadly there are some situations where we cannot ensure atomicity, but only if
# the user insists and we get the appropriate error we update the file unsafely
try:
- out_dest = in_src = None
- try:
- out_dest = open(dest, 'wb')
- in_src = open(src, 'rb')
- shutil.copyfileobj(in_src, out_dest)
- finally: # assuring closed files in 2.4 compatible way
- if out_dest:
- out_dest.close()
- if in_src:
- in_src.close()
- except (shutil.Error, OSError, IOError) as e:
- self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, to_native(e)),
- exception=traceback.format_exc())
+ with open(dest, 'wb') as out_dest:
+ with open(src, 'rb') as in_src:
+ shutil.copyfileobj(in_src, out_dest)
+ except (shutil.Error, OSError) as ex:
+ raise Exception(f'Could not write data to file {dest!r} from {src!r}.') from ex
def _clean_args(self, args):
@@ -1782,18 +1900,18 @@ class AnsibleModule(object):
the execution to hang (especially if no input data is specified)
:kw environ_update: dictionary to *update* environ variables with
:kw umask: Umask to be used when running the command. Default None
- :kw encoding: Since we return native strings, on python3 we need to
+ :kw encoding: Since we return strings, we need to
know the encoding to use to transform from bytes to text. If you
want to always get bytes back, use encoding=None. The default is
"utf-8". This does not affect transformation of strings given as
args.
- :kw errors: Since we return native strings, on python3 we need to
+ :kw errors: Since we return strings, we need to
transform stdout and stderr from bytes to text. If the bytes are
undecodable in the ``encoding`` specified, then use this error
handler to deal with them. The default is ``surrogate_or_strict``
which means that the bytes will be decoded using the
surrogateescape error handler if available (available on all
- python3 versions we support) otherwise a UnicodeError traceback
+ Python versions we support) otherwise a UnicodeError traceback
will be raised. This does not affect transformations of strings
given as args.
:kw expand_user_and_vars: When ``use_unsafe_shell=False`` this argument
@@ -1801,10 +1919,8 @@ class AnsibleModule(object):
are expanded before running the command. When ``True`` a string such as
``$SHELL`` will be expanded regardless of escaping. When ``False`` and
``use_unsafe_shell=False`` no path or variable expansion will be done.
- :kw pass_fds: When running on Python 3 this argument
- dictates which file descriptors should be passed
- to an underlying ``Popen`` constructor. On Python 2, this will
- set ``close_fds`` to False.
+ :kw pass_fds: This argument dictates which file descriptors should be passed
+ to an underlying ``Popen`` constructor.
:kw before_communicate_callback: This function will be called
after the ``Popen`` object has been created,
but before communicating with the process.
@@ -1815,11 +1931,10 @@ class AnsibleModule(object):
:kw handle_exceptions: This flag indicates whether an exception will
be handled inline and issue a failed_json or if the caller should
handle it.
- :returns: A 3-tuple of return code (integer), stdout (native string),
- and stderr (native string). On python2, stdout and stderr are both
- byte strings. On python3, stdout and stderr are text strings converted
- according to the encoding and errors parameters. If you want byte
- strings on python3, use encoding=None to turn decoding to text off.
+ :returns: A 3-tuple of return code (int), stdout (str), and stderr (str).
+ stdout and stderr are text strings converted according to the encoding
+ and errors parameters. If you want byte strings, use encoding=None
+ to disable decoding to text.
"""
# used by clean args later on
self._clean = None
@@ -2001,18 +2116,16 @@ class AnsibleModule(object):
selector.close()
rc = cmd.returncode
- except (OSError, IOError) as e:
- self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(e)))
+ except OSError as ex:
if handle_exceptions:
- self.fail_json(rc=e.errno, stdout=b'', stderr=b'', msg=to_native(e), cmd=self._clean_args(args))
+ self.fail_json(rc=ex.errno, stdout='', stderr='', msg="Error executing command.", cmd=self._clean_args(args), exception=ex)
else:
- raise e
- except Exception as e:
- self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(traceback.format_exc())))
+ raise
+ except Exception as ex:
if handle_exceptions:
- self.fail_json(rc=257, stdout=b'', stderr=b'', msg=to_native(e), exception=traceback.format_exc(), cmd=self._clean_args(args))
+ self.fail_json(rc=257, stdout='', stderr='', msg="Error executing command.", cmd=self._clean_args(args), exception=ex)
else:
- raise e
+ raise
if rc != 0 and check_rc:
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
@@ -2066,13 +2179,7 @@ def get_module_path():
def __getattr__(importable_name):
"""Inject import-time deprecation warnings."""
- if importable_name == 'get_exception':
- from ansible.module_utils.pycompat24 import get_exception
- importable = get_exception
- elif importable_name in {'literal_eval', '_literal_eval'}:
- from ast import literal_eval
- importable = literal_eval
- elif importable_name == 'datetime':
+ if importable_name == 'datetime':
import datetime
importable = datetime
elif importable_name == 'signal':
@@ -2089,7 +2196,7 @@ def __getattr__(importable_name):
importable = repeat
elif importable_name in {
'PY2', 'PY3', 'b', 'binary_type', 'integer_types',
- 'iteritems', 'string_types', 'test_type'
+ 'iteritems', 'string_types', 'text_type',
}:
import importlib
importable = getattr(
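
Note: a sketch of how module code is expected to use the new `fail_json` signature (`module` is an `AnsibleModule` instance; the operation and `victim_path` are hypothetical):

    try:
        os.remove(victim_path)  # hypothetical operation
    except OSError as ex:
        # passing the exception object lets fail_json build the message chain and
        # capture a traceback only when error tracebacks are enabled
        module.fail_json(msg=f'Failed to remove {victim_path!r}.', exception=ex)
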
diff --git a/lib/ansible/module_utils/common/_utils.py b/lib/ansible/module_utils/common/_utils.py
index deab1fcdf9c..51af1e69e16 100644
--- a/lib/ansible/module_utils/common/_utils.py
+++ b/lib/ansible/module_utils/common/_utils.py
@@ -1,38 +1,34 @@
# Copyright (c) 2018, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-
"""
Modules in _utils are waiting to find a better home. If you need to use them, be prepared for them
to move to a different location in the future.
"""
+
from __future__ import annotations
+import inspect
+import typing as t
+
+_Type = t.TypeVar('_Type')
+
+
+def get_all_subclasses(cls: type[_Type], *, include_abstract: bool = True, consider_self: bool = False) -> set[type[_Type]]:
+    """Recursively find all subclasses of a given type, including abstract classes by default."""
+    subclasses: set[type[_Type]] = {cls} if consider_self else set()
+    queue: list[type[_Type]] = [cls]
+
+    while queue:
+        parent = queue.pop()
+
+        for child in parent.__subclasses__():
+            if child in subclasses:
+                continue
+
+            queue.append(child)
+            subclasses.add(child)
+
+    if not include_abstract:
+        subclasses = {sc for sc in subclasses if not inspect.isabstract(sc)}
-def get_all_subclasses(cls):
- """
- Recursively search and find all subclasses of a given class
-
- :arg cls: A python class
- :rtype: set
- :returns: The set of python classes which are the subclasses of `cls`.
-
- In python, you can use a class's :py:meth:`__subclasses__` method to determine what subclasses
- of a class exist. However, `__subclasses__` only goes one level deep. This function searches
- each child class's `__subclasses__` method to find all of the descendent classes. It then
- returns an iterable of the descendent classes.
- """
- # Retrieve direct subclasses
- subclasses = set(cls.__subclasses__())
- to_visit = list(subclasses)
- # Then visit all subclasses
- while to_visit:
- for sc in to_visit:
- # The current class is now visited, so remove it from list
- to_visit.remove(sc)
- # Appending all subclasses to visit and keep a reference of available class
- for ssc in sc.__subclasses__():
- if ssc not in subclasses:
- to_visit.append(ssc)
- subclasses.add(ssc)
return subclasses
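
Note: the new keyword arguments, sketched:

    import abc

    class Base(abc.ABC):
        @abc.abstractmethod
        def run(self): ...

    class Partial(Base):  # still abstract: run() is not implemented
        pass

    class Impl(Partial):
        def run(self): return 1

    get_all_subclasses(Base)                          # {Partial, Impl}
    get_all_subclasses(Base, include_abstract=False)  # {Impl}
    get_all_subclasses(Base, consider_self=True)      # {Base, Partial, Impl}
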
diff --git a/lib/ansible/module_utils/common/arg_spec.py b/lib/ansible/module_utils/common/arg_spec.py
index 37019e7df33..5044f58a8cc 100644
--- a/lib/ansible/module_utils/common/arg_spec.py
+++ b/lib/ansible/module_utils/common/arg_spec.py
@@ -6,6 +6,7 @@ from __future__ import annotations
from copy import deepcopy
+from ansible.module_utils.datatag import deprecator_from_collection_name
from ansible.module_utils.common.parameters import (
_ADDITIONAL_CHECKS,
_get_legal_inputs,
@@ -300,9 +301,13 @@ class ModuleArgumentSpecValidator(ArgumentSpecValidator):
result = super(ModuleArgumentSpecValidator, self).validate(parameters)
for d in result._deprecations:
- deprecate(d['msg'],
- version=d.get('version'), date=d.get('date'),
- collection_name=d.get('collection_name'))
+ # DTFIX-FUTURE: pass an actual deprecator instead of one derived from collection_name
+ deprecate( # pylint: disable=ansible-deprecated-date-not-permitted,ansible-deprecated-unnecessary-collection-name
+ msg=d['msg'],
+ version=d.get('version'),
+ date=d.get('date'),
+ deprecator=deprecator_from_collection_name(d.get('collection_name')),
+ )
for w in result._warnings:
warn('Both option {option} and its alias {alias} are set.'.format(option=w['option'], alias=w['alias']))
diff --git a/lib/ansible/module_utils/common/collections.py b/lib/ansible/module_utils/common/collections.py
index 28c53e14e2c..9f4dfb9b4d0 100644
--- a/lib/ansible/module_utils/common/collections.py
+++ b/lib/ansible/module_utils/common/collections.py
@@ -6,6 +6,7 @@
from __future__ import annotations
+from ansible.module_utils.common import warnings as _warnings
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils.six.moves.collections_abc import Hashable, Mapping, MutableMapping, Sequence # pylint: disable=unused-import
@@ -66,8 +67,7 @@ class ImmutableDict(Hashable, Mapping):
def is_string(seq):
"""Identify whether the input has a string-like type (including bytes)."""
- # AnsibleVaultEncryptedUnicode inherits from Sequence, but is expected to be a string like object
- return isinstance(seq, (text_type, binary_type)) or getattr(seq, '__ENCRYPTED__', False)
+ return isinstance(seq, (text_type, binary_type))
def is_iterable(seq, include_strings=False):
@@ -103,6 +103,11 @@ def count(seq):
code is run on Python 2.6.* where collections.Counter is not available. It should be
deprecated and replaced when support for Python < 2.7 is dropped.
"""
+ _warnings.deprecate(
+ msg="The `ansible.module_utils.common.collections.count` function is deprecated.",
+ version="2.23",
+ help_text="Use `collections.Counter` from the Python standard library instead.",
+ )
if not is_iterable(seq):
raise Exception('Argument provided is not an iterable')
counters = dict()
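
Note: the replacement suggested by the help text above, for reference:

    from collections import Counter

    Counter(['a', 'b', 'a'])  # Counter({'a': 2, 'b': 1}), equivalent to count(['a', 'b', 'a'])
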
diff --git a/lib/ansible/module_utils/common/json.py b/lib/ansible/module_utils/common/json.py
index fe65a8d701c..7a5ed2b4a92 100644
--- a/lib/ansible/module_utils/common/json.py
+++ b/lib/ansible/module_utils/common/json.py
@@ -1,84 +1,90 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2019 Ansible Project
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import annotations
-
-import json
-
-import datetime
-
-from ansible.module_utils.common.text.converters import to_text
-from ansible.module_utils.six.moves.collections_abc import Mapping
-from ansible.module_utils.common.collections import is_sequence
-
-
-def _is_unsafe(value):
- return getattr(value, '__UNSAFE__', False) and not getattr(value, '__ENCRYPTED__', False)
-
-
-def _is_vault(value):
- return getattr(value, '__ENCRYPTED__', False)
-
-
-def _preprocess_unsafe_encode(value):
- """Recursively preprocess a data structure converting instances of ``AnsibleUnsafe``
- into their JSON dict representations
-
- Used in ``AnsibleJSONEncoder.iterencode``
- """
- if _is_unsafe(value):
- value = {'__ansible_unsafe': to_text(value, errors='surrogate_or_strict', nonstring='strict')}
- elif is_sequence(value):
- value = [_preprocess_unsafe_encode(v) for v in value]
- elif isinstance(value, Mapping):
- value = dict((k, _preprocess_unsafe_encode(v)) for k, v in value.items())
-
- return value
-
-
-def json_dump(structure):
- return json.dumps(structure, cls=AnsibleJSONEncoder, sort_keys=True, indent=4)
-
-
-class AnsibleJSONEncoder(json.JSONEncoder):
- """
- Simple encoder class to deal with JSON encoding of Ansible internal types
- """
-
- def __init__(self, preprocess_unsafe=False, vault_to_text=False, **kwargs):
- self._preprocess_unsafe = preprocess_unsafe
- self._vault_to_text = vault_to_text
- super(AnsibleJSONEncoder, self).__init__(**kwargs)
-
- # NOTE: ALWAYS inform AWS/Tower when new items get added as they consume them downstream via a callback
- def default(self, o):
- if getattr(o, '__ENCRYPTED__', False):
- # vault object
- if self._vault_to_text:
- value = to_text(o, errors='surrogate_or_strict')
- else:
- value = {'__ansible_vault': to_text(o._ciphertext, errors='surrogate_or_strict', nonstring='strict')}
- elif getattr(o, '__UNSAFE__', False):
- # unsafe object, this will never be triggered, see ``AnsibleJSONEncoder.iterencode``
- value = {'__ansible_unsafe': to_text(o, errors='surrogate_or_strict', nonstring='strict')}
- elif isinstance(o, Mapping):
- # hostvars and other objects
- value = dict(o)
- elif isinstance(o, (datetime.date, datetime.datetime)):
- # date object
- value = o.isoformat()
- else:
- # use default encoder
- value = super(AnsibleJSONEncoder, self).default(o)
- return value
-
- def iterencode(self, o, **kwargs):
- """Custom iterencode, primarily design to handle encoding ``AnsibleUnsafe``
- as the ``AnsibleUnsafe`` subclasses inherit from string types and
- ``json.JSONEncoder`` does not support custom encoders for string types
- """
- if self._preprocess_unsafe:
- o = _preprocess_unsafe_encode(o)
-
- return super(AnsibleJSONEncoder, self).iterencode(o, **kwargs)
+from __future__ import annotations as _annotations
+
+import enum as _enum
+import json as _stdlib_json
+import types as _types
+
+from ansible.module_utils import _internal
+from ansible.module_utils._internal import _json
+from ansible.module_utils._internal._json import _legacy_encoder
+from ansible.module_utils._internal._json import _profiles
+from ansible.module_utils._internal._json._profiles import _tagless
+from ansible.module_utils.common import warnings as _warnings
+
+
+def __getattr__(name: str) -> object:
+    """Handle dynamic module members which are or will be deprecated."""
+    if name in ('AnsibleJSONEncoder', '_AnsibleJSONEncoder'):
+        # deprecated: description='deprecate legacy encoder' core_version='2.23'
+        # if not name.startswith('_'):  # avoid duplicate deprecation warning for imports from ajson
+        #     _warnings.deprecate(
+        #         msg="The `AnsibleJSONEncoder` type is deprecated.",
+        #         version="2.27",
+        #         help_text="Use a profile-based encoder instead.",  # DTFIX-FUTURE: improve this help text
+        #     )
+
+        return _get_legacy_encoder()
+
+    if name in ('AnsibleJSONDecoder', '_AnsibleJSONDecoder'):
+        # deprecated: description='deprecate legacy decoder' core_version='2.23'
+        # if not name.startswith('_'):  # avoid duplicate deprecation warning for imports from ajson
+        #     _warnings.deprecate(
+        #         msg="The `AnsibleJSONDecoder` type is deprecated.",
+        #         version="2.27",
+        #         help_text="Use a profile-based decoder instead.",  # DTFIX-FUTURE: improve this help text
+        #     )
+
+        return _tagless.Decoder
+
+    if name == 'json_dump':
+        _warnings.deprecate(
+            msg="The `json_dump` function is deprecated.",
+            version="2.23",
+            help_text="Use `json.dumps` with the appropriate `cls` instead.",
+        )
+
+        return _json_dump
+
+    raise AttributeError(name)
+
+
+def _get_legacy_encoder() -> type[_stdlib_json.JSONEncoder]:
+    """Compatibility hack: the previous module_utils AnsibleJSONEncoder impl did controller-side work; controller plugins require a more fully-featured impl."""
+    if _internal.is_controller:
+        return _internal.import_controller_module('ansible._internal._json._legacy_encoder').LegacyControllerJSONEncoder
+
+    return _legacy_encoder.LegacyTargetJSONEncoder
+
+
+def _json_dump(structure):
+    """JSON dumping function maintained for temporary backward compatibility."""
+    return _stdlib_json.dumps(structure, cls=_get_legacy_encoder(), sort_keys=True, indent=4)
+
+
+class Direction(_enum.Enum):
+    """Enumeration used to select a contextually-appropriate JSON profile for module messaging."""
+
+    CONTROLLER_TO_MODULE = _enum.auto()
+    """Encode/decode messages from the Ansible controller to an Ansible module."""
+    MODULE_TO_CONTROLLER = _enum.auto()
+    """Encode/decode messages from an Ansible module to the Ansible controller."""
+
+
+def get_encoder(profile: str | _types.ModuleType, /) -> type[_stdlib_json.JSONEncoder]:
+    """Return a `JSONEncoder` for the given `profile`."""
+    return _json.get_encoder_decoder(profile, _profiles.AnsibleProfileJSONEncoder)
+
+
+def get_decoder(profile: str | _types.ModuleType, /) -> type[_stdlib_json.JSONDecoder]:
+    """Return a `JSONDecoder` for the given `profile`."""
+    return _json.get_encoder_decoder(profile, _profiles.AnsibleProfileJSONDecoder)
+
+
+def get_module_encoder(name: str, direction: Direction, /) -> type[_stdlib_json.JSONEncoder]:
+    """Return a `JSONEncoder` for the module profile specified by `name` and `direction`."""
+    return get_encoder(_json.get_module_serialization_profile_name(name, direction == Direction.CONTROLLER_TO_MODULE))
+
+
+def get_module_decoder(name: str, direction: Direction, /) -> type[_stdlib_json.JSONDecoder]:
+    """Return a `JSONDecoder` for the module profile specified by `name` and `direction`."""
+    return get_decoder(_json.get_module_serialization_profile_name(name, direction == Direction.CONTROLLER_TO_MODULE))
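
Note: a sketch of module-side usage, mirroring the `_return_formatted` change in basic.py above (the profile name normally comes from the controller; 'legacy' here is illustrative):

    import json

    encoder = get_module_encoder('legacy', Direction.MODULE_TO_CONTROLLER)
    payload = json.dumps({'changed': False}, cls=encoder)
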
diff --git a/lib/ansible/module_utils/common/parameters.py b/lib/ansible/module_utils/common/parameters.py
index b9f5be43a70..fc886463c94 100644
--- a/lib/ansible/module_utils/common/parameters.py
+++ b/lib/ansible/module_utils/common/parameters.py
@@ -6,13 +6,16 @@ from __future__ import annotations
import datetime
import os
+import typing as t
from collections import deque
from itertools import chain
from ansible.module_utils.common.collections import is_iterable
+from ansible.module_utils._internal._datatag import AnsibleSerializable, AnsibleTagHelper
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.common.warnings import warn
+from ansible.module_utils.datatag import native_type_name
from ansible.module_utils.errors import (
AliasError,
AnsibleFallbackNotFound,
@@ -83,7 +86,7 @@ _ADDITIONAL_CHECKS = (
# if adding boolean attribute, also add to PASS_BOOL
# some of this dupes defaults from controller config
# keep in sync with copy in lib/ansible/module_utils/csharp/Ansible.Basic.cs
-PASS_VARS = {
+PASS_VARS: dict[str, t.Any] = {
'check_mode': ('check_mode', False),
'debug': ('_debug', False),
'diff': ('_diff', False),
@@ -96,9 +99,9 @@ PASS_VARS = {
'selinux_special_fs': ('_selinux_special_fs', ['fuse', 'nfs', 'vboxsf', 'ramfs', '9p', 'vfat']),
'shell_executable': ('_shell', '/bin/sh'),
'socket': ('_socket_path', None),
- 'string_conversion_action': ('_string_conversion_action', 'warn'),
'syslog_facility': ('_syslog_facility', 'INFO'),
'tmpdir': ('_tmpdir', None),
+ 'tracebacks_for': ('_tracebacks_for', frozenset()),
'verbosity': ('_verbosity', 0),
'version': ('ansible_version', '0.0'),
}
@@ -408,6 +411,8 @@ def _remove_values_conditions(value, no_log_strings, deferred_removals):
dictionary for ``level1``, then the dict for ``level2``, and finally
the list for ``level3``.
"""
+ original_value = value
+
if isinstance(value, (text_type, binary_type)):
# Need native str type
native_str_value = value
@@ -432,31 +437,25 @@ def _remove_values_conditions(value, no_log_strings, deferred_removals):
else:
value = native_str_value
+ elif value is True or value is False or value is None:
+ return value
+
elif isinstance(value, Sequence):
- if isinstance(value, MutableSequence):
- new_value = type(value)()
- else:
- new_value = [] # Need a mutable value
+ new_value = AnsibleTagHelper.tag_copy(original_value, [])
deferred_removals.append((value, new_value))
- value = new_value
+ return new_value
elif isinstance(value, Set):
- if isinstance(value, MutableSet):
- new_value = type(value)()
- else:
- new_value = set() # Need a mutable value
+ new_value = AnsibleTagHelper.tag_copy(original_value, set())
deferred_removals.append((value, new_value))
- value = new_value
+ return new_value
elif isinstance(value, Mapping):
- if isinstance(value, MutableMapping):
- new_value = type(value)()
- else:
- new_value = {} # Need a mutable value
+ new_value = AnsibleTagHelper.tag_copy(original_value, {})
deferred_removals.append((value, new_value))
- value = new_value
+ return new_value
- elif isinstance(value, tuple(chain(integer_types, (float, bool, NoneType)))):
+ elif isinstance(value, (int, float)):
stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
if stringy_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
@@ -464,11 +463,15 @@ def _remove_values_conditions(value, no_log_strings, deferred_removals):
if omit_me in stringy_value:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
- elif isinstance(value, (datetime.datetime, datetime.date)):
- value = value.isoformat()
+ elif isinstance(value, (datetime.datetime, datetime.date, datetime.time)):
+ return value
+ elif isinstance(value, AnsibleSerializable):
+ return value
else:
raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
+ value = AnsibleTagHelper.tag_copy(original_value, value)
+
return value
@@ -541,7 +544,7 @@ def _sanitize_keys_conditions(value, no_log_strings, ignore_keys, deferred_remov
if isinstance(value, tuple(chain(integer_types, (float, bool, NoneType)))):
return value
- if isinstance(value, (datetime.datetime, datetime.date)):
+ if isinstance(value, (datetime.datetime, datetime.date, datetime.time)):
return value
raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
@@ -570,7 +573,7 @@ def _validate_elements(wanted_type, parameter, values, options_context=None, err
msg = "Elements value for option '%s'" % parameter
if options_context:
msg += " found in '%s'" % " -> ".join(options_context)
- msg += " is of type %s and we were unable to convert to %s: %s" % (type(value), wanted_element_type, to_native(e))
+ msg += " is of type %s and we were unable to convert to %s: %s" % (native_type_name(value), wanted_element_type, to_native(e))
errors.append(ElementError(msg))
return validated_parameters
@@ -629,7 +632,7 @@ def _validate_argument_types(argument_spec, parameters, prefix='', options_conte
elements_wanted_type = spec.get('elements', None)
if elements_wanted_type:
elements = parameters[param]
- if wanted_type != 'list' or not isinstance(elements, list):
+ if not isinstance(parameters[param], list) or not isinstance(elements, list):
msg = "Invalid type %s for option '%s'" % (wanted_name, elements)
if options_context:
msg += " found in '%s'." % " -> ".join(options_context)
@@ -638,7 +641,7 @@ def _validate_argument_types(argument_spec, parameters, prefix='', options_conte
parameters[param] = _validate_elements(elements_wanted_type, param, elements, options_context, errors)
except (TypeError, ValueError) as e:
- msg = "argument '%s' is of type %s" % (param, type(value))
+ msg = "argument '%s' is of type %s" % (param, native_type_name(value))
if options_context:
msg += " found in '%s'." % " -> ".join(options_context)
msg += " and we were unable to convert to %s: %s" % (wanted_name, to_native(e))
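
Note: the intent of the `tag_copy` calls above, sketched (exact tag semantics are internal to `_datatag`):

    # before: new_value = type(value)() or a bare list/set/dict -- any data tags were lost
    # after:  new_value = AnsibleTagHelper.tag_copy(original_value, [])
    # i.e. an empty container re-tagged from original_value, which the
    # deferred_removals loop then fills with the masked members
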
diff --git a/lib/ansible/module_utils/common/process.py b/lib/ansible/module_utils/common/process.py
index eb11f8e44d1..5c546ec6c43 100644
--- a/lib/ansible/module_utils/common/process.py
+++ b/lib/ansible/module_utils/common/process.py
@@ -29,7 +29,6 @@ def get_bin_path(arg, opt_dirs=None, required=None):
deprecate(
msg="The `required` parameter in `get_bin_path` API is deprecated.",
version="2.21",
- collection_name="ansible.builtin",
)
paths = []
diff --git a/lib/ansible/module_utils/common/respawn.py b/lib/ansible/module_utils/common/respawn.py
index 2938c86a487..0eae8daa1e7 100644
--- a/lib/ansible/module_utils/common/respawn.py
+++ b/lib/ansible/module_utils/common/respawn.py
@@ -4,11 +4,15 @@
from __future__ import annotations
import os
+import pathlib
import subprocess
import sys
import typing as t
from ansible.module_utils.common.text.converters import to_bytes
+from ansible.module_utils._internal._ansiballz import _respawn
+
+_ANSIBLE_PARENT_PATH = pathlib.Path(__file__).parents[3]
def has_respawned():
@@ -36,7 +40,7 @@ def respawn_module(interpreter_path) -> t.NoReturn:
raise Exception('module has already been respawned')
# FUTURE: we need a safe way to log that a respawn has occurred for forensic/debug purposes
- payload = _create_payload()
+ payload = _respawn.create_payload()
stdin_read, stdin_write = os.pipe()
os.write(stdin_write, to_bytes(payload))
os.close(stdin_write)
@@ -53,45 +57,30 @@ def probe_interpreters_for_module(interpreter_paths, module_name):
:arg interpreter_paths: iterable of paths to Python interpreters. The paths will be probed
in order, and the first path that exists and can successfully import the named module will
be returned (or ``None`` if probing fails for all supplied paths).
- :arg module_name: fully-qualified Python module name to probe for (eg, ``selinux``)
+ :arg module_name: fully-qualified Python module name to probe for (for example, ``selinux``)
"""
+ PYTHONPATH = os.getenv('PYTHONPATH', '')
+
+ env = os.environ.copy()
+ env.update({
+ 'PYTHONPATH': f'{_ANSIBLE_PARENT_PATH}:{PYTHONPATH}'.rstrip(': ')
+ })
+
for interpreter_path in interpreter_paths:
if not os.path.exists(interpreter_path):
continue
try:
- rc = subprocess.call([interpreter_path, '-c', 'import {0}'.format(module_name)])
+ rc = subprocess.call(
+ [
+ interpreter_path,
+ '-c',
+ f'import {module_name}, ansible.module_utils.basic',
+ ],
+ env=env,
+ )
if rc == 0:
return interpreter_path
except Exception:
continue
return None
-
-
-def _create_payload():
- from ansible.module_utils import basic
- smuggled_args = getattr(basic, '_ANSIBLE_ARGS')
- if not smuggled_args:
- raise Exception('unable to access ansible.module_utils.basic._ANSIBLE_ARGS (not launched by AnsiballZ?)')
- module_fqn = sys.modules['__main__']._module_fqn
- modlib_path = sys.modules['__main__']._modlib_path
- respawn_code_template = """
-import runpy
-import sys
-
-module_fqn = {module_fqn!r}
-modlib_path = {modlib_path!r}
-smuggled_args = {smuggled_args!r}
-
-if __name__ == '__main__':
- sys.path.insert(0, modlib_path)
-
- from ansible.module_utils import basic
- basic._ANSIBLE_ARGS = smuggled_args
-
- runpy.run_module(module_fqn, init_globals=dict(_respawned=True), run_name='__main__', alter_sys=True)
- """
-
- respawn_code = respawn_code_template.format(module_fqn=module_fqn, modlib_path=modlib_path, smuggled_args=smuggled_args.strip())
-
- return respawn_code
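
Note: the documented call pattern for these helpers, for reference (interpreter paths are illustrative):

    if not has_respawned():
        interpreter = probe_interpreters_for_module(
            ['/usr/bin/python3', '/usr/libexec/platform-python'],
            'selinux',
        )
        if interpreter:
            respawn_module(interpreter)  # never returns; the module re-runs under the chosen interpreter
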
diff --git a/lib/ansible/module_utils/common/text/converters.py b/lib/ansible/module_utils/common/text/converters.py
index 6bfa8470b69..057d06bdbea 100644
--- a/lib/ansible/module_utils/common/text/converters.py
+++ b/lib/ansible/module_utils/common/text/converters.py
@@ -6,12 +6,9 @@
from __future__ import annotations
import codecs
-import datetime
import json
-from ansible.module_utils.six.moves.collections_abc import Set
from ansible.module_utils.six import (
- PY3,
binary_type,
iteritems,
text_type,
@@ -237,44 +234,21 @@ def to_text(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
return to_text(value, encoding, errors)
-#: :py:func:`to_native`
-#: Transform a variable into the native str type for the python version
-#:
-#: On Python2, this is an alias for
-#: :func:`~ansible.module_utils.to_bytes`. On Python3 it is an alias for
-#: :func:`~ansible.module_utils.to_text`. It makes it easier to
-#: transform a variable into the native str type for the python version
-#: the code is running on. Use this when constructing the message to
-#: send to exceptions or when dealing with an API that needs to take
-#: a native string. Example::
-#:
-#: try:
-#: 1//0
-#: except ZeroDivisionError as e:
-#: raise MyException('Encountered and error: %s' % to_native(e))
-if PY3:
- to_native = to_text
-else:
- to_native = to_bytes
-
-
-def _json_encode_fallback(obj):
- if isinstance(obj, Set):
- return list(obj)
- elif isinstance(obj, datetime.datetime):
- return obj.isoformat()
- raise TypeError("Cannot json serialize %s" % to_native(obj))
+to_native = to_text
def jsonify(data, **kwargs):
- # After 2.18, we should remove this loop, and hardcode to utf-8 in alignment with requiring utf-8 module responses
- for encoding in ("utf-8", "latin-1"):
- try:
- new_data = container_to_text(data, encoding=encoding)
- except UnicodeDecodeError:
- continue
- return json.dumps(new_data, default=_json_encode_fallback, **kwargs)
- raise UnicodeError('Invalid unicode encoding encountered')
+ from ansible.module_utils.common import json as _common_json
+ # from ansible.module_utils.common.warnings import deprecate
+
+ # deprecated: description='deprecate jsonify()' core_version='2.23'
+ # deprecate(
+ # msg="The `jsonify` function is deprecated.",
+ # version="2.27",
+ # # help_text="", # DTFIX-FUTURE: fill in this help text
+ # )
+
+ return json.dumps(data, cls=_common_json._get_legacy_encoder(), _decode_bytes=True, **kwargs)
def container_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'):
@@ -283,6 +257,7 @@ def container_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'):
Specialized for json return because this only handles, lists, tuples,
and dict container types (the containers that the json module returns)
"""
+ # DTFIX-FUTURE: deprecate
if isinstance(d, text_type):
return to_bytes(d, encoding=encoding, errors=errors)
@@ -302,6 +277,7 @@ def container_to_text(d, encoding='utf-8', errors='surrogate_or_strict'):
Specialized for json return because this only handles, lists, tuples,
and dict container types (the containers that the json module returns)
"""
+ # DTFIX-FUTURE: deprecate
if isinstance(d, binary_type):
# Warning, can traceback
diff --git a/lib/ansible/module_utils/common/validation.py b/lib/ansible/module_utils/common/validation.py
index 399767e775d..f5d5f5a061f 100644
--- a/lib/ansible/module_utils/common/validation.py
+++ b/lib/ansible/module_utils/common/validation.py
@@ -10,16 +10,14 @@ import os
import re
from ast import literal_eval
+from ansible.module_utils.common import json as _common_json
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.collections import is_iterable
-from ansible.module_utils.common.text.converters import jsonify
from ansible.module_utils.common.text.formatters import human_to_bytes
from ansible.module_utils.common.warnings import deprecate
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import (
- binary_type,
string_types,
- text_type,
)
@@ -185,7 +183,7 @@ def check_required_by(requirements, parameters, options_context=None):
:kwarg options_context: List of strings of parent key names if ``requirements`` are
in a sub spec.
- :returns: Empty dictionary or raises :class:`TypeError` if the
+ :returns: Empty dictionary or raises :class:`TypeError` if the check fails.
"""
result = {}
@@ -195,22 +193,15 @@ def check_required_by(requirements, parameters, options_context=None):
for (key, value) in requirements.items():
if key not in parameters or parameters[key] is None:
continue
- result[key] = []
# Support strings (single-item lists)
if isinstance(value, string_types):
value = [value]
- for required in value:
- if required not in parameters or parameters[required] is None:
- result[key].append(required)
-
- if result:
- for key, missing in result.items():
- if len(missing) > 0:
- msg = "missing parameter(s) required by '%s': %s" % (key, ', '.join(missing))
- if options_context:
- msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
- raise TypeError(to_native(msg))
+ if missing := [required for required in value if required not in parameters or parameters[required] is None]:
+ msg = f"missing parameter(s) required by '{key}': {', '.join(missing)}"
+ if options_context:
+ msg = f"{msg} found in {' -> '.join(options_context)}"
+ raise TypeError(to_native(msg))
return result
@@ -392,6 +383,10 @@ def check_type_str(value, allow_conversion=True, param=None, prefix=''):
raise TypeError(to_native(msg))
+def _check_type_str_no_conversion(value) -> str:
+ return check_type_str(value, allow_conversion=False)
+
+
def check_type_list(value):
"""Verify that the value is a list or convert to a list
@@ -407,6 +402,7 @@ def check_type_list(value):
if isinstance(value, list):
return value
+ # DTFIX-FUTURE: deprecate legacy comma split functionality, eventually replace with `_check_type_list_strict`
if isinstance(value, string_types):
return value.split(",")
elif isinstance(value, int) or isinstance(value, float):
@@ -415,6 +411,14 @@ def check_type_list(value):
raise TypeError('%s cannot be converted to a list' % type(value))
+def _check_type_list_strict(value):
+ # FUTURE: this impl should replace `check_type_list`
+ if isinstance(value, list):
+ return value
+
+ return [value]
+
+
def check_type_dict(value):
"""Verify that value is a dict or convert it to a dict and return it.
@@ -572,14 +576,21 @@ def check_type_bits(value):
def check_type_jsonarg(value):
- """Return a jsonified string. Sometimes the controller turns a json string
- into a dict/list so transform it back into json here
-
- Raises :class:`TypeError` if unable to convert the value
-
"""
- if isinstance(value, (text_type, binary_type)):
+ JSON serialize dict/list/tuple, strip str and bytes.
+    Previously required for cases where the Ansible/Jinja classic-mode literal eval pass could inadvertently deserialize objects.
+ """
+ # deprecated: description='deprecate jsonarg type support' core_version='2.23'
+ # deprecate(
+ # msg="The `jsonarg` type is deprecated.",
+ # version="2.27",
+ # help_text="JSON string arguments should use `str`; structures can be explicitly serialized as JSON with the `to_json` filter.",
+ # )
+
+ if isinstance(value, (str, bytes)):
return value.strip()
- elif isinstance(value, (list, tuple, dict)):
- return jsonify(value)
+
+ if isinstance(value, (list, tuple, dict)):
+ return json.dumps(value, cls=_common_json._get_legacy_encoder(), _decode_bytes=True)
+
raise TypeError('%s cannot be converted to a json string' % type(value))
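Note (illustrative): the DTFIX-FUTURE comment above flags the legacy comma-split behavior of `check_type_list` for eventual deprecation in favor of the strict variant. A quick sketch of the difference, assuming ansible-core is importable:

```python
from ansible.module_utils.common.validation import check_type_list

assert check_type_list([1, 2]) == [1, 2]            # lists pass through unchanged
assert check_type_list("a,b,c") == ["a", "b", "c"]  # legacy: strings are split on commas
assert check_type_list(42) == ["42"]                # scalars are stringified and wrapped

# _check_type_list_strict (private, shown in the diff) would instead wrap any
# non-list value as-is: "a,b,c" -> ["a,b,c"]
```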
diff --git a/lib/ansible/module_utils/common/warnings.py b/lib/ansible/module_utils/common/warnings.py
index 14fe516cf5b..f4a6406e4a9 100644
--- a/lib/ansible/module_utils/common/warnings.py
+++ b/lib/ansible/module_utils/common/warnings.py
@@ -2,38 +2,156 @@
# Copyright (c) 2019 Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-from __future__ import annotations
+from __future__ import annotations as _annotations
-from ansible.module_utils.six import string_types
+import typing as _t
-_global_warnings = []
-_global_deprecations = []
+from ansible.module_utils._internal import _traceback, _deprecator, _event_utils, _messages, _errors
+from ansible.module_utils import _internal
-def warn(warning):
- if isinstance(warning, string_types):
- _global_warnings.append(warning)
- else:
- raise TypeError("warn requires a string not a %s" % type(warning))
+def warn(
+ warning: str,
+ *,
+ help_text: str | None = None,
+ obj: object | None = None,
+) -> None:
+ """Record a warning to be returned with the module result."""
+ _skip_stackwalk = True
+ if _internal.is_controller:
+ _display = _internal.import_controller_module('ansible.utils.display').Display()
+ _display.warning(
+ msg=warning,
+ help_text=help_text,
+ obj=obj,
+ )
-def deprecate(msg, version=None, date=None, collection_name=None):
- if isinstance(msg, string_types):
- # For compatibility, we accept that neither version nor date is set,
- # and treat that the same as if version would haven been set
- if date is not None:
- _global_deprecations.append({'msg': msg, 'date': date, 'collection_name': collection_name})
- else:
- _global_deprecations.append({'msg': msg, 'version': version, 'collection_name': collection_name})
- else:
- raise TypeError("deprecate requires a string not a %s" % type(msg))
+ return
+ warning = _messages.WarningSummary(
+ event=_messages.Event(
+ msg=warning,
+ help_text=help_text,
+ formatted_traceback=_traceback.maybe_capture_traceback(warning, _traceback.TracebackEvent.WARNING),
+ ),
+ )
-def get_warning_messages():
- """Return a tuple of warning messages accumulated over this run"""
- return tuple(_global_warnings)
+ _global_warnings[warning] = None
-def get_deprecation_messages():
- """Return a tuple of deprecations accumulated over this run"""
- return tuple(_global_deprecations)
+def error_as_warning(
+ msg: str | None,
+ exception: BaseException,
+ *,
+ help_text: str | None = None,
+ obj: object = None,
+) -> None:
+ """Display an exception as a warning."""
+ _skip_stackwalk = True
+
+ if _internal.is_controller:
+ _display = _internal.import_controller_module('ansible.utils.display').Display()
+ _display.error_as_warning(
+ msg=msg,
+ exception=exception,
+ help_text=help_text,
+ obj=obj,
+ )
+
+ return
+
+ event = _errors.EventFactory.from_exception(exception, _traceback.is_traceback_enabled(_traceback.TracebackEvent.WARNING))
+
+ warning = _messages.WarningSummary(
+ event=_messages.Event(
+ msg=msg,
+ help_text=help_text,
+ formatted_traceback=_traceback.maybe_capture_traceback(msg, _traceback.TracebackEvent.WARNING),
+ chain=_messages.EventChain(
+ msg_reason=_errors.MSG_REASON_DIRECT_CAUSE,
+ traceback_reason=_errors.TRACEBACK_REASON_EXCEPTION_DIRECT_WARNING,
+ event=event,
+ ),
+ ),
+ )
+
+ _global_warnings[warning] = None
+
+
+def deprecate(
+ msg: str,
+ version: str | None = None,
+ date: str | None = None,
+ collection_name: str | None = None,
+ *,
+ deprecator: _messages.PluginInfo | None = None,
+ help_text: str | None = None,
+ obj: object | None = None,
+) -> None:
+ """
+ Record a deprecation warning.
+ The `obj` argument is only useful in a controller context; it is ignored for target-side callers.
+ Most callers do not need to provide `collection_name` or `deprecator` -- but provide only one if needed.
+ Specify `version` or `date`, but not both.
+ If `date` is a string, it must be in the form `YYYY-MM-DD`.
+ """
+ _skip_stackwalk = True
+
+ deprecator = _deprecator.get_best_deprecator(deprecator=deprecator, collection_name=collection_name)
+
+ if _internal.is_controller:
+ _display = _internal.import_controller_module('ansible.utils.display').Display()
+ _display.deprecated(
+ msg=msg,
+ version=version,
+ date=date,
+ help_text=help_text,
+ obj=obj,
+ # skip passing collection_name; get_best_deprecator already accounted for it when present
+ deprecator=deprecator,
+ )
+
+ return
+
+ warning = _messages.DeprecationSummary(
+ event=_messages.Event(
+ msg=msg,
+ help_text=help_text,
+ formatted_traceback=_traceback.maybe_capture_traceback(msg, _traceback.TracebackEvent.DEPRECATED),
+ ),
+ version=version,
+ date=date,
+ deprecator=deprecator,
+ )
+
+ _global_deprecations[warning] = None
+
+
+def get_warning_messages() -> tuple[str, ...]:
+ """Return a tuple of warning messages accumulated over this run."""
+ # DTFIX7: add future deprecation comment
+ return tuple(_event_utils.format_event_brief_message(item.event) for item in _global_warnings)
+
+
+def get_deprecation_messages() -> tuple[dict[str, _t.Any], ...]:
+ """Return a tuple of deprecation warning messages accumulated over this run."""
+ # DTFIX7: add future deprecation comment
+ return tuple(_event_utils.deprecation_as_dict(item) for item in _global_deprecations)
+
+
+def get_warnings() -> list[_messages.WarningSummary]:
+    """Return a list of warnings accumulated over this run."""
+ return list(_global_warnings)
+
+
+def get_deprecations() -> list[_messages.DeprecationSummary]:
+ """Return a list of deprecations accumulated over this run."""
+ return list(_global_deprecations)
+
+
+_global_warnings: dict[_messages.WarningSummary, object] = {}
+"""Global, ordered, de-duplicated storage of accumulated warnings for the current module run."""
+
+_global_deprecations: dict[_messages.DeprecationSummary, object] = {}
+"""Global, ordered, de-duplicated storage of accumulated deprecations for the current module run."""
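Note (illustrative): `_global_warnings` and `_global_deprecations` switch from lists to dicts used as ordered sets; keys give hash-based de-duplication while preserving insertion order, which the old list could not do without linear scans:

```python
seen: dict[str, object] = {}
for msg in ("disk full", "disk full", "low memory"):
    seen[msg] = None  # duplicate keys are collapsed

assert list(seen) == ["disk full", "low memory"]  # de-duplicated, insertion-ordered
```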
diff --git a/lib/ansible/module_utils/common/yaml.py b/lib/ansible/module_utils/common/yaml.py
index 2e1ee52dc0b..18f520eb4ef 100644
--- a/lib/ansible/module_utils/common/yaml.py
+++ b/lib/ansible/module_utils/common/yaml.py
@@ -6,10 +6,15 @@ This file provides ease of use shortcuts for loading and dumping YAML,
preferring the YAML compiled C extensions to reduce duplicated code.
"""
-from __future__ import annotations
+from __future__ import annotations as _annotations
+
+import collections.abc as _c
+import typing as _t
from functools import partial as _partial
+from .._internal import _datatag
+
HAS_LIBYAML = False
try:
@@ -19,23 +24,44 @@ except ImportError:
else:
HAS_YAML = True
+# DTFIX-FUTURE: refactor this to share the implementation with the controller version
+# use an abstract base class, with __init_subclass__ for representer registration, and instance methods for overridable representers
+# then tests can be consolidated instead of having two nearly identical copies
+
if HAS_YAML:
try:
from yaml import CSafeLoader as SafeLoader
from yaml import CSafeDumper as SafeDumper
+ from yaml.representer import SafeRepresenter
from yaml.cyaml import CParser as Parser # type: ignore[attr-defined] # pylint: disable=unused-import
HAS_LIBYAML = True
except (ImportError, AttributeError):
from yaml import SafeLoader # type: ignore[assignment]
from yaml import SafeDumper # type: ignore[assignment]
+ from yaml.representer import SafeRepresenter # type: ignore[assignment]
from yaml.parser import Parser # type: ignore[assignment] # pylint: disable=unused-import
+ class _AnsibleDumper(SafeDumper):
+ pass
+
+ def _represent_ansible_tagged_object(self, data: _datatag.AnsibleTaggedObject) -> _t.Any:
+ return self.represent_data(_datatag.AnsibleTagHelper.as_native_type(data))
+
+ def _represent_tripwire(self, data: _datatag.Tripwire) -> _t.NoReturn:
+ data.trip()
+
+ _AnsibleDumper.add_multi_representer(_datatag.AnsibleTaggedObject, _represent_ansible_tagged_object)
+
+ _AnsibleDumper.add_multi_representer(_datatag.Tripwire, _represent_tripwire)
+ _AnsibleDumper.add_multi_representer(_c.Mapping, SafeRepresenter.represent_dict)
+ _AnsibleDumper.add_multi_representer(_c.Sequence, SafeRepresenter.represent_list)
+
yaml_load = _partial(_yaml.load, Loader=SafeLoader)
yaml_load_all = _partial(_yaml.load_all, Loader=SafeLoader)
- yaml_dump = _partial(_yaml.dump, Dumper=SafeDumper)
- yaml_dump_all = _partial(_yaml.dump_all, Dumper=SafeDumper)
+ yaml_dump = _partial(_yaml.dump, Dumper=_AnsibleDumper)
+ yaml_dump_all = _partial(_yaml.dump_all, Dumper=_AnsibleDumper)
else:
SafeLoader = object # type: ignore[assignment,misc]
SafeDumper = object # type: ignore[assignment,misc]
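Note (illustrative, standalone PyYAML, independent of Ansible internals): `add_multi_representer` registers a representer for a base class that then applies to every subclass, which is how the `_AnsibleDumper` above serializes arbitrary `Mapping`/`Sequence` implementations as plain dicts and lists:

```python
import collections.abc

import yaml


class Dumper(yaml.SafeDumper):
    pass


class FrozenConfig(collections.abc.Mapping):
    """A read-only mapping that plain SafeDumper cannot represent."""

    def __init__(self, data):
        self._data = dict(data)

    def __getitem__(self, key):
        return self._data[key]

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)


# registered for the ABC, so it covers FrozenConfig and any other Mapping subclass
Dumper.add_multi_representer(collections.abc.Mapping, yaml.representer.SafeRepresenter.represent_dict)

print(yaml.dump(FrozenConfig({"a": 1}), Dumper=Dumper))  # -> "a: 1"
```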
diff --git a/lib/ansible/module_utils/compat/datetime.py b/lib/ansible/module_utils/compat/datetime.py
index d3cdc0d3d38..7392a753340 100644
--- a/lib/ansible/module_utils/compat/datetime.py
+++ b/lib/ansible/module_utils/compat/datetime.py
@@ -3,36 +3,48 @@
from __future__ import annotations
-from ansible.module_utils.six import PY3
+import datetime as _datetime
+import typing as t
-import datetime
+from ansible.module_utils.common.warnings import deprecate
-if PY3:
- UTC = datetime.timezone.utc
-else:
- _ZERO = datetime.timedelta(0)
+_UTC = _datetime.timezone.utc
- class _UTC(datetime.tzinfo):
- __slots__ = ()
- def utcoffset(self, dt):
- return _ZERO
+def _utcfromtimestamp(timestamp: float) -> _datetime.datetime:
+ """Construct an aware UTC datetime from a POSIX timestamp."""
+ return _datetime.datetime.fromtimestamp(timestamp, _UTC)
- def dst(self, dt):
- return _ZERO
- def tzname(self, dt):
- return "UTC"
+def _utcnow() -> _datetime.datetime:
+ """Construct an aware UTC datetime from time.time()."""
+ return _datetime.datetime.now(_UTC)
- UTC = _UTC()
+_deprecated_shims_map: dict[str, t.Callable[..., object] | _datetime.timezone] = {
+ 'UTC': _UTC,
+ 'utcfromtimestamp': _utcfromtimestamp,
+ 'utcnow': _utcnow,
+}
-def utcfromtimestamp(timestamp): # type: (float) -> datetime.datetime
- """Construct an aware UTC datetime from a POSIX timestamp."""
- return datetime.datetime.fromtimestamp(timestamp, UTC)
+__all__ = tuple(_deprecated_shims_map)
-def utcnow(): # type: () -> datetime.datetime
- """Construct an aware UTC datetime from time.time()."""
- return datetime.datetime.now(UTC)
+def __getattr__(importable_name: str) -> t.Callable[..., object] | _datetime.timezone:
+ """Inject import-time deprecation warnings.
+
+ Specifically, for ``UTC``, ``utcfromtimestamp()`` and ``utcnow()``.
+ """
+ try:
+ importable = _deprecated_shims_map[importable_name]
+ except KeyError as key_err:
+ raise AttributeError(f"module {__name__!r} has no attribute {key_err}") from None
+
+ deprecate(
+ msg=f'The `ansible.module_utils.compat.datetime.{importable_name}` '
+ 'function is deprecated.',
+ version='2.21',
+ )
+
+ return importable
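Note (illustrative): the shim above relies on PEP 562 module-level `__getattr__`, which fires only when a name is not found through normal lookup, so the deprecation cost is paid per shimmed attribute access. A minimal standalone version of the pattern, with hypothetical names:

```python
# hypothetical mymod.py
import warnings

_shims = {"OLD_CONSTANT": 42}

__all__ = tuple(_shims)


def __getattr__(name):
    try:
        value = _shims[name]
    except KeyError:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}") from None

    warnings.warn(f"{name} is deprecated", DeprecationWarning, stacklevel=2)
    return value
```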
diff --git a/lib/ansible/module_utils/compat/importlib.py b/lib/ansible/module_utils/compat/importlib.py
deleted file mode 100644
index 4074f3733d0..00000000000
--- a/lib/ansible/module_utils/compat/importlib.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (c) 2020 Matt Martz
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import annotations
-
-from ansible.module_utils.common.warnings import deprecate
-
-
-def __getattr__(importable_name):
- """Inject import-time deprecation warnings.
-
- Specifically, for ``import_module()``.
- """
- if importable_name == 'import_module':
- deprecate(
- msg=f'The `ansible.module_utils.compat.importlib.'
- f'{importable_name}` function is deprecated.',
- version='2.19',
- )
- from importlib import import_module
- return import_module
-
- raise AttributeError(
- f'cannot import name {importable_name !r} '
- f'has no attribute ({__file__ !s})',
- )
diff --git a/lib/ansible/module_utils/compat/paramiko.py b/lib/ansible/module_utils/compat/paramiko.py
index 302309cdaa8..f654229580d 100644
--- a/lib/ansible/module_utils/compat/paramiko.py
+++ b/lib/ansible/module_utils/compat/paramiko.py
@@ -2,25 +2,36 @@
# Copyright (c) 2019 Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-from __future__ import annotations
+from __future__ import annotations as _annotations
-import types # pylint: disable=unused-import
-import warnings
+import warnings as _warnings
-PARAMIKO_IMPORT_ERR = None
+from ansible.module_utils.common.warnings import deprecate as _deprecate
+
+_PARAMIKO_IMPORT_ERR = None
try:
- with warnings.catch_warnings():
+ with _warnings.catch_warnings():
# Blowfish has been moved, but the deprecated import is used by paramiko versions older than 2.9.5.
# See: https://github.com/paramiko/paramiko/pull/2039
- warnings.filterwarnings('ignore', message='Blowfish has been ', category=UserWarning)
+ _warnings.filterwarnings('ignore', message='Blowfish has been ', category=UserWarning)
# TripleDES has been moved, but the deprecated import is used by paramiko versions older than 3.3.2 and 3.4.1.
# See: https://github.com/paramiko/paramiko/pull/2421
- warnings.filterwarnings('ignore', message='TripleDES has been ', category=UserWarning)
- import paramiko # pylint: disable=unused-import
+ _warnings.filterwarnings('ignore', message='TripleDES has been ', category=UserWarning)
+ import paramiko as _paramiko
# paramiko and gssapi are incompatible and raise AttributeError not ImportError
# When running in FIPS mode, cryptography raises InternalError
# https://bugzilla.redhat.com/show_bug.cgi?id=1778939
except Exception as err:
- paramiko = None # type: types.ModuleType | None # type: ignore[no-redef]
- PARAMIKO_IMPORT_ERR = err
+ _paramiko = None # type: ignore[no-redef]
+ _PARAMIKO_IMPORT_ERR = err
+
+
+def __getattr__(name: str) -> object:
+ """Dynamic lookup to issue deprecation warnings for external import of deprecated items."""
+ if (res := globals().get(f'_{name}', ...)) is not ...:
+ _deprecate(f'The {name!r} compat import is deprecated.', version='2.21')
+
+ return res
+
+ raise AttributeError(name)
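Note (illustrative): the `globals().get(f'_{name}', ...)` lookup uses `Ellipsis` as its "not found" sentinel because `None` is a legitimate stored value here (`_paramiko` is `None` when the import failed). A standalone sketch of why that matters:

```python
_paramiko = None  # simulate the failed-import case


def lookup(name: str) -> object:
    # `...` marks "missing" so a stored None still round-trips correctly
    if (res := globals().get(f'_{name}', ...)) is not ...:
        return res

    raise AttributeError(name)


assert lookup("paramiko") is None  # found, even though its value is None
```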
diff --git a/lib/ansible/module_utils/compat/typing.py b/lib/ansible/module_utils/compat/typing.py
index d753f72b25e..af118bc723e 100644
--- a/lib/ansible/module_utils/compat/typing.py
+++ b/lib/ansible/module_utils/compat/typing.py
@@ -6,6 +6,8 @@ from __future__ import annotations
# catch *all* exceptions to prevent type annotation support module bugs causing runtime failures
# (eg, https://github.com/ansible/ansible/issues/77857)
+TYPE_CHECKING = False
+
try:
from typing_extensions import *
except Exception: # pylint: disable=broad-except
@@ -17,8 +19,7 @@ except Exception: # pylint: disable=broad-except
pass
-try:
- cast # type: ignore[used-before-def]
-except NameError:
- def cast(typ, val): # type: ignore[no-redef]
- return val
+# this import and patch occur after typing_extensions/typing imports since the presence of those modules affects dataclasses behavior
+from .._internal._patches import _dataclass_annotation_patch
+
+_dataclass_annotation_patch.DataclassesIsTypePatch.patch()
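Note (illustrative): defining `TYPE_CHECKING = False` up front lets consumers guard imports that only matter for static analysis; the hypothetical consumer below never pays the import cost at runtime:

```python
from ansible.module_utils.compat.typing import TYPE_CHECKING

if TYPE_CHECKING:  # False at runtime, treated as True by type checkers
    from ansible.module_utils.basic import AnsibleModule


def describe(module: "AnsibleModule") -> str:  # string annotation, resolved only by tools
    return f"module with {len(module.params)} parameters"
```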
diff --git a/lib/ansible/module_utils/connection.py b/lib/ansible/module_utils/connection.py
index b6720125855..aa81095d93d 100644
--- a/lib/ansible/module_utils/connection.py
+++ b/lib/ansible/module_utils/connection.py
@@ -33,12 +33,11 @@ import json
import pickle
import socket
import struct
-import traceback
import uuid
from functools import partial
from ansible.module_utils.common.text.converters import to_bytes, to_text
-from ansible.module_utils.common.json import AnsibleJSONEncoder
+from ansible.module_utils.common.json import _get_legacy_encoder
from ansible.module_utils.six import iteritems
@@ -127,7 +126,7 @@ class Connection(object):
)
try:
- data = json.dumps(req, cls=AnsibleJSONEncoder, vault_to_text=True)
+ data = json.dumps(req, cls=_get_legacy_encoder(), vault_to_text=True)
except TypeError as exc:
raise ConnectionError(
"Failed to encode some variables as JSON for communication with the persistent connection helper. "
@@ -136,12 +135,11 @@ class Connection(object):
try:
out = self.send(data)
- except socket.error as e:
+ except OSError as ex:
raise ConnectionError(
- 'unable to connect to socket %s. See Troubleshooting socket path issues '
- 'in the Network Debug and Troubleshooting Guide' % self.socket_path,
- err=to_text(e, errors='surrogate_then_replace'), exception=traceback.format_exc()
- )
+ f'Unable to connect to socket {self.socket_path!r}. See Troubleshooting socket path issues '
+ 'in the Network Debug and Troubleshooting Guide.'
+ ) from ex
try:
response = json.loads(out)
@@ -192,13 +190,12 @@ class Connection(object):
send_data(sf, to_bytes(data))
response = recv_data(sf)
- except socket.error as e:
+ except OSError as ex:
sf.close()
raise ConnectionError(
- 'unable to connect to socket %s. See the socket path issue category in '
- 'Network Debug and Troubleshooting Guide' % self.socket_path,
- err=to_text(e, errors='surrogate_then_replace'), exception=traceback.format_exc()
- )
+ f'Unable to connect to socket {self.socket_path!r}. See the socket path issue category in '
+ 'Network Debug and Troubleshooting Guide.',
+ ) from ex
sf.close()
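Note (illustrative): the rewritten error handling swaps manual `traceback.format_exc()` capture for `raise ... from ex` chaining, so the original `OSError` survives as `__cause__`. A standalone sketch, using the builtin `ConnectionError` in place of the module's own class:

```python
def connect(path: str) -> None:
    try:
        raise OSError("connection refused")  # stand-in for a real socket failure
    except OSError as ex:
        raise ConnectionError(f"Unable to connect to socket {path!r}.") from ex


try:
    connect("/tmp/ansible.sock")
except ConnectionError as err:
    assert isinstance(err.__cause__, OSError)  # original error preserved for display
```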
diff --git a/lib/ansible/module_utils/csharp/Ansible.AccessToken.cs b/lib/ansible/module_utils/csharp/Ansible.AccessToken.cs
index 49fba4e5e77..a7959efb305 100644
--- a/lib/ansible/module_utils/csharp/Ansible.AccessToken.cs
+++ b/lib/ansible/module_utils/csharp/Ansible.AccessToken.cs
@@ -339,19 +339,47 @@ namespace Ansible.AccessToken
        public static IEnumerable<SafeNativeHandle> EnumerateUserTokens(SecurityIdentifier sid,
TokenAccessLevels access = TokenAccessLevels.Query)
{
+ return EnumerateUserTokens(sid, access, (p, h) => true);
+ }
+
+        public static IEnumerable<SafeNativeHandle> EnumerateUserTokens(
+ SecurityIdentifier sid,
+ TokenAccessLevels access,
+            Func<System.Diagnostics.Process, SafeNativeHandle, bool> processFilter)
+ {
+ // We always need the Query access level so we can query the TokenUser
+ access |= TokenAccessLevels.Query;
+
foreach (System.Diagnostics.Process process in System.Diagnostics.Process.GetProcesses())
{
- // We always need the Query access level so we can query the TokenUser
using (process)
- using (SafeNativeHandle hToken = TryOpenAccessToken(process, access | TokenAccessLevels.Query))
+ using (SafeNativeHandle processHandle = NativeMethods.OpenProcess(ProcessAccessFlags.QueryInformation, false, (UInt32)process.Id))
{
- if (hToken == null)
+ if (processHandle.IsInvalid)
+ {
continue;
+ }
- if (!sid.Equals(GetTokenUser(hToken)))
+ if (!processFilter(process, processHandle))
+ {
continue;
+ }
+
+ SafeNativeHandle accessToken;
+ if (!NativeMethods.OpenProcessToken(processHandle, access, out accessToken))
+ {
+ continue;
+ }
+
+ using (accessToken)
+ {
+ if (!sid.Equals(GetTokenUser(accessToken)))
+ {
+ continue;
+ }
- yield return hToken;
+ yield return accessToken;
+ }
}
}
}
@@ -440,18 +468,5 @@ namespace Ansible.AccessToken
for (int i = 0; i < array.Length; i++, ptrOffset = IntPtr.Add(ptrOffset, Marshal.SizeOf(typeof(T))))
array[i] = (T)Marshal.PtrToStructure(ptrOffset, typeof(T));
}
-
- private static SafeNativeHandle TryOpenAccessToken(System.Diagnostics.Process process, TokenAccessLevels access)
- {
- try
- {
- using (SafeNativeHandle hProcess = OpenProcess(process.Id, ProcessAccessFlags.QueryInformation, false))
- return OpenProcessToken(hProcess, access);
- }
- catch (Win32Exception)
- {
- return null;
- }
- }
}
}
diff --git a/lib/ansible/module_utils/csharp/Ansible.Basic.cs b/lib/ansible/module_utils/csharp/Ansible.Basic.cs
index 085958270d7..2752d8c3367 100644
--- a/lib/ansible/module_utils/csharp/Ansible.Basic.cs
+++ b/lib/ansible/module_utils/csharp/Ansible.Basic.cs
@@ -46,6 +46,10 @@ namespace Ansible.Basic
public static bool _DebugArgSpec = false;
+ // Used by the executor scripts to store warnings from the wrapper functions.
+ // This is public to avoid reflection but should not be used by modules.
+        public static List<string> _WrapperWarnings;
+
        private static List<string> BOOLEANS_TRUE = new List<string>() { "y", "yes", "on", "1", "true", "t", "1.0" };
        private static List<string> BOOLEANS_FALSE = new List<string>() { "n", "no", "off", "0", "false", "f", "0.0" };
@@ -73,9 +77,9 @@ namespace Ansible.Basic
{ "selinux_special_fs", null },
{ "shell_executable", null },
{ "socket", null },
- { "string_conversion_action", null },
{ "syslog_facility", null },
{ "target_log_info", "TargetLogInfo"},
+ { "tracebacks_for", null},
{ "tmpdir", "tmpdir" },
{ "verbosity", "Verbosity" },
{ "version", "AnsibleVersion" },
@@ -1025,16 +1029,7 @@ namespace Ansible.Basic
foreach (DictionaryEntry entry in param)
{
string paramKey = (string)entry.Key;
- if (paramKey == "_ansible_exec_wrapper_warnings")
- {
- // Special key used in module_powershell_wrapper to pass
- // along any warnings that should be returned back to
- // Ansible.
- removedParameters.Add(paramKey);
- foreach (string warning in (IList)entry.Value)
- Warn(warning);
- }
- else if (!legalInputs.Contains(paramKey, StringComparer.OrdinalIgnoreCase))
+ if (!legalInputs.Contains(paramKey, StringComparer.OrdinalIgnoreCase))
unsupportedParameters.Add(paramKey);
else if (!legalInputs.Contains(paramKey))
// For backwards compatibility we do not care about the case but we need to warn the users as this will
@@ -1210,7 +1205,7 @@ namespace Ansible.Basic
object val = requiredCheck[1];
IList requirements = (IList)requiredCheck[2];
- if (ParseStr(param[key]) != ParseStr(val))
+ if (param[key] == null || ParseStr(param[key]) != ParseStr(val))
continue;
string term = "all";
@@ -1343,6 +1338,14 @@ namespace Ansible.Basic
if (!result.ContainsKey("invocation"))
result["invocation"] = new Dictionary() { { "module_args", RemoveNoLogValues(Params, noLogValues) } };
+ if (_WrapperWarnings != null)
+ {
+ foreach (string warning in _WrapperWarnings)
+ {
+ warnings.Add(warning);
+ }
+ }
+
if (warnings.Count > 0)
result["warnings"] = warnings;
@@ -1693,7 +1696,7 @@ namespace Ansible.Basic
if ((attr & FileAttributes.ReadOnly) != 0)
{
// Windows does not allow files set with ReadOnly to be
- // deleted. Pre-emptively unset the attribute.
+ // deleted. Preemptively unset the attribute.
// FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE is quite new,
// look at using that flag with POSIX delete once Server 2019
// is the baseline.
diff --git a/lib/ansible/module_utils/csharp/Ansible.Become.cs b/lib/ansible/module_utils/csharp/Ansible.Become.cs
index 68d4d11d7a5..3656d036c2a 100644
--- a/lib/ansible/module_utils/csharp/Ansible.Become.cs
+++ b/lib/ansible/module_utils/csharp/Ansible.Become.cs
@@ -93,10 +93,21 @@ namespace Ansible.Become
CachedRemoteInteractive,
CachedUnlock
}
+
+ [Flags]
+ public enum ProcessChildProcessPolicyFlags
+ {
+ None = 0x0,
+ NoChildProcessCreation = 0x1,
+ AuditNoChildProcessCreation = 0x2,
+ AllowSecureProcessCreation = 0x4,
+ }
}
internal class NativeMethods
{
+ public const int ProcessChildProcessPolicy = 13;
+
[DllImport("advapi32.dll", SetLastError = true)]
public static extern bool AllocateLocallyUniqueId(
out Luid Luid);
@@ -116,6 +127,13 @@ namespace Ansible.Become
[DllImport("kernel32.dll")]
public static extern UInt32 GetCurrentThreadId();
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool GetProcessMitigationPolicy(
+ SafeNativeHandle hProcess,
+ int MitigationPolicy,
+ ref NativeHelpers.ProcessChildProcessPolicyFlags lpBuffer,
+ IntPtr dwLength);
+
[DllImport("user32.dll", SetLastError = true)]
public static extern NoopSafeHandle GetProcessWindowStation();
@@ -203,6 +221,7 @@ namespace Ansible.Become
[Flags]
public enum LogonFlags
{
+ None = 0x00000000,
WithProfile = 0x00000001,
NetcredentialsOnly = 0x00000002
}
@@ -217,6 +236,7 @@ namespace Ansible.Become
};
private static int WINDOWS_STATION_ALL_ACCESS = 0x000F037F;
private static int DESKTOP_RIGHTS_ALL_ACCESS = 0x000F01FF;
+ private static bool _getProcessMitigationPolicySupported = true;
public static Result CreateProcessAsUser(string username, string password, string command)
{
@@ -333,12 +353,13 @@ namespace Ansible.Become
// Grant access to the current Windows Station and Desktop to the become user
GrantAccessToWindowStationAndDesktop(account);
- // Try and impersonate a SYSTEM token. We need the SeTcbPrivilege for
- // - LogonUser for a service SID
- // - S4U logon
- // - Token elevation
+        // Try to impersonate a SYSTEM token; we need a SYSTEM token to either become a well-known service
+        // account or have administrative rights on the become access token.
+ // If we ultimately are becoming the SYSTEM account we want the token with the most privileges available.
+ // https://github.com/ansible/ansible/issues/71453
+ bool usedForProcess = becomeSid == "S-1-5-18";
systemToken = GetPrimaryTokenForUser(new SecurityIdentifier("S-1-5-18"),
-            new List<string>() { "SeTcbPrivilege" });
+            new List<string>() { "SeTcbPrivilege" }, usedForProcess);
if (systemToken != null)
{
try
@@ -356,9 +377,11 @@ namespace Ansible.Become
try
{
+ if (becomeSid == "S-1-5-18")
+ userTokens.Add(systemToken);
// Cannot use String.IsEmptyOrNull() as an empty string is an account that doesn't have a pass.
// We only use S4U if no password was defined or it was null
- if (!SERVICE_SIDS.Contains(becomeSid) && password == null && logonType != LogonType.NewCredentials)
+ else if (!SERVICE_SIDS.Contains(becomeSid) && password == null && logonType != LogonType.NewCredentials)
{
// If no password was specified, try and duplicate an existing token for that user or use S4U to
// generate one without network credentials
@@ -381,11 +404,6 @@ namespace Ansible.Become
string domain = null;
switch (becomeSid)
{
- case "S-1-5-18":
- logonType = LogonType.Service;
- domain = "NT AUTHORITY";
- username = "SYSTEM";
- break;
case "S-1-5-19":
logonType = LogonType.Service;
domain = "NT AUTHORITY";
@@ -427,8 +445,10 @@ namespace Ansible.Become
return userTokens;
}
- private static SafeNativeHandle GetPrimaryTokenForUser(SecurityIdentifier sid,
-            List<string> requiredPrivileges = null)
+ private static SafeNativeHandle GetPrimaryTokenForUser(
+ SecurityIdentifier sid,
+            List<string> requiredPrivileges = null,
+ bool usedForProcess = false)
{
// According to CreateProcessWithTokenW we require a token with
// TOKEN_QUERY, TOKEN_DUPLICATE and TOKEN_ASSIGN_PRIMARY
@@ -438,7 +458,19 @@ namespace Ansible.Become
TokenAccessLevels.AssignPrimary |
TokenAccessLevels.Impersonate;
- foreach (SafeNativeHandle hToken in TokenUtil.EnumerateUserTokens(sid, dwAccess))
+ SafeNativeHandle userToken = null;
+ int privilegeCount = 0;
+
+ // If we are using this token for the process, we need to check the
+ // process mitigation policy allows child processes to be created.
+ var processFilter = usedForProcess
+                ? (Func<System.Diagnostics.Process, SafeNativeHandle, bool>)((p, t) =>
+ {
+ return GetProcessChildProcessPolicyFlags(t) == NativeHelpers.ProcessChildProcessPolicyFlags.None;
+ })
+ : ((p, t) => true);
+
+ foreach (SafeNativeHandle hToken in TokenUtil.EnumerateUserTokens(sid, dwAccess, processFilter))
{
// Filter out any Network logon tokens, using become with that is useless when S4U
// can give us a Batch logon
@@ -448,6 +480,10 @@ namespace Ansible.Become
                List<string> actualPrivileges = TokenUtil.GetTokenPrivileges(hToken).Select(x => x.Name).ToList();
+                // If the token has the same number of privileges as the current token, or fewer, skip it.
+ if (usedForProcess && privilegeCount >= actualPrivileges.Count)
+ continue;
+
// Check that the required privileges are on the token
if (requiredPrivileges != null)
{
@@ -459,16 +495,22 @@ namespace Ansible.Become
// Duplicate the token to convert it to a primary token with the access level required.
try
{
- return TokenUtil.DuplicateToken(hToken, TokenAccessLevels.MaximumAllowed,
+ userToken = TokenUtil.DuplicateToken(hToken, TokenAccessLevels.MaximumAllowed,
SecurityImpersonationLevel.Anonymous, TokenType.Primary);
+ privilegeCount = actualPrivileges.Count;
}
catch (Process.Win32Exception)
{
continue;
}
+
+ // If we don't care about getting the token with the most privileges, escape the loop as we already
+ // have a token.
+ if (!usedForProcess)
+ break;
}
- return null;
+ return userToken;
}
private static SafeNativeHandle GetS4UTokenForUser(SecurityIdentifier sid, LogonType logonType)
@@ -581,6 +623,35 @@ namespace Ansible.Become
return null;
}
+ private static NativeHelpers.ProcessChildProcessPolicyFlags GetProcessChildProcessPolicyFlags(SafeNativeHandle processHandle)
+ {
+ // Because this is only used to check the policy, we ignore any
+ // errors and pretend that the policy is None.
+ NativeHelpers.ProcessChildProcessPolicyFlags policy = NativeHelpers.ProcessChildProcessPolicyFlags.None;
+
+ if (_getProcessMitigationPolicySupported)
+ {
+ try
+ {
+ if (NativeMethods.GetProcessMitigationPolicy(
+ processHandle,
+ NativeMethods.ProcessChildProcessPolicy,
+ ref policy,
+ (IntPtr)4))
+ {
+ return policy;
+ }
+ }
+ catch (EntryPointNotFoundException)
+ {
+ // If the function is not available, we won't try to call it again
+ _getProcessMitigationPolicySupported = false;
+ }
+ }
+
+ return policy;
+ }
+
private static NativeHelpers.SECURITY_LOGON_TYPE GetTokenLogonType(SafeNativeHandle hToken)
{
TokenStatistics stats = TokenUtil.GetTokenStatistics(hToken);
@@ -637,4 +708,4 @@ namespace Ansible.Become
{ }
}
}
-}
+}
\ No newline at end of file
diff --git a/lib/ansible/module_utils/csharp/Ansible.Privilege.cs b/lib/ansible/module_utils/csharp/Ansible.Privilege.cs
index 9d5c0b17190..3fb6b97355f 100644
--- a/lib/ansible/module_utils/csharp/Ansible.Privilege.cs
+++ b/lib/ansible/module_utils/csharp/Ansible.Privilege.cs
@@ -228,7 +228,7 @@ namespace Ansible.Privilege
}
        /// <summary>
-        /// Get's the status of all the privileges on the token specified
+        /// Gets the status of all the privileges on the token specified
        /// </summary>
        /// <param name="token">The process token to get the privilege status on</param>
        /// <returns>Dictionary where the key is the privilege constant and the value is the PrivilegeAttributes flags</returns>
@@ -342,7 +342,7 @@ namespace Ansible.Privilege
// Need to manually marshal the bytes requires for newState as the constant size
// of LUID_AND_ATTRIBUTES is set to 1 and can't be overridden at runtime, TOKEN_PRIVILEGES
// always contains at least 1 entry so we need to calculate the extra size if there are
- // nore than 1 LUID_AND_ATTRIBUTES entry
+ // more than 1 LUID_AND_ATTRIBUTES entry
int tokenPrivilegesSize = Marshal.SizeOf(typeof(NativeHelpers.TOKEN_PRIVILEGES));
int luidAttrSize = 0;
if (newState.Length > 1)
diff --git a/lib/ansible/module_utils/csharp/Ansible._Async.cs b/lib/ansible/module_utils/csharp/Ansible._Async.cs
new file mode 100644
index 00000000000..e21a8b27bec
--- /dev/null
+++ b/lib/ansible/module_utils/csharp/Ansible._Async.cs
@@ -0,0 +1,517 @@
+using Microsoft.Win32.SafeHandles;
+using System;
+using System.IO;
+using System.Runtime.InteropServices;
+using System.Security;
+using System.Text;
+using System.Threading;
+using System.Threading.Tasks;
+
+// Used by async_wrapper.ps1, not for general use.
+//AllowUnsafe
+
+namespace Ansible._Async
+{
+ internal class NativeHelpers
+ {
+ public const int CREATE_SUSPENDED = 0x00000004;
+ public const int CREATE_NEW_CONSOLE = 0x00000010;
+ public const int CREATE_UNICODE_ENVIRONMENT = 0x00000400;
+ public const int EXTENDED_STARTUPINFO_PRESENT = 0x00080000;
+ public const int CREATE_BREAKAWAY_FROM_JOB = 0x01000000;
+
+ public const int DUPLICATE_CLOSE_SOURCE = 0x00000001;
+ public const int DUPLICATE_SAME_ACCESS = 0x00000002;
+
+ public const int JobObjectBasicLimitInformation = 2;
+
+ public const int JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800;
+
+ public const int PROCESS_DUP_HANDLE = 0x00000040;
+ public const int PROCESS_CREATE_PROCESS = 0x00000080;
+
+ public const int PROC_THREAD_ATTRIBUTE_PARENT_PROCESS = 0x00020000;
+ public const int PROC_THREAD_ATTRIBUTE_HANDLE_LIST = 0x00020002;
+
+ public const int STARTF_USESHOWWINDOW = 0x00000001;
+ public const int STARTF_USESTDHANDLES = 0x00000100;
+
+ public const short SW_HIDE = 0;
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct JOBOBJECT_BASIC_LIMIT_INFORMATION
+ {
+ public long PerProcessUserTimeLimit;
+ public long PerJobUserTimeLimit;
+ public int LimitFlags;
+ public IntPtr MinimumWorkingSetSize;
+ public IntPtr MaximumWorkingSetSize;
+ public int ActiveProcessLimit;
+ public UIntPtr Affinity;
+ public int PriorityClass;
+ public int SchedulingClass;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SECURITY_ATTRIBUTES
+ {
+ public int nLength;
+ public IntPtr lpSecurityDescriptor;
+ public int bInheritHandle;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct STARTUPINFO
+ {
+ public int cb;
+ public IntPtr lpReserved;
+ public IntPtr lpDesktop;
+ public IntPtr lpTitle;
+ public int dwX;
+ public int dwY;
+ public int dwXSize;
+ public int dwYSize;
+ public int dwXCountChars;
+ public int dwYCountChars;
+ public int dwFillAttribute;
+ public int dwFlags;
+ public short wShowWindow;
+ public short cbReserved2;
+ public IntPtr lpReserved2;
+ public IntPtr hStdInput;
+ public IntPtr hStdOutput;
+ public IntPtr hStdError;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct STARTUPINFOEX
+ {
+ public STARTUPINFO startupInfo;
+ public IntPtr lpAttributeList;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct PROCESS_INFORMATION
+ {
+ public IntPtr hProcess;
+ public IntPtr hThread;
+ public int dwProcessId;
+ public int dwThreadId;
+ }
+ }
+
+ internal class NativeMethods
+ {
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern IntPtr CreateEventW(
+ ref NativeHelpers.SECURITY_ATTRIBUTES lpEventAttributes,
+ bool bManualReset,
+ bool bInitialState,
+ IntPtr lpName);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern bool CreateProcessW(
+ [MarshalAs(UnmanagedType.LPWStr)] string lpApplicationName,
+ StringBuilder lpCommandLine,
+ IntPtr lpProcessAttributes,
+ IntPtr lpThreadAttributes,
+ bool bInheritHandles,
+ int dwCreationFlags,
+ IntPtr lpEnvironment,
+ IntPtr lpCurrentDirectory,
+ ref NativeHelpers.STARTUPINFOEX lpStartupInfo,
+ out NativeHelpers.PROCESS_INFORMATION lpProcessInformation);
+
+ [DllImport("kernel32.dll")]
+ public static extern void DeleteProcThreadAttributeList(
+ IntPtr lpAttributeList);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool DuplicateHandle(
+ IntPtr hSourceProcessHandle,
+ IntPtr hSourceHandle,
+ IntPtr hTargetProcessHandle,
+ out IntPtr lpTargetHandle,
+ int dwDesiredAccess,
+ bool bInheritHandle,
+ int dwOptions);
+
+ [DllImport("kernel32.dll")]
+ public static extern IntPtr GetCurrentProcess();
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool GetExitCodeProcess(
+ IntPtr hProcess,
+ out int lpExitCode);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool InitializeProcThreadAttributeList(
+ IntPtr lpAttributeList,
+ int dwAttributeCount,
+ int dwFlags,
+ ref IntPtr lpSize);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool IsProcessInJob(
+ IntPtr ProcessHandle,
+ IntPtr JobHandle,
+ out bool Result);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern IntPtr OpenProcess(
+ Int32 dwDesiredAccess,
+ bool bInheritHandle,
+ Int32 dwProcessId);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool QueryInformationJobObject(
+ IntPtr hJob,
+ int JobObjectInformationClass,
+ ref NativeHelpers.JOBOBJECT_BASIC_LIMIT_INFORMATION lpJobObjectInformation,
+ int cbJobObjectInformationLength,
+ IntPtr lpReturnLength);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern int ResumeThread(
+ IntPtr hThread);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static unsafe extern bool UpdateProcThreadAttribute(
+ SafeProcThreadAttrList lpAttributeList,
+ int dwFlags,
+ UIntPtr Attribute,
+ void* lpValue,
+ UIntPtr cbSize,
+ IntPtr lpPreviousValue,
+ IntPtr lpReturnSize);
+ }
+
+ public class ProcessInformation : IDisposable
+ {
+ public SafeWaitHandle Process { get; private set; }
+ public SafeWaitHandle Thread { get; private set; }
+ public int ProcessId { get; private set; }
+ public int ThreadId { get; private set; }
+        public Task<string> StdoutReader { get; private set; }
+        public Task<string> StderrReader { get; private set; }
+
+ public ProcessInformation(
+ SafeWaitHandle process,
+ SafeWaitHandle thread,
+ int processId,
+ int threadId,
+            Task<string> stdoutReader,
+            Task<string> stderrReader)
+ {
+ Process = process;
+ Thread = thread;
+ ProcessId = processId;
+ ThreadId = threadId;
+ StdoutReader = stdoutReader;
+ StderrReader = stderrReader;
+ }
+
+ public void Dispose()
+ {
+ Process.Dispose();
+ Thread.Dispose();
+ GC.SuppressFinalize(this);
+ }
+ ~ProcessInformation() { Dispose(); }
+ }
+
+ public class Win32Exception : System.ComponentModel.Win32Exception
+ {
+ private string _msg;
+
+ public Win32Exception(string message) : this(Marshal.GetLastWin32Error(), message) { }
+ public Win32Exception(int errorCode, string message) : base(errorCode)
+ {
+ _msg = String.Format("{0} ({1}, Win32ErrorCode {2} - 0x{2:X8})", message, base.Message, errorCode);
+ }
+
+ public override string Message { get { return _msg; } }
+ public static explicit operator Win32Exception(string message) { return new Win32Exception(message); }
+ }
+
+ public class ManagedWaitHandle : WaitHandle
+ {
+ public ManagedWaitHandle(SafeWaitHandle handle)
+ {
+ SafeWaitHandle = handle;
+ }
+ }
+
+ internal sealed class SafeProcThreadAttrList : SafeHandle
+ {
+ public SafeProcThreadAttrList(IntPtr handle) : base(handle, true) { }
+
+ public override bool IsInvalid { get { return handle == IntPtr.Zero; } }
+
+ protected override bool ReleaseHandle()
+ {
+ NativeMethods.DeleteProcThreadAttributeList(handle);
+ Marshal.FreeHGlobal(handle);
+ return true;
+ }
+ }
+
+ public class AsyncUtil
+ {
+ public static bool CanCreateBreakawayProcess()
+ {
+ bool isInJob;
+ if (!NativeMethods.IsProcessInJob(NativeMethods.GetCurrentProcess(), IntPtr.Zero, out isInJob))
+ {
+ throw new Win32Exception("IsProcessInJob() failed");
+ }
+
+ if (!isInJob)
+ {
+ return true;
+ }
+
+ NativeHelpers.JOBOBJECT_BASIC_LIMIT_INFORMATION jobInfo = new NativeHelpers.JOBOBJECT_BASIC_LIMIT_INFORMATION();
+ bool jobRes = NativeMethods.QueryInformationJobObject(
+ IntPtr.Zero,
+ NativeHelpers.JobObjectBasicLimitInformation,
+ ref jobInfo,
+                Marshal.SizeOf<NativeHelpers.JOBOBJECT_BASIC_LIMIT_INFORMATION>(),
+ IntPtr.Zero);
+ if (!jobRes)
+ {
+ throw new Win32Exception("QueryInformationJobObject() failed");
+ }
+
+ return (jobInfo.LimitFlags & NativeHelpers.JOB_OBJECT_LIMIT_BREAKAWAY_OK) != 0;
+ }
+
+ public static ProcessInformation CreateAsyncProcess(
+ string applicationName,
+ string commandLine,
+ SafeHandle stdin,
+ SafeHandle stdout,
+ SafeHandle stderr,
+ SafeHandle mutexHandle,
+ SafeHandle parentProcess,
+ StreamReader stdoutReader,
+ StreamReader stderrReader)
+ {
+ StringBuilder commandLineBuffer = new StringBuilder(commandLine);
+ int creationFlags = NativeHelpers.CREATE_NEW_CONSOLE |
+ NativeHelpers.CREATE_SUSPENDED |
+ NativeHelpers.CREATE_UNICODE_ENVIRONMENT |
+ NativeHelpers.EXTENDED_STARTUPINFO_PRESENT;
+ if (parentProcess == null)
+ {
+ creationFlags |= NativeHelpers.CREATE_BREAKAWAY_FROM_JOB;
+ }
+
+ NativeHelpers.STARTUPINFOEX si = new NativeHelpers.STARTUPINFOEX();
+ si.startupInfo.cb = Marshal.SizeOf(typeof(NativeHelpers.STARTUPINFOEX));
+ si.startupInfo.dwFlags = NativeHelpers.STARTF_USESHOWWINDOW | NativeHelpers.STARTF_USESTDHANDLES;
+ si.startupInfo.wShowWindow = NativeHelpers.SW_HIDE;
+ si.startupInfo.hStdInput = stdin.DangerousGetHandle();
+ si.startupInfo.hStdOutput = stdout.DangerousGetHandle();
+ si.startupInfo.hStdError = stderr.DangerousGetHandle();
+
+ int attrCount = 1;
+ IntPtr rawParentProcessHandle = IntPtr.Zero;
+ if (parentProcess != null)
+ {
+ attrCount++;
+ rawParentProcessHandle = parentProcess.DangerousGetHandle();
+ }
+
+ using (SafeProcThreadAttrList attrList = CreateProcThreadAttribute(attrCount))
+ {
+ si.lpAttributeList = attrList.DangerousGetHandle();
+
+ IntPtr[] handlesToInherit = new IntPtr[4]
+ {
+ stdin.DangerousGetHandle(),
+ stdout.DangerousGetHandle(),
+ stderr.DangerousGetHandle(),
+ mutexHandle.DangerousGetHandle()
+ };
+ unsafe
+ {
+ fixed (IntPtr* handlesToInheritPtr = &handlesToInherit[0])
+ {
+ UpdateProcThreadAttribute(
+ attrList,
+ NativeHelpers.PROC_THREAD_ATTRIBUTE_HANDLE_LIST,
+ handlesToInheritPtr,
+ IntPtr.Size * 4);
+
+ if (rawParentProcessHandle != IntPtr.Zero)
+ {
+ UpdateProcThreadAttribute(
+ attrList,
+ NativeHelpers.PROC_THREAD_ATTRIBUTE_PARENT_PROCESS,
+ &rawParentProcessHandle,
+ IntPtr.Size);
+ }
+
+ NativeHelpers.PROCESS_INFORMATION pi = new NativeHelpers.PROCESS_INFORMATION();
+ bool res = NativeMethods.CreateProcessW(
+ applicationName,
+ commandLineBuffer,
+ IntPtr.Zero,
+ IntPtr.Zero,
+ true,
+ (int)creationFlags,
+ IntPtr.Zero,
+ IntPtr.Zero,
+ ref si,
+ out pi);
+ if (!res)
+ {
+ throw new Win32Exception("CreateProcessW() failed");
+ }
+
+ return new ProcessInformation(
+ new SafeWaitHandle(pi.hProcess, true),
+ new SafeWaitHandle(pi.hThread, true),
+ pi.dwProcessId,
+ pi.dwThreadId,
+ Task.Run(() => stdoutReader.ReadToEnd()),
+ Task.Run(() => stderrReader.ReadToEnd()));
+ }
+ }
+ }
+ }
+
+ public static SafeWaitHandle CreateInheritableEvent()
+ {
+ NativeHelpers.SECURITY_ATTRIBUTES sa = new NativeHelpers.SECURITY_ATTRIBUTES();
+ sa.nLength = Marshal.SizeOf(sa);
+ sa.bInheritHandle = 1;
+
+ IntPtr hEvent = NativeMethods.CreateEventW(ref sa, true, false, IntPtr.Zero);
+ if (hEvent == IntPtr.Zero)
+ {
+ throw new Win32Exception("CreateEventW() failed");
+ }
+ return new SafeWaitHandle(hEvent, true);
+ }
+
+ public static SafeHandle DuplicateHandleToProcess(
+ SafeHandle handle,
+ SafeHandle targetProcess)
+ {
+ IntPtr targetHandle;
+ bool res = NativeMethods.DuplicateHandle(
+ NativeMethods.GetCurrentProcess(),
+ handle.DangerousGetHandle(),
+ targetProcess.DangerousGetHandle(),
+ out targetHandle,
+ 0,
+ true,
+ NativeHelpers.DUPLICATE_SAME_ACCESS);
+ if (!res)
+ {
+ throw new Win32Exception("DuplicateHandle() failed");
+ }
+
+            // This will not dispose the handle; it is assumed
+            // the caller will close it manually with CloseHandleInProcess.
+ return new SafeWaitHandle(targetHandle, false);
+ }
+
+ public static void CloseHandleInProcess(
+ SafeHandle handle,
+ SafeHandle targetProcess)
+ {
+ IntPtr _ = IntPtr.Zero;
+ bool res = NativeMethods.DuplicateHandle(
+ targetProcess.DangerousGetHandle(),
+ handle.DangerousGetHandle(),
+ IntPtr.Zero,
+ out _,
+ 0,
+ false,
+ NativeHelpers.DUPLICATE_CLOSE_SOURCE);
+ if (!res)
+ {
+ throw new Win32Exception("DuplicateHandle() failed to close handle");
+ }
+ }
+
+ public static int GetProcessExitCode(SafeHandle process)
+ {
+ int exitCode;
+ bool res = NativeMethods.GetExitCodeProcess(process.DangerousGetHandle(), out exitCode);
+ if (!res)
+ {
+ throw new Win32Exception("GetExitCodeProcess() failed");
+ }
+ return exitCode;
+ }
+
+ public static SafeHandle OpenProcessAsParent(int processId)
+ {
+ IntPtr hProcess = NativeMethods.OpenProcess(
+ NativeHelpers.PROCESS_DUP_HANDLE | NativeHelpers.PROCESS_CREATE_PROCESS,
+ false,
+ processId);
+ if (hProcess == IntPtr.Zero)
+ {
+ throw new Win32Exception("OpenProcess() failed");
+ }
+ return new SafeWaitHandle(hProcess, true);
+ }
+
+ public static void ResumeThread(SafeHandle thread)
+ {
+ int res = NativeMethods.ResumeThread(thread.DangerousGetHandle());
+ if (res == -1)
+ {
+ throw new Win32Exception("ResumeThread() failed");
+ }
+ }
+
+ private static SafeProcThreadAttrList CreateProcThreadAttribute(int count)
+ {
+ IntPtr attrSize = IntPtr.Zero;
+ NativeMethods.InitializeProcThreadAttributeList(IntPtr.Zero, count, 0, ref attrSize);
+
+ IntPtr attributeList = Marshal.AllocHGlobal((int)attrSize);
+ try
+ {
+ if (!NativeMethods.InitializeProcThreadAttributeList(attributeList, count, 0, ref attrSize))
+ {
+ throw new Win32Exception("InitializeProcThreadAttributeList() failed");
+ }
+
+ return new SafeProcThreadAttrList(attributeList);
+ }
+ catch
+ {
+ Marshal.FreeHGlobal(attributeList);
+ throw;
+ }
+ }
+
+ private static unsafe void UpdateProcThreadAttribute(
+ SafeProcThreadAttrList attributeList,
+ int attribute,
+ void* value,
+ int size)
+ {
+ bool res = NativeMethods.UpdateProcThreadAttribute(
+ attributeList,
+ 0,
+ (UIntPtr)attribute,
+ value,
+ (UIntPtr)size,
+ IntPtr.Zero,
+ IntPtr.Zero);
+ if (!res)
+ {
+ string msg = string.Format("UpdateProcThreadAttribute() failed to set attribute 0x{0:X8}", attribute);
+ throw new Win32Exception(msg);
+ }
+ }
+ }
+}
diff --git a/lib/ansible/module_utils/datatag.py b/lib/ansible/module_utils/datatag.py
new file mode 100644
index 00000000000..e2bc9c1504a
--- /dev/null
+++ b/lib/ansible/module_utils/datatag.py
@@ -0,0 +1,49 @@
+"""Public API for data tagging."""
+from __future__ import annotations as _annotations
+
+import typing as _t
+
+from ._internal import _datatag, _deprecator, _traceback, _messages
+from ._internal._datatag import _tags
+
+_T = _t.TypeVar('_T')
+
+
+deprecator_from_collection_name = _deprecator.deprecator_from_collection_name
+
+
+def deprecate_value(
+ value: _T,
+ msg: str,
+ *,
+ version: str | None = None,
+ date: str | None = None,
+ collection_name: str | None = None,
+ deprecator: _messages.PluginInfo | None = None,
+ help_text: str | None = None,
+) -> _T:
+ """
+ Return `value` tagged with the given deprecation details.
+ The types `None` and `bool` cannot be deprecated and are returned unmodified.
+ Raises a `TypeError` if `value` is not a supported type.
+ Most callers do not need to provide `collection_name` or `deprecator` -- but provide only one if needed.
+ Specify `version` or `date`, but not both.
+ If `date` is provided, it should be in the form `YYYY-MM-DD`.
+ """
+ _skip_stackwalk = True
+
+ deprecated = _tags.Deprecated(
+ msg=msg,
+ help_text=help_text,
+ date=date,
+ version=version,
+ deprecator=_deprecator.get_best_deprecator(deprecator=deprecator, collection_name=collection_name),
+ formatted_traceback=_traceback.maybe_capture_traceback(msg, _traceback.TracebackEvent.DEPRECATED_VALUE),
+ )
+
+ return deprecated.tag(value)
+
+
+def native_type_name(value: object | type, /) -> str:
+ """Return the type name of `value`, substituting the native Python type name for internal tagged types."""
+ return _datatag.AnsibleTagHelper.base_type(value).__name__
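Note (hedged usage sketch, assuming ansible-core with this change is importable): a value tagged by `deprecate_value` still behaves like the original, and `native_type_name` reports the underlying type rather than the internal tagged subclass:

```python
from ansible.module_utils.datatag import deprecate_value, native_type_name

path = deprecate_value("/srv/legacy", "The legacy path is deprecated.", version="2.23")

assert path == "/srv/legacy"            # tagged value compares equal to the original
assert native_type_name(path) == "str"  # underlying native type, not the tagged subclass
```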
diff --git a/lib/ansible/module_utils/facts/ansible_collector.py b/lib/ansible/module_utils/facts/ansible_collector.py
index 9fe1c8a84ee..82b6e16746b 100644
--- a/lib/ansible/module_utils/facts/ansible_collector.py
+++ b/lib/ansible/module_utils/facts/ansible_collector.py
@@ -30,8 +30,7 @@ from __future__ import annotations
import fnmatch
import sys
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts import timeout
from ansible.module_utils.facts import collector
@@ -113,7 +112,13 @@ class CollectorMetaDataCollector(collector.BaseFactCollector):
self.module_setup = module_setup
def collect(self, module=None, collected_facts=None):
+ # NOTE: deprecate/remove once DT lands
+ # we can return this data, but should not be top level key
meta_facts = {'gather_subset': self.gather_subset}
+
+ # NOTE: this is just a boolean indicator that 'facts were gathered'
+ # and should be moved to the 'gather_facts' action plugin
+ # probably revised to handle modules/subsets combos
if self.module_setup:
meta_facts['module_setup'] = self.module_setup
return meta_facts
diff --git a/lib/ansible/module_utils/facts/collector.py b/lib/ansible/module_utils/facts/collector.py
index f3e144f7dda..6e5591f7de1 100644
--- a/lib/ansible/module_utils/facts/collector.py
+++ b/lib/ansible/module_utils/facts/collector.py
@@ -31,8 +31,7 @@ from __future__ import annotations
from collections import defaultdict
import platform
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts import timeout
diff --git a/lib/ansible/module_utils/facts/default_collectors.py b/lib/ansible/module_utils/facts/default_collectors.py
index af4391576c0..a1a92431919 100644
--- a/lib/ansible/module_utils/facts/default_collectors.py
+++ b/lib/ansible/module_utils/facts/default_collectors.py
@@ -27,7 +27,7 @@
#
from __future__ import annotations
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.collector import BaseFactCollector
diff --git a/lib/ansible/module_utils/facts/hardware/aix.py b/lib/ansible/module_utils/facts/hardware/aix.py
index c2a074bf8ea..d359e06b707 100644
--- a/lib/ansible/module_utils/facts/hardware/aix.py
+++ b/lib/ansible/module_utils/facts/hardware/aix.py
@@ -45,6 +45,7 @@ class AIXHardware(Hardware):
vgs_facts = self.get_vgs_facts()
mount_facts = self.get_mount_facts()
devices_facts = self.get_device_facts()
+ uptime_facts = self.get_uptime_facts()
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
@@ -52,6 +53,7 @@ class AIXHardware(Hardware):
hardware_facts.update(vgs_facts)
hardware_facts.update(mount_facts)
hardware_facts.update(devices_facts)
+ hardware_facts.update(uptime_facts)
return hardware_facts
@@ -123,6 +125,38 @@ class AIXHardware(Hardware):
return memory_facts
+ def get_uptime_facts(self):
+ uptime_facts = {}
+ # On AIX, there are no options to get the uptime directly in seconds.
+ # Your options are to parse the output of "who", "uptime", or "ps".
+ # Only "ps" always provides a field with seconds.
+ ps_bin = self.module.get_bin_path("ps")
+ if ps_bin is None:
+ return uptime_facts
+
+ ps_cmd = [ps_bin, "-p", "1", "-o", "etime="]
+
+ rc, out, err = self.module.run_command(ps_cmd)
+ if rc != 0:
+ return uptime_facts
+
+ # Parse out
+ if out:
+ lines = out.splitlines()
+ data = lines[0].replace(':', '-').split('-')
+ try:
+ days = int(data[0])
+ hours = int(data[1])
+ minutes = int(data[2])
+ seconds = int(data[3])
+ except (IndexError, ValueError):
+ return uptime_facts
+ # Calculate uptime in seconds
+ uptime_seconds = (days * 86400) + (hours * 3600) + (minutes * 60) + seconds
+ uptime_facts['uptime_seconds'] = int(uptime_seconds)
+
+ return uptime_facts
+
def get_dmi_facts(self):
dmi_facts = {}
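Note (illustrative helper, not part of the patch): `ps -o etime=` prints `[[dd-]hh:]mm:ss`, so the days and hours fields may be absent for short uptimes; the module code above simply returns no fact in that case. A sketch of a parser that tolerates all three shapes:

```python
def etime_to_seconds(etime: str) -> int:
    """Convert ps etime output ([[dd-]hh:]mm:ss) to seconds."""
    days, _, rest = etime.strip().rpartition("-")
    parts = [int(x) for x in rest.split(":")]
    while len(parts) < 3:
        parts.insert(0, 0)  # pad the missing hours field (and minutes, defensively)
    hours, minutes, seconds = parts
    return int(days or 0) * 86400 + hours * 3600 + minutes * 60 + seconds


assert etime_to_seconds("2-03:04:05") == 2 * 86400 + 3 * 3600 + 4 * 60 + 5
assert etime_to_seconds("04:05") == 4 * 60 + 5
```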
diff --git a/lib/ansible/module_utils/facts/hardware/base.py b/lib/ansible/module_utils/facts/hardware/base.py
index 8710ed57fcc..bb2cc600bcf 100644
--- a/lib/ansible/module_utils/facts/hardware/base.py
+++ b/lib/ansible/module_utils/facts/hardware/base.py
@@ -28,7 +28,7 @@
from __future__ import annotations
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.collector import BaseFactCollector
@@ -49,7 +49,7 @@ class HardwareCollector(BaseFactCollector):
_fact_ids = set(['processor',
'processor_cores',
'processor_count',
- # TODO: mounts isnt exactly hardware
+ # TODO: mounts isn't exactly hardware
'mounts',
'devices']) # type: t.Set[str]
_fact_class = Hardware
diff --git a/lib/ansible/module_utils/facts/hardware/darwin.py b/lib/ansible/module_utils/facts/hardware/darwin.py
index ac159d5fd2b..091ed67595e 100644
--- a/lib/ansible/module_utils/facts/hardware/darwin.py
+++ b/lib/ansible/module_utils/facts/hardware/darwin.py
@@ -93,7 +93,7 @@ class DarwinHardware(Hardware):
}
total_used = 0
- page_size = 4096
+ page_size = int(self.sysctl.get('hw.pagesize', 4096))
vm_stat_command = self.module.get_bin_path('vm_stat')
if vm_stat_command is None:
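Note (illustrative): the hardcoded 4096 was wrong on Apple Silicon, where the VM page size is 16 KiB; reading `hw.pagesize` from the already-collected sysctl dict fixes the memory math:

```python
sysctl = {"hw.pagesize": "16384"}  # typical value on Apple Silicon Macs

page_size = int(sysctl.get("hw.pagesize", 4096))  # fall back to 4096 if the key is absent

assert page_size == 16384
```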
diff --git a/lib/ansible/module_utils/facts/hardware/linux.py b/lib/ansible/module_utils/facts/hardware/linux.py
index f431c4e1f8c..62fdb896f0f 100644
--- a/lib/ansible/module_utils/facts/hardware/linux.py
+++ b/lib/ansible/module_utils/facts/hardware/linux.py
@@ -182,7 +182,7 @@ class LinuxHardware(Hardware):
xen = True
# Only interested in the first line
break
- except IOError:
+ except OSError:
pass
if not os.access("/proc/cpuinfo", os.R_OK):
diff --git a/lib/ansible/module_utils/facts/network/base.py b/lib/ansible/module_utils/facts/network/base.py
index 7e13e168b32..ae6f215735b 100644
--- a/lib/ansible/module_utils/facts/network/base.py
+++ b/lib/ansible/module_utils/facts/network/base.py
@@ -15,7 +15,7 @@
from __future__ import annotations
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.collector import BaseFactCollector
diff --git a/lib/ansible/module_utils/facts/network/fc_wwn.py b/lib/ansible/module_utils/facts/network/fc_wwn.py
index fb846cc08a8..58f59806f1f 100644
--- a/lib/ansible/module_utils/facts/network/fc_wwn.py
+++ b/lib/ansible/module_utils/facts/network/fc_wwn.py
@@ -19,8 +19,7 @@ from __future__ import annotations
import sys
import glob
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.utils import get_file_lines
from ansible.module_utils.facts.collector import BaseFactCollector
diff --git a/lib/ansible/module_utils/facts/network/iscsi.py b/lib/ansible/module_utils/facts/network/iscsi.py
index 48f98a682bd..1ac48206055 100644
--- a/lib/ansible/module_utils/facts/network/iscsi.py
+++ b/lib/ansible/module_utils/facts/network/iscsi.py
@@ -18,8 +18,7 @@
from __future__ import annotations
import sys
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.network.base import NetworkCollector
diff --git a/lib/ansible/module_utils/facts/network/nvme.py b/lib/ansible/module_utils/facts/network/nvme.py
index 7eb070dcf5d..192f6f5275b 100644
--- a/lib/ansible/module_utils/facts/network/nvme.py
+++ b/lib/ansible/module_utils/facts/network/nvme.py
@@ -18,8 +18,7 @@
from __future__ import annotations
import sys
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.network.base import NetworkCollector
diff --git a/lib/ansible/module_utils/facts/other/facter.py b/lib/ansible/module_utils/facts/other/facter.py
index 41b3cea7c92..8e4ec1a7aef 100644
--- a/lib/ansible/module_utils/facts/other/facter.py
+++ b/lib/ansible/module_utils/facts/other/facter.py
@@ -4,8 +4,7 @@
from __future__ import annotations
import json
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.namespace import PrefixFactNamespace
from ansible.module_utils.facts.collector import BaseFactCollector
@@ -62,7 +61,7 @@ class FacterFactCollector(BaseFactCollector):
return out
def collect(self, module=None, collected_facts=None):
- # Note that this mirrors previous facter behavior, where there isnt
+ # Note that this mirrors previous facter behavior, where there isn't
# a 'ansible_facter' key in the main fact dict, but instead, 'facter_whatever'
# items are added to the main dict.
facter_dict = {}
diff --git a/lib/ansible/module_utils/facts/other/ohai.py b/lib/ansible/module_utils/facts/other/ohai.py
index db62fe4d73e..4cb2f7a2f0b 100644
--- a/lib/ansible/module_utils/facts/other/ohai.py
+++ b/lib/ansible/module_utils/facts/other/ohai.py
@@ -16,8 +16,7 @@
from __future__ import annotations
import json
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.namespace import PrefixFactNamespace
diff --git a/lib/ansible/module_utils/facts/sysctl.py b/lib/ansible/module_utils/facts/sysctl.py
index 639e77c41f0..a1a4db4030c 100644
--- a/lib/ansible/module_utils/facts/sysctl.py
+++ b/lib/ansible/module_utils/facts/sysctl.py
@@ -17,8 +17,6 @@ from __future__ import annotations
import re
-from ansible.module_utils.common.text.converters import to_text
-
def get_sysctl(module, prefixes):
@@ -31,8 +29,8 @@ def get_sysctl(module, prefixes):
try:
rc, out, err = module.run_command(cmd)
- except (IOError, OSError) as e:
- module.warn('Unable to read sysctl: %s' % to_text(e))
+ except OSError as ex:
+ module.error_as_warning('Unable to read sysctl.', exception=ex)
rc = 1
if rc == 0:
@@ -54,8 +52,8 @@ def get_sysctl(module, prefixes):
try:
(key, value) = re.split(r'\s?=\s?|: ', line, maxsplit=1)
- except Exception as e:
- module.warn('Unable to split sysctl line (%s): %s' % (to_text(line), to_text(e)))
+ except Exception as ex:
+ module.error_as_warning(f'Unable to split sysctl line {line!r}.', exception=ex)
if key:
sysctl[key] = value.strip()
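
For context on the split retained above: the pattern accepts both the `key = value` form and the macOS-style `key: value` form, and maxsplit=1 keeps any separators embedded in the value intact. A quick illustration with fabricated sysctl lines:

    import re

    lines = [
        'hw.model = MacPro',               # '=' form
        'kern.ostype: Darwin',             # ': ' form
        'vm.swapusage: total = 1024.00M',  # maxsplit=1 preserves the inner '='
    ]
    for line in lines:
        key, value = re.split(r'\s?=\s?|: ', line, maxsplit=1)
        print(key, '->', value.strip())
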
diff --git a/lib/ansible/module_utils/facts/system/apparmor.py b/lib/ansible/module_utils/facts/system/apparmor.py
index ec29e883e09..d0ead37d34d 100644
--- a/lib/ansible/module_utils/facts/system/apparmor.py
+++ b/lib/ansible/module_utils/facts/system/apparmor.py
@@ -18,8 +18,7 @@
from __future__ import annotations
import os
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.collector import BaseFactCollector
diff --git a/lib/ansible/module_utils/facts/system/caps.py b/lib/ansible/module_utils/facts/system/caps.py
index 365a04592ac..d92d1216978 100644
--- a/lib/ansible/module_utils/facts/system/caps.py
+++ b/lib/ansible/module_utils/facts/system/caps.py
@@ -17,7 +17,7 @@
from __future__ import annotations
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.collector import BaseFactCollector
@@ -38,8 +38,8 @@ class SystemCapabilitiesFactCollector(BaseFactCollector):
# NOTE: -> get_caps_data()/parse_caps_data() for easier mocking -akl
try:
rc, out, err = module.run_command([capsh_path, "--print"], errors='surrogate_then_replace', handle_exceptions=False)
- except (IOError, OSError) as e:
- module.warn('Could not query system capabilities: %s' % str(e))
+ except OSError as ex:
+ module.error_as_warning('Could not query system capabilities.', exception=ex)
if rc == 0:
enforced_caps = []
diff --git a/lib/ansible/module_utils/facts/system/chroot.py b/lib/ansible/module_utils/facts/system/chroot.py
index bbf4b39dd3e..85c7a4288c5 100644
--- a/lib/ansible/module_utils/facts/system/chroot.py
+++ b/lib/ansible/module_utils/facts/system/chroot.py
@@ -3,8 +3,7 @@
from __future__ import annotations
import os
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.collector import BaseFactCollector
diff --git a/lib/ansible/module_utils/facts/system/cmdline.py b/lib/ansible/module_utils/facts/system/cmdline.py
index 12376dc0ba1..dc4b8d08256 100644
--- a/lib/ansible/module_utils/facts/system/cmdline.py
+++ b/lib/ansible/module_utils/facts/system/cmdline.py
@@ -16,8 +16,7 @@
from __future__ import annotations
import shlex
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.utils import get_file_content
diff --git a/lib/ansible/module_utils/facts/system/date_time.py b/lib/ansible/module_utils/facts/system/date_time.py
index 908d00aa163..21b97bce773 100644
--- a/lib/ansible/module_utils/facts/system/date_time.py
+++ b/lib/ansible/module_utils/facts/system/date_time.py
@@ -19,10 +19,9 @@ from __future__ import annotations
import datetime
import time
+import typing as t
-import ansible.module_utils.compat.typing as t
from ansible.module_utils.facts.collector import BaseFactCollector
-from ansible.module_utils.compat.datetime import utcfromtimestamp
class DateTimeFactCollector(BaseFactCollector):
@@ -36,7 +35,10 @@ class DateTimeFactCollector(BaseFactCollector):
# Store the timestamp once, then get local and UTC versions from that
epoch_ts = time.time()
now = datetime.datetime.fromtimestamp(epoch_ts)
- utcnow = utcfromtimestamp(epoch_ts).replace(tzinfo=None)
+ utcnow = datetime.datetime.fromtimestamp(
+ epoch_ts,
+ tz=datetime.timezone.utc,
+ )
date_time_facts['year'] = now.strftime('%Y')
date_time_facts['month'] = now.strftime('%m')
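
Besides dropping the compat utcfromtimestamp() helper, the replacement makes utcnow a timezone-aware datetime (the old `.replace(tzinfo=None)` stripped the offset). A short standard-library-only comparison:

    import datetime
    import time

    epoch_ts = time.time()
    local_now = datetime.datetime.fromtimestamp(epoch_ts)  # naive, local time
    utcnow = datetime.datetime.fromtimestamp(epoch_ts, tz=datetime.timezone.utc)

    print(local_now.tzinfo)    # None
    print(utcnow.isoformat())  # ends in +00:00: an aware UTC timestamp
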
diff --git a/lib/ansible/module_utils/facts/system/distribution.py b/lib/ansible/module_utils/facts/system/distribution.py
index 66c768a126f..9f81beab64d 100644
--- a/lib/ansible/module_utils/facts/system/distribution.py
+++ b/lib/ansible/module_utils/facts/system/distribution.py
@@ -8,8 +8,7 @@ from __future__ import annotations
import os
import platform
import re
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.common.sys_info import get_distribution, get_distribution_version, \
get_distribution_codename
@@ -101,7 +100,7 @@ class DistributionFiles:
return get_file_content(path)
def _get_dist_file_content(self, path, allow_empty=False):
- # cant find that dist file or it is incorrectly empty
+ # can't find that dist file, or it is incorrectly empty
if not _file_exists(path, allow_empty=allow_empty):
return False, None
@@ -208,7 +207,7 @@ class DistributionFiles:
return dist_file_facts
- # TODO: FIXME: split distro file parsing into its own module or class
+ # FIXME: split distro file parsing into its own module or class
def parse_distribution_file_Slackware(self, name, data, path, collected_facts):
slackware_facts = {}
if 'Slackware' not in data:
@@ -311,15 +310,28 @@ class DistributionFiles:
suse_facts['distribution_release'] = release.group(1)
suse_facts['distribution_version'] = collected_facts['distribution_version'] + '.' + release.group(1)
- # See https://www.suse.com/support/kb/doc/?id=000019341 for SLES for SAP
- if os.path.islink('/etc/products.d/baseproduct') and os.path.realpath('/etc/products.d/baseproduct').endswith('SLES_SAP.prod'):
- suse_facts['distribution'] = 'SLES_SAP'
+ # Check VARIANT_ID first for SLES4SAP or SL-Micro
+ variant_id_match = re.search(r'^VARIANT_ID="?([^"\n]*)"?', data, re.MULTILINE)
+ if variant_id_match:
+ variant_id = variant_id_match.group(1)
+ if variant_id in ('server-sap', 'sles-sap'):
+ suse_facts['distribution'] = 'SLES_SAP'
+ elif variant_id == 'transactional':
+ suse_facts['distribution'] = 'SL-Micro'
+ else:
+ # Fallback for older SLES 15 using baseproduct symlink
+ if os.path.islink('/etc/products.d/baseproduct'):
+ resolved = os.path.realpath('/etc/products.d/baseproduct')
+ if resolved.endswith('SLES_SAP.prod'):
+ suse_facts['distribution'] = 'SLES_SAP'
+ elif resolved.endswith('SL-Micro.prod'):
+ suse_facts['distribution'] = 'SL-Micro'
return True, suse_facts
def parse_distribution_file_Debian(self, name, data, path, collected_facts):
debian_facts = {}
- if 'Debian' in data or 'Raspbian' in data:
+ if any(distro in data for distro in ('Debian', 'Raspbian')):
debian_facts['distribution'] = 'Debian'
release = re.search(r"PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
if release:
@@ -398,6 +410,8 @@ class DistributionFiles:
if version:
debian_facts['distribution_version'] = version.group(1)
debian_facts['distribution_major_version'] = version.group(1).split('.')[0]
+ elif 'LMDE' in data:
+ debian_facts['distribution'] = 'Linux Mint Debian Edition'
else:
return False, debian_facts
@@ -515,9 +529,10 @@ class Distribution(object):
'EuroLinux', 'Kylin Linux Advanced Server', 'MIRACLE'],
'Debian': ['Debian', 'Ubuntu', 'Raspbian', 'Neon', 'KDE neon',
'Linux Mint', 'SteamOS', 'Devuan', 'Kali', 'Cumulus Linux',
- 'Pop!_OS', 'Parrot', 'Pardus GNU/Linux', 'Uos', 'Deepin', 'OSMC'],
+ 'Pop!_OS', 'Parrot', 'Pardus GNU/Linux', 'Uos', 'Deepin', 'OSMC', 'Linux Mint Debian Edition'],
'Suse': ['SuSE', 'SLES', 'SLED', 'openSUSE', 'openSUSE Tumbleweed',
- 'SLES_SAP', 'SUSE_LINUX', 'openSUSE Leap', 'ALP-Dolomite', 'SL-Micro'],
+ 'SLES_SAP', 'SUSE_LINUX', 'openSUSE Leap', 'ALP-Dolomite', 'SL-Micro',
+ 'openSUSE MicroOS'],
'Archlinux': ['Archlinux', 'Antergos', 'Manjaro'],
'Mandrake': ['Mandrake', 'Mandriva'],
'Solaris': ['Solaris', 'Nexenta', 'OmniOS', 'OpenIndiana', 'SmartOS'],
@@ -570,8 +585,7 @@ class Distribution(object):
distribution_facts.update(dist_file_facts)
distro = distribution_facts['distribution']
-
- # look for a os family alias for the 'distribution', if there isnt one, use 'distribution'
+ # look for an os family alias for the 'distribution', if there isn't one, use 'distribution'
distribution_facts['os_family'] = self.OS_FAMILY.get(distro, None) or distro
return distribution_facts
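
A self-contained sketch of the new SUSE variant detection, fed a fabricated os-release snippet (in the module, `data` holds the distro file content):

    import re

    data = 'NAME="SLES"\nVERSION_ID="15.6"\nVARIANT_ID="sles-sap"\n'

    variant_id_match = re.search(r'^VARIANT_ID="?([^"\n]*)"?', data, re.MULTILINE)
    if variant_id_match:
        variant_id = variant_id_match.group(1)
        if variant_id in ('server-sap', 'sles-sap'):
            print('SLES_SAP')        # this branch fires for the snippet above
        elif variant_id == 'transactional':
            print('SL-Micro')
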
diff --git a/lib/ansible/module_utils/facts/system/dns.py b/lib/ansible/module_utils/facts/system/dns.py
index 7ef69d136fc..5da8e5ba351 100644
--- a/lib/ansible/module_utils/facts/system/dns.py
+++ b/lib/ansible/module_utils/facts/system/dns.py
@@ -15,7 +15,7 @@
from __future__ import annotations
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.utils import get_file_content
diff --git a/lib/ansible/module_utils/facts/system/env.py b/lib/ansible/module_utils/facts/system/env.py
index 4547924532e..cf6a22457a9 100644
--- a/lib/ansible/module_utils/facts/system/env.py
+++ b/lib/ansible/module_utils/facts/system/env.py
@@ -16,8 +16,7 @@
from __future__ import annotations
import os
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.six import iteritems
diff --git a/lib/ansible/module_utils/facts/system/fips.py b/lib/ansible/module_utils/facts/system/fips.py
index 131434157d4..36b0a37f0c7 100644
--- a/lib/ansible/module_utils/facts/system/fips.py
+++ b/lib/ansible/module_utils/facts/system/fips.py
@@ -4,7 +4,7 @@
from __future__ import annotations
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.utils import get_file_content
diff --git a/lib/ansible/module_utils/facts/system/loadavg.py b/lib/ansible/module_utils/facts/system/loadavg.py
index 37cb554434f..3433c06ee34 100644
--- a/lib/ansible/module_utils/facts/system/loadavg.py
+++ b/lib/ansible/module_utils/facts/system/loadavg.py
@@ -4,8 +4,7 @@
from __future__ import annotations
import os
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.collector import BaseFactCollector
diff --git a/lib/ansible/module_utils/facts/system/local.py b/lib/ansible/module_utils/facts/system/local.py
index 66ec58a2e7d..7cf0f144d26 100644
--- a/lib/ansible/module_utils/facts/system/local.py
+++ b/lib/ansible/module_utils/facts/system/local.py
@@ -7,8 +7,7 @@ import glob
import json
import os
import stat
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.facts.utils import get_file_content
@@ -51,7 +50,7 @@ class LocalFactCollector(BaseFactCollector):
rc, out, err = module.run_command(fn)
if rc != 0:
failed = 'Failure executing fact script (%s), rc: %s, err: %s' % (fn, rc, err)
- except (IOError, OSError) as e:
+ except OSError as e:
failed = 'Could not execute fact script (%s): %s' % (fn, to_text(e))
if failed is not None:
diff --git a/lib/ansible/module_utils/facts/system/lsb.py b/lib/ansible/module_utils/facts/system/lsb.py
index 5767536b1d7..93251c31087 100644
--- a/lib/ansible/module_utils/facts/system/lsb.py
+++ b/lib/ansible/module_utils/facts/system/lsb.py
@@ -18,8 +18,7 @@
from __future__ import annotations
import os
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.utils import get_file_lines
from ansible.module_utils.facts.collector import BaseFactCollector
diff --git a/lib/ansible/module_utils/facts/system/pkg_mgr.py b/lib/ansible/module_utils/facts/system/pkg_mgr.py
index e9da18647b8..baa07076b8a 100644
--- a/lib/ansible/module_utils/facts/system/pkg_mgr.py
+++ b/lib/ansible/module_utils/facts/system/pkg_mgr.py
@@ -6,8 +6,7 @@ from __future__ import annotations
import os
import subprocess
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.collector import BaseFactCollector
diff --git a/lib/ansible/module_utils/facts/system/platform.py b/lib/ansible/module_utils/facts/system/platform.py
index 94819861b4b..cd9f11cdb37 100644
--- a/lib/ansible/module_utils/facts/system/platform.py
+++ b/lib/ansible/module_utils/facts/system/platform.py
@@ -18,8 +18,7 @@ from __future__ import annotations
import re
import socket
import platform
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.utils import get_file_content
diff --git a/lib/ansible/module_utils/facts/system/python.py b/lib/ansible/module_utils/facts/system/python.py
index 0252c0c96a7..b75d32974e6 100644
--- a/lib/ansible/module_utils/facts/system/python.py
+++ b/lib/ansible/module_utils/facts/system/python.py
@@ -16,8 +16,7 @@
from __future__ import annotations
import sys
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.collector import BaseFactCollector
diff --git a/lib/ansible/module_utils/facts/system/selinux.py b/lib/ansible/module_utils/facts/system/selinux.py
index c110f17e720..1e5ea81ac78 100644
--- a/lib/ansible/module_utils/facts/system/selinux.py
+++ b/lib/ansible/module_utils/facts/system/selinux.py
@@ -17,7 +17,7 @@
from __future__ import annotations
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.collector import BaseFactCollector
diff --git a/lib/ansible/module_utils/facts/system/service_mgr.py b/lib/ansible/module_utils/facts/system/service_mgr.py
index 20257967c1e..ba798e09dfb 100644
--- a/lib/ansible/module_utils/facts/system/service_mgr.py
+++ b/lib/ansible/module_utils/facts/system/service_mgr.py
@@ -20,8 +20,7 @@ from __future__ import annotations
import os
import platform
import re
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.common.text.converters import to_native
diff --git a/lib/ansible/module_utils/facts/system/ssh_pub_keys.py b/lib/ansible/module_utils/facts/system/ssh_pub_keys.py
index 7214dea3de6..295ea135b11 100644
--- a/lib/ansible/module_utils/facts/system/ssh_pub_keys.py
+++ b/lib/ansible/module_utils/facts/system/ssh_pub_keys.py
@@ -15,7 +15,7 @@
from __future__ import annotations
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.utils import get_file_content
diff --git a/lib/ansible/module_utils/facts/system/systemd.py b/lib/ansible/module_utils/facts/system/systemd.py
index 3ba2bbfcbdf..cb6f4c7931d 100644
--- a/lib/ansible/module_utils/facts/system/systemd.py
+++ b/lib/ansible/module_utils/facts/system/systemd.py
@@ -17,7 +17,7 @@
from __future__ import annotations
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.collector import BaseFactCollector
from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
diff --git a/lib/ansible/module_utils/facts/system/user.py b/lib/ansible/module_utils/facts/system/user.py
index 64b8fef8be6..cbfd37348eb 100644
--- a/lib/ansible/module_utils/facts/system/user.py
+++ b/lib/ansible/module_utils/facts/system/user.py
@@ -18,8 +18,7 @@ from __future__ import annotations
import getpass
import os
import pwd
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.collector import BaseFactCollector
diff --git a/lib/ansible/module_utils/facts/virtual/base.py b/lib/ansible/module_utils/facts/virtual/base.py
index 943ce406d86..f03e2289180 100644
--- a/lib/ansible/module_utils/facts/virtual/base.py
+++ b/lib/ansible/module_utils/facts/virtual/base.py
@@ -18,7 +18,7 @@
from __future__ import annotations
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.facts.collector import BaseFactCollector
diff --git a/lib/ansible/module_utils/facts/virtual/linux.py b/lib/ansible/module_utils/facts/virtual/linux.py
index 57b047b11a1..db1195efc8f 100644
--- a/lib/ansible/module_utils/facts/virtual/linux.py
+++ b/lib/ansible/module_utils/facts/virtual/linux.py
@@ -129,7 +129,7 @@ class LinuxVirtual(Virtual):
for line in get_file_lines('/proc/xen/capabilities'):
if "control_d" in line:
is_xen_host = True
- except IOError:
+ except OSError:
pass
if is_xen_host:
@@ -151,7 +151,7 @@ class LinuxVirtual(Virtual):
sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
product_family = get_file_content('/sys/devices/virtual/dmi/id/product_family')
- if product_name in ('KVM', 'KVM Server', 'Bochs', 'AHV'):
+ if product_name in ('KVM', 'KVM Server', 'Bochs', 'AHV', 'CloudStack KVM Hypervisor'):
guest_tech.add('kvm')
if not found_virt:
virtual_facts['virtualization_type'] = 'kvm'
@@ -201,7 +201,7 @@ class LinuxVirtual(Virtual):
virtual_facts['virtualization_type'] = 'virtualbox'
found_virt = True
- if bios_vendor in ('Amazon EC2', 'DigitalOcean', 'Hetzner'):
+ if bios_vendor in ('Amazon EC2', 'DigitalOcean', 'Hetzner', 'Linode'):
guest_tech.add('kvm')
if not found_virt:
virtual_facts['virtualization_type'] = 'kvm'
diff --git a/lib/ansible/module_utils/facts/virtual/sunos.py b/lib/ansible/module_utils/facts/virtual/sunos.py
index 7a595f701a5..6c6ffb291a1 100644
--- a/lib/ansible/module_utils/facts/virtual/sunos.py
+++ b/lib/ansible/module_utils/facts/virtual/sunos.py
@@ -1,17 +1,5 @@
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
@@ -77,7 +65,7 @@ class SunOSVirtual(Virtual):
if virtinfo:
# The output of virtinfo is different whether we are on a machine with logical
# domains ('LDoms') on a T-series or domains ('Domains') on a M-series. Try LDoms first.
- rc, out, err = self.module.run_command("/usr/sbin/virtinfo -p")
+ rc, out, err = self.module.run_command([virtinfo, '-p'])
# The output contains multiple lines with different keys like this:
# DOMAINROLE|impl=LDoms|control=false|io=false|service=false|root=false
# The output may also be not formatted and the returncode is set to 0 regardless of the error condition:
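
The run_command change above replaces a hardcoded shell string with an argv list built from the path get_bin_path already resolved. A subprocess analog of the same idea (the virtinfo path is a Solaris-specific assumption):

    import subprocess

    virtinfo = '/usr/sbin/virtinfo'  # stand-in for module.get_bin_path('virtinfo')
    # An argv list is passed straight to exec: no shell parsing applies,
    # and the resolved binary path is used as-is.
    result = subprocess.run([virtinfo, '-p'], capture_output=True, text=True)
    rc, out, err = result.returncode, result.stdout, result.stderr
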
diff --git a/lib/ansible/module_utils/facts/virtual/sysctl.py b/lib/ansible/module_utils/facts/virtual/sysctl.py
index 649f335ad72..6bf1d74d661 100644
--- a/lib/ansible/module_utils/facts/virtual/sysctl.py
+++ b/lib/ansible/module_utils/facts/virtual/sysctl.py
@@ -1,24 +1,11 @@
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import re
-class VirtualSysctlDetectionMixin(object):
+class VirtualSysctlDetectionMixin:
def detect_sysctl(self):
self.sysctl_path = self.module.get_bin_path('sysctl')
diff --git a/lib/ansible/module_utils/parsing/convert_bool.py b/lib/ansible/module_utils/parsing/convert_bool.py
index 3367b2a09fa..b97a6d05780 100644
--- a/lib/ansible/module_utils/parsing/convert_bool.py
+++ b/lib/ansible/module_utils/parsing/convert_bool.py
@@ -3,6 +3,8 @@
from __future__ import annotations
+import collections.abc as c
+
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils.common.text.converters import to_text
@@ -17,12 +19,16 @@ def boolean(value, strict=True):
return value
normalized_value = value
+
if isinstance(value, (text_type, binary_type)):
normalized_value = to_text(value, errors='surrogate_or_strict').lower().strip()
+ if not isinstance(value, c.Hashable):
+ normalized_value = None # prevent unhashable types from bombing, but keep the rest of the existing fallback/error behavior
+
if normalized_value in BOOLEANS_TRUE:
return True
elif normalized_value in BOOLEANS_FALSE or not strict:
return False
- raise TypeError("The value '%s' is not a valid boolean. Valid booleans include: %s" % (to_text(value), ', '.join(repr(i) for i in BOOLEANS)))
+ raise TypeError("The value '%s' is not a valid boolean. Valid booleans include: %s" % (to_text(value), ', '.join(repr(i) for i in BOOLEANS)))
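
Why the Hashable guard matters: the membership tests against BOOLEANS_TRUE/BOOLEANS_FALSE hash the value, so an unhashable input such as a list used to raise "unhashable type" from the set lookup instead of the intended TypeError. A minimal reproduction of the guard:

    import collections.abc as c

    BOOLEANS_TRUE = frozenset(('y', 'yes', 'on', '1', 'true', 't', 1, 1.0, True))

    value = ['yes']                    # unhashable input
    normalized_value = value
    if not isinstance(value, c.Hashable):
        normalized_value = None        # avoids TypeError from `in BOOLEANS_TRUE`
    print(normalized_value in BOOLEANS_TRUE)  # False -> falls through to strict handling
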
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1
index 3a1a317ec66..407fc0968a1 100644
--- a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1
@@ -5,7 +5,7 @@ Function Add-CSharpType {
<#
.SYNOPSIS
Compiles one or more C# scripts similar to Add-Type. This exposes
- more configuration options that are useable within Ansible and it
+ more configuration options that are usable within Ansible and it
also allows multiple C# sources to be compiled together.
.PARAMETER References
@@ -312,7 +312,7 @@ Function Add-CSharpType {
# fatal error.
# https://github.com/ansible-collections/ansible.windows/issues/598
$ignore_warnings = [System.Collections.ArrayList]@('1610')
- $compile_units = [System.Collections.Generic.List`1[System.CodeDom.CodeSnippetCompileUnit]]@()
+ $compile_units = [System.Collections.Generic.List`1[string]]@()
foreach ($reference in $References) {
# scan through code and add any assemblies that match
# //AssemblyReference -Name ... [-CLR Framework]
@@ -346,7 +346,7 @@ Function Add-CSharpType {
}
$ignore_warnings.Add($warning_id) > $null
}
- $compile_units.Add((New-Object -TypeName System.CodeDom.CodeSnippetCompileUnit -ArgumentList $reference)) > $null
+ $compile_units.Add($reference) > $null
$type_matches = $type_pattern.Matches($reference)
foreach ($match in $type_matches) {
@@ -381,7 +381,10 @@ Function Add-CSharpType {
$null = New-Item -Path $temp_path -ItemType Directory -Force
try {
- $compile = $provider.CompileAssemblyFromDom($compile_parameters, $compile_units)
+            # FromSource is important; it creates the .cs files with
+            # the required extended attribute for the source to be trusted
+            # when using WDAC.
+ $compile = $provider.CompileAssemblyFromSource($compile_parameters, $compile_units)
}
finally {
# Try to delete the temp path, if this fails and we are running
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1
index fb9fb11c490..449902f5bb9 100644
--- a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1
@@ -21,7 +21,7 @@ Function Convert-StringToSnakeCase($string) {
return $string
}
-# used by Convert-DictToSnakeCase to covert list entries from camelCase
+# used by Convert-DictToSnakeCase to convert list entries from camelCase
# to snake_case
Function Convert-ListToSnakeCase($list) {
$snake_list = [System.Collections.ArrayList]@()
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CommandUtil.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CommandUtil.psm1
index 56b5d392857..208dd911a54 100644
--- a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CommandUtil.psm1
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CommandUtil.psm1
@@ -6,7 +6,7 @@
Function Get-ExecutablePath {
<#
.SYNOPSIS
- Get's the full path to an executable, will search the directory specified or ones in the PATH env var.
+    Gets the full path to an executable; searches the specified directory or the ones in the PATH env var.
.PARAMETER executable
[String]The executable to search for.
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.WebRequest.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.WebRequest.psm1
index 29e5be1673e..59efc5227eb 100644
--- a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.WebRequest.psm1
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.WebRequest.psm1
@@ -17,7 +17,7 @@ Function Get-AnsibleWebRequest {
The protocol method to use, if omitted, will use the default value for the URI protocol specified.
.PARAMETER FollowRedirects
- Whether to follow redirect reponses. This is only valid when using a HTTP URI.
+ Whether to follow redirect responses. This is only valid when using a HTTP URI.
all - Will follow all redirects
none - Will follow no redirects
safe - Will only follow redirects when GET or HEAD is used as the Method
diff --git a/lib/ansible/module_utils/pycompat24.py b/lib/ansible/module_utils/pycompat24.py
deleted file mode 100644
index 27d61485b2c..00000000000
--- a/lib/ansible/module_utils/pycompat24.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c) 2016, Toshio Kuratomi
-# Copyright (c) 2015, Marius Gedminas
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from __future__ import annotations
-
-import sys
-
-from ansible.module_utils.common.warnings import deprecate
-
-
-def get_exception():
- """Get the current exception.
-
- This code needs to work on Python 2.4 through 3.x, so we cannot use
- "except Exception, e:" (SyntaxError on Python 3.x) nor
- "except Exception as e:" (SyntaxError on Python 2.4-2.5).
- Instead we must use ::
-
- except Exception:
- e = get_exception()
-
- """
- deprecate(
- msg='The `ansible.module_utils.pycompat24.get_exception` '
- 'function is deprecated.',
- version='2.19',
- )
- return sys.exc_info()[1]
-
-
-def __getattr__(importable_name):
- """Inject import-time deprecation warning for ``literal_eval()``."""
- if importable_name == 'literal_eval':
- deprecate(
- msg=f'The `ansible.module_utils.pycompat24.'
- f'{importable_name}` function is deprecated.',
- version='2.19',
- )
- from ast import literal_eval
- return literal_eval
-
- raise AttributeError(
- f'cannot import name {importable_name !r} '
- f'has no attribute ({__file__ !s})',
- )
-
-
-__all__ = ('get_exception', 'literal_eval') # pylint: disable=undefined-all-variable
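
For callers migrating off the deleted module, both helpers have direct replacements on every Python that ansible-core still supports:

    # get_exception() -> an ordinary `as` binding:
    try:
        raise RuntimeError('boom')
    except Exception as e:
        print(e)

    # pycompat24.literal_eval -> ast.literal_eval:
    from ast import literal_eval
    print(literal_eval("{'a': 1}"))
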
diff --git a/lib/ansible/module_utils/service.py b/lib/ansible/module_utils/service.py
index 6d3ecea4b8d..013ec0435f6 100644
--- a/lib/ansible/module_utils/service.py
+++ b/lib/ansible/module_utils/service.py
@@ -35,9 +35,8 @@ import platform
import select
import shlex
import subprocess
-import traceback
-from ansible.module_utils.six import PY2, b
+from ansible.module_utils.six import b
from ansible.module_utils.common.text.converters import to_bytes, to_text
@@ -180,18 +179,16 @@ def daemonize(module, cmd):
pipe = os.pipe()
pid = fork_process()
except (OSError, RuntimeError):
- module.fail_json(msg="Error while attempting to fork: %s", exception=traceback.format_exc())
+ module.fail_json(msg="Error while attempting to fork.")
+ except Exception as exc:
+ module.fail_json(msg=to_text(exc))
# we don't do any locking as this should be a unique module/process
if pid == 0:
os.close(pipe[0])
- # if command is string deal with py2 vs py3 conversions for shlex
if not isinstance(cmd, list):
- if PY2:
- cmd = shlex.split(to_bytes(cmd, errors=errors))
- else:
- cmd = shlex.split(to_text(cmd, errors=errors))
+ cmd = shlex.split(to_text(cmd, errors=errors))
# make sure we always use byte strings
run_cmd = []
@@ -246,9 +243,6 @@ def daemonize(module, cmd):
break
return_data += to_bytes(data, errors=errors)
- # Note: no need to specify encoding on py3 as this module sends the
- # pickle to itself (thus same python interpreter so we aren't mixing
- # py2 and py3)
return pickle.loads(to_bytes(return_data, errors=errors))
@@ -292,7 +286,7 @@ def is_systemd_managed(module):
with open('/proc/1/comm', 'r') as init_proc:
init = init_proc.readline().strip()
return init == 'systemd'
- except IOError:
+ except OSError:
# If comm doesn't exist, old kernel, no systemd
return False
diff --git a/lib/ansible/module_utils/testing.py b/lib/ansible/module_utils/testing.py
new file mode 100644
index 00000000000..4f2ed9435a7
--- /dev/null
+++ b/lib/ansible/module_utils/testing.py
@@ -0,0 +1,31 @@
+"""
+Utilities to support unit testing of Ansible Python modules.
+Not supported for use cases other than testing.
+"""
+
+from __future__ import annotations as _annotations
+
+import contextlib as _contextlib
+import json as _json
+import typing as _t
+
+from unittest import mock as _mock
+
+from ansible.module_utils.common import json as _common_json
+from . import basic as _basic
+
+
+@_contextlib.contextmanager
+def patch_module_args(args: dict[str, _t.Any] | None = None) -> _t.Iterator[None]:
+ """Expose the given module args to `AnsibleModule` instances created within this context."""
+ if not isinstance(args, (dict, type(None))):
+ raise TypeError("The `args` arg must be a dict or None.")
+
+ args = dict(ANSIBLE_MODULE_ARGS=args or {})
+ profile = 'legacy' # this should be configurable in the future, once the profile feature is more fully baked
+
+ encoder = _common_json.get_module_encoder(profile, _common_json.Direction.CONTROLLER_TO_MODULE)
+ args = _json.dumps(args, cls=encoder).encode()
+
+ with _mock.patch.object(_basic, '_ANSIBLE_ARGS', args), _mock.patch.object(_basic, '_ANSIBLE_PROFILE', profile):
+ yield
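
A sketch of how a unit test might use the new helper; the argument spec and values are placeholders, not part of the change:

    from ansible.module_utils import basic
    from ansible.module_utils.testing import patch_module_args

    def test_module_sees_patched_args():
        with patch_module_args({'name': 'demo'}):
            module = basic.AnsibleModule(
                argument_spec=dict(name=dict(type='str', required=True)),
            )
            assert module.params['name'] == 'demo'
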
diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py
index c90f0b78fd4..9316dc16091 100644
--- a/lib/ansible/module_utils/urls.py
+++ b/lib/ansible/module_utils/urls.py
@@ -30,6 +30,7 @@ this code instead.
from __future__ import annotations
import base64
+import email.encoders
import email.mime.application
import email.mime.multipart
import email.mime.nonmultipart
@@ -579,7 +580,7 @@ def get_ca_certs(cafile=None, capath=None):
cadata[b_der] = None
except Exception:
continue
- except (OSError, IOError):
+ except OSError:
pass
# paths_checked isn't used any more, but is kept just for ease of debugging
@@ -693,7 +694,7 @@ def _configure_auth(url, url_username, url_password, use_gssapi, force_basic_aut
try:
rc = netrc.netrc(os.environ.get('NETRC'))
login = rc.authenticators(parsed.hostname)
- except IOError:
+ except OSError:
login = None
if login:
@@ -1045,6 +1046,7 @@ def prepare_multipart(fields):
filename = None
elif isinstance(value, Mapping):
filename = value.get('filename')
+ multipart_encoding_str = value.get('multipart_encoding') or 'base64'
content = value.get('content')
if not any((filename, content)):
raise ValueError('at least one of filename or content must be provided')
@@ -1056,14 +1058,16 @@ def prepare_multipart(fields):
except Exception:
mime = 'application/octet-stream'
main_type, sep, sub_type = mime.partition('/')
+
else:
raise TypeError(
'value must be a string, or mapping, cannot be type %s' % value.__class__.__name__
)
if not content and filename:
+ multipart_encoding = set_multipart_encoding(multipart_encoding_str)
with open(to_bytes(filename, errors='surrogate_or_strict'), 'rb') as f:
- part = email.mime.application.MIMEApplication(f.read())
+ part = email.mime.application.MIMEApplication(f.read(), _encoder=multipart_encoding)
del part['Content-Type']
part.add_header('Content-Type', '%s/%s' % (main_type, sub_type))
else:
@@ -1102,11 +1106,24 @@ def prepare_multipart(fields):
)
+def set_multipart_encoding(encoding):
+ """Takes an string with specific encoding type for multipart data.
+ Will return reference to function from email.encoders library.
+ If given string key doesn't exist it will raise a ValueError"""
+ encoders_dict = {
+ "base64": email.encoders.encode_base64,
+ "7or8bit": email.encoders.encode_7or8bit
+ }
+    try:
+        return encoders_dict[encoding]
+    except KeyError:
+        raise ValueError("multipart_encoding must be one of %s." % ", ".join(encoders_dict))
+
+
#
# Module-related functions
#
-
def basic_auth_header(username, password):
"""Takes a username and password and returns a byte string suitable for
using as value of an Authorization header to do basic auth.
@@ -1136,6 +1153,16 @@ def url_argument_spec():
)
+def url_redirect_argument_spec():
+ """
+    Creates an additional argument spec, complementing `url_argument_spec`,
+    for the `follow_redirects` argument
+ """
+ return dict(
+ follow_redirects=dict(type='str', default='safe', choices=['all', 'no', 'none', 'safe', 'urllib2', 'yes']),
+ )
+
+
def fetch_url(module, url, data=None, headers=None, method=None,
use_proxy=None, force=False, last_mod_time=None, timeout=10,
use_gssapi=False, unix_socket=None, ca_path=None, cookies=None, unredirected_headers=None,
@@ -1171,7 +1198,7 @@ def fetch_url(module, url, data=None, headers=None, method=None,
data={...}
resp, info = fetch_url(module,
"http://example.com",
- data=module.jsonify(data),
+ data=json.dumps(data),
headers={'Content-type': 'application/json'},
method="POST")
status_code = info["status"]
@@ -1249,7 +1276,7 @@ def fetch_url(module, url, data=None, headers=None, method=None,
except (ConnectionError, ValueError) as e:
module.fail_json(msg=to_native(e), **info)
except MissingModuleError as e:
- module.fail_json(msg=to_text(e), exception=e.import_traceback)
+ module.fail_json(msg=to_text(e))
except urllib.error.HTTPError as e:
r = e
try:
@@ -1276,13 +1303,12 @@ def fetch_url(module, url, data=None, headers=None, method=None,
except urllib.error.URLError as e:
code = int(getattr(e, 'code', -1))
info.update(dict(msg="Request failed: %s" % to_native(e), status=code))
- except socket.error as e:
- info.update(dict(msg="Connection failure: %s" % to_native(e), status=-1))
+ except OSError as ex:
+ info.update(dict(msg=f"Connection failure: {ex}", status=-1))
except http.client.BadStatusLine as e:
info.update(dict(msg="Connection failure: connection was closed before a valid response was received: %s" % to_native(e.line), status=-1))
- except Exception as e:
- info.update(dict(msg="An unknown error occurred: %s" % to_native(e), status=-1),
- exception=traceback.format_exc())
+ except Exception as ex:
+ info.update(dict(msg="An unknown error occurred: %s" % to_native(ex), status=-1, exception=traceback.format_exc()))
finally:
tempfile.tempdir = old_tempdir
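
A sketch of the new per-field encoding knob in use; the filenames are placeholders and must exist on disk, and any value other than 'base64' or '7or8bit' raises ValueError via set_multipart_encoding():

    from ansible.module_utils.urls import prepare_multipart

    content_type, body = prepare_multipart({
        'attachment': {'filename': '/tmp/upload.bin'},  # default: base64
        'notes': {
            'filename': '/tmp/notes.txt',
            'multipart_encoding': '7or8bit',            # keep text readable on the wire
        },
    })
    print(content_type)  # multipart/form-data; boundary=...
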
diff --git a/lib/ansible/module_utils/yumdnf.py b/lib/ansible/module_utils/yumdnf.py
index b2cbba3fde2..bdcf5ad7f72 100644
--- a/lib/ansible/module_utils/yumdnf.py
+++ b/lib/ansible/module_utils/yumdnf.py
@@ -32,10 +32,6 @@ yumdnf_argument_spec = dict(
enablerepo=dict(type='list', elements='str', default=[]),
exclude=dict(type='list', elements='str', default=[]),
installroot=dict(type='str', default="/"),
- install_repoquery=dict(
- type='bool', default=True,
- removed_in_version='2.20', removed_from_collection='ansible.builtin',
- ),
install_weak_deps=dict(type='bool', default=True),
list=dict(type='str'),
name=dict(type='list', elements='str', aliases=['pkg'], default=[]),
@@ -85,7 +81,6 @@ class YumDnf(metaclass=ABCMeta):
self.enablerepo = self.module.params.get('enablerepo', [])
self.exclude = self.module.params['exclude']
self.installroot = self.module.params['installroot']
- self.install_repoquery = self.module.params['install_repoquery']
self.install_weak_deps = self.module.params['install_weak_deps']
self.list = self.module.params['list']
self.names = [p.strip() for p in self.module.params['name']]
diff --git a/lib/ansible/modules/apt.py b/lib/ansible/modules/apt.py
index 266165f22a2..53c403133a2 100644
--- a/lib/ansible/modules/apt.py
+++ b/lib/ansible/modules/apt.py
@@ -17,6 +17,12 @@ description:
- Manages I(apt) packages (such as for Debian/Ubuntu).
version_added: "0.0.2"
options:
+ auto_install_module_deps:
+ description:
+ - Automatically install dependencies required to run this module.
+ type: bool
+ default: yes
+ version_added: 2.19
name:
description:
- A list of package names, like V(foo), or package specifier with version, like V(foo=1.0) or V(foo>=1.0).
@@ -191,8 +197,7 @@ options:
default: 60
version_added: "2.12"
requirements:
- - python-apt (python 2)
- - python3-apt (python 3)
+ - python3-apt
- aptitude (before 2.4)
author: "Matthew Williams (@mgwilliams)"
extends_documentation_fragment: action_common_attributes
@@ -214,8 +219,8 @@ notes:
- When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name) option.
- When O(default_release) is used, an implicit priority of 990 is used. This is the same behavior as C(apt-get -t).
- When an exact version is specified, an implicit priority of 1001 is used.
- - If the interpreter can't import C(python-apt)/C(python3-apt) the module will check for it in system-owned interpreters as well.
- If the dependency can't be found, the module will attempt to install it.
+ - If the interpreter can't import C(python3-apt) the module will check for it in system-owned interpreters as well.
+    If the dependency can't be found, the module will attempt to install it, depending on the value of O(auto_install_module_deps).
If the dependency is found or installed, the module will be respawned under the correct interpreter.
"""
@@ -367,6 +372,7 @@ import locale as locale_module
import os
import re
import secrets
+import shlex
import shutil
import sys
import tempfile
@@ -385,8 +391,6 @@ APT_GET_ZERO = "\n0 upgraded, 0 newly installed, 0 to remove"
APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed, 0 to remove"
APT_LISTS_PATH = "/var/lib/apt/lists"
APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp"
-APT_MARK_INVALID_OP = 'Invalid operation'
-APT_MARK_INVALID_OP_DEB6 = 'Usage: apt-mark [options] {markauto|unmarkauto} packages'
CLEAN_OP_CHANGED_STR = dict(
autoremove='The following packages will be REMOVED',
@@ -483,7 +487,7 @@ class PolicyRcD(object):
def package_split(pkgspec):
- parts = re.split(r'(>?=)', pkgspec, 1)
+ parts = re.split(r'(>?=)', pkgspec, maxsplit=1)
if len(parts) > 1:
return parts
return parts[0], None, None
@@ -685,26 +689,30 @@ def parse_diff(output):
return {'prepared': '\n'.join(diff[diff_start:diff_end])}
-def mark_installed_manually(m, packages):
+def mark_installed(m: AnsibleModule, packages: list[str], manual: bool) -> None:
+ """Mark packages as manually or automatically installed."""
if not packages:
return
+ if manual:
+ mark_msg = "manually"
+ mark_op = "manual"
+ else:
+ mark_msg = "auto"
+ mark_op = "auto"
+
apt_mark_cmd_path = m.get_bin_path("apt-mark")
# https://github.com/ansible/ansible/issues/40531
if apt_mark_cmd_path is None:
- m.warn("Could not find apt-mark binary, not marking package(s) as manually installed.")
+ m.warn(f"Could not find apt-mark binary, not marking package(s) as {mark_msg} installed.")
return
- cmd = "%s manual %s" % (apt_mark_cmd_path, ' '.join(packages))
+ cmd = [apt_mark_cmd_path, mark_op] + packages
rc, out, err = m.run_command(cmd)
- if APT_MARK_INVALID_OP in err or APT_MARK_INVALID_OP_DEB6 in err:
- cmd = "%s unmarkauto %s" % (apt_mark_cmd_path, ' '.join(packages))
- rc, out, err = m.run_command(cmd)
-
if rc != 0:
- m.fail_json(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc)
+ m.fail_json(msg=f"Command {shlex.join(cmd)!r} failed.", stdout=out, stderr=err, rc=rc)
def install(m, pkgspec, cache, upgrade=False, default_release=None,
@@ -830,7 +838,7 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None,
data = dict(changed=False)
if not build_dep and not m.check_mode:
- mark_installed_manually(m, package_names)
+ mark_installed(m, package_names, manual=True)
return (status, data)
@@ -850,6 +858,7 @@ def install_deb(
allow_downgrade,
allow_change_held_packages,
dpkg_options,
+ lock_timeout,
):
changed = False
deps_to_install = []
@@ -898,15 +907,19 @@ def install_deb(
# install the deps through apt
retvals = {}
if deps_to_install:
+ install_dpkg_options = f"{expand_dpkg_options(dpkg_options)} -o DPkg::Lock::Timeout={lock_timeout}"
(success, retvals) = install(m=m, pkgspec=deps_to_install, cache=cache,
install_recommends=install_recommends,
fail_on_autoremove=fail_on_autoremove,
allow_unauthenticated=allow_unauthenticated,
allow_downgrade=allow_downgrade,
allow_change_held_packages=allow_change_held_packages,
- dpkg_options=expand_dpkg_options(dpkg_options))
+ dpkg_options=install_dpkg_options)
if not success:
m.fail_json(**retvals)
+ # Mark the dependencies as auto installed
+ # https://github.com/ansible/ansible/issues/78123
+ mark_installed(m, deps_to_install, manual=False)
changed = retvals.get('changed', False)
if pkgs_to_install:
@@ -1233,6 +1246,7 @@ def main():
allow_downgrade=dict(type='bool', default=False, aliases=['allow-downgrade', 'allow_downgrades', 'allow-downgrades']),
allow_change_held_packages=dict(type='bool', default=False),
lock_timeout=dict(type='int', default=60),
+ auto_install_module_deps=dict(type='bool', default=True),
),
mutually_exclusive=[['deb', 'package', 'upgrade']],
required_one_of=[['autoremove', 'deb', 'package', 'update_cache', 'upgrade']],
@@ -1263,12 +1277,12 @@ def main():
p = module.params
install_recommends = p['install_recommends']
- dpkg_options = expand_dpkg_options(p['dpkg_options'])
+ dpkg_options = f"{expand_dpkg_options(p['dpkg_options'])} -o DPkg::Lock::Timeout={p['lock_timeout']}"
if not HAS_PYTHON_APT:
# This interpreter can't see the apt Python library- we'll do the following to try and fix that:
# 1) look in common locations for system-owned interpreters that can see it; if we find one, respawn under it
- # 2) finding none, try to install a matching python-apt package for the current interpreter version;
+ # 2) finding none, try to install a matching python3-apt package for the current interpreter version;
# we limit to the current interpreter version to try and avoid installing a whole other Python just
# for apt support
# 3) if we installed a support package, try to respawn under what we think is the right interpreter (could be
@@ -1294,39 +1308,47 @@ def main():
# don't make changes if we're in check_mode
if module.check_mode:
- module.fail_json(msg="%s must be installed to use check mode. "
- "If run normally this module can auto-install it." % apt_pkg_name)
-
- # We skip cache update in auto install the dependency if the
- # user explicitly declared it with update_cache=no.
- if module.params.get('update_cache') is False:
- module.warn("Auto-installing missing dependency without updating cache: %s" % apt_pkg_name)
- else:
- module.warn("Updating cache and auto-installing missing dependency: %s" % apt_pkg_name)
- module.run_command([APT_GET_CMD, 'update'], check_rc=True)
-
- # try to install the apt Python binding
- apt_pkg_cmd = [APT_GET_CMD, 'install', apt_pkg_name, '-y', '-q', dpkg_options]
-
- if install_recommends is False:
- apt_pkg_cmd.extend(["-o", "APT::Install-Recommends=no"])
- elif install_recommends is True:
- apt_pkg_cmd.extend(["-o", "APT::Install-Recommends=yes"])
- # install_recommends is None uses the OS default
-
- module.run_command(apt_pkg_cmd, check_rc=True)
-
- # try again to find the bindings in common places
- interpreter = probe_interpreters_for_module(interpreters, 'apt')
-
- if interpreter:
- # found the Python bindings; respawn this module under the interpreter where we found them
- # NB: respawn is somewhat wasteful if it's this interpreter, but simplifies the code
- respawn_module(interpreter)
- # this is the end of the line for this process, it will exit here once the respawned module has completed
- else:
- # we've done all we can do; just tell the user it's busted and get out
- module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
+ module.fail_json(
+ msg=f"{apt_pkg_name} must be installed to use check mode. "
+ "If run normally this module can auto-install it, "
+ "see the auto_install_module_deps option.",
+ )
+ elif p['auto_install_module_deps']:
+            # We skip the cache update when auto-installing the dependency
+            # if the user explicitly declared it with update_cache=no.
+ if module.params.get('update_cache') is False:
+ module.warn("Auto-installing missing dependency without updating cache: %s" % apt_pkg_name)
+ else:
+ module.warn("Updating cache and auto-installing missing dependency: %s" % apt_pkg_name)
+ module.run_command([APT_GET_CMD, 'update'], check_rc=True)
+
+ # try to install the apt Python binding
+ apt_pkg_cmd = [APT_GET_CMD, 'install', apt_pkg_name, '-y', '-q', dpkg_options]
+
+ if install_recommends is False:
+ apt_pkg_cmd.extend(["-o", "APT::Install-Recommends=no"])
+ elif install_recommends is True:
+ apt_pkg_cmd.extend(["-o", "APT::Install-Recommends=yes"])
+ # install_recommends is None uses the OS default
+
+ module.run_command(apt_pkg_cmd, check_rc=True)
+
+ # try again to find the bindings in common places
+ interpreter = probe_interpreters_for_module(interpreters, 'apt')
+
+ if interpreter:
+ # found the Python bindings; respawn this module under the interpreter where we found them
+ # NB: respawn is somewhat wasteful if it's this interpreter, but simplifies the code
+ respawn_module(interpreter)
+ # this is the end of the line for this process, it will exit here once the respawned module has completed
+
+ # we've done all we can do; just tell the user it's busted and get out
+ py_version = sys.version.replace("\n", "")
+ module.fail_json(
+ msg=f"Could not import the {apt_pkg_name} module using {sys.executable} ({py_version}). "
+ f"Ensure {apt_pkg_name} package is installed (either manually or via the auto_install_module_deps option) "
+ f"or that you have specified the correct ansible_python_interpreter. (attempted {interpreters}).",
+ )
if p['clean'] is True:
aptclean_stdout, aptclean_stderr, aptclean_diff = aptclean(module)
@@ -1456,7 +1478,11 @@ def main():
allow_unauthenticated=allow_unauthenticated,
allow_change_held_packages=allow_change_held_packages,
allow_downgrade=allow_downgrade,
- force=force_yes, fail_on_autoremove=fail_on_autoremove, dpkg_options=p['dpkg_options'])
+ force=force_yes,
+ fail_on_autoremove=fail_on_autoremove,
+ dpkg_options=p['dpkg_options'],
+ lock_timeout=p['lock_timeout']
+ )
unfiltered_packages = p['package'] or ()
packages = [package.strip() for package in unfiltered_packages if package != '*']
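
The new failure message in mark_installed() leans on shlex.join (Python 3.8+), which renders an argv list as a copy-pasteable shell command:

    import shlex

    cmd = ['/usr/bin/apt-mark', 'auto', 'libfoo1', 'libbar2']  # hypothetical packages
    print(f"Command {shlex.join(cmd)!r} failed.")
    # Command '/usr/bin/apt-mark auto libfoo1 libbar2' failed.
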
diff --git a/lib/ansible/modules/apt_key.py b/lib/ansible/modules/apt_key.py
index 3828f9a882b..06648041e32 100644
--- a/lib/ansible/modules/apt_key.py
+++ b/lib/ansible/modules/apt_key.py
@@ -33,6 +33,8 @@ notes:
To generate a full-fingerprint imported key: C(apt-key adv --list-public-keys --with-fingerprint --with-colons)."
- If you specify both the key O(id) and the O(url) with O(state=present), the task can verify or add the key as needed.
- Adding a new key requires an apt cache update (e.g. using the M(ansible.builtin.apt) module's C(update_cache) option).
+  - The C(apt-key) utility has been deprecated and removed from modern Debian versions; use M(ansible.builtin.deb822_repository) as an alternative
+    to M(ansible.builtin.apt_repository) + apt_key combinations.
requirements:
- gpg
seealso:
@@ -170,9 +172,6 @@ short_id:
import os
-# FIXME: standardize into module_common
-from traceback import format_exc
-
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
@@ -196,8 +195,16 @@ def lang_env(module):
def find_needed_binaries(module):
global apt_key_bin
global gpg_bin
- apt_key_bin = module.get_bin_path('apt-key', required=True)
- gpg_bin = module.get_bin_path('gpg', required=True)
+
+ try:
+ apt_key_bin = module.get_bin_path('apt-key', required=True)
+ except ValueError as e:
+        module.exit_json(msg=f'{to_native(e)}. Apt-key has been deprecated. See the deb822_repository module as an alternative.')
+
+ try:
+ gpg_bin = module.get_bin_path('gpg', required=True)
+ except ValueError as e:
+ module.exit_json(msg=to_native(e))
def add_http_proxy(cmd):
@@ -310,7 +317,7 @@ def download_key(module, url):
return rsp.read()
except Exception:
- module.fail_json(msg="error getting key id from url: %s" % url, traceback=format_exc())
+ module.fail_json(msg=f"Error getting key id from url: {url}")
def get_key_id_from_file(module, filename, data=None):
diff --git a/lib/ansible/modules/apt_repository.py b/lib/ansible/modules/apt_repository.py
index b17801f5f89..2f701d8c429 100644
--- a/lib/ansible/modules/apt_repository.py
+++ b/lib/ansible/modules/apt_repository.py
@@ -88,8 +88,8 @@ options:
description:
- Whether to automatically try to install the Python apt library or not, if it is not already installed.
Without this library, the module does not work.
- - Runs C(apt-get install python-apt) for Python 2, and C(apt-get install python3-apt) for Python 3.
- - Only works with the system Python 2 or Python 3. If you are using a Python on the remote that is not
+ - Runs C(apt-get install python3-apt).
+ - Only works with the system Python. If you are using a Python on the remote that is not
the system Python, set O(install_python_apt=false) and ensure that the Python apt library
for your Python version is installed some other way.
type: bool
@@ -98,8 +98,7 @@ author:
- Alexander Saltanov (@sashka)
version_added: "0.7"
requirements:
- - python-apt (python 2)
- - python3-apt (python 3)
+ - python3-apt
- apt-key or gpg
"""
@@ -143,7 +142,7 @@ EXAMPLES = """
- name: somerepo | apt source
ansible.builtin.apt_repository:
- repo: "deb [arch=amd64 signed-by=/etc/apt/keyrings/myrepo.asc] https://download.example.com/linux/ubuntu {{ ansible_distribution_release }} stable"
+ repo: "deb [arch=amd64 signed-by=/etc/apt/keyrings/somerepo.asc] https://download.example.com/linux/ubuntu {{ ansible_distribution_release }} stable"
state: present
"""
@@ -232,14 +231,15 @@ class SourcesList(object):
self.files_mapping = {} # internal DS for tracking symlinks
# Repositories that we're adding -- used to implement mode param
self.new_repos = set()
- self.default_file = self._apt_cfg_file('Dir::Etc::sourcelist')
+ self.default_file = apt_pkg.config.find_file('Dir::Etc::sourcelist')
# read sources.list if it exists
if os.path.isfile(self.default_file):
self.load(self.default_file)
# read sources.list.d
- for file in glob.iglob('%s/*.list' % self._apt_cfg_dir('Dir::Etc::sourceparts')):
+ self.sources_dir = apt_pkg.config.find_dir('Dir::Etc::sourceparts')
+ for file in glob.iglob(f'{self.sources_dir}/*.list'):
if os.path.islink(file):
self.files_mapping[file] = os.readlink(file)
self.load(file)
@@ -255,7 +255,7 @@ class SourcesList(object):
if '/' in filename:
return filename
else:
- return os.path.abspath(os.path.join(self._apt_cfg_dir('Dir::Etc::sourceparts'), filename))
+ return os.path.abspath(os.path.join(self.sources_dir, filename))
def _suggest_filename(self, line):
def _cleanup_filename(s):
@@ -313,28 +313,6 @@ class SourcesList(object):
return valid, enabled, source, comment
- @staticmethod
- def _apt_cfg_file(filespec):
- """
- Wrapper for `apt_pkg` module for running with Python 2.5
- """
- try:
- result = apt_pkg.config.find_file(filespec)
- except AttributeError:
- result = apt_pkg.Config.FindFile(filespec)
- return result
-
- @staticmethod
- def _apt_cfg_dir(dirspec):
- """
- Wrapper for `apt_pkg` module for running with Python 2.5
- """
- try:
- result = apt_pkg.config.find_dir(dirspec)
- except AttributeError:
- result = apt_pkg.Config.FindDir(dirspec)
- return result
-
def load(self, file):
group = []
f = open(file, 'r')
@@ -355,8 +333,8 @@ class SourcesList(object):
try:
fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d)
- except (OSError, IOError) as e:
- self.module.fail_json(msg='Unable to create temp file at "%s" for apt source: %s' % (d, to_native(e)))
+ except OSError as ex:
+ raise Exception(f'Unable to create temp file at {d!r} for apt source.') from ex
f = os.fdopen(fd, 'w')
for n, valid, enabled, source, comment in sources:
@@ -372,8 +350,8 @@ class SourcesList(object):
try:
f.write(line)
- except IOError as ex:
- self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, to_native(ex)))
+ except OSError as ex:
+ raise Exception(f"Failed to write to file {tmp_path!r}.") from ex
if filename in self.files_mapping:
# Write to symlink target instead of replacing symlink as a normal file
self.module.atomic_move(tmp_path, self.files_mapping[filename])
@@ -475,7 +453,10 @@ class UbuntuSourcesList(SourcesList):
self.apt_key_bin = self.module.get_bin_path('apt-key', required=False)
self.gpg_bin = self.module.get_bin_path('gpg', required=False)
if not self.apt_key_bin and not self.gpg_bin:
- self.module.fail_json(msg='Either apt-key or gpg binary is required, but neither could be found')
+            msg = 'Either apt-key or gpg binary is required, but neither could be found. ' \
+                'The apt-key CLI has been deprecated and removed in modern Debian and derivatives; ' \
+                'you might want to use "deb822_repository" instead.'
+ self.module.fail_json(msg)
def __deepcopy__(self, memo=None):
return UbuntuSourcesList(self.module)
@@ -526,8 +507,8 @@ class UbuntuSourcesList(SourcesList):
if os.path.exists(key_file):
try:
rc, out, err = self.module.run_command([self.gpg_bin, '--list-packets', key_file])
- except (IOError, OSError) as e:
- self.debug("Could check key against file %s: %s" % (key_file, to_native(e)))
+ except OSError as ex:
+ self.debug(f"Could check key against file {key_file!r}: {ex}")
continue
if key_fingerprint in out:
@@ -576,8 +557,8 @@ class UbuntuSourcesList(SourcesList):
with open(keyfile, 'wb') as f:
f.write(stdout)
self.module.log('Added repo key "%s" for apt to file "%s"' % (info['signing_key_fingerprint'], keyfile))
- except (OSError, IOError) as e:
- self.module.fail_json(msg='Unable to add required signing key for%s ', rc=rc, stderr=stderr, error=to_native(e))
+ except OSError as ex:
+ self.module.fail_json(msg='Unable to add required signing key.', rc=rc, stderr=stderr, error=str(ex), exception=ex)
# apt source file
file = file or self._suggest_filename('%s_%s' % (line, self.codename))
@@ -771,9 +752,9 @@ def main():
)
module.fail_json(msg=msg)
- except (OSError, IOError) as ex:
+ except OSError as ex:
revert_sources_list(sources_before, sources_after, sourceslist_before)
- module.fail_json(msg=to_native(ex))
+ raise
module.exit_json(changed=changed, repo=repo, sources_added=sources_added, sources_removed=sources_removed, state=state, diff=diff)
diff --git a/lib/ansible/modules/assemble.py b/lib/ansible/modules/assemble.py
index ff570aee1b9..b1329496d96 100644
--- a/lib/ansible/modules/assemble.py
+++ b/lib/ansible/modules/assemble.py
@@ -80,7 +80,7 @@ attributes:
bypass_host_loop:
support: none
check_mode:
- support: none
+ support: full
diff_mode:
support: full
platform:
@@ -181,15 +181,15 @@ def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, igno
return temp_path
-def cleanup(path, result=None):
+def cleanup(module, path, result=None):
# cleanup just in case
if os.path.exists(path):
try:
os.remove(path)
- except (IOError, OSError) as e:
+ except OSError as ex:
# don't error on possible race conditions, but keep warning
if result is not None:
- result['warnings'] = ['Unable to remove temp file (%s): %s' % (path, to_native(e))]
+ module.error_as_warning(f'Unable to remove temp file {path!r}.', exception=ex)
def main():
@@ -212,6 +212,7 @@ def main():
decrypt=dict(type='bool', default=True),
),
add_file_common_args=True,
+ supports_check_mode=True,
)
changed = False
@@ -261,17 +262,18 @@ def main():
(rc, out, err) = module.run_command(validate % path)
result['validation'] = dict(rc=rc, stdout=out, stderr=err)
if rc != 0:
- cleanup(path)
+ cleanup(module, path)
module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc, err))
if backup and dest_hash is not None:
result['backup_file'] = module.backup_local(dest)
- module.atomic_move(path, dest, unsafe_writes=module.params['unsafe_writes'])
+ if not module.check_mode:
+ module.atomic_move(path, dest, unsafe_writes=module.params['unsafe_writes'])
changed = True
- cleanup(path, result)
+ cleanup(module, path, result)
- # handle file permissions
+ # handle file permissions (check mode aware)
file_args = module.load_file_common_arguments(module.params)
result['changed'] = module.set_fs_attributes_if_different(file_args, changed)
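A hedged sketch of the check-mode pattern these hunks introduce: all read-only work (assembly, checksum comparison, validation) still runs, and only the destructive move is gated. Plain shutil/hashlib stand in for the module's `atomic_move` and `sha1` helpers:

```python
import hashlib
import os
import shutil
import tempfile

def assemble(fragments_dir: str, dest: str, check_mode: bool = False) -> bool:
    """Concatenate fragments in sorted order; report whether dest would change."""
    fd, tmp_path = tempfile.mkstemp()
    with os.fdopen(fd, 'wb') as tmp:
        for name in sorted(os.listdir(fragments_dir)):
            with open(os.path.join(fragments_dir, name), 'rb') as frag:
                shutil.copyfileobj(frag, tmp)

    def sha1(path: str) -> str:
        with open(path, 'rb') as f:
            return hashlib.sha1(f.read()).hexdigest()

    changed = not os.path.exists(dest) or sha1(tmp_path) != sha1(dest)
    if changed and not check_mode:
        shutil.move(tmp_path, dest)  # the real module uses module.atomic_move() here
    else:
        os.remove(tmp_path)  # mirror the module's cleanup() of the temp file
    return changed
```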
diff --git a/lib/ansible/modules/async_status.py b/lib/ansible/modules/async_status.py
index 0a4eeb53ac2..26763341e2b 100644
--- a/lib/ansible/modules/async_status.py
+++ b/lib/ansible/modules/async_status.py
@@ -5,6 +5,7 @@
from __future__ import annotations
+import sys
DOCUMENTATION = r"""
---
@@ -27,6 +28,8 @@ options:
type: str
choices: [ cleanup, status ]
default: status
+notes:
+ - The RV(started) and RV(finished) return values were updated to return V(True) or V(False) instead of V(1) or V(0) in ansible-core 2.19.
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.flow
@@ -67,7 +70,7 @@ EXAMPLES = r"""
ansible.builtin.async_status:
jid: '{{ dnf_sleeper.ansible_job_id }}'
register: job_result
- until: job_result.finished
+ until: job_result is finished
retries: 100
delay: 10
@@ -84,15 +87,15 @@ ansible_job_id:
type: str
sample: '360874038559.4169'
finished:
- description: Whether the asynchronous job has finished (V(1)) or not (V(0))
+ description: Whether the asynchronous job has finished or not
returned: always
- type: int
- sample: 1
+ type: bool
+ sample: true
started:
- description: Whether the asynchronous job has started (V(1)) or not (V(0))
+ description: Whether the asynchronous job has started or not
returned: always
- type: int
- sample: 1
+ type: bool
+ sample: true
stdout:
description: Any output returned by async_wrapper
returned: always
@@ -111,8 +114,6 @@ import json
import os
from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six import iteritems
-from ansible.module_utils.common.text.converters import to_native
def main():
@@ -135,7 +136,7 @@ def main():
log_path = os.path.join(async_dir, jid)
if not os.path.exists(log_path):
- module.fail_json(msg="could not find job", ansible_job_id=jid, started=1, finished=1)
+ module.fail_json(msg="could not find job", ansible_job_id=jid, started=True, finished=True)
if mode == 'cleanup':
os.unlink(log_path)
@@ -152,21 +153,20 @@ def main():
except Exception:
if not data:
# file not written yet? That means it is running
- module.exit_json(results_file=log_path, ansible_job_id=jid, started=1, finished=0)
+ module.exit_json(results_file=log_path, ansible_job_id=jid, started=True, finished=False)
else:
module.fail_json(ansible_job_id=jid, results_file=log_path,
- msg="Could not parse job output: %s" % data, started=1, finished=1)
+ msg="Could not parse job output: %s" % data, started=True, finished=True)
if 'started' not in data:
- data['finished'] = 1
+ data['finished'] = True
data['ansible_job_id'] = jid
elif 'finished' not in data:
- data['finished'] = 0
+ data['finished'] = False
- # Fix error: TypeError: exit_json() keywords must be strings
- data = {to_native(k): v for k, v in iteritems(data)}
-
- module.exit_json(**data)
+ # just write the module output directly to stdout and exit; bypass other processing done by exit_json since it's already been done
+ print(f"\n{json.dumps(data)}") # pylint: disable=ansible-bad-function
+ sys.exit(0) # pylint: disable=ansible-bad-function
if __name__ == '__main__':
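A condensed, standalone sketch of the status logic after this change, with the new boolean flags; the assumed layout (one JSON file per job id under the async directory) matches what async_wrapper writes:

```python
import json
import os

def job_status(async_dir: str, jid: str) -> dict:
    # Sketch only; the real module also handles the 'cleanup' mode and failure exits.
    log_path = os.path.join(async_dir, jid)
    if not os.path.exists(log_path):
        return {"failed": True, "msg": "could not find job", "ansible_job_id": jid,
                "started": True, "finished": True}
    with open(log_path) as f:
        raw = f.read()
    if not raw:
        # file not written yet, the job is still running
        return {"results_file": log_path, "ansible_job_id": jid, "started": True, "finished": False}
    data = json.loads(raw)
    if "started" not in data:
        data["finished"] = True
    elif "finished" not in data:
        data["finished"] = False
    data["ansible_job_id"] = jid
    return data
```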
diff --git a/lib/ansible/modules/async_wrapper.py b/lib/ansible/modules/async_wrapper.py
index d33ebe196ed..65d013ca451 100644
--- a/lib/ansible/modules/async_wrapper.py
+++ b/lib/ansible/modules/async_wrapper.py
@@ -6,7 +6,6 @@
from __future__ import annotations
-import errno
import json
import shlex
import shutil
@@ -122,24 +121,14 @@ def _get_interpreter(module_path):
return head[2:head.index(b'\n')].strip().split(b' ')
-def _make_temp_dir(path):
- # TODO: Add checks for permissions on path.
- try:
- os.makedirs(path)
- except OSError as e:
- if e.errno != errno.EEXIST:
- raise
-
-
def jwrite(info):
-
jobfile = job_path + ".tmp"
tjob = open(jobfile, "w")
try:
tjob.write(json.dumps(info))
- except (IOError, OSError) as e:
- notice('failed to write to %s: %s' % (jobfile, str(e)))
- raise e
+ except OSError as ex:
+ notice(f'failed to write to {jobfile!r}: {ex}')
+ raise
finally:
tjob.close()
os.rename(jobfile, job_path)
@@ -147,7 +136,9 @@ def jwrite(info):
def _run_module(wrapped_cmd, jid):
- jwrite({"started": 1, "finished": 0, "ansible_job_id": jid})
+ # DTFIX-FUTURE: needs rework for serialization profiles
+
+ jwrite({"started": True, "finished": False, "ansible_job_id": jid})
result = {}
@@ -188,6 +179,9 @@ def _run_module(wrapped_cmd, jid):
module_warnings = result.get('warnings', [])
if not isinstance(module_warnings, list):
module_warnings = [module_warnings]
+
+ # this relies on the controller's fallback conversion of string warnings to WarningMessageDetail instances, and assumes
+ # that the module result and warning collection are basic JSON datatypes (eg, no tags or other custom collections).
module_warnings.extend(json_warnings)
result['warnings'] = module_warnings
@@ -195,10 +189,10 @@ def _run_module(wrapped_cmd, jid):
result['stderr'] = stderr
jwrite(result)
- except (OSError, IOError):
+ except OSError:
e = sys.exc_info()[1]
result = {
- "failed": 1,
+ "failed": True,
"cmd": wrapped_cmd,
"msg": to_text(e),
"outdata": outdata, # temporary notice only
@@ -207,9 +201,9 @@ def _run_module(wrapped_cmd, jid):
result['ansible_job_id'] = jid
jwrite(result)
- except (ValueError, Exception):
+ except Exception:
result = {
- "failed": 1,
+ "failed": True,
"cmd": wrapped_cmd,
"data": outdata, # temporary notice only
"stderr": stderr,
@@ -252,12 +246,13 @@ def main():
job_path = os.path.join(jobdir, jid)
try:
- _make_temp_dir(jobdir)
+ # TODO: Add checks for permissions on path.
+ os.makedirs(jobdir, exist_ok=True)
except Exception as e:
end({
- "failed": 1,
+ "failed": True,
"msg": "could not create directory: %s - %s" % (jobdir, to_text(e)),
- "exception": to_text(traceback.format_exc()),
+ "exception": to_text(traceback.format_exc()), # NB: task executor compat will coerce to the correct dataclass type
}, 1)
# immediately exit this process, leaving an orphaned process
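The removed `_make_temp_dir` helper predates `exist_ok`; the two forms below are equivalent for the EEXIST case while still raising on real failures such as EACCES:

```python
import errno
import os

path = '/tmp/example_jobdir'  # illustrative

# Removed pattern: swallow only EEXIST, re-raise everything else.
try:
    os.makedirs(path)
except OSError as e:
    if e.errno != errno.EEXIST:
        raise

# Replacement: identical semantics in one call.
os.makedirs(path, exist_ok=True)
```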
@@ -288,7 +283,7 @@ def main():
continue
notice("Return async_wrapper task started.")
- end({"failed": 0, "started": 1, "finished": 0, "ansible_job_id": jid, "results_file": job_path,
+ end({"failed": False, "started": True, "finished": False, "ansible_job_id": jid, "results_file": job_path,
"_ansible_suppress_tmpdir_delete": (not preserve_tmp)}, 0)
else:
# The actual wrapper process
diff --git a/lib/ansible/modules/command.py b/lib/ansible/modules/command.py
index 2ce939ac289..82d35fda668 100644
--- a/lib/ansible/modules/command.py
+++ b/lib/ansible/modules/command.py
@@ -15,12 +15,11 @@ version_added: historical
description:
- The M(ansible.builtin.command) module takes the command name followed by a list of space-delimited arguments.
- The given command will be executed on all selected nodes.
- - The command(s) will not be
- processed through the shell, so variables like C($HOSTNAME) and operations
- like C("*"), C("<"), C(">"), C("|"), C(";") and C("&") will not work.
+ - The command(s) will not be processed through the shell, so operations like C("*"), C("<"), C(">"), C("|"), C(";") and C("&") will not work.
+ Also, environment variables are resolved by Python rather than the shell (see O(expand_argument_vars)) and are left unchanged if not matched.
Use the M(ansible.builtin.shell) module if you need these features.
- - To create C(command) tasks that are easier to read than the ones using space-delimited
- arguments, pass parameters using the C(args) L(task keyword,https://docs.ansible.com/ansible/latest/reference_appendices/playbooks_keywords.html#task)
+ - To create C(command) tasks that are easier to read than the ones using space-delimited arguments,
+ pass parameters using the C(args) L(task keyword,https://docs.ansible.com/ansible/latest/reference_appendices/playbooks_keywords.html#task)
or use O(cmd) parameter.
- Either a free form command or O(cmd) parameter is required, see the examples.
- For Windows targets, use the M(ansible.windows.win_command) module instead.
@@ -41,8 +40,8 @@ attributes:
options:
expand_argument_vars:
description:
- - Expands the arguments that are variables, for example C($HOME) will be expanded before being passed to the
- command to run.
+ - Expands the arguments that are variables, for example C($HOME) will be expanded before being passed to the command to run.
+ - If a variable is not matched, it is left unchanged, unlike shell substitution which would remove it.
- Set to V(false) to disable expansion and treat the value as a literal argument.
type: bool
default: true
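The "left unchanged if not matched" behaviour is standard Python expansion semantics (the module resolves variables with `os.path.expandvars`-style rules rather than shell substitution); a quick illustration:

```python
import os

os.environ.setdefault('GREETING', 'hello')

print(os.path.expandvars('$GREETING world'))        # -> hello world
print(os.path.expandvars('$NOT_DEFINED_ANYWHERE'))  # -> $NOT_DEFINED_ANYWHERE (left as-is,
                                                    #    where a shell would substitute '')
```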
@@ -250,6 +249,7 @@ def main():
argument_spec=dict(
_raw_params=dict(),
_uses_shell=dict(type='bool', default=False),
+ cmd=dict(),
argv=dict(type='list', elements='str'),
chdir=dict(type='path'),
executable=dict(),
@@ -261,12 +261,14 @@ def main():
stdin_add_newline=dict(type='bool', default=True),
strip_empty_ends=dict(type='bool', default=True),
),
+ required_one_of=[['_raw_params', 'cmd', 'argv']],
+ mutually_exclusive=[['_raw_params', 'cmd', 'argv']],
supports_check_mode=True,
)
shell = module.params['_uses_shell']
chdir = module.params['chdir']
executable = module.params['executable']
- args = module.params['_raw_params']
+ args = module.params['_raw_params'] or module.params['cmd']
argv = module.params['argv']
creates = module.params['creates']
removes = module.params['removes']
@@ -282,16 +284,6 @@ def main():
module.warn("As of Ansible 2.4, the parameter 'executable' is no longer supported with the 'command' module. Not using '%s'." % executable)
executable = None
- if (not args or args.strip() == '') and not argv:
- r['rc'] = 256
- r['msg'] = "no command given"
- module.fail_json(**r)
-
- if args and argv:
- r['rc'] = 256
- r['msg'] = "only command or argv can be given, not both"
- module.fail_json(**r)
-
if not shell and args:
args = shlex.split(args)
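The manual rc=256 failures removed above are now expressed declaratively via `required_one_of` and `mutually_exclusive`. A rough sketch of what those declarative checks amount to (the real validation lives in AnsibleModule's argument-spec machinery):

```python
def validate(params: dict, required_one_of: list, mutually_exclusive: list) -> None:
    """Rough equivalent of the declarative argument-spec checks."""
    for group in required_one_of:
        if not any(params.get(name) for name in group):
            raise ValueError(f"one of the following is required: {', '.join(group)}")
    for group in mutually_exclusive:
        present = [name for name in group if params.get(name)]
        if len(present) > 1:
            raise ValueError(f"parameters are mutually exclusive: {', '.join(present)}")

spec = [['_raw_params', 'cmd', 'argv']]
validate({'cmd': 'uptime'}, required_one_of=spec, mutually_exclusive=spec)   # passes
# validate({'cmd': 'uptime', 'argv': ['uptime']}, spec, spec)                # would raise
```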
@@ -307,9 +299,9 @@ def main():
try:
os.chdir(chdir)
- except (IOError, OSError) as e:
- r['msg'] = 'Unable to change directory before execution: %s' % to_text(e)
- module.fail_json(**r)
+ except OSError as ex:
+ r['msg'] = 'Unable to change directory before execution.'
+ module.fail_json(**r, exception=ex)
# check_mode partial support, since it only really works in checking creates/removes
if module.check_mode:
diff --git a/lib/ansible/modules/copy.py b/lib/ansible/modules/copy.py
index 8a5297466f4..28c14f4d71f 100644
--- a/lib/ansible/modules/copy.py
+++ b/lib/ansible/modules/copy.py
@@ -286,14 +286,11 @@ state:
import errno
import filecmp
-import grp
import os
import os.path
-import pwd
import shutil
import stat
import tempfile
-import traceback
from ansible.module_utils.common.text.converters import to_bytes, to_native
from ansible.module_utils.basic import AnsibleModule
@@ -335,67 +332,24 @@ def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list,
return changed
+def chown_path(module, path, owner, group):
+ """Update the owner/group if specified and different from the current owner/group."""
+ changed = module.set_owner_if_different(path, owner, False)
+ return module.set_group_if_different(path, group, changed)
+
+
def chown_recursive(path, module):
changed = False
owner = module.params['owner']
group = module.params['group']
- if owner is not None:
- if not module.check_mode:
- for dirpath, dirnames, filenames in os.walk(path):
- owner_changed = module.set_owner_if_different(dirpath, owner, False)
- if owner_changed is True:
- changed = owner_changed
- for dir in [os.path.join(dirpath, d) for d in dirnames]:
- owner_changed = module.set_owner_if_different(dir, owner, False)
- if owner_changed is True:
- changed = owner_changed
- for file in [os.path.join(dirpath, f) for f in filenames]:
- owner_changed = module.set_owner_if_different(file, owner, False)
- if owner_changed is True:
- changed = owner_changed
- else:
- uid = pwd.getpwnam(owner).pw_uid
- for dirpath, dirnames, filenames in os.walk(path):
- owner_changed = (os.stat(dirpath).st_uid != uid)
- if owner_changed is True:
- changed = owner_changed
- for dir in [os.path.join(dirpath, d) for d in dirnames]:
- owner_changed = (os.stat(dir).st_uid != uid)
- if owner_changed is True:
- changed = owner_changed
- for file in [os.path.join(dirpath, f) for f in filenames]:
- owner_changed = (os.stat(file).st_uid != uid)
- if owner_changed is True:
- changed = owner_changed
- if group is not None:
- if not module.check_mode:
- for dirpath, dirnames, filenames in os.walk(path):
- group_changed = module.set_group_if_different(dirpath, group, False)
- if group_changed is True:
- changed = group_changed
- for dir in [os.path.join(dirpath, d) for d in dirnames]:
- group_changed = module.set_group_if_different(dir, group, False)
- if group_changed is True:
- changed = group_changed
- for file in [os.path.join(dirpath, f) for f in filenames]:
- group_changed = module.set_group_if_different(file, group, False)
- if group_changed is True:
- changed = group_changed
- else:
- gid = grp.getgrnam(group).gr_gid
- for dirpath, dirnames, filenames in os.walk(path):
- group_changed = (os.stat(dirpath).st_gid != gid)
- if group_changed is True:
- changed = group_changed
- for dir in [os.path.join(dirpath, d) for d in dirnames]:
- group_changed = (os.stat(dir).st_gid != gid)
- if group_changed is True:
- changed = group_changed
- for file in [os.path.join(dirpath, f) for f in filenames]:
- group_changed = (os.stat(file).st_gid != gid)
- if group_changed is True:
- changed = group_changed
+ # TODO: Consolidate with the other methods calling set_*_if_different method, this is inefficient.
+ for dirpath, dirnames, filenames in os.walk(path):
+ changed |= chown_path(module, dirpath, owner, group)
+ for subdir in [os.path.join(dirpath, d) for d in dirnames]:
+ changed |= chown_path(module, subdir, owner, group)
+ for filepath in [os.path.join(dirpath, f) for f in filenames]:
+ changed |= chown_path(module, filepath, owner, group)
return changed
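The consolidated recursion boils down to a single `os.walk` pass accumulating `changed` with `|=`. A standalone sketch with the module's check-mode-aware `set_*_if_different` methods abstracted as callables:

```python
import os

def chown_walk(path, owner, group, set_owner, set_group):
    """Sketch of the consolidated recursive ownership pass.

    set_owner/set_group stand in for AnsibleModule.set_owner_if_different and
    set_group_if_different, which are check-mode aware and return True on change.
    """
    changed = False
    for dirpath, dirnames, filenames in os.walk(path):
        for target in [dirpath] + [os.path.join(dirpath, n) for n in dirnames + filenames]:
            changed |= set_owner(target, owner)
            changed |= set_group(target, group)
    return changed

# Dry-run usage with no-op setters:
print(chown_walk('/tmp', 'root', 'root', lambda p, o: False, lambda p, g: False))
```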
@@ -423,10 +377,7 @@ def copy_diff_files(src, dest, module):
shutil.copyfile(b_src_item_path, b_dest_item_path)
shutil.copymode(b_src_item_path, b_dest_item_path)
- if owner is not None:
- module.set_owner_if_different(b_dest_item_path, owner, False)
- if group is not None:
- module.set_group_if_different(b_dest_item_path, group, False)
+ chown_path(module, b_dest_item_path, owner, group)
changed = True
return changed
@@ -458,10 +409,7 @@ def copy_left_only(src, dest, module):
if os.path.islink(b_src_item_path) and os.path.isfile(b_src_item_path) and local_follow is True:
shutil.copyfile(b_src_item_path, b_dest_item_path)
- if owner is not None:
- module.set_owner_if_different(b_dest_item_path, owner, False)
- if group is not None:
- module.set_group_if_different(b_dest_item_path, group, False)
+ chown_path(module, b_dest_item_path, owner, group)
if os.path.islink(b_src_item_path) and os.path.isfile(b_src_item_path) and local_follow is False:
linkto = os.readlink(b_src_item_path)
@@ -470,11 +418,7 @@ def copy_left_only(src, dest, module):
if not os.path.islink(b_src_item_path) and os.path.isfile(b_src_item_path):
shutil.copyfile(b_src_item_path, b_dest_item_path)
shutil.copymode(b_src_item_path, b_dest_item_path)
-
- if owner is not None:
- module.set_owner_if_different(b_dest_item_path, owner, False)
- if group is not None:
- module.set_group_if_different(b_dest_item_path, group, False)
+ chown_path(module, b_dest_item_path, owner, group)
if not os.path.islink(b_src_item_path) and os.path.isdir(b_src_item_path):
shutil.copytree(b_src_item_path, b_dest_item_path, symlinks=not local_follow)
@@ -502,6 +446,21 @@ def copy_common_dirs(src, dest, module):
return changed
+def copy_directory(src, dest, module):
+ if not os.path.exists(dest):
+ if not module.check_mode:
+ shutil.copytree(src, dest, symlinks=not module.params['local_follow'])
+ chown_recursive(dest, module)
+ changed = True
+ else:
+ diff_files_changed = copy_diff_files(src, dest, module)
+ left_only_changed = copy_left_only(src, dest, module)
+ common_dirs_changed = copy_common_dirs(src, dest, module)
+ owner_group_changed = chown_recursive(dest, module)
+ changed = any([diff_files_changed, left_only_changed, common_dirs_changed, owner_group_changed])
+ return changed
+
+
def main():
module = AnsibleModule(
@@ -563,8 +522,8 @@ def main():
if os.path.isfile(src):
try:
checksum_src = module.sha1(src)
- except (OSError, IOError) as e:
- module.warn("Unable to calculate src checksum, assuming change: %s" % to_native(e))
+ except OSError as ex:
+ module.error_as_warning("Unable to calculate src checksum, assuming change.", exception=ex)
try:
# Backwards compat only. This will be None in FIPS mode
md5sum_src = module.md5(src)
@@ -652,12 +611,8 @@ def main():
if validate:
# if we have a mode, make sure we set it on the temporary
# file source as some validations may require it
- if mode is not None:
- module.set_mode_if_different(src, mode, False)
- if owner is not None:
- module.set_owner_if_different(src, owner, False)
- if group is not None:
- module.set_group_if_different(src, group, False)
+ module.set_mode_if_different(src, mode, False)
+ chown_path(module, src, owner, group)
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc, out, err) = module.run_command(validate % src)
@@ -681,64 +636,23 @@ def main():
# at this point we should always have tmp file
module.atomic_move(b_mysrc, dest, unsafe_writes=module.params['unsafe_writes'], keep_dest_attrs=not remote_src)
- except (IOError, OSError):
- module.fail_json(msg="failed to copy: %s to %s" % (src, dest), traceback=traceback.format_exc())
+ except OSError as ex:
+ raise Exception(f"Failed to copy {src!r} to {dest!r}.") from ex
changed = True
# If neither have checksums, both src and dest are directories.
- if checksum_src is None and checksum_dest is None:
- if remote_src and os.path.isdir(module.params['src']):
- b_src = to_bytes(module.params['src'], errors='surrogate_or_strict')
- b_dest = to_bytes(module.params['dest'], errors='surrogate_or_strict')
-
- if src.endswith(os.path.sep) and os.path.isdir(module.params['dest']):
- diff_files_changed = copy_diff_files(b_src, b_dest, module)
- left_only_changed = copy_left_only(b_src, b_dest, module)
- common_dirs_changed = copy_common_dirs(b_src, b_dest, module)
- owner_group_changed = chown_recursive(b_dest, module)
- if diff_files_changed or left_only_changed or common_dirs_changed or owner_group_changed:
- changed = True
-
- if src.endswith(os.path.sep) and not os.path.exists(module.params['dest']):
- b_basename = to_bytes(os.path.basename(src), errors='surrogate_or_strict')
- b_dest = to_bytes(os.path.join(b_dest, b_basename), errors='surrogate_or_strict')
- b_src = to_bytes(os.path.join(module.params['src'], ""), errors='surrogate_or_strict')
- if not module.check_mode:
- shutil.copytree(b_src, b_dest, symlinks=not local_follow)
- chown_recursive(dest, module)
- changed = True
-
- if not src.endswith(os.path.sep) and os.path.isdir(module.params['dest']):
- b_basename = to_bytes(os.path.basename(src), errors='surrogate_or_strict')
- b_dest = to_bytes(os.path.join(b_dest, b_basename), errors='surrogate_or_strict')
- b_src = to_bytes(os.path.join(module.params['src'], ""), errors='surrogate_or_strict')
- if not module.check_mode and not os.path.exists(b_dest):
- shutil.copytree(b_src, b_dest, symlinks=not local_follow)
- changed = True
- chown_recursive(dest, module)
- if module.check_mode and not os.path.exists(b_dest):
- changed = True
- if os.path.exists(b_dest):
- diff_files_changed = copy_diff_files(b_src, b_dest, module)
- left_only_changed = copy_left_only(b_src, b_dest, module)
- common_dirs_changed = copy_common_dirs(b_src, b_dest, module)
- owner_group_changed = chown_recursive(b_dest, module)
- if diff_files_changed or left_only_changed or common_dirs_changed or owner_group_changed:
- changed = True
-
- if not src.endswith(os.path.sep) and not os.path.exists(module.params['dest']):
- b_basename = to_bytes(os.path.basename(module.params['src']), errors='surrogate_or_strict')
- b_dest = to_bytes(os.path.join(b_dest, b_basename), errors='surrogate_or_strict')
- if not module.check_mode and not os.path.exists(b_dest):
- os.makedirs(b_dest)
- changed = True
- b_src = to_bytes(os.path.join(module.params['src'], ""), errors='surrogate_or_strict')
- diff_files_changed = copy_diff_files(b_src, b_dest, module)
- left_only_changed = copy_left_only(b_src, b_dest, module)
- common_dirs_changed = copy_common_dirs(b_src, b_dest, module)
- owner_group_changed = chown_recursive(b_dest, module)
- if module.check_mode and not os.path.exists(b_dest):
- changed = True
+ checksums_none = checksum_src is None and checksum_dest is None
+ both_directories = os.path.isdir(module.params['src']) and (os.path.isdir(module.params['dest']) or not os.path.exists(module.params['dest']))
+ if checksums_none and remote_src and both_directories:
+ b_src = to_bytes(module.params['src'], errors='surrogate_or_strict')
+ b_dest = to_bytes(module.params['dest'], errors='surrogate_or_strict')
+
+ if not b_src.endswith(to_bytes(os.path.sep)):
+ b_basename = os.path.basename(b_src)
+ b_dest = os.path.join(b_dest, b_basename)
+ b_src = os.path.join(b_src, b'')
+
+ changed |= copy_directory(b_src, b_dest, module)
res_args = dict(
dest=dest, src=src, md5sum=md5sum_src, checksum=checksum_src, changed=changed
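The trailing-separator handling above mirrors rsync semantics: `src/` copies the directory's contents into dest, while `src` without the slash copies the directory itself under dest. A sketch of just that normalization:

```python
import os

def normalize_dir_copy(src: str, dest: str) -> tuple:
    """src without a trailing separator is copied *under* dest; with one, its contents go *into* dest."""
    if not src.endswith(os.path.sep):
        dest = os.path.join(dest, os.path.basename(src))
        src = os.path.join(src, '')  # appends exactly one trailing separator
    return src, dest

print(normalize_dir_copy('/tmp/data', '/srv'))   # ('/tmp/data/', '/srv/data')
print(normalize_dir_copy('/tmp/data/', '/srv'))  # ('/tmp/data/', '/srv')
```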
diff --git a/lib/ansible/modules/cron.py b/lib/ansible/modules/cron.py
index 0382aa6b265..e64be5d7b9f 100644
--- a/lib/ansible/modules/cron.py
+++ b/lib/ansible/modules/cron.py
@@ -72,33 +72,39 @@ options:
minute:
description:
- Minute when the job should run (V(0-59), V(*), V(*/2), and so on).
+ - Cannot be combined with O(special_time).
type: str
default: "*"
hour:
description:
- Hour when the job should run (V(0-23), V(*), V(*/2), and so on).
+ - Cannot be combined with O(special_time).
type: str
default: "*"
day:
description:
- Day of the month the job should run (V(1-31), V(*), V(*/2), and so on).
+ - Cannot be combined with O(special_time).
type: str
default: "*"
aliases: [ dom ]
month:
description:
- - Month of the year the job should run (V(1-12), V(*), V(*/2), and so on).
+ - Month of the year the job should run (V(JAN-DEC) or V(1-12), V(*), V(*/2), and so on).
+ - Cannot be combined with O(special_time).
type: str
default: "*"
weekday:
description:
- - Day of the week that the job should run (V(0-6) for Sunday-Saturday, V(*), and so on).
+ - Day of the week that the job should run (V(SUN-SAT) or V(0-6), V(*), and so on).
+ - Cannot be combined with O(special_time).
type: str
default: "*"
aliases: [ dow ]
special_time:
description:
- Special time specification nickname.
+ - Cannot be combined with O(minute), O(hour), O(day), O(month) or O(weekday).
type: str
choices: [ annually, daily, hourly, monthly, reboot, weekly, yearly ]
version_added: "1.3"
@@ -265,13 +271,13 @@ class CronTab(object):
with open(self.b_cron_file, 'rb') as f:
self.n_existing = to_native(f.read(), errors='surrogate_or_strict')
self.lines = self.n_existing.splitlines()
- except IOError:
+ except OSError:
# cron file does not exist
return
except Exception:
raise CronTabError("Unexpected error:", sys.exc_info()[0])
else:
- # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
+ # FIXME: using safely quoted shell for now, but this really should be two non-shell calls instead.
(rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
@@ -322,7 +328,7 @@ class CronTab(object):
# Add the entire crontab back to the user crontab
if not self.cron_file:
- # quoting shell args for now but really this should be two non-shell calls. FIXME
+ # FIXME: quoting shell args for now but really this should be two non-shell calls.
(rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
os.unlink(path)
@@ -612,7 +618,6 @@ def main():
changed = False
res_args = dict()
- warnings = list()
if cron_file:
@@ -621,8 +626,8 @@ def main():
cron_file_basename = os.path.basename(cron_file)
if not re.search(r'^[A-Z0-9_-]+$', cron_file_basename, re.I):
- warnings.append('Filename portion of cron_file ("%s") should consist' % cron_file_basename +
- ' solely of upper- and lower-case letters, digits, underscores, and hyphens')
+ module.warn('Filename portion of cron_file ("%s") should consist' % cron_file_basename +
+ ' solely of upper- and lower-case letters, digits, underscores, and hyphens')
# Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
os.umask(int('022', 8))
@@ -645,7 +650,7 @@ def main():
if special_time and \
(True in [(x != '*') for x in [minute, hour, day, month, weekday]]):
- module.fail_json(msg="You must specify time and date fields or special time.")
+ module.fail_json(msg="You cannot combine special_time with any of the time or day/date parameters.")
# cannot support special_time on solaris
if special_time and platform.system() == 'SunOS':
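The rewritten failure message matches what the condition actually tests: special_time combined with any non-default time or day/date field. The `True in [...]` idiom above is equivalent to `any(...)`, as this sketch shows:

```python
def conflicts_with_special_time(minute='*', hour='*', day='*', month='*', weekday='*', special_time=None) -> bool:
    """Equivalent of the check above: any non-default field alongside special_time is rejected."""
    return bool(special_time) and any(field != '*' for field in (minute, hour, day, month, weekday))

assert conflicts_with_special_time(special_time='reboot') is False
assert conflicts_with_special_time(minute='5', special_time='daily') is True
```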
@@ -687,7 +692,7 @@ def main():
if do_install:
for char in ['\r', '\n']:
if char in job.strip('\r\n'):
- warnings.append('Job should not contain line breaks')
+ module.warn('Job should not contain line breaks')
break
job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled)
@@ -728,7 +733,6 @@ def main():
res_args = dict(
jobs=crontab.get_jobnames(),
envs=crontab.get_envnames(),
- warnings=warnings,
changed=changed
)
diff --git a/lib/ansible/modules/deb822_repository.py b/lib/ansible/modules/deb822_repository.py
index a27af10786c..d4d6205511e 100644
--- a/lib/ansible/modules/deb822_repository.py
+++ b/lib/ansible/modules/deb822_repository.py
@@ -230,7 +230,6 @@ import os
import re
import tempfile
import textwrap
-import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import missing_required_lib
@@ -248,9 +247,9 @@ HAS_DEBIAN = True
DEBIAN_IMP_ERR = None
try:
from debian.deb822 import Deb822 # type: ignore[import]
-except ImportError:
+except ImportError as ex:
HAS_DEBIAN = False
- DEBIAN_IMP_ERR = traceback.format_exc()
+ DEBIAN_IMP_ERR = ex
KEYRINGS_DIR = '/etc/apt/keyrings'
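The pattern here (also applied to expect.py further down) keeps the ImportError object itself instead of a pre-formatted traceback string, so the exception can be attached to `fail_json` later. A generic, runnable sketch with a hypothetical dependency name:

```python
OPTIONAL_IMP_ERR = None
try:
    import some_optional_dependency  # hypothetical third-party library
    HAS_DEP = True
except ImportError as ex:
    OPTIONAL_IMP_ERR = ex
    HAS_DEP = False

if not HAS_DEP:
    # in a module this would be: module.fail_json(msg=missing_required_lib(...), exception=OPTIONAL_IMP_ERR)
    print(f'missing required library: {OPTIONAL_IMP_ERR}')
```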
diff --git a/lib/ansible/modules/dnf.py b/lib/ansible/modules/dnf.py
index 7ab874a941f..1922ba85e79 100644
--- a/lib/ansible/modules/dnf.py
+++ b/lib/ansible/modules/dnf.py
@@ -211,13 +211,6 @@ options:
type: bool
default: "no"
version_added: "2.7"
- install_repoquery:
- description:
- - This is effectively a no-op in DNF as it is not needed with DNF.
- - This option is deprecated and will be removed in ansible-core 2.20.
- type: bool
- default: "yes"
- version_added: "2.7"
download_only:
description:
- Only download the packages, do not install them.
@@ -408,10 +401,10 @@ from ansible.module_utils.common.respawn import has_respawned, probe_interpreter
from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
-# NOTE dnf Python bindings import is postponed, see DnfModule._ensure_dnf(),
-# because we need AnsibleModule object to use get_best_parsable_locale()
-# to set proper locale before importing dnf to be able to scrape
-# the output in some cases (FIXME?).
+# FIXME: NOTE dnf Python bindings import is postponed, see DnfModule._ensure_dnf(),
+# because we need AnsibleModule object to use get_best_parsable_locale()
+# to set proper locale before importing dnf to be able to scrape
+# the output in some cases.
dnf = None
diff --git a/lib/ansible/modules/dnf5.py b/lib/ansible/modules/dnf5.py
index df4ee206748..cd9bf6e3f2e 100644
--- a/lib/ansible/modules/dnf5.py
+++ b/lib/ansible/modules/dnf5.py
@@ -14,6 +14,12 @@ description:
provides are implemented in M(ansible.builtin.dnf5), please consult specific options for more information."
short_description: Manages packages with the I(dnf5) package manager
options:
+ auto_install_module_deps:
+ description:
+ - Automatically install dependencies required to run this module.
+ type: bool
+ default: yes
+ version_added: 2.19
name:
description:
- "A package name or package specifier with version, like C(name-1.0).
@@ -174,12 +180,6 @@ options:
in the earlier transaction).
type: bool
default: "no"
- install_repoquery:
- description:
- - This is effectively a no-op in DNF as it is not needed with DNF.
- - This option is deprecated and will be removed in ansible-core 2.20.
- type: bool
- default: "yes"
download_only:
description:
- Only download the packages, do not install them.
@@ -246,6 +246,10 @@ attributes:
platforms: rhel
requirements:
- "python3-libdnf5"
+notes:
+ - If the interpreter can't import C(python3-libdnf5), the module will check for it in system-owned interpreters as well.
+ If the dependency still can't be found, the module will attempt to install it, depending on the value of O(auto_install_module_deps).
+ Once the dependency is found or installed, the module is respawned under the correct interpreter.
version_added: 2.15
"""
@@ -354,10 +358,37 @@ from ansible.module_utils.common.respawn import has_respawned, probe_interpreter
from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
libdnf5 = None
+# Through dnf5-5.2.12 all exceptions raised through swig became RuntimeError
+LIBDNF5_ERRORS = RuntimeError
def is_installed(base, spec):
settings = libdnf5.base.ResolveSpecSettings()
+ try:
+ settings.set_group_with_name(True)
+ # Disable checking whether SPEC is a binary -> `/usr/(s)bin/`,
+ # this prevents scenarios like the following:
+ # * the `sssd-common` package is installed and provides `/usr/sbin/sssd` binary
+ # * the `sssd` package is NOT installed
+ # * due to `set_with_binaries(True)` being default `is_installed(base, "sssd")` would "unexpectedly" return True
+ # If users wish to target the `sssd` binary they can by specifying the full path `name=/usr/sbin/sssd` explicitly
+ # due to settings.set_with_filenames(True) being default.
+ settings.set_with_binaries(False)
+ # Disable checking whether SPEC is provided by an installed package.
+ # Consider following real scenario from the rpmfusion repo:
+ # * the `ffmpeg-libs` package is installed and provides `libavcodec-freeworld`
+ # * but `libavcodec-freeworld` is NOT installed (???)
+ # * due to `set_with_provides(True)` being default `is_installed(base, "libavcodec-freeworld")`
+ # would "unexpectedly" return True
+ # We disable provides only for this `is_installed` check, for actual installation we leave the default
+ # setting to mirror the dnf cmdline behavior.
+ settings.set_with_provides(False)
+ except AttributeError:
+ # dnf5 < 5.2.0.0
+ settings.group_with_name = True
+ settings.with_binaries = False
+ settings.with_provides = False
+
installed_query = libdnf5.rpm.PackageQuery(base)
installed_query.filter_installed()
match, nevra = installed_query.resolve_pkg_spec(spec, settings, True)
@@ -386,7 +417,9 @@ def is_newer_version_installed(base, spec):
try:
spec_nevra = next(iter(libdnf5.rpm.Nevra.parse(spec)))
- except (RuntimeError, StopIteration):
+ except LIBDNF5_ERRORS:
+ return False
+ except StopIteration:
return False
spec_version = spec_nevra.get_version()
@@ -445,6 +478,8 @@ def get_unneeded_pkgs(base):
class Dnf5Module(YumDnf):
def __init__(self, module):
super(Dnf5Module, self).__init__(module)
+ self.auto_install_module_deps = self.module.params["auto_install_module_deps"]
+
self._ensure_dnf()
self.pkg_mgr_name = "dnf5"
@@ -478,12 +513,19 @@ class Dnf5Module(YumDnf):
os.environ["LANGUAGE"] = os.environ["LANG"] = locale
global libdnf5
+ global LIBDNF5_ERRORS
has_dnf = True
try:
import libdnf5 # type: ignore[import]
except ImportError:
has_dnf = False
+ try:
+ import libdnf5.exception # type: ignore[import-not-found]
+ LIBDNF5_ERRORS = (libdnf5.exception.Error, libdnf5.exception.NonLibdnf5Exception)
+ except (ImportError, AttributeError):
+ pass
+
if has_dnf:
return
@@ -494,21 +536,30 @@ class Dnf5Module(YumDnf):
]
if not has_respawned():
- # probe well-known system Python locations for accessible bindings, favoring py3
- interpreter = probe_interpreters_for_module(system_interpreters, "libdnf5")
-
- if interpreter:
- # respawn under the interpreter where the bindings should be found
- respawn_module(interpreter)
- # end of the line for this module, the process will exit here once the respawned module completes
+ for attempt in (1, 2):
+ # probe well-known system Python locations for accessible bindings
+ interpreter = probe_interpreters_for_module(system_interpreters, "libdnf5")
+ if interpreter:
+ # respawn under the interpreter where the bindings should be found
+ respawn_module(interpreter)
+ # end of the line for this module, the process will exit here once the respawned module completes
+ if attempt == 1:
+ if self.module.check_mode:
+ self.module.fail_json(
+ msg="python3-libdnf5 must be installed to use check mode. "
+ "If run normally this module can auto-install it, "
+ "see the auto_install_module_deps option.",
+ )
+ elif self.auto_install_module_deps:
+ self.module.run_command(["dnf", "install", "-y", "python3-libdnf5"], check_rc=True)
+ else:
+ break
- # done all we can do, something is just broken (auto-install isn't useful anymore with respawn, so it was removed)
+ py_version = sys.version.replace("\n", "")
self.module.fail_json(
- msg="Could not import the libdnf5 python module using {0} ({1}). "
- "Please install python3-libdnf5 package or ensure you have specified the "
- "correct ansible_python_interpreter. (attempted {2})".format(
- sys.executable, sys.version.replace("\n", ""), system_interpreters
- ),
+ msg=f"Could not import the libdnf5 python module using {sys.executable} ({py_version}). "
+ "Ensure python3-libdnf5 package is installed (either manually or via the auto_install_module_deps option) "
+ f"or that you have specified the correct ansible_python_interpreter. (attempted {system_interpreters}).",
failures=[],
)
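A condensed sketch of the two-pass bootstrap above, with the probe, respawn, and install steps abstracted as callables (names are illustrative; in the real module `respawn_module` never returns):

```python
def bootstrap(probe, respawn, install_dep, check_mode: bool, auto_install: bool) -> None:
    for attempt in (1, 2):
        if interpreter := probe():
            respawn(interpreter)  # in the real module this never returns
            return
        if attempt == 1:
            if check_mode:
                raise RuntimeError('python3-libdnf5 must be installed to use check mode')
            if auto_install:
                install_dep()  # e.g. dnf install -y python3-libdnf5
            else:
                break
    raise RuntimeError('could not import the libdnf5 python module')

try:
    bootstrap(probe=lambda: None, respawn=lambda i: None,
              install_dep=lambda: print('installing python3-libdnf5'),
              check_mode=False, auto_install=True)
except RuntimeError as e:
    print(e)
```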
@@ -526,15 +577,7 @@ class Dnf5Module(YumDnf):
if self.conf_file:
conf.config_file_path = self.conf_file
- try:
- base.load_config()
- except RuntimeError as e:
- self.module.fail_json(
- msg=str(e),
- conf_file=self.conf_file,
- failures=[],
- rc=1,
- )
+ base.load_config()
if self.releasever is not None:
variables = base.get_vars()
@@ -552,7 +595,14 @@ class Dnf5Module(YumDnf):
elif self.best is not None:
conf.best = self.best
conf.install_weak_deps = self.install_weak_deps
- conf.gpgcheck = not self.disable_gpg_check
+ try:
+ # raises AttributeError only on getter if not available
+ conf.pkg_gpgcheck # pylint: disable=pointless-statement
+ except AttributeError:
+ # dnf5 < 5.2.7.0
+ conf.gpgcheck = not self.disable_gpg_check
+ else:
+ conf.pkg_gpgcheck = not self.disable_gpg_check
conf.localpkg_gpgcheck = not self.disable_gpg_check
conf.sslverify = self.sslverify
conf.clean_requirements_on_remove = self.autoremove
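Per the hunk's own comment, the config proxy raises AttributeError only on the getter when the option is unavailable, so the code probes by reading before writing. A standalone sketch of that feature-detection pattern with a stand-in config class:

```python
class FakeConf:
    """Stand-in for libdnf5's config proxy; newer builds expose pkg_gpgcheck."""
    pkg_gpgcheck = True
    gpgcheck = True

def set_gpgcheck(conf, disable_gpg_check: bool) -> None:
    try:
        conf.pkg_gpgcheck  # AttributeError here means dnf5 < 5.2.7.0
    except AttributeError:
        conf.gpgcheck = not disable_gpg_check
    else:
        conf.pkg_gpgcheck = not disable_gpg_check

conf = FakeConf()
set_gpgcheck(conf, disable_gpg_check=False)
print(conf.pkg_gpgcheck)  # True
```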
@@ -646,9 +696,12 @@ class Dnf5Module(YumDnf):
settings = libdnf5.base.GoalJobSettings()
try:
settings.set_group_with_name(True)
+ settings.set_with_binaries(False)
except AttributeError:
# dnf5 < 5.2.0.0
settings.group_with_name = True
+ settings.with_binaries = False
+
if self.bugfix or self.security:
advisory_query = libdnf5.advisory.AdvisoryQuery(base)
types = []
@@ -657,6 +710,7 @@ class Dnf5Module(YumDnf):
if self.security:
types.append("security")
advisory_query.filter_type(types)
+ conf.skip_unavailable = True # ignore packages that are of a different type, for backwards compat
settings.set_advisory_filter(advisory_query)
goal = libdnf5.base.Goal(base)
@@ -679,19 +733,13 @@ class Dnf5Module(YumDnf):
goal.add_install(spec, settings)
elif self.state in {"absent", "removed"}:
for spec in self.names:
- try:
- goal.add_remove(spec, settings)
- except RuntimeError as e:
- self.module.fail_json(msg=str(e), failures=[], rc=1)
+ goal.add_remove(spec, settings)
if self.autoremove:
for pkg in get_unneeded_pkgs(base):
goal.add_rpm_remove(pkg, settings)
goal.set_allow_erasing(self.allowerasing)
- try:
- transaction = goal.resolve()
- except RuntimeError as e:
- self.module.fail_json(msg=str(e), failures=[], rc=1)
+ transaction = goal.resolve()
if transaction.get_problems():
failures = []
@@ -732,7 +780,7 @@ class Dnf5Module(YumDnf):
if self.module.check_mode:
if results:
msg = "Check mode: No changes made, but would have if not in check mode"
- else:
+ elif changed:
transaction.download()
if not self.download_only:
transaction.set_description("ansible dnf5 module")
@@ -762,7 +810,16 @@ class Dnf5Module(YumDnf):
def main():
- Dnf5Module(AnsibleModule(**yumdnf_argument_spec)).run()
+ yumdnf_argument_spec["argument_spec"].update(
+ dict(
+ auto_install_module_deps=dict(type="bool", default=True),
+ )
+ )
+ module = AnsibleModule(**yumdnf_argument_spec)
+ try:
+ Dnf5Module(module).run()
+ except LIBDNF5_ERRORS as e:
+ module.fail_json(msg=str(e), failures=[], rc=1)
if __name__ == "__main__":
diff --git a/lib/ansible/modules/expect.py b/lib/ansible/modules/expect.py
index 90ece7d76f3..d2d537f1a7d 100644
--- a/lib/ansible/modules/expect.py
+++ b/lib/ansible/modules/expect.py
@@ -120,14 +120,13 @@ EXAMPLES = r"""
import datetime
import os
-import traceback
PEXPECT_IMP_ERR = None
try:
import pexpect
HAS_PEXPECT = True
-except ImportError:
- PEXPECT_IMP_ERR = traceback.format_exc()
+except ImportError as ex:
+ PEXPECT_IMP_ERR = ex
HAS_PEXPECT = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
@@ -164,8 +163,7 @@ def main():
)
if not HAS_PEXPECT:
- module.fail_json(msg=missing_required_lib("pexpect"),
- exception=PEXPECT_IMP_ERR)
+ module.fail_json(msg=missing_required_lib("pexpect"), exception=PEXPECT_IMP_ERR)
chdir = module.params['chdir']
args = module.params['command']
@@ -220,7 +218,7 @@ def main():
rc=0
)
- startd = datetime.datetime.now()
+ start_date = datetime.datetime.now()
try:
try:
@@ -246,20 +244,17 @@ def main():
'(%s), this module requires pexpect>=3.3. '
'Error was %s' % (pexpect.__version__, to_native(e)))
except pexpect.ExceptionPexpect as e:
- module.fail_json(msg='%s' % to_native(e), exception=traceback.format_exc())
+ module.fail_json(msg='%s' % to_native(e))
- endd = datetime.datetime.now()
- delta = endd - startd
-
- if b_out is None:
- b_out = b''
+ end_date = datetime.datetime.now()
+ delta = end_date - start_date
result = dict(
cmd=args,
stdout=to_native(b_out).rstrip('\r\n'),
rc=rc,
- start=str(startd),
- end=str(endd),
+ start=str(start_date),
+ end=str(end_date),
delta=str(delta),
changed=True,
)
diff --git a/lib/ansible/modules/file.py b/lib/ansible/modules/file.py
index f4761fc492f..da035c97be6 100644
--- a/lib/ansible/modules/file.py
+++ b/lib/ansible/modules/file.py
@@ -244,7 +244,19 @@ from ansible.module_utils.common.sentinel import Sentinel
module = None
-def additional_parameter_handling(module):
+class AnsibleModuleError(Exception):
+ def __init__(self, results):
+ self.results = results
+
+ def __repr__(self):
+ return 'AnsibleModuleError(results={0})'.format(self.results)
+
+
+class ParameterError(AnsibleModuleError):
+ pass
+
+
+def additional_parameter_handling(params):
"""Additional parameter validation and reformatting"""
# When path is a directory, rewrite the pathname to be the file inside of the directory
# TODO: Why do we exclude link? Why don't we exclude directory? Should we exclude touch?
@@ -256,7 +268,6 @@ def additional_parameter_handling(module):
# if state == file: place inside of the directory (use _original_basename)
# if state == link: place inside of the directory (use _original_basename. Fallback to src?)
# if state == hard: place inside of the directory (use _original_basename. Fallback to src?)
- params = module.params
if (params['state'] not in ("link", "absent") and os.path.isdir(to_bytes(params['path'], errors='surrogate_or_strict'))):
basename = None
@@ -312,11 +323,8 @@ def get_state(path):
return 'file'
return 'absent'
- except OSError as e:
- if e.errno == errno.ENOENT: # It may already have been removed
- return 'absent'
- else:
- raise
+ except FileNotFoundError:
+ return 'absent'
# This should be moved into the common file utilities
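FileNotFoundError is the errno.ENOENT subclass of OSError, so the rewritten handlers here are behaviour-preserving. A standalone sketch of the simplified state probe (the real function also distinguishes hard links and other file types):

```python
import os
import stat

def get_state(path: str) -> str:
    """Simplified sketch: FileNotFoundError replaces the manual errno.ENOENT comparison."""
    try:
        mode = os.lstat(path).st_mode
    except FileNotFoundError:
        return 'absent'  # it may already have been removed
    if stat.S_ISLNK(mode):
        return 'link'
    if stat.S_ISDIR(mode):
        return 'directory'
    return 'file'

print(get_state('/no/such/path'))  # absent
```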
@@ -516,12 +524,14 @@ def ensure_absent(path):
else:
try:
os.unlink(b_path)
- except OSError as e:
- if e.errno != errno.ENOENT: # It may already have been removed
- module.fail_json(
- msg=f"unlinking failed: {to_native(e)}",
- path=path
- )
+ except FileNotFoundError:
+ pass
+ except OSError as ex:
+ module.fail_json(
+ msg="Unlinking failed.",
+ path=path,
+ exception=ex,
+ )
result.update({'path': path, 'changed': True, 'diff': diff, 'state': 'absent'})
else:
@@ -549,10 +559,11 @@ def execute_touch(path, follow, timestamps):
try:
open(b_path, 'wb').close()
changed = True
- except (OSError, IOError) as e:
+ except OSError as ex:
module.fail_json(
- msg=f"Error, could not touch target: {to_native(e, nonstring='simplerepr')}",
- path=path
+ msg="Error, could not touch target.",
+ path=path,
+ exception=ex,
)
# Update the attributes on the file
diff = initial_diff(path, 'touch', prev_state)
@@ -883,9 +894,8 @@ def ensure_hardlink(path, src, follow, force, timestamps):
if os.path.exists(b_path):
try:
os.unlink(b_path)
- except OSError as e:
- if e.errno != errno.ENOENT: # It may already have been removed
- raise
+ except FileNotFoundError:
+ pass
os.link(b_src, b_tmppath)
os.rename(b_tmppath, b_path)
except OSError as e:
@@ -966,46 +976,52 @@ def main():
supports_check_mode=True,
)
- additional_parameter_handling(module)
- params = module.params
-
- state = params['state']
- recurse = params['recurse']
- force = params['force']
- follow = params['follow']
- path = params['path']
- src = params['src']
-
- if module.check_mode and state != 'absent':
- file_args = module.load_file_common_arguments(module.params)
- if file_args['owner']:
- check_owner_exists(module, file_args['owner'])
- if file_args['group']:
- check_group_exists(module, file_args['group'])
-
- timestamps = {}
- timestamps['modification_time'] = keep_backward_compatibility_on_timestamps(params['modification_time'], state)
- timestamps['modification_time_format'] = params['modification_time_format']
- timestamps['access_time'] = keep_backward_compatibility_on_timestamps(params['access_time'], state)
- timestamps['access_time_format'] = params['access_time_format']
-
- # short-circuit for diff_peek
- if params['_diff_peek'] is not None:
- appears_binary = execute_diff_peek(to_bytes(path, errors='surrogate_or_strict'))
- module.exit_json(path=path, changed=False, appears_binary=appears_binary)
-
- if state == 'file':
- result = ensure_file_attributes(path, follow, timestamps)
- elif state == 'directory':
- result = ensure_directory(path, follow, recurse, timestamps)
- elif state == 'link':
- result = ensure_symlink(path, src, follow, force, timestamps)
- elif state == 'hard':
- result = ensure_hardlink(path, src, follow, force, timestamps)
- elif state == 'touch':
- result = execute_touch(path, follow, timestamps)
- elif state == 'absent':
- result = ensure_absent(path)
+ try:
+ additional_parameter_handling(module.params)
+ params = module.params
+
+ state = params['state']
+ recurse = params['recurse']
+ force = params['force']
+ follow = params['follow']
+ path = params['path']
+ src = params['src']
+
+ if module.check_mode and state != 'absent':
+ file_args = module.load_file_common_arguments(module.params)
+ if file_args['owner']:
+ check_owner_exists(module, file_args['owner'])
+ if file_args['group']:
+ check_group_exists(module, file_args['group'])
+
+ timestamps = {}
+ timestamps['modification_time'] = keep_backward_compatibility_on_timestamps(params['modification_time'], state)
+ timestamps['modification_time_format'] = params['modification_time_format']
+ timestamps['access_time'] = keep_backward_compatibility_on_timestamps(params['access_time'], state)
+ timestamps['access_time_format'] = params['access_time_format']
+
+ # short-circuit for diff_peek
+ if params['_diff_peek'] is not None:
+ appears_binary = execute_diff_peek(to_bytes(path, errors='surrogate_or_strict'))
+ module.exit_json(path=path, changed=False, appears_binary=appears_binary)
+
+ if state == 'file':
+ result = ensure_file_attributes(path, follow, timestamps)
+ elif state == 'directory':
+ result = ensure_directory(path, follow, recurse, timestamps)
+ elif state == 'link':
+ result = ensure_symlink(path, src, follow, force, timestamps)
+ elif state == 'hard':
+ result = ensure_hardlink(path, src, follow, force, timestamps)
+ elif state == 'touch':
+ result = execute_touch(path, follow, timestamps)
+ elif state == 'absent':
+ result = ensure_absent(path)
+ except AnsibleModuleError as ex:
+ module.fail_json(**ex.results)
+
+ if not module._diff:
+ result.pop('diff', None)
module.exit_json(**result)
diff --git a/lib/ansible/modules/find.py b/lib/ansible/modules/find.py
index 8c2820c48e7..b30f6c71697 100644
--- a/lib/ansible/modules/find.py
+++ b/lib/ansible/modules/find.py
@@ -515,7 +515,7 @@ def main():
skipped = {}
def handle_walk_errors(e):
- if e.errno in (errno.EPERM, errno.EACCES):
+ if e.errno in (errno.EPERM, errno.EACCES, errno.ENOENT):
skipped[e.filename] = to_text(e)
return
raise e
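A runnable sketch of the widened onerror handler; walking a nonexistent root is a deterministic way to trigger the newly tolerated ENOENT case (in practice it covers entries that vanish between listing and stat'ing):

```python
import errno
import os

skipped = {}

def handle_walk_errors(e: OSError) -> None:
    # ENOENT newly tolerated: entries can disappear mid-walk
    if e.errno in (errno.EPERM, errno.EACCES, errno.ENOENT):
        skipped[e.filename] = str(e)
        return
    raise e

for root, dirs, files in os.walk('/no/such/dir', onerror=handle_walk_errors):
    pass

print(skipped)  # {'/no/such/dir': "[Errno 2] No such file or directory: '/no/such/dir'"}
```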
@@ -571,9 +571,9 @@ def main():
try:
st = os.lstat(fsname)
- except (IOError, OSError) as e:
- module.warn("Skipped entry '%s' due to this access issue: %s\n" % (fsname, to_text(e)))
- skipped[fsname] = to_text(e)
+ except OSError as ex:
+ module.error_as_warning(f"Skipped entry {fsname!r} due to access issue.", exception=ex)
+ skipped[fsname] = str(ex)
has_warnings = True
continue
diff --git a/lib/ansible/modules/get_url.py b/lib/ansible/modules/get_url.py
index 52c812c0c61..f25743b9a41 100644
--- a/lib/ansible/modules/get_url.py
+++ b/lib/ansible/modules/get_url.py
@@ -87,7 +87,7 @@ options:
- 'If a checksum is passed to this parameter, the digest of the
destination file will be calculated after it is downloaded to ensure
its integrity and verify that the transfer completed successfully.
- Format: <algorithm>:<checksum|url>, for example C(checksum="sha256:D98291AC[...]B6DC7B97",
+ Format: <algorithm>:<checksum|url>, for example C(checksum="sha256:D98291AC[...]B6DC7B97"),
C(checksum="sha256:http://example.com/path/sha256sum.txt").'
- If you worry about portability, only the sha1 algorithm is available
on all platforms and python versions.
@@ -372,11 +372,11 @@ import os
import re
import shutil
import tempfile
-import traceback
+
+from datetime import datetime, timezone
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlsplit
-from ansible.module_utils.compat.datetime import utcnow, utcfromtimestamp
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.urls import fetch_url, url_argument_spec
@@ -399,10 +399,10 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, head
Return (tempfile, info about the request)
"""
- start = utcnow()
+ start = datetime.now(timezone.utc)
rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, headers=headers, method=method,
unredirected_headers=unredirected_headers, decompress=decompress, ciphers=ciphers, use_netrc=use_netrc)
- elapsed = (utcnow() - start).seconds
+ elapsed = (datetime.now(timezone.utc) - start).seconds
if info['status'] == 304:
module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', ''), status_code=info['status'], elapsed=elapsed)
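`datetime.utcnow()` returns naive timestamps and is deprecated since Python 3.12; the replacement used throughout this file is the timezone-aware form:

```python
from datetime import datetime, timezone

start = datetime.now(timezone.utc)  # aware; replaces the deprecated datetime.utcnow()
# ... perform the request ...
elapsed = (datetime.now(timezone.utc) - start).seconds
print(elapsed)
```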
@@ -433,9 +433,26 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, head
shutil.copyfileobj(rsp, f)
except Exception as e:
os.remove(tempname)
- module.fail_json(msg="failed to create temporary content file: %s" % to_native(e), elapsed=elapsed, exception=traceback.format_exc())
+ module.fail_json(msg="failed to create temporary content file: %s" % to_native(e), elapsed=elapsed)
f.close()
rsp.close()
+
+ # Since shutil.copyfileobj() will read from HTTPResponse in chunks, HTTPResponse.read() will not recognize
+ # if the entire content-length of data was not read. We need to do that validation here, unless a 'chunked'
+ # transfer-encoding was used, in which case we will not know content-length because it will not be returned.
+ # But in that case, HTTPResponse will behave correctly and recognize an IncompleteRead.
+
+ is_gzip = info.get('content-encoding') == 'gzip'
+
+ if not module.check_mode and 'content-length' in info:
+ # If data is decompressed, then content-length won't match the amount of data we've read, so skip.
+ if not is_gzip or (is_gzip and not decompress):
+ st = os.stat(tempname)
+ cl = int(info['content-length'])
+ if st.st_size != cl:
+ diff = cl - st.st_size
+ module.fail_json(msg=f'Incomplete read, ({rsp.length=}, {cl=}, {st.st_size=}) failed to read remaining {diff} bytes')
+
return tempname, info
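The completeness check extracted as a standalone sketch (`info` mirrors the headers dict returned by fetch_url; the gzip carve-out matches the comment above):

```python
import os

def verify_complete_download(path: str, info: dict, decompress: bool) -> None:
    """Compare bytes on disk against the advertised Content-Length."""
    if 'content-length' not in info:
        return  # chunked transfer: http.client detects IncompleteRead itself
    if info.get('content-encoding') == 'gzip' and decompress:
        return  # decompressed size legitimately differs from the wire size
    expected = int(info['content-length'])
    actual = os.stat(path).st_size
    if actual != expected:
        raise OSError(f'Incomplete read: expected {expected} bytes, got {actual}')
```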
@@ -460,6 +477,37 @@ def is_url(checksum):
return urlsplit(checksum).scheme in supported_schemes
+def parse_digest_lines(filename, lines):
+ """Returns a list of tuple containing the filename and digest depending upon
+ the lines provided
+
+ Args:
+ filename (str): Name of the filename, used only when the digest is one-liner
+ lines (list): A list of lines containing filenames and checksums
+ """
+ checksum_map = []
+ BSD_DIGEST_LINE = re.compile(r'^(\w+) ?\((?P<path>.+)\) ?= (?P<digest>[\w.]+)$')
+ GNU_DIGEST_LINE = re.compile(r'^(?P<digest>[\w.]+) ([ *])(?P<path>.+)$')
+
+ if len(lines) == 1 and len(lines[0].split()) == 1:
+ # Only a single line with a single string
+ # treat it as a checksum only file
+ checksum_map.append((lines[0], filename))
+ return checksum_map
+ # The assumption here is the file is in the format of
+ # checksum filename
+ for line in lines:
+ match = BSD_DIGEST_LINE.match(line)
+ if match:
+ checksum_map.append((match.group('digest'), match.group('path')))
+ else:
+ match = GNU_DIGEST_LINE.match(line)
+ if match:
+ checksum_map.append((match.group('digest'), match.group('path').lstrip("./")))
+
+ return checksum_map
+
+
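Assuming `parse_digest_lines` from the hunk above is in scope, the accepted checksum-file layouts parse as follows (digests shortened for readability):

```python
print(parse_digest_lines('ubuntu.iso', ['SHA256 (ubuntu.iso) = d98291ac']))  # BSD style
print(parse_digest_lines('ubuntu.iso', ['d98291ac  ubuntu.iso']))            # GNU, text mode
print(parse_digest_lines('ubuntu.iso', ['d98291ac *./ubuntu.iso']))          # GNU, binary mode
print(parse_digest_lines('ubuntu.iso', ['d98291ac']))                        # bare digest
# each prints [('d98291ac', 'ubuntu.iso')]
```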
# ==============================================================
# main
@@ -527,31 +575,13 @@ def main():
if is_url(checksum):
checksum_url = checksum
# download checksum file to checksum_tmpsrc
- checksum_tmpsrc, checksum_info = url_get(module, checksum_url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest,
- unredirected_headers=unredirected_headers, ciphers=ciphers, use_netrc=use_netrc)
+ checksum_tmpsrc, _dummy = url_get(module, checksum_url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest,
+ unredirected_headers=unredirected_headers, ciphers=ciphers, use_netrc=use_netrc)
with open(checksum_tmpsrc) as f:
lines = [line.rstrip('\n') for line in f]
os.remove(checksum_tmpsrc)
- checksum_map = []
filename = url_filename(url)
- if len(lines) == 1 and len(lines[0].split()) == 1:
- # Only a single line with a single string
- # treat it as a checksum only file
- checksum_map.append((lines[0], filename))
- else:
- # The assumption here is the file is in the format of
- # checksum filename
- for line in lines:
- # Split by one whitespace to keep the leading type char ' ' (whitespace) for text and '*' for binary
- parts = line.split(" ", 1)
- if len(parts) == 2:
- # Remove the leading type char, we expect
- if parts[1].startswith((" ", "*",)):
- parts[1] = parts[1][1:]
-
- # Append checksum and path without potential leading './'
- checksum_map.append((parts[0], parts[1].lstrip("./")))
-
+ checksum_map = parse_digest_lines(filename=filename, lines=lines)
# Look through each line in the checksum file for a hash corresponding to
# the filename in the url, returning the first hash that is found.
for cksum in (s for (s, f) in checksum_map if f == filename):
@@ -595,7 +625,7 @@ def main():
# If the file already exists, prepare the last modified time for the
# request.
mtime = os.path.getmtime(dest)
- last_mod_time = utcfromtimestamp(mtime)
+ last_mod_time = datetime.fromtimestamp(mtime, timezone.utc)
# If the checksum does not match we have to force the download
# because last_mod_time may be newer than on remote
@@ -603,11 +633,11 @@ def main():
force = True
# download to tmpsrc
- start = utcnow()
+ start = datetime.now(timezone.utc)
method = 'HEAD' if module.check_mode else 'GET'
tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest, method,
unredirected_headers=unredirected_headers, decompress=decompress, ciphers=ciphers, use_netrc=use_netrc)
- result['elapsed'] = (utcnow() - start).seconds
+ result['elapsed'] = (datetime.now(timezone.utc) - start).seconds
result['src'] = tmpsrc
# Now the request has completed, we can finally generate the final
@@ -677,8 +707,7 @@ def main():
except Exception as e:
if os.path.exists(tmpsrc):
os.remove(tmpsrc)
- module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, to_native(e)),
- exception=traceback.format_exc(), **result)
+ module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, to_native(e)), **result)
result['changed'] = True
else:
result['changed'] = False
diff --git a/lib/ansible/modules/getent.py b/lib/ansible/modules/getent.py
index 1938af1fcfa..e195b7ef7ea 100644
--- a/lib/ansible/modules/getent.py
+++ b/lib/ansible/modules/getent.py
@@ -114,8 +114,6 @@ ansible_facts:
type: list
"""
-import traceback
-
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
@@ -156,7 +154,7 @@ def main():
try:
rc, out, err = module.run_command(cmd)
except Exception as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ module.fail_json(msg=to_native(e))
msg = "Unexpected failure!"
dbtree = 'getent_%s' % database
diff --git a/lib/ansible/modules/git.py b/lib/ansible/modules/git.py
index 14d26195461..f4c1100fda1 100644
--- a/lib/ansible/modules/git.py
+++ b/lib/ansible/modules/git.py
@@ -21,6 +21,10 @@ options:
repo:
description:
- git, SSH, or HTTP(S) protocol address of the git repository.
+ - Avoid embedding usernames and passwords within Git repository URLs.
+ This practice is insecure and can lead to unauthorized access to your repositories.
+ For secure authentication, configure SSH keys (recommended) or use a credential helper.
+ See Git documentation on SSH keys/credential helpers for instructions.
type: str
required: true
aliases: [ name ]
@@ -313,11 +317,6 @@ remote_url_changed:
returned: success
type: bool
sample: True
-warnings:
- description: List of warnings if requested features were not available due to a too old git version.
- returned: error
- type: str
- sample: git version is too old to fully support the depth argument. Falling back to full checkouts.
git_dir_now:
description: Contains the new path of .git directory if it is changed.
returned: success
@@ -358,11 +357,11 @@ def relocate_repo(module, result, repo_dir, old_repo_dir, worktree_dir):
dot_git_file.write('gitdir: %s' % repo_dir)
result['git_dir_before'] = old_repo_dir
result['git_dir_now'] = repo_dir
- except (IOError, OSError) as err:
+ except OSError as ex:
# if we already moved the .git dir, roll it back
if os.path.exists(repo_dir):
shutil.move(repo_dir, old_repo_dir)
- module.fail_json(msg=u'Unable to move git dir. %s' % to_text(err))
+ raise Exception('Unable to move git dir.') from ex
def head_splitter(headfile, remote, module=None, fail_on_error=False):
@@ -440,7 +439,7 @@ def write_ssh_wrapper(module):
fd, wrapper_path = tempfile.mkstemp(prefix=module.tmpdir + '/')
else:
raise OSError
- except (IOError, OSError):
+ except OSError:
fd, wrapper_path = tempfile.mkstemp()
# use existing git_ssh/ssh_command, fallback to 'ssh'
@@ -825,13 +824,14 @@ def get_head_branch(git_path, module, dest, remote, bare=False):
"""
try:
repo_path = get_repo_path(dest, bare)
- except (IOError, ValueError) as err:
+ except (OSError, ValueError) as ex:
# No repo path found
# ``.git`` file does not have a valid format for detached Git dir.
module.fail_json(
msg='Current repo does not have a valid reference to a '
'separate Git dir or it refers to the invalid path',
- details=to_text(err),
+ details=str(ex),
+ exception=ex,
)
# Read .git/HEAD for the name of the branch.
# If we're in a detached HEAD state, look up the branch associated with
@@ -1236,7 +1236,7 @@ def main():
archive_prefix = module.params['archive_prefix']
separate_git_dir = module.params['separate_git_dir']
- result = dict(changed=False, warnings=list())
+ result = dict(changed=False)
if module.params['accept_hostkey']:
if ssh_opts is not None:
@@ -1291,13 +1291,14 @@ def main():
if not module.check_mode:
relocate_repo(module, result, separate_git_dir, repo_path, dest)
repo_path = separate_git_dir
- except (IOError, ValueError) as err:
+ except (OSError, ValueError) as ex:
# No repo path found
# ``.git`` file does not have a valid format for detached Git dir.
module.fail_json(
msg='Current repo does not have a valid reference to a '
'separate Git dir or it refers to the invalid path',
- details=to_text(err),
+ details=str(ex),
+ exception=ex,
)
gitconfig = os.path.join(repo_path, 'config')
diff --git a/lib/ansible/modules/hostname.py b/lib/ansible/modules/hostname.py
index 79f9bcb0709..0f30ddf0f16 100644
--- a/lib/ansible/modules/hostname.py
+++ b/lib/ansible/modules/hostname.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Hiroaki Nakamura
@@ -68,9 +67,7 @@ EXAMPLES = """
import os
import platform
import socket
-import traceback
-
-import ansible.module_utils.compat.typing as t
+import typing as t
from ansible.module_utils.basic import (
AnsibleModule,
@@ -209,17 +206,14 @@ class FileStrategy(BaseStrategy):
return get_file_content(self.FILE, default='', strip=True)
except Exception as e:
self.module.fail_json(
- msg="failed to read hostname: %s" % to_native(e),
- exception=traceback.format_exc())
+ msg="failed to read hostname: %s" % to_native(e))
def set_permanent_hostname(self, name):
try:
with open(self.FILE, 'w+') as f:
f.write("%s\n" % name)
except Exception as e:
- self.module.fail_json(
- msg="failed to update hostname: %s" % to_native(e),
- exception=traceback.format_exc())
+ self.module.fail_json(msg="failed to update hostname: %s" % to_native(e))
class SLESStrategy(FileStrategy):
@@ -249,8 +243,7 @@ class RedHatStrategy(BaseStrategy):
)
except Exception as e:
self.module.fail_json(
- msg="failed to read hostname: %s" % to_native(e),
- exception=traceback.format_exc())
+ msg="failed to read hostname: %s" % to_native(e))
def set_permanent_hostname(self, name):
try:
@@ -269,9 +262,7 @@ class RedHatStrategy(BaseStrategy):
with open(self.NETWORK_FILE, 'w+') as f:
f.writelines(lines)
except Exception as e:
- self.module.fail_json(
- msg="failed to update hostname: %s" % to_native(e),
- exception=traceback.format_exc())
+ self.module.fail_json(msg="failed to update hostname: %s" % to_native(e))
class AlpineStrategy(FileStrategy):
@@ -361,9 +352,7 @@ class OpenRCStrategy(BaseStrategy):
if line.startswith('hostname='):
return line[10:].strip('"')
except Exception as e:
- self.module.fail_json(
- msg="failed to read hostname: %s" % to_native(e),
- exception=traceback.format_exc())
+ self.module.fail_json(msg="failed to read hostname: %s" % to_native(e))
def set_permanent_hostname(self, name):
try:
@@ -377,9 +366,7 @@ class OpenRCStrategy(BaseStrategy):
with open(self.FILE, 'w') as f:
f.write('\n'.join(lines) + '\n')
except Exception as e:
- self.module.fail_json(
- msg="failed to update hostname: %s" % to_native(e),
- exception=traceback.format_exc())
+ self.module.fail_json(msg="failed to update hostname: %s" % to_native(e))
class OpenBSDStrategy(FileStrategy):
@@ -481,9 +468,7 @@ class FreeBSDStrategy(BaseStrategy):
if line.startswith('hostname='):
return line[10:].strip('"')
except Exception as e:
- self.module.fail_json(
- msg="failed to read hostname: %s" % to_native(e),
- exception=traceback.format_exc())
+ self.module.fail_json(msg="failed to read hostname: %s" % to_native(e))
def set_permanent_hostname(self, name):
try:
@@ -500,9 +485,7 @@ class FreeBSDStrategy(BaseStrategy):
with open(self.FILE, 'w') as f:
f.write('\n'.join(lines) + '\n')
except Exception as e:
- self.module.fail_json(
- msg="failed to update hostname: %s" % to_native(e),
- exception=traceback.format_exc())
+ self.module.fail_json(msg="failed to update hostname: %s" % to_native(e))
class DarwinStrategy(BaseStrategy):
@@ -625,8 +608,8 @@ class Hostname(object):
self.use = module.params['use']
if self.use is not None:
- strat = globals()['%sStrategy' % STRATS[self.use]]
- self.strategy = strat(module)
+ strategy = globals()['%sStrategy' % STRATS[self.use]]
+ self.strategy = strategy(module)
elif platform.system() == 'Linux' and ServiceMgrFactCollector.is_systemd_managed(module):
# This is Linux and systemd is active
self.strategy = SystemdStrategy(module)
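
Beyond the rename, the lookup is a plain name-to-class dispatch: STRATS maps the O(use) value to a class-name prefix, which is resolved against the module's globals. The same pattern in miniature (the mapping below is an abbreviated, illustrative subset, not the module's real STRATS):

    class SystemdStrategy: ...
    class FileStrategy: ...

    STRATS = {'systemd': 'Systemd', 'generic': 'File'}  # illustrative subset only

    use = 'systemd'
    strategy = globals()['%sStrategy' % STRATS[use]]()
    assert isinstance(strategy, SystemdStrategy)
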
diff --git a/lib/ansible/modules/import_role.py b/lib/ansible/modules/import_role.py
index 0b9eff71244..fd9f0d6ad2d 100644
--- a/lib/ansible/modules/import_role.py
+++ b/lib/ansible/modules/import_role.py
@@ -59,7 +59,7 @@ options:
description:
- This option dictates whether the role's C(vars) and C(defaults) are exposed to the play.
- Variables are exposed to the play at playbook parsing time, and available to earlier roles and tasks as well unlike C(include_role).
- - The default depends on the configuration option :ref:`default_private_role_vars`.
+ - The default depends on the configuration option R(DEFAULT_PRIVATE_ROLE_VARS, DEFAULT_PRIVATE_ROLE_VARS).
type: bool
default: yes
version_added: '2.17'
diff --git a/lib/ansible/modules/iptables.py b/lib/ansible/modules/iptables.py
index 164b53960b0..9502dcad2cb 100644
--- a/lib/ansible/modules/iptables.py
+++ b/lib/ansible/modules/iptables.py
@@ -67,7 +67,7 @@ options:
description:
- Which version of the IP protocol this rule should apply to.
type: str
- choices: [ ipv4, ipv6 ]
+ choices: [ ipv4, ipv6, both ]
default: ipv4
chain:
description:
@@ -564,6 +564,7 @@ BINS = dict(
ICMP_TYPE_OPTIONS = dict(
ipv4='--icmp-type',
ipv6='--icmpv6-type',
+ both='--icmp-type --icmpv6-type',
)
@@ -614,7 +615,6 @@ def append_wait(rule, param, flag):
def construct_rule(params):
rule = []
- append_wait(rule, params['wait'], '-w')
append_param(rule, params['protocol'], '-p', False)
append_param(rule, params['source'], '-s', False)
append_param(rule, params['destination'], '-d', False)
@@ -701,6 +701,8 @@ def push_arguments(iptables_path, action, params, make_rule=True):
cmd.extend([action, params['chain']])
if action == '-I' and params['rule_num']:
cmd.extend([params['rule_num']])
+ if params['wait']:
+ cmd.extend(['-w', params['wait']])
if make_rule:
cmd.extend(construct_rule(params))
return cmd
@@ -781,7 +783,7 @@ def main():
table=dict(type='str', default='filter', choices=['filter', 'nat', 'mangle', 'raw', 'security']),
state=dict(type='str', default='present', choices=['absent', 'present']),
action=dict(type='str', default='append', choices=['append', 'insert']),
- ip_version=dict(type='str', default='ipv4', choices=['ipv4', 'ipv6']),
+ ip_version=dict(type='str', default='ipv4', choices=['ipv4', 'ipv6', 'both']),
chain=dict(type='str'),
rule_num=dict(type='str'),
protocol=dict(type='str'),
@@ -861,86 +863,97 @@ def main():
rule=' '.join(construct_rule(module.params)),
state=module.params['state'],
chain_management=module.params['chain_management'],
+ wait=module.params['wait'],
)
- ip_version = module.params['ip_version']
- iptables_path = module.get_bin_path(BINS[ip_version], True)
+    ip_versions = ['ipv4', 'ipv6'] if module.params['ip_version'] == 'both' else [module.params['ip_version']]
+    iptables_path = [module.get_bin_path(BINS[ip_version], True) for ip_version in ip_versions]
- if module.params.get('log_prefix', None) or module.params.get('log_level', None):
- if module.params['jump'] is None:
- module.params['jump'] = 'LOG'
- elif module.params['jump'] != 'LOG':
- module.fail_json(msg="Logging options can only be used with the LOG jump target.")
+ both_changed = False
- # Check if wait option is supported
- iptables_version = LooseVersion(get_iptables_version(iptables_path, module))
+ for path in iptables_path:
+ if module.params.get('log_prefix', None) or module.params.get('log_level', None):
+ if module.params['jump'] is None:
+ module.params['jump'] = 'LOG'
+ elif module.params['jump'] != 'LOG':
+ module.fail_json(msg="Logging options can only be used with the LOG jump target.")
- if iptables_version >= LooseVersion(IPTABLES_WAIT_SUPPORT_ADDED):
- if iptables_version < LooseVersion(IPTABLES_WAIT_WITH_SECONDS_SUPPORT_ADDED):
- module.params['wait'] = ''
- else:
- module.params['wait'] = None
-
- # Flush the table
- if args['flush'] is True:
- args['changed'] = True
- if not module.check_mode:
- flush_table(iptables_path, module, module.params)
-
- # Set the policy
- elif module.params['policy']:
- current_policy = get_chain_policy(iptables_path, module, module.params)
- if not current_policy:
- module.fail_json(msg='Can\'t detect current policy')
-
- changed = current_policy != module.params['policy']
- args['changed'] = changed
- if changed and not module.check_mode:
- set_chain_policy(iptables_path, module, module.params)
-
- # Delete the chain if there is no rule in the arguments
- elif (args['state'] == 'absent') and not args['rule']:
- chain_is_present = check_chain_present(
- iptables_path, module, module.params
- )
- args['changed'] = chain_is_present
-
- if (chain_is_present and args['chain_management'] and not module.check_mode):
- delete_chain(iptables_path, module, module.params)
+ # Check if wait option is supported
+ iptables_version = LooseVersion(get_iptables_version(path, module))
- else:
- # Create the chain if there are no rule arguments
- if (args['state'] == 'present') and not args['rule']:
+ if iptables_version >= LooseVersion(IPTABLES_WAIT_SUPPORT_ADDED):
+ if iptables_version < LooseVersion(IPTABLES_WAIT_WITH_SECONDS_SUPPORT_ADDED):
+ module.params['wait'] = ''
+ else:
+ module.params['wait'] = None
+
+ # Flush the table
+ if args['flush'] is True:
+ args['changed'] = True
+ both_changed = True
+ if not module.check_mode:
+ flush_table(path, module, module.params)
+
+ # Set the policy
+ elif module.params['policy']:
+ current_policy = get_chain_policy(path, module, module.params)
+ if not current_policy:
+ module.fail_json(msg='Can\'t detect current policy')
+
+ changed = current_policy != module.params['policy']
+ args['changed'] = changed
+ both_changed = both_changed or changed
+ if changed and not module.check_mode:
+ set_chain_policy(path, module, module.params)
+
+ # Delete the chain if there is no rule in the arguments
+ elif (args['state'] == 'absent') and not args['rule']:
chain_is_present = check_chain_present(
- iptables_path, module, module.params
+ path, module, module.params
)
- args['changed'] = not chain_is_present
+ args['changed'] = chain_is_present
+ both_changed = both_changed or chain_is_present
- if (not chain_is_present and args['chain_management'] and not module.check_mode):
- create_chain(iptables_path, module, module.params)
+ if (chain_is_present and args['chain_management'] and not module.check_mode):
+ delete_chain(path, module, module.params)
else:
- insert = (module.params['action'] == 'insert')
- rule_is_present = check_rule_present(
- iptables_path, module, module.params
- )
+ # Create the chain if there are no rule arguments
+ if (args['state'] == 'present') and not args['rule']:
+ chain_is_present = check_chain_present(
+ path, module, module.params
+ )
+ args['changed'] = not chain_is_present
+ both_changed = both_changed or not chain_is_present
- should_be_present = (args['state'] == 'present')
- # Check if target is up to date
- args['changed'] = (rule_is_present != should_be_present)
- if args['changed'] is False:
- # Target is already up to date
- module.exit_json(**args)
+ if (not chain_is_present and args['chain_management'] and not module.check_mode):
+ create_chain(path, module, module.params)
- # Modify if not check_mode
- if not module.check_mode:
- if should_be_present:
- if insert:
- insert_rule(iptables_path, module, module.params)
+ else:
+ insert = (module.params['action'] == 'insert')
+ rule_is_present = check_rule_present(
+ path, module, module.params
+ )
+
+ should_be_present = (args['state'] == 'present')
+ # Check if target is up to date
+ args['changed'] = (rule_is_present != should_be_present)
+ both_changed = both_changed or (rule_is_present != should_be_present)
+ if args['changed'] is False:
+ # Target is already up to date
+ continue
+
+ # Modify if not check_mode
+ if not module.check_mode:
+ if should_be_present:
+ if insert:
+ insert_rule(path, module, module.params)
+ else:
+ append_rule(path, module, module.params)
else:
- append_rule(iptables_path, module, module.params)
- else:
- remove_rule(iptables_path, module, module.params)
+ remove_rule(path, module, module.params)
+
+ args['changed'] = both_changed
module.exit_json(**args)
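
Condensed, the 'both' handling above amounts to: expand the requested version into one or two binaries, run the existing per-binary logic unchanged for each, and report changed when any pass changed something. A sketch under that reading (apply_for_binary is a hypothetical wrapper standing in for the flush/policy/chain/rule branches above):

    requested = module.params['ip_version']
    versions = ['ipv4', 'ipv6'] if requested == 'both' else [requested]
    paths = [module.get_bin_path(BINS[version], True) for version in versions]

    both_changed = False
    for path in paths:
        # hypothetical wrapper around the per-binary branches shown in the hunk
        both_changed = apply_for_binary(path, module) or both_changed

    args['changed'] = both_changed
    module.exit_json(**args)
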
diff --git a/lib/ansible/modules/known_hosts.py b/lib/ansible/modules/known_hosts.py
index c001915115d..0c88e0f79c7 100644
--- a/lib/ansible/modules/known_hosts.py
+++ b/lib/ansible/modules/known_hosts.py
@@ -102,7 +102,6 @@ EXAMPLES = r"""
import base64
import copy
-import errno
import hashlib
import hmac
import os
@@ -169,11 +168,10 @@ def enforce_state(module, params):
if replace_or_add or found != (state == "present"):
try:
inf = open(path, "r")
- except IOError as e:
- if e.errno == errno.ENOENT:
- inf = None
- else:
- module.fail_json(msg="Failed to read %s: %s" % (path, str(e)))
+ except FileNotFoundError:
+ inf = None
+ except OSError as ex:
+ raise Exception(f"Failed to read {path!r}.") from ex
try:
with tempfile.NamedTemporaryFile(mode='w+', dir=os.path.dirname(path), delete=False) as outf:
if inf is not None:
@@ -184,8 +182,8 @@ def enforce_state(module, params):
inf.close()
if state == 'present':
outf.write(key)
- except (IOError, OSError) as e:
- module.fail_json(msg="Failed to write to file %s: %s" % (path, to_native(e)))
+ except OSError as ex:
+ raise Exception(f"Failed to write to file {path!r}.") from ex
else:
module.atomic_move(outf.name, path)
@@ -220,9 +218,8 @@ def sanity_check(module, host, key, sshkeygen):
try:
outf.write(key)
outf.flush()
- except IOError as e:
- module.fail_json(msg="Failed to write to temporary file %s: %s" %
- (outf.name, to_native(e)))
+ except OSError as ex:
+ raise Exception(f"Failed to write to temporary file {outf.name!r}.") from ex
sshkeygen_command = [sshkeygen, '-F', host, '-f', outf.name]
rc, stdout, stderr = module.run_command(sshkeygen_command)
@@ -337,9 +334,10 @@ def compute_diff(path, found_line, replace_or_add, state, key):
}
try:
inf = open(path, "r")
- except IOError as e:
- if e.errno == errno.ENOENT:
- diff['before_header'] = '/dev/null'
+ except FileNotFoundError:
+ diff['before_header'] = '/dev/null'
+ except OSError:
+ pass
else:
diff['before'] = inf.read()
inf.close()
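
These hunks lean on the Python 3 exception hierarchy: IOError is an alias of OSError, and FileNotFoundError is an OSError subclass carrying errno.ENOENT, so the specific handler must precede the generic one. A quick check:

    import errno

    assert IOError is OSError  # the old (IOError, OSError) tuples were redundant
    assert issubclass(FileNotFoundError, OSError)

    try:
        open("/nonexistent/known_hosts")  # assumed not to exist
    except FileNotFoundError as ex:  # matched before any plain OSError handler
        assert ex.errno == errno.ENOENT
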
diff --git a/lib/ansible/modules/meta.py b/lib/ansible/modules/meta.py
index b10a56e2444..64d9c1c9f61 100644
--- a/lib/ansible/modules/meta.py
+++ b/lib/ansible/modules/meta.py
@@ -22,8 +22,13 @@ options:
points to implicitly trigger handler runs (after pre/post tasks, the final role execution, and the main tasks section of your plays).
- V(refresh_inventory) (added in Ansible 2.0) forces the reload of the inventory, which in the case of dynamic inventory scripts means they will be
re-executed. If the dynamic inventory script is using a cache, Ansible cannot know this and has no way of refreshing it (you can disable the cache
- or, if available for your specific inventory datasource (e.g. aws), you can use the an inventory plugin instead of an inventory script).
- This is mainly useful when additional hosts are created and users wish to use them instead of using the M(ansible.builtin.add_host) module.
+        or, if available for your specific inventory datasource (for example P(amazon.aws.aws_ec2#inventory)), you can use an inventory plugin instead
+ of an inventory script). This is mainly useful when additional hosts are created and users wish to use them instead of using the
+ M(ansible.builtin.add_host) module.
+      - Note that neither V(refresh_inventory) nor the M(ansible.builtin.add_host) module adds hosts to the set of hosts the current play iterates over.
+ However, if needed, you can explicitly delegate tasks to new hosts with C(delegate_to). Generally,
+ C(delegate_to) can be used against hosts regardless of whether they are in the inventory or not, as long as
+ the value supplied is sufficient for the connection plugin to access the host.
- V(noop) (added in Ansible 2.0) This literally does 'nothing'. It is mainly used internally and not recommended for general use.
- V(clear_facts) (added in Ansible 2.1) causes the gathered facts for the hosts specified in the play's list of hosts to be cleared,
including the fact cache.
diff --git a/lib/ansible/modules/mount_facts.py b/lib/ansible/modules/mount_facts.py
index f5d2bf47f3a..1a5bd4c6914 100644
--- a/lib/ansible/modules/mount_facts.py
+++ b/lib/ansible/modules/mount_facts.py
@@ -359,7 +359,7 @@ def gen_mounts_from_stdout(stdout: str) -> t.Iterable[MountInfo]:
elif pattern is BSD_MOUNT_RE:
# the group containing fstype is comma separated, and may include whitespace
mount_info = match.groupdict()
- parts = re.split(r"\s*,\s*", match.group("fstype"), 1)
+ parts = re.split(r"\s*,\s*", match.group("fstype"), maxsplit=1)
if len(parts) == 1:
mount_info["fstype"] = parts[0]
else:
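
Passing maxsplit positionally to re.split() is deprecated as of Python 3.13, hence the keyword form; the behaviour is unchanged. The single split keeps any remaining comma-separated options intact, for example:

    import re

    # a BSD mount line may report a group like "ufs, local, soft-updates"
    parts = re.split(r"\s*,\s*", "ufs, local, soft-updates", maxsplit=1)
    assert parts == ["ufs", "local, soft-updates"]
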
diff --git a/lib/ansible/modules/package.py b/lib/ansible/modules/package.py
index adc390c08fe..02a18e8672b 100644
--- a/lib/ansible/modules/package.py
+++ b/lib/ansible/modules/package.py
@@ -85,4 +85,10 @@ EXAMPLES = """
- httpd
- mariadb-server
state: latest
+
+- name: Use the dnf package manager to install httpd
+ ansible.builtin.package:
+ name: httpd
+ state: present
+ use: dnf
"""
diff --git a/lib/ansible/modules/package_facts.py b/lib/ansible/modules/package_facts.py
index df10c4694db..595d3f58465 100644
--- a/lib/ansible/modules/package_facts.py
+++ b/lib/ansible/modules/package_facts.py
@@ -23,7 +23,7 @@ options:
default: ['auto']
choices:
auto: Depending on O(strategy), will match the first or all package managers provided, in order
- rpm: For RPM based distros, requires RPM Python bindings, not installed by default on Suse (python3-rpm)
+ rpm: For RPM based distros, requires RPM Python bindings, not installed by default on Suse or Fedora 41+ (python3-rpm)
yum: Alias to rpm
dnf: Alias to rpm
dnf5: Alias to rpm
@@ -460,7 +460,7 @@ def main():
# get supported pkg managers
PKG_MANAGERS = get_all_pkg_managers()
- PKG_MANAGER_NAMES = [x.lower() for x in PKG_MANAGERS.keys()]
+ PKG_MANAGER_NAMES = sorted([x.lower() for x in PKG_MANAGERS.keys()])
# add aliases
PKG_MANAGER_NAMES.extend([alias for alist in ALIASES.values() for alias in alist])
@@ -510,12 +510,24 @@ def main():
manager = PKG_MANAGERS[pkgmgr]()
try:
+ packages_found = {}
if manager.is_available(handle_exceptions=False):
- found += 1
try:
- packages.update(manager.get_packages())
+ packages_found = manager.get_packages()
except Exception as e:
module.warn('Failed to retrieve packages with %s: %s' % (pkgmgr, to_text(e)))
+
+ # only consider 'found' if it results in something
+ if packages_found:
+ found += 1
+ for k in packages_found.keys():
+ if k in packages:
+ packages[k].extend(packages_found[k])
+ else:
+ packages[k] = packages_found[k]
+ else:
+ module.warn('Found "%s" but no associated packages' % (pkgmgr))
+
except Exception as e:
if pkgmgr in module.params['manager']:
module.warn('Requested package manager %s was not usable by this module: %s' % (pkgmgr, to_text(e)))
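
The merge above extends per-name lists rather than overwriting them, which matters when more than one manager reports the same package name, and a manager now only counts as 'found' if it actually returned packages. The merge step in isolation:

    packages = {}

    def merge(packages_found):
        # extend existing entries instead of replacing them, as in the hunk above
        for name, entries in packages_found.items():
            if name in packages:
                packages[name].extend(entries)
            else:
                packages[name] = entries

    merge({'bash': [{'version': '5.2', 'source': 'rpm'}]})
    merge({'bash': [{'version': '5.2', 'source': 'apt'}]})
    assert len(packages['bash']) == 2
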
diff --git a/lib/ansible/modules/pip.py b/lib/ansible/modules/pip.py
index 028ef3f6e3b..20e6249b759 100644
--- a/lib/ansible/modules/pip.py
+++ b/lib/ansible/modules/pip.py
@@ -60,7 +60,7 @@ options:
virtualenv_python:
description:
- The Python executable used for creating the virtual environment.
- For example V(python3.12), V(python2.7). When not specified, the
+ For example V(python3.13). When not specified, the
Python version used to run the ansible module is used. This parameter
should not be used when O(virtualenv_command) is using V(pyvenv) or
the C(-m venv) module.
@@ -93,8 +93,8 @@ options:
description:
- The explicit executable or pathname for the C(pip) executable,
if different from the Ansible Python interpreter. For
- example V(pip3.3), if there are both Python 2.7 and 3.3 installations
- in the system and you want to run pip for the Python 3.3 installation.
+ example V(pip3.13), if there are multiple Python installations
+ in the system and you want to run pip for the Python 3.13 installation.
- Mutually exclusive with O(virtualenv) (added in 2.1).
- Does not affect the Ansible Python interpreter.
- The C(setuptools) package must be installed for both the Ansible Python interpreter
@@ -134,7 +134,7 @@ notes:
the virtualenv needs to be created.
- Although it executes using the Ansible Python interpreter, the pip module shells out to
run the actual pip command, so it can use any pip version you specify with O(executable).
- By default, it uses the pip version for the Ansible Python interpreter. For example, pip3 on python 3, and pip2 or pip on python 2.
+ By default, it uses the pip version for the Ansible Python interpreter.
- The interpreter used by Ansible
(see R(ansible_python_interpreter, ansible_python_interpreter))
requires the setuptools package, regardless of the version of pip set with
@@ -197,11 +197,11 @@ EXAMPLES = """
virtualenv: /my_app/venv
virtualenv_site_packages: yes
-- name: Install bottle into the specified (virtualenv), using Python 2.7
+- name: Install bottle into the specified (virtualenv), using Python 3.13
ansible.builtin.pip:
name: bottle
virtualenv: /my_app/venv
- virtualenv_command: virtualenv-2.7
+ virtualenv_command: virtualenv-3.13
- name: Install bottle within a user home directory
ansible.builtin.pip:
@@ -227,10 +227,10 @@ EXAMPLES = """
requirements: /my_app/requirements.txt
extra_args: "--no-index --find-links=file:///my_downloaded_packages_dir"
-- name: Install bottle for Python 3.3 specifically, using the 'pip3.3' executable
+- name: Install bottle for Python 3.13 specifically, using the 'pip3.13' executable
ansible.builtin.pip:
name: bottle
- executable: pip3.3
+ executable: pip3.13
- name: Install bottle, forcing reinstallation if it's already installed
ansible.builtin.pip:
@@ -299,7 +299,6 @@ import sys
import tempfile
import operator
import shlex
-import traceback
from ansible.module_utils.compat.version import LooseVersion
@@ -309,10 +308,10 @@ HAS_SETUPTOOLS = False
try:
from packaging.requirements import Requirement as parse_requirement
HAS_PACKAGING = True
-except Exception:
+except Exception as ex:
# This is catching a generic Exception, due to packaging on EL7 raising a TypeError on import
HAS_PACKAGING = False
- PACKAGING_IMP_ERR = traceback.format_exc()
+ PACKAGING_IMP_ERR = ex
try:
from pkg_resources import Requirement
parse_requirement = Requirement.parse # type: ignore[misc,assignment]
@@ -461,9 +460,7 @@ def _get_pip(module, env=None, executable=None):
candidate_pip_basenames = (executable,)
elif executable is None and env is None and _have_pip_module():
# If no executable or virtualenv were specified, use the pip module for the current Python interpreter if available.
- # Use of `__main__` is required to support Python 2.6 since support for executing packages with `runpy` was added in Python 2.7.
- # Without it Python 2.6 gives the following error: pip is a package and cannot be directly executed
- pip = [sys.executable, '-m', 'pip.__main__']
+ pip = [sys.executable, '-m', 'pip']
if pip is None:
if env is None:
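
With Python 2.6 long unsupported, runpy can execute the pip package directly, so the `pip.__main__` spelling is no longer needed. The module now builds the ordinary invocation, equivalent to:

    import subprocess
    import sys

    # same form as the list the module constructs: [sys.executable, '-m', 'pip', ...]
    subprocess.run([sys.executable, "-m", "pip", "--version"], check=True)
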
@@ -815,10 +812,8 @@ def main():
elif requirements:
cmd.extend(['-r', requirements])
else:
- module.exit_json(
- changed=False,
- warnings=["No valid name or requirements file found."],
- )
+ module.warn("No valid name or requirements file found.")
+ module.exit_json(changed=False)
if module.check_mode:
if extra_args or requirements or state == 'latest' or not name:
diff --git a/lib/ansible/modules/raw.py b/lib/ansible/modules/raw.py
index 5825a465023..e926f027c51 100644
--- a/lib/ansible/modules/raw.py
+++ b/lib/ansible/modules/raw.py
@@ -73,8 +73,8 @@ author:
"""
EXAMPLES = r"""
-- name: Bootstrap a host without python2 installed
- ansible.builtin.raw: dnf install -y python2 python2-dnf libselinux-python
+- name: Bootstrap a host without Python installed
+ ansible.builtin.raw: dnf install -y python3 python3-libdnf
- name: Run a command that uses non-posix shell-isms (in this example /bin/sh doesn't handle redirection and wildcards together but bash does)
ansible.builtin.raw: cat < /tmp/*txt
diff --git a/lib/ansible/modules/replace.py b/lib/ansible/modules/replace.py
index 61e629b26a0..ebadb934914 100644
--- a/lib/ansible/modules/replace.py
+++ b/lib/ansible/modules/replace.py
@@ -182,7 +182,6 @@ RETURN = r"""#"""
import os
import re
import tempfile
-from traceback import format_exc
from ansible.module_utils.common.text.converters import to_text, to_bytes
from ansible.module_utils.basic import AnsibleModule
@@ -257,9 +256,8 @@ def main():
try:
with open(path, 'rb') as f:
contents = to_text(f.read(), errors='surrogate_or_strict', encoding=encoding)
- except (OSError, IOError) as e:
- module.fail_json(msg='Unable to read the contents of %s: %s' % (path, to_text(e)),
- exception=format_exc())
+ except OSError as ex:
+ raise Exception(f"Unable to read the contents of {path!r}.") from ex
pattern = u''
if params['after'] and params['before']:
@@ -286,8 +284,7 @@ def main():
try:
result = re.subn(mre, params['replace'], section, 0)
except re.error as e:
- module.fail_json(msg="Unable to process replace due to error: %s" % to_text(e),
- exception=format_exc())
+ module.fail_json(msg="Unable to process replace due to error: %s" % to_text(e))
if result[1] > 0 and section != result[0]:
if pattern:
diff --git a/lib/ansible/modules/service.py b/lib/ansible/modules/service.py
index a94b8445bde..438aeb0e1a4 100644
--- a/lib/ansible/modules/service.py
+++ b/lib/ansible/modules/service.py
@@ -180,7 +180,7 @@ from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.common.sys_info import get_platform_subclass
from ansible.module_utils.service import fail_if_missing, is_systemd_managed
-from ansible.module_utils.six import PY2, b
+from ansible.module_utils.six import b
class Service(object):
@@ -285,14 +285,8 @@ class Service(object):
os._exit(0)
# Start the command
- if PY2:
- # Python 2.6's shlex.split can't handle text strings correctly
- cmd = to_bytes(cmd, errors='surrogate_or_strict')
- cmd = shlex.split(cmd)
- else:
- # Python3.x shex.split text strings.
- cmd = to_text(cmd, errors='surrogate_or_strict')
- cmd = [to_bytes(c, errors='surrogate_or_strict') for c in shlex.split(cmd)]
+ cmd = to_text(cmd, errors='surrogate_or_strict')
+ cmd = [to_bytes(c, errors='surrogate_or_strict') for c in shlex.split(cmd)]
# In either of the above cases, pass a list of byte strings to Popen
# chkconfig localizes messages and we're screen scraping so make
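
Only the Python 3 branch survives: decode the command to text so shlex.split() tokenizes it correctly, then re-encode each token to bytes for Popen. With an assumed sample command string:

    import shlex

    from ansible.module_utils.common.text.converters import to_bytes, to_text

    cmd = b"svcadm enable -rs 'svc:/network/ssh'"
    cmd = to_text(cmd, errors='surrogate_or_strict')
    argv = [to_bytes(c, errors='surrogate_or_strict') for c in shlex.split(cmd)]
    assert argv == [b'svcadm', b'enable', b'-rs', b'svc:/network/ssh']
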
diff --git a/lib/ansible/modules/service_facts.py b/lib/ansible/modules/service_facts.py
index fa0e5f22252..4b67b61e42c 100644
--- a/lib/ansible/modules/service_facts.py
+++ b/lib/ansible/modules/service_facts.py
@@ -211,8 +211,8 @@ class ServiceScanService(BaseService):
def _list_openrc(self, services):
all_services_runlevels = {}
- rc, stdout, stderr = self.module.run_command("%s -a -s -m 2>&1 | grep '^ ' | tr -d '[]'" % self.rc_status_path, use_unsafe_shell=True)
- rc_u, stdout_u, stderr_u = self.module.run_command("%s show -v 2>&1 | grep '|'" % self.rc_update_path, use_unsafe_shell=True)
+ dummy, stdout, dummy = self.module.run_command("%s -a -s -m 2>&1 | grep '^ ' | tr -d '[]'" % self.rc_status_path, use_unsafe_shell=True)
+ dummy, stdout_u, dummy = self.module.run_command("%s show -v 2>&1 | grep '|'" % self.rc_update_path, use_unsafe_shell=True)
for line in stdout_u.split('\n'):
line_data = line.split('|')
if len(line_data) < 2:
@@ -228,6 +228,9 @@ class ServiceScanService(BaseService):
if len(line_data) < 2:
continue
service_name = line_data[0]
+ # Skip lines which are not service names
+ if service_name == "*":
+ continue
service_state = line_data[1]
service_runlevels = all_services_runlevels[service_name]
service_data = {"name": service_name, "runlevels": service_runlevels, "state": service_state, "source": "openrc"}
diff --git a/lib/ansible/modules/set_fact.py b/lib/ansible/modules/set_fact.py
index ef4989c44fa..29fef156886 100644
--- a/lib/ansible/modules/set_fact.py
+++ b/lib/ansible/modules/set_fact.py
@@ -66,7 +66,7 @@ notes:
- Because of the nature of tasks, set_fact will produce 'static' values for a variable.
Unlike normal 'lazy' variables, the value gets evaluated and templated on assignment.
- Some boolean values (yes, no, true, false) will always be converted to boolean type,
- unless C(DEFAULT_JINJA2_NATIVE) is enabled. This is done so the C(var=value) booleans,
+    This is done so C(var=value) can create booleans,
otherwise it would only be able to create strings, but it also prevents using those values to create YAML strings.
Using the setting will restrict k=v to strings, but will allow you to specify string or boolean in YAML.
- "To create lists/arrays or dictionary/hashes use YAML notation C(var: [val1, val2])."
diff --git a/lib/ansible/modules/slurp.py b/lib/ansible/modules/slurp.py
index 83ba4ad31f5..e7615e616e5 100644
--- a/lib/ansible/modules/slurp.py
+++ b/lib/ansible/modules/slurp.py
@@ -85,7 +85,6 @@ import base64
import errno
from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
def main():
@@ -99,20 +98,18 @@ def main():
try:
with open(source, 'rb') as source_fh:
- source_content = source_fh.read()
- except (IOError, OSError) as e:
- if e.errno == errno.ENOENT:
- msg = "file not found: %s" % source
- elif e.errno == errno.EACCES:
- msg = "file is not readable: %s" % source
- elif e.errno == errno.EISDIR:
- msg = "source is a directory and must be a file: %s" % source
+ data = base64.b64encode(source_fh.read())
+ except OSError as ex:
+ if ex.errno == errno.ENOENT:
+ msg = f"File not found: {source}"
+ elif ex.errno == errno.EACCES:
+ msg = f"File is not readable: {source}"
+ elif ex.errno == errno.EISDIR:
+ msg = f"Source is a directory and must be a file: {source}"
else:
- msg = "unable to slurp file: %s" % to_native(e, errors='surrogate_then_replace')
+            msg = f"Unable to slurp file: {source}"
- module.fail_json(msg)
-
- data = base64.b64encode(source_content)
+ module.fail_json(msg, exception=ex)
module.exit_json(content=data, source=source, encoding='base64')
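
Reading and base64-encoding now happen inside a single try block, but the returned C(content) is unchanged: consumers still decode it (the b64decode filter performs the same step in a playbook):

    import base64

    data = base64.b64encode(b"secret file contents")  # what slurp returns as 'content'
    assert base64.b64decode(data) == b"secret file contents"
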
diff --git a/lib/ansible/modules/stat.py b/lib/ansible/modules/stat.py
index 81707af7a86..a1eb14e4030 100644
--- a/lib/ansible/modules/stat.py
+++ b/lib/ansible/modules/stat.py
@@ -44,6 +44,14 @@ options:
version_added: "2.3"
get_checksum:
version_added: "1.8"
+ get_selinux_context:
+ description:
+      - Return the file's SELinux context as a list V([user, role, type, range]).
+      - The context is V([None, None, None, None]) if it cannot be retrieved,
+        either because it does not exist or because of some other issue.
+ type: bool
+ default: no
+ version_added: '2.20'
extends_documentation_fragment:
- action_common_attributes
- checksum_common
@@ -346,6 +354,12 @@ stat:
type: list
sample: [ immutable, extent ]
version_added: 2.3
+ selinux_context:
+ description: The SELinux context of a path
+ returned: success, path exists and user can execute the path
+ type: list
+ sample: [ user, role, type, range ]
+ version_added: '2.20'
version:
description: The version/generation attribute of a file according to the filesystem
returned: success, path exists, user can execute the path, lsattr is available and filesystem supports
@@ -354,7 +368,6 @@ stat:
version_added: 2.3
"""
-import errno
import grp
import os
import pwd
@@ -409,7 +422,7 @@ def format_output(module, path, st):
('st_blksize', 'block_size'),
('st_rdev', 'device_type'),
('st_flags', 'flags'),
- # Some Berkley based
+ # Some Berkeley based
('st_gen', 'generation'),
('st_birthtime', 'birthtime'),
# RISCOS
@@ -435,6 +448,7 @@ def main():
get_checksum=dict(type='bool', default=True),
get_mime=dict(type='bool', default=True, aliases=['mime', 'mime_type', 'mime-type']),
get_attributes=dict(type='bool', default=True, aliases=['attr', 'attributes']),
+ get_selinux_context=dict(type='bool', default=False),
checksum_algorithm=dict(type='str', default='sha1',
choices=['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'],
aliases=['checksum', 'checksum_algo']),
@@ -449,6 +463,7 @@ def main():
get_attr = module.params.get('get_attributes')
get_checksum = module.params.get('get_checksum')
checksum_algorithm = module.params.get('checksum_algorithm')
+ get_selinux_context = module.params.get('get_selinux_context')
# main stat data
try:
@@ -456,12 +471,11 @@ def main():
st = os.stat(b_path)
else:
st = os.lstat(b_path)
- except OSError as e:
- if e.errno == errno.ENOENT:
- output = {'exists': False}
- module.exit_json(changed=False, stat=output)
-
- module.fail_json(msg=e.strerror)
+ except FileNotFoundError:
+ output = {'exists': False}
+ module.exit_json(changed=False, stat=output)
+ except OSError as ex:
+ module.fail_json(msg=ex.strerror, exception=ex)
# process base results
output = format_output(module, path, st)
@@ -517,6 +531,10 @@ def main():
if x in out:
output[x] = out[x]
+ # try to get SELinux context
+ if get_selinux_context:
+ output['selinux_context'] = module.selinux_context(b_path)
+
module.exit_json(changed=False, stat=output)
diff --git a/lib/ansible/modules/systemd_service.py b/lib/ansible/modules/systemd_service.py
index 4e58ba2e4d0..3aedb68825a 100644
--- a/lib/ansible/modules/systemd_service.py
+++ b/lib/ansible/modules/systemd_service.py
@@ -34,7 +34,7 @@ options:
choices: [ reloaded, restarted, started, stopped ]
enabled:
description:
- - Whether the unit should start on boot. At least one of O(state) and O(enabled) are required.
+      - Whether the unit should start on boot. At least one of O(state) or O(enabled) is required.
- If set, requires O(name).
type: bool
force:
diff --git a/lib/ansible/modules/sysvinit.py b/lib/ansible/modules/sysvinit.py
index 22621975d03..94d760ad26b 100644
--- a/lib/ansible/modules/sysvinit.py
+++ b/lib/ansible/modules/sysvinit.py
@@ -88,9 +88,9 @@ EXAMPLES = """
- name: Sleep for 5 seconds between stop and start command of badly behaving service
ansible.builtin.sysvinit:
- name: apache2
- state: restarted
- sleep: 5
+ name: apache2
+ state: restarted
+ sleep: 5
- name: Make sure apache2 is started on runlevels 3 and 5
ansible.builtin.sysvinit:
diff --git a/lib/ansible/modules/tempfile.py b/lib/ansible/modules/tempfile.py
index a9a8d644300..a7163b02ebf 100644
--- a/lib/ansible/modules/tempfile.py
+++ b/lib/ansible/modules/tempfile.py
@@ -90,7 +90,6 @@ path:
from os import close
from tempfile import mkstemp, mkdtemp
-from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
@@ -123,7 +122,7 @@ def main():
module.exit_json(changed=True, path=path)
except Exception as e:
- module.fail_json(msg=to_native(e), exception=format_exc())
+ module.fail_json(msg=to_native(e))
if __name__ == '__main__':
diff --git a/lib/ansible/modules/unarchive.py b/lib/ansible/modules/unarchive.py
index 0b192ab569e..18f90f727e0 100644
--- a/lib/ansible/modules/unarchive.py
+++ b/lib/ansible/modules/unarchive.py
@@ -241,6 +241,7 @@ uid:
import binascii
import codecs
+import ctypes
import fnmatch
import grp
import os
@@ -249,7 +250,6 @@ import pwd
import re
import stat
import time
-import traceback
from functools import partial
from zipfile import ZipFile
@@ -262,6 +262,13 @@ from ansible.module_utils.urls import fetch_file
from shlex import quote
from zipfile import BadZipFile
+try:
+ from functools import cache
+except ImportError:
+ # Python < 3.9
+ from functools import lru_cache
+ cache = lru_cache(maxsize=None)
+
# String from tar that shows the tar contents are different from the
# filesystem
OWNER_DIFF_RE = re.compile(r': Uid differs$')
@@ -279,6 +286,18 @@ CONTENT_DIFF_RE = re.compile(r': Contents differ$')
SIZE_DIFF_RE = re.compile(r': Size differs$')
+@cache
+def _y2038_impacted():
+ """Determine if the system has 64-bit time_t."""
+ if hasattr(ctypes, "c_time_t"): # Python >= 3.12
+ return ctypes.sizeof(ctypes.c_time_t) < 8
+ try:
+ time.gmtime(2**31)
+ except OverflowError:
+ return True
+ return False
+
+
def crc32(path, buffer_size):
""" Return a CRC32 checksum of a file """
@@ -414,6 +433,8 @@ class ZipArchive(object):
try:
if int(match.groups()[0]) < 1980:
date_time = epoch_date_time
+ elif int(match.groups()[0]) >= 2038 and _y2038_impacted():
+ date_time = (2038, 1, 1, 0, 0, 0, 0, 0, 0)
elif int(match.groups()[0]) > 2107:
date_time = (2107, 12, 31, 23, 59, 59, 0, 0, 0)
else:
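
On a 32-bit time_t, timestamps from 2038-01-19T03:14:08Z onward are unrepresentable, so such zip entry dates are clamped in the same spirit as the existing pre-1980 and post-2107 clamps. The fallback probe works because time.gmtime() raises OverflowError outside the platform's time_t range:

    import time

    try:
        time.gmtime(2**31)  # one second past the signed 32-bit maximum
    except OverflowError:
        print("32-bit time_t: Y2038-impacted")
    else:
        print("64-bit time_t")
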
@@ -676,7 +697,7 @@ class ZipArchive(object):
try:
mode = AnsibleModule._symbolic_mode_to_octal(st, self.file_args['mode'])
except ValueError as e:
- self.module.fail_json(path=path, msg="%s" % to_native(e), exception=traceback.format_exc())
+ self.module.fail_json(path=path, msg="%s" % to_native(e))
# Only special files require no umask-handling
elif ztype == '?':
mode = self._permstr_to_octal(permstr, 0)
@@ -1111,8 +1132,8 @@ def main():
res_args['extract_results'] = handler.unarchive()
if res_args['extract_results']['rc'] != 0:
module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
- except IOError:
- module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
+ except OSError as ex:
+ module.fail_json(f"Failed to unpack {src!r} to {dest!r}.", exception=ex, **res_args)
else:
res_args['changed'] = True
@@ -1129,8 +1150,8 @@ def main():
try:
res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'], expand=False)
- except (IOError, OSError) as e:
- module.fail_json(msg="Unexpected error when accessing exploded file: %s" % to_native(e), **res_args)
+ except OSError as ex:
+ module.fail_json("Unexpected error when accessing exploded file.", exception=ex, **res_args)
if '/' in filename:
top_folder_path = filename.split('/')[0]
@@ -1144,8 +1165,8 @@ def main():
file_args['path'] = "%s/%s" % (dest, f)
try:
res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'], expand=False)
- except (IOError, OSError) as e:
- module.fail_json(msg="Unexpected error when accessing exploded file: %s" % to_native(e), **res_args)
+ except OSError as ex:
+ module.fail_json("Unexpected error when accessing exploded file.", exception=ex, **res_args)
if module.params['list_files']:
res_args['files'] = handler.files_in_archive
diff --git a/lib/ansible/modules/uri.py b/lib/ansible/modules/uri.py
index b19628b9aa2..e19450b358d 100644
--- a/lib/ansible/modules/uri.py
+++ b/lib/ansible/modules/uri.py
@@ -61,6 +61,7 @@ options:
or list of tuples into an C(application/x-www-form-urlencoded) string. (Added in v2.7)
- If O(body_format) is set to V(form-multipart) it will convert a dictionary
into C(multipart/form-multipart) body. (Added in v2.10)
+      - If O(body_format) is set to V(form-multipart), the C(multipart_encoding) option allows changing the multipart file encoding. (Added in v2.19)
type: raw
body_format:
description:
@@ -105,18 +106,6 @@ options:
- The webservice bans or rate-limits clients that cause any HTTP 401 errors.
type: bool
default: no
- follow_redirects:
- description:
- - Whether or not the URI module should follow redirects.
- type: str
- default: safe
- choices:
- all: Will follow all redirects.
- none: Will not follow any redirects.
- safe: Only redirects doing GET or HEAD requests will be followed.
- urllib2: Defer to urllib2 behavior (As of writing this follows HTTP redirects).
- 'no': (DEPRECATED, removed in 2.22) alias of V(none).
- 'yes': (DEPRECATED, removed in 2.22) alias of V(all).
creates:
description:
- A filename, when it already exists, this step will not be run.
@@ -234,6 +223,7 @@ options:
extends_documentation_fragment:
- action_common_attributes
- files
+ - url.url_redirect
attributes:
check_mode:
support: none
@@ -262,7 +252,7 @@ EXAMPLES = r"""
url: http://www.example.com
return_content: true
register: this
- failed_when: this is failed or "'AWESOME' not in this.content"
+ failed_when: "this is failed or 'AWESOME' not in this.content"
- name: Create a JIRA issue
ansible.builtin.uri:
@@ -308,10 +298,12 @@ EXAMPLES = r"""
file1:
filename: /bin/true
mime_type: application/octet-stream
+ multipart_encoding: base64
file2:
content: text based file content
filename: fake.txt
mime_type: text/plain
+ multipart_encoding: 7or8bit
text_form_field: value
- name: Connect to website using a previously stored cookie
@@ -440,19 +432,27 @@ url:
sample: https://www.ansible.com/
"""
+import http
import json
import os
import re
import shutil
import tempfile
+from datetime import datetime, timezone
from ansible.module_utils.basic import AnsibleModule, sanitize_keys
from ansible.module_utils.six import binary_type, iteritems, string_types
-from ansible.module_utils.six.moves.urllib.parse import urlencode, urlsplit
+from ansible.module_utils.six.moves.urllib.parse import urlencode, urljoin
from ansible.module_utils.common.text.converters import to_native, to_text
-from ansible.module_utils.compat.datetime import utcnow, utcfromtimestamp
from ansible.module_utils.six.moves.collections_abc import Mapping, Sequence
-from ansible.module_utils.urls import fetch_url, get_response_filename, parse_content_type, prepare_multipart, url_argument_spec
+from ansible.module_utils.urls import (
+ fetch_url,
+ get_response_filename,
+ parse_content_type,
+ prepare_multipart,
+ url_argument_spec,
+ url_redirect_argument_spec,
+)
JSON_CANDIDATES = {'json', 'javascript'}
@@ -505,27 +505,6 @@ def write_file(module, dest, content, resp):
os.remove(tmpsrc)
-def absolute_location(url, location):
- """Attempts to create an absolute URL based on initial URL, and
- next URL, specifically in the case of a ``Location`` header.
- """
-
- if '://' in location:
- return location
-
- elif location.startswith('/'):
- parts = urlsplit(url)
- base = url.replace(parts[2], '')
- return '%s%s' % (base, location)
-
- elif not location.startswith('/'):
- base = os.path.dirname(url)
- return '%s/%s' % (base, location)
-
- else:
- return location
-
-
def kv_list(data):
""" Convert data into a list of key-value tuples """
if data is None:
@@ -579,7 +558,10 @@ def uri(module, url, dest, body, body_format, method, headers, socket_timeout, c
kwargs = {}
if dest is not None and os.path.isfile(dest):
# if destination file already exist, only download if file newer
- kwargs['last_mod_time'] = utcfromtimestamp(os.path.getmtime(dest))
+ kwargs['last_mod_time'] = datetime.fromtimestamp(
+ os.path.getmtime(dest),
+ tz=timezone.utc,
+ )
if module.params.get('follow_redirects') in ('no', 'yes'):
module.deprecate(
@@ -605,6 +587,8 @@ def uri(module, url, dest, body, body_format, method, headers, socket_timeout, c
def main():
argument_spec = url_argument_spec()
+ argument_spec['url']['required'] = True
+ argument_spec.update(url_redirect_argument_spec())
argument_spec.update(
dest=dict(type='path'),
url_username=dict(type='str', aliases=['user']),
@@ -691,12 +675,12 @@ def main():
module.exit_json(stdout="skipped, since '%s' does not exist" % removes, changed=False)
# Make the request
- start = utcnow()
+ start = datetime.now(timezone.utc)
r, info = uri(module, url, dest, body, body_format, method,
dict_headers, socket_timeout, ca_path, unredirected_headers,
decompress, ciphers, use_netrc)
- elapsed = (utcnow() - start).seconds
+ elapsed = (datetime.now(timezone.utc) - start).seconds
if r and dest is not None and os.path.isdir(dest):
filename = get_response_filename(r) or 'index.html'
@@ -732,6 +716,8 @@ def main():
# there was no content, but the error read()
# may have been stored in the info as 'body'
content = info.pop('body', b'')
+ except http.client.HTTPException as http_err:
+ module.fail_json(msg=f"HTTP Error while fetching {url}: {to_native(http_err)}")
elif r:
content = r
else:
@@ -766,7 +752,7 @@ def main():
uresp[ukey] = value
if 'location' in uresp:
- uresp['location'] = absolute_location(url, uresp['location'])
+ uresp['location'] = urljoin(url, uresp['location'])
# Default content_encoding to try
if isinstance(content, binary_type):
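
urljoin() implements RFC 3986 reference resolution and covers every case the removed absolute_location() handled: already-absolute URLs, host-relative paths, and document-relative paths. For instance:

    from urllib.parse import urljoin

    base = "https://example.com/api/v1/resource"
    assert urljoin(base, "https://other.example/x") == "https://other.example/x"
    assert urljoin(base, "/login") == "https://example.com/login"
    assert urljoin(base, "next") == "https://example.com/api/v1/next"
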
diff --git a/lib/ansible/modules/user.py b/lib/ansible/modules/user.py
index aa3bbcf68fb..28eef5c31c0 100644
--- a/lib/ansible/modules/user.py
+++ b/lib/ansible/modules/user.py
@@ -490,6 +490,7 @@ uid:
import ctypes.util
+from datetime import datetime
import grp
import calendar
import os
@@ -502,13 +503,13 @@ import socket
import subprocess
import time
import math
+import typing as t
from ansible.module_utils import distro
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.common.sys_info import get_platform_subclass
-import ansible.module_utils.compat.typing as t
class StructSpwdType(ctypes.Structure):
@@ -1279,11 +1280,16 @@ class User(object):
env=env)
out_buffer = b''
err_buffer = b''
+ first_prompt = b'Enter passphrase'
+ second_prompt = b'Enter same passphrase again'
+ prompt = first_prompt
+ start = datetime.now()
+ timeout = 900
while p.poll() is None:
r_list = select.select([master_out_fd, master_err_fd], [], [], 1)[0]
- first_prompt = b'Enter passphrase (empty for no passphrase):'
- second_prompt = b'Enter same passphrase again'
- prompt = first_prompt
+ now = datetime.now()
+ if (now - start).seconds > timeout:
+                return (1, '', f'Timeout after {timeout} seconds while reading passphrase for SSH key')
for fd in r_list:
if fd == master_out_fd:
chunk = os.read(master_out_fd, 10240)
@@ -1335,7 +1341,7 @@ class User(object):
try:
with open(ssh_public_key_file, 'r') as f:
ssh_public_key = f.read().strip()
- except IOError:
+ except OSError:
return None
return ssh_public_key
@@ -1370,16 +1376,24 @@ class User(object):
self.module.exit_json(failed=True, msg="%s" % to_native(e))
# get umask from /etc/login.defs and set correct home mode
if os.path.exists(self.LOGIN_DEFS):
- with open(self.LOGIN_DEFS, 'r') as f:
- for line in f:
- m = re.match(r'^UMASK\s+(\d+)$', line)
- if m:
- umask = int(m.group(1), 8)
+            # fallback if neither HOME_MODE nor UMASK is set;
+            # follows the behaviour of useradd, which initializes UMASK to 022
+ mode = 0o755
+ with open(self.LOGIN_DEFS, 'r') as fh:
+ for line in fh:
+                # HOME_MODE takes precedence over UMASK
+ match = re.match(r'^HOME_MODE\s+(\d+)$', line)
+ if match:
+ mode = int(match.group(1), 8)
+ break # higher precedence
+ match = re.match(r'^UMASK\s+(\d+)$', line)
+ if match:
+ umask = int(match.group(1), 8)
mode = 0o777 & ~umask
- try:
- os.chmod(path, mode)
- except OSError as e:
- self.module.exit_json(failed=True, msg="%s" % to_native(e))
+ try:
+ os.chmod(path, mode)
+ except OSError as e:
+ self.module.exit_json(failed=True, msg=to_native(e))
def chown_homedir(self, uid, gid, path):
try:
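
The parsing above gives HOME_MODE precedence over UMASK and falls back to 0o755 when neither is present, matching useradd's built-in UMASK of 022. The same precedence as a self-contained function:

    import re

    def home_mode(login_defs_lines):
        mode = 0o755  # fallback, equivalent to useradd's default UMASK 022
        for line in login_defs_lines:
            if match := re.match(r'^HOME_MODE\s+(\d+)$', line):
                return int(match.group(1), 8)  # HOME_MODE wins outright
            if match := re.match(r'^UMASK\s+(\d+)$', line):
                mode = 0o777 & ~int(match.group(1), 8)
        return mode

    assert home_mode(["UMASK 077"]) == 0o700
    assert home_mode(["UMASK 077", "HOME_MODE 0750"]) == 0o750
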
@@ -3243,6 +3257,11 @@ class Alpine(BusyBox):
distribution = 'Alpine'
+class Buildroot(BusyBox):
+ platform = 'Linux'
+ distribution = 'Buildroot'
+
+
def main():
ssh_defaults = dict(
bits=0,
diff --git a/lib/ansible/modules/validate_argument_spec.py b/lib/ansible/modules/validate_argument_spec.py
index 8c75e8abb42..aa9fc008021 100644
--- a/lib/ansible/modules/validate_argument_spec.py
+++ b/lib/ansible/modules/validate_argument_spec.py
@@ -16,7 +16,8 @@ version_added: "2.11"
options:
argument_spec:
description:
- - A dictionary like AnsibleModule argument_spec. See R(argument spec definition,argument_spec).
+ - A dictionary like AnsibleModule argument_spec.
+ - See the C(options) parameter for the R(specification format,role_argument_spec).
required: true
provided_arguments:
description:
@@ -46,6 +47,8 @@ attributes:
support: none
platform:
platforms: all
+notes:
+ - It is unnecessary to call this module explicitly if the role contains an R(argument spec,role_argument_spec).
"""
EXAMPLES = r"""
diff --git a/lib/ansible/modules/wait_for.py b/lib/ansible/modules/wait_for.py
index 3b64142379e..a076bb14b65 100644
--- a/lib/ansible/modules/wait_for.py
+++ b/lib/ansible/modules/wait_for.py
@@ -76,6 +76,8 @@ options:
description:
- Can be used to match a string in either a file or a socket connection.
- Defaults to a multiline regex.
+      - When matching a static string against a system log file, remember that Ansible by default logs its own actions there;
+        see the notes and examples for more information.
type: str
version_added: "1.4"
exclude_hosts:
@@ -105,13 +107,13 @@ attributes:
platform:
platforms: posix
notes:
- - The ability to use search_regex with a port connection was added in Ansible 1.7.
- - Prior to Ansible 2.4, testing for the absence of a directory or UNIX socket did not work correctly.
- - Prior to Ansible 2.4, testing for the presence of a file did not work correctly if the remote user did not have read access to that file.
- Under some circumstances when using mandatory access control, a path may always be treated as being absent even if it exists, but
can't be modified or created by the remote user either.
- When waiting for a path, symbolic links will be followed. Many other modules that manipulate files do not follow symbolic links,
so operations on the path using other modules may not work exactly as expected.
+    - When searching for a static string in a system log file, account for potential self-matches against log entries generated by
+      Ansible itself. To prevent this, add a regular expression construct to the search string. For example, to match the literal
+      string 'this thing', use a regular expression like 'this t[h]ing'.
seealso:
- module: ansible.builtin.wait_for_connection
- module: ansible.windows.win_wait_for
@@ -156,6 +158,11 @@ EXAMPLES = r"""
path: /tmp/foo
search_regex: completed
+- name: Wait until the string "tomcat up" is in syslog, use regex character set to avoid self match
+ ansible.builtin.wait_for:
+ path: /var/log/syslog
+ search_regex: 'tomcat [u]p'
+
- name: Wait until regex pattern matches in the file /tmp/foo and print the matched group
ansible.builtin.wait_for:
path: /tmp/foo
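
The character-class trick works because the pattern 'tomcat [u]p' matches the log line 'tomcat up' but not its own literal text as written into the log by Ansible's invocation:

    import re

    pattern = r"tomcat [u]p"
    assert re.search(pattern, "May  1 12:00:00 host tomcat: tomcat up")
    # Ansible's own log entry contains the pattern text, not the matched string:
    assert not re.search(pattern, "ansible: wait_for search_regex='tomcat [u]p'")
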
@@ -187,17 +194,19 @@ EXAMPLES = r"""
host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}'
search_regex: OpenSSH
delay: 10
- connection: local
+ timeout: 300
+ delegate_to: localhost
-# Same as above but you normally have ansible_connection set in inventory, which overrides 'connection'
+# Same as above but using config lookup for the target,
+# most plugins use 'remote_addr', but ssh uses 'host'
- name: Wait 300 seconds for port 22 to become open and contain "OpenSSH"
ansible.builtin.wait_for:
port: 22
- host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}'
+ host: "{{ lookup('config', 'host', plugin_name='ssh', plugin_type='connection') }}"
search_regex: OpenSSH
delay: 10
- vars:
- ansible_connection: local
+ timeout: 300
+ delegate_to: localhost
"""
RETURN = r"""
@@ -224,7 +233,6 @@ match_groupdict:
import binascii
import contextlib
-import datetime
import errno
import math
import mmap
@@ -233,12 +241,12 @@ import re
import select
import socket
import time
-import traceback
+
+from datetime import datetime, timedelta, timezone
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.sys_info import get_platform_subclass
from ansible.module_utils.common.text.converters import to_bytes, to_native
-from ansible.module_utils.compat.datetime import utcnow
HAS_PSUTIL = False
@@ -247,8 +255,8 @@ try:
import psutil
HAS_PSUTIL = True
# just because we can import it on Linux doesn't mean we will use it
-except ImportError:
- PSUTIL_IMP_ERR = traceback.format_exc()
+except ImportError as ex:
+ PSUTIL_IMP_ERR = ex
class TCPConnectionInfo(object):
@@ -379,31 +387,29 @@ class LinuxTCPConnectionInfo(TCPConnectionInfo):
if not os.path.isfile(self.source_file[family]):
continue
try:
- f = open(self.source_file[family])
- for tcp_connection in f.readlines():
- tcp_connection = tcp_connection.strip().split()
- if tcp_connection[self.local_address_field] == 'local_address':
- continue
- if (tcp_connection[self.connection_state_field] not in
- [get_connection_state_id(_connection_state) for _connection_state in self.module.params['active_connection_states']]):
- continue
- (local_ip, local_port) = tcp_connection[self.local_address_field].split(':')
- if self.port != local_port:
- continue
- (remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':')
- if (family, remote_ip) in self.exclude_ips:
- continue
- if any((
- (family, local_ip) in self.ips,
- (family, self.match_all_ips[family]) in self.ips,
- local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and
- (family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,
- )):
- active_connections += 1
- except IOError as e:
+ with open(self.source_file[family]) as f:
+ for tcp_connection in f.readlines():
+ tcp_connection = tcp_connection.strip().split()
+ if tcp_connection[self.local_address_field] == 'local_address':
+ continue
+ if (tcp_connection[self.connection_state_field] not in
+ [get_connection_state_id(_connection_state) for _connection_state in self.module.params['active_connection_states']]):
+ continue
+ (local_ip, local_port) = tcp_connection[self.local_address_field].split(':')
+ if self.port != local_port:
+ continue
+ (remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':')
+ if (family, remote_ip) in self.exclude_ips:
+ continue
+ if any((
+ (family, local_ip) in self.ips,
+ (family, self.match_all_ips[family]) in self.ips,
+ local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and
+ (family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,
+ )):
+ active_connections += 1
+ except OSError:
pass
- finally:
- f.close()
return active_connections
@@ -532,7 +538,7 @@ def main():
except Exception:
module.fail_json(msg="unknown active_connection_state (%s) defined" % _connection_state, elapsed=0)
- start = utcnow()
+ start = datetime.now(timezone.utc)
if delay:
time.sleep(delay)
@@ -541,14 +547,14 @@ def main():
time.sleep(timeout)
elif state in ['absent', 'stopped']:
# first wait for the stop condition
- end = start + datetime.timedelta(seconds=timeout)
+ end = start + timedelta(seconds=timeout)
- while utcnow() < end:
+ while datetime.now(timezone.utc) < end:
if path:
try:
if not os.access(b_path, os.F_OK):
break
- except IOError:
+ except OSError:
break
elif port:
try:
@@ -560,7 +566,7 @@ def main():
# Conditions not yet met, wait and try again
time.sleep(module.params['sleep'])
else:
- elapsed = utcnow() - start
+ elapsed = datetime.now(timezone.utc) - start
if port:
module.fail_json(msg=msg or "Timeout when waiting for %s:%s to stop." % (host, port), elapsed=elapsed.seconds)
elif path:
@@ -568,15 +574,15 @@ def main():
elif state in ['started', 'present']:
# wait for start condition
- end = start + datetime.timedelta(seconds=timeout)
- while utcnow() < end:
+ end = start + timedelta(seconds=timeout)
+ while datetime.now(timezone.utc) < end:
if path:
try:
os.stat(b_path)
except OSError as e:
# If anything except file not present, throw an error
if e.errno != 2:
- elapsed = utcnow() - start
+ elapsed = datetime.now(timezone.utc) - start
module.fail_json(msg=msg or "Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds)
# file doesn't exist yet, so continue
else:
@@ -608,12 +614,14 @@ def main():
break
except Exception as e:
module.warn('wait_for failed on "%s", unexpected exception(%s): %s.).' % (path, to_native(e.__class__), to_native(e)))
- except IOError:
+ except OSError:
pass
elif port:
- alt_connect_timeout = math.ceil(_timedelta_total_seconds(end - utcnow()))
+ alt_connect_timeout = math.ceil(
+ _timedelta_total_seconds(end - datetime.now(timezone.utc)),
+ )
try:
- s = socket.create_connection((host, port), min(connect_timeout, alt_connect_timeout))
+ s = socket.create_connection((host, int(port)), min(connect_timeout, alt_connect_timeout))
except Exception:
# Failed to connect by connect_timeout. wait and try again
pass
@@ -622,8 +630,12 @@ def main():
if b_compiled_search_re:
b_data = b''
matched = False
- while utcnow() < end:
- max_timeout = math.ceil(_timedelta_total_seconds(end - utcnow()))
+ while datetime.now(timezone.utc) < end:
+ max_timeout = math.ceil(
+ _timedelta_total_seconds(
+ end - datetime.now(timezone.utc),
+ ),
+ )
readable = select.select([s], [], [], max_timeout)[0]
if not readable:
# No new data. Probably means our timeout
@@ -641,8 +653,8 @@ def main():
# Shutdown the client socket
try:
s.shutdown(socket.SHUT_RDWR)
- except socket.error as e:
- if e.errno != errno.ENOTCONN:
+ except OSError as ex:
+ if ex.errno != errno.ENOTCONN:
raise
# else, the server broke the connection on its end, assume it's not ready
else:
@@ -654,8 +666,8 @@ def main():
# Connection established, success!
try:
s.shutdown(socket.SHUT_RDWR)
- except socket.error as e:
- if e.errno != errno.ENOTCONN:
+ except OSError as ex:
+ if ex.errno != errno.ENOTCONN:
raise
# else, the server broke the connection on its end, assume it's not ready
else:
@@ -667,7 +679,7 @@ def main():
else: # while-else
# Timeout expired
- elapsed = utcnow() - start
+ elapsed = datetime.now(timezone.utc) - start
if port:
if search_regex:
module.fail_json(msg=msg or "Timeout when waiting for search string %s in %s:%s" % (search_regex, host, port), elapsed=elapsed.seconds)
@@ -681,19 +693,19 @@ def main():
elif state == 'drained':
# wait until all active connections are gone
- end = start + datetime.timedelta(seconds=timeout)
+ end = start + timedelta(seconds=timeout)
tcpconns = TCPConnectionInfo(module)
- while utcnow() < end:
+ while datetime.now(timezone.utc) < end:
if tcpconns.get_active_connections_count() == 0:
break
# Conditions not yet met, wait and try again
time.sleep(module.params['sleep'])
else:
- elapsed = utcnow() - start
+ elapsed = datetime.now(timezone.utc) - start
module.fail_json(msg=msg or "Timeout when waiting for %s:%s to drain" % (host, port), elapsed=elapsed.seconds)
- elapsed = utcnow() - start
+ elapsed = datetime.now(timezone.utc) - start
module.exit_json(state=state, port=port, search_regex=search_regex, match_groups=match_groups, match_groupdict=match_groupdict, path=path,
elapsed=elapsed.seconds)
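
The wait_for changes above replace the removed `utcnow()` compat helper with timezone-aware datetimes throughout the deadline loops. A minimal standalone sketch of that pattern (function and variable names here are illustrative, not part of the module):

```python
import time
from datetime import datetime, timedelta, timezone


def wait_until(predicate, timeout: int = 10, sleep: int = 1) -> timedelta:
    """Poll `predicate` until it returns True or `timeout` seconds elapse."""
    start = datetime.now(timezone.utc)         # aware "now"; datetime.utcnow() is deprecated
    end = start + timedelta(seconds=timeout)
    while datetime.now(timezone.utc) < end:
        if predicate():
            return datetime.now(timezone.utc) - start  # elapsed, as in the module's exit_json
        time.sleep(sleep)
    raise TimeoutError(f"condition not met within {timeout} seconds")
```

Comparing two aware datetimes avoids the naive/aware mix-ups that motivated the deprecation of `datetime.utcnow()`.
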
diff --git a/lib/ansible/modules/yum_repository.py b/lib/ansible/modules/yum_repository.py
index c63932f1e55..013e85d2d8e 100644
--- a/lib/ansible/modules/yum_repository.py
+++ b/lib/ansible/modules/yum_repository.py
@@ -183,14 +183,6 @@ options:
- This parameter is deprecated as it has no effect with dnf as an underlying package manager
and will be removed in ansible-core 2.22.
type: bool
- keepcache:
- description:
- - Either V(1) or V(0). Determines whether or not yum keeps the cache of
- headers and packages after successful installation.
- - This parameter is deprecated as it is only valid in the main configuration
- and will be removed in ansible-core 2.20.
- choices: ['0', '1']
- type: str
metadata_expire:
description:
- Time (in seconds) after which the metadata will expire.
@@ -466,13 +458,7 @@ class YumRepo:
for key, value in sorted(self.params.items()):
if value is None:
continue
- if key == 'keepcache':
- self.module.deprecate(
- "'keepcache' parameter is deprecated as it is only valid in "
- "the main configuration.",
- version='2.20'
- )
- elif key == 'async':
+ if key == 'async':
self.module.deprecate(
"'async' parameter is deprecated as it has been removed on systems supported by ansible-core",
version='2.22',
@@ -502,10 +488,11 @@ class YumRepo:
try:
with open(self.dest, 'w') as fd:
self.repofile.write(fd)
- except IOError as e:
+ except OSError as ex:
self.module.fail_json(
- msg=f"Problems handling file {self.dest}.",
- details=to_native(e),
+ msg=f"Problems handling file {self.dest!r}.",
+ details=str(ex),
+ exception=ex,
)
else:
try:
@@ -556,7 +543,6 @@ def main():
includepkgs=dict(type='list', elements='str'),
ip_resolve=dict(choices=['4', '6', 'IPv4', 'IPv6', 'whatever']),
keepalive=dict(type='bool'),
- keepcache=dict(choices=['0', '1']),
metadata_expire=dict(),
metadata_expire_filter=dict(
choices=[
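
Two mechanical patterns recur in the yum_repository hunks: `except IOError` becomes `except OSError` (the two have been aliases since Python 3.3), and the caught exception is chained rather than stringified into the message. A plain-Python sketch of the new shape, without the AnsibleModule plumbing:

```python
def write_repo_file(dest: str, content: str) -> None:
    try:
        with open(dest, "w") as fd:
            fd.write(content)
    except OSError as ex:  # also catches what used to be spelled IOError
        # chaining preserves the original traceback via __cause__, so the
        # message no longer has to carry all of the context itself
        raise RuntimeError(f"Problems handling file {dest!r}.") from ex
```
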
diff --git a/lib/ansible/parsing/ajson.py b/lib/ansible/parsing/ajson.py
index ff29240afc1..b8440b26c7f 100644
--- a/lib/ansible/parsing/ajson.py
+++ b/lib/ansible/parsing/ajson.py
@@ -1,40 +1,20 @@
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import annotations
-
-import json
-
-# Imported for backwards compat
-from ansible.module_utils.common.json import AnsibleJSONEncoder # pylint: disable=unused-import
-
-from ansible.parsing.vault import VaultLib
-from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
-from ansible.utils.unsafe_proxy import wrap_var
-
-
-class AnsibleJSONDecoder(json.JSONDecoder):
-
- _vaults = {} # type: dict[str, VaultLib]
-
- def __init__(self, *args, **kwargs):
- kwargs['object_hook'] = self.object_hook
- super(AnsibleJSONDecoder, self).__init__(*args, **kwargs)
-
- @classmethod
- def set_secrets(cls, secrets):
- cls._vaults['default'] = VaultLib(secrets=secrets)
-
- def object_hook(self, pairs):
- for key in pairs:
- value = pairs[key]
-
- if key == '__ansible_vault':
- value = AnsibleVaultEncryptedUnicode(value)
- if self._vaults:
- value.vault = self._vaults['default']
- return value
- elif key == '__ansible_unsafe':
- return wrap_var(value)
-
- return pairs
+from __future__ import annotations as _annotations
+
+# from ansible.utils.display import Display as _Display
+#
+#
+# deprecated: description='deprecate ajson' core_version='2.23'
+# _Display().deprecated(
+# msg='The `ansible.parsing.ajson` module is deprecated.',
+# version='2.27',
+# help_text="", # DTFIX-FUTURE: complete this help text
+# )
+
+# Imported for backward compat
+from ansible.module_utils.common.json import ( # pylint: disable=unused-import
+ _AnsibleJSONEncoder as AnsibleJSONEncoder,
+ _AnsibleJSONDecoder as AnsibleJSONDecoder,
+)
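
Since `ansible.parsing.ajson` now only re-exports the internal encoder/decoder, existing imports keep working. Assuming the aliased classes remain drop-in `json.JSONEncoder`/`json.JSONDecoder` subclasses (which the backward-compat intent implies), usage is unchanged:

```python
import json

from ansible.parsing.ajson import AnsibleJSONDecoder, AnsibleJSONEncoder

payload = json.dumps({"answer": 42}, cls=AnsibleJSONEncoder)
assert json.loads(payload, cls=AnsibleJSONDecoder) == {"answer": 42}
```
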
diff --git a/lib/ansible/parsing/dataloader.py b/lib/ansible/parsing/dataloader.py
index 47b6cfb12ca..58dd2c7e414 100644
--- a/lib/ansible/parsing/dataloader.py
+++ b/lib/ansible/parsing/dataloader.py
@@ -2,23 +2,28 @@
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import copy
import os
import os.path
+import pathlib
import re
import tempfile
import typing as t
from ansible import constants as C
from ansible.errors import AnsibleFileNotFound, AnsibleParserError
+from ansible._internal._errors import _error_utils
from ansible.module_utils.basic import is_executable
+from ansible._internal._datatag._tags import Origin, TrustedAsTemplate, SourceWasEncrypted
+from ansible.module_utils._internal._datatag import AnsibleTagHelper
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.parsing.quoting import unquote
from ansible.parsing.utils.yaml import from_yaml
-from ansible.parsing.vault import VaultLib, is_encrypted, is_encrypted_file, parse_vaulttext_envelope, PromptVaultSecret
+from ansible.parsing.vault import VaultLib, is_encrypted, is_encrypted_file, PromptVaultSecret
from ansible.utils.path import unfrackpath
from ansible.utils.display import Display
@@ -73,11 +78,18 @@ class DataLoader:
def set_vault_secrets(self, vault_secrets: list[tuple[str, PromptVaultSecret]] | None) -> None:
self._vault.secrets = vault_secrets
- def load(self, data: str, file_name: str = '', show_content: bool = True, json_only: bool = False) -> t.Any:
+ def load(
+ self,
+ data: str,
+ file_name: str | None = None, # DTFIX-FUTURE: consider deprecating this in favor of tagging Origin on data
+ show_content: bool = True, # DTFIX-FUTURE: consider future deprecation, but would need RedactAnnotatedSourceContext public
+ json_only: bool = False,
+ ) -> t.Any:
"""Backwards compat for now"""
- return from_yaml(data, file_name, show_content, self._vault.secrets, json_only=json_only)
+ with _error_utils.RedactAnnotatedSourceContext.when(not show_content):
+ return from_yaml(data=data, file_name=file_name, json_only=json_only)
- def load_from_file(self, file_name: str, cache: str = 'all', unsafe: bool = False, json_only: bool = False) -> t.Any:
+ def load_from_file(self, file_name: str, cache: str = 'all', unsafe: bool = False, json_only: bool = False, trusted_as_template: bool = False) -> t.Any:
"""
Loads data from a file, which can contain either JSON or YAML.
@@ -98,16 +110,22 @@ class DataLoader:
if cache != 'none' and file_name in self._FILE_CACHE:
parsed_data = self._FILE_CACHE[file_name]
else:
- # Read the file contents and load the data structure from them
- (b_file_data, show_content) = self._get_file_contents(file_name)
+ file_data = self.get_text_file_contents(file_name)
+
+ if trusted_as_template:
+ file_data = TrustedAsTemplate().tag(file_data)
+
+ parsed_data = self.load(data=file_data, file_name=file_name, json_only=json_only)
- file_data = to_text(b_file_data, errors='surrogate_or_strict')
- parsed_data = self.load(data=file_data, file_name=file_name, show_content=show_content, json_only=json_only)
+ # only tagging the container, used by include_vars to determine if vars should be shown or not
+ # this is a temporary measure until a proper data sensitivity system is in place
+ if SourceWasEncrypted.is_tagged_on(file_data):
+ parsed_data = SourceWasEncrypted().tag(parsed_data)
# Cache the file contents for next time based on the cache option
if cache == 'all':
self._FILE_CACHE[file_name] = parsed_data
- elif cache == 'vaulted' and not show_content:
+ elif cache == 'vaulted' and SourceWasEncrypted.is_tagged_on(file_data):
self._FILE_CACHE[file_name] = parsed_data
# Return the parsed data, optionally deep-copied for safety
@@ -137,18 +155,44 @@ class DataLoader:
path = self.path_dwim(path)
return is_executable(path)
- def _decrypt_if_vault_data(self, b_vault_data: bytes, b_file_name: bytes | None = None) -> tuple[bytes, bool]:
+ def _decrypt_if_vault_data(self, b_data: bytes) -> tuple[bytes, bool]:
"""Decrypt b_vault_data if encrypted and return b_data and the show_content flag"""
- if not is_encrypted(b_vault_data):
- show_content = True
- return b_vault_data, show_content
+ if encrypted_source := is_encrypted(b_data):
+ b_data = self._vault.decrypt(b_data)
- b_ciphertext, b_version, cipher_name, vault_id = parse_vaulttext_envelope(b_vault_data)
- b_data = self._vault.decrypt(b_vault_data, filename=b_file_name)
+ return b_data, not encrypted_source
- show_content = False
- return b_data, show_content
+ def get_text_file_contents(self, file_name: str, encoding: str | None = None) -> str:
+ """
+ Returns an `Origin` tagged string with the content of the specified (DWIM-expanded for relative) file path, decrypting if necessary.
+ Callers must only specify `encoding` when the user can configure it, as error messages in that case will imply configurability.
+ If `encoding` is not specified, UTF-8 will be used.
+ """
+ bytes_content, source_was_plaintext = self._get_file_contents(file_name)
+
+ if encoding is None:
+ encoding = 'utf-8'
+ help_text = 'This file must be UTF-8 encoded.'
+ else:
+ help_text = 'Ensure the correct encoding was specified.'
+
+ try:
+ str_content = bytes_content.decode(encoding=encoding, errors='strict')
+ except UnicodeDecodeError:
+ str_content = bytes_content.decode(encoding=encoding, errors='surrogateescape')
+
+ display.deprecated(
+ msg=f"File {file_name!r} could not be decoded as {encoding!r}. Invalid content has been escaped.",
+ version="2.23",
+ # obj intentionally omitted since there's no value in showing its contents
+ help_text=help_text,
+ )
+
+ if not source_was_plaintext:
+ str_content = SourceWasEncrypted().tag(str_content)
+
+ return AnsibleTagHelper.tag_copy(bytes_content, str_content)
def _get_file_contents(self, file_name: str) -> tuple[bytes, bool]:
"""
@@ -163,21 +207,22 @@ class DataLoader:
:raises AnsibleParserError: if we were unable to read the file
:return: Returns a byte string of the file contents
"""
- if not file_name or not isinstance(file_name, (binary_type, text_type)):
- raise AnsibleParserError("Invalid filename: '%s'" % to_native(file_name))
+ if not file_name or not isinstance(file_name, str):
+ raise TypeError(f"Invalid filename {file_name!r}.")
- b_file_name = to_bytes(self.path_dwim(file_name))
- # This is what we really want but have to fix unittests to make it pass
- # if not os.path.exists(b_file_name) or not os.path.isfile(b_file_name):
- if not self.path_exists(b_file_name):
- raise AnsibleFileNotFound("Unable to retrieve file contents", file_name=file_name)
+ file_name = self.path_dwim(file_name)
try:
- with open(b_file_name, 'rb') as f:
- data = f.read()
- return self._decrypt_if_vault_data(data, b_file_name)
- except (IOError, OSError) as e:
- raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (file_name, to_native(e)), orig_exc=e)
+ data = pathlib.Path(file_name).read_bytes()
+ except FileNotFoundError as ex:
+ # DTFIX-FUTURE: why not just let the builtin one fly?
+ raise AnsibleFileNotFound("Unable to retrieve file contents.", file_name=file_name) from ex
+ except OSError as ex:
+ raise AnsibleParserError(f"An error occurred while trying to read the file {file_name!r}.") from ex
+
+ data = Origin(path=file_name).tag(data)
+
+ return self._decrypt_if_vault_data(data)
def get_basedir(self) -> str:
""" returns the current basedir """
@@ -194,8 +239,8 @@ class DataLoader:
make relative paths work like folks expect.
"""
- given = unquote(given)
given = to_text(given, errors='surrogate_or_strict')
+ given = unquote(given)
if given.startswith(to_text(os.path.sep)) or given.startswith(u'~'):
path = given
@@ -392,19 +437,19 @@ class DataLoader:
# if the file is encrypted and no password was specified,
# the decrypt call would throw an error, but we check first
# since the decrypt function doesn't know the file name
- data = f.read()
+ data = Origin(path=real_path).tag(f.read())
if not self._vault.secrets:
raise AnsibleParserError("A vault password or secret must be specified to decrypt %s" % to_native(file_path))
- data = self._vault.decrypt(data, filename=real_path)
+ data = self._vault.decrypt(data)
# Make a temp file
real_path = self._create_content_tempfile(data)
self._tempfiles.add(real_path)
return real_path
- except (IOError, OSError) as e:
- raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (to_native(real_path), to_native(e)), orig_exc=e)
+ except OSError as ex:
+ raise AnsibleParserError(f"an error occurred while trying to read the file {to_text(real_path)!r}.") from ex
def cleanup_tmp_file(self, file_path: str) -> None:
"""
diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py
index aed543d0953..09823d59dd5 100644
--- a/lib/ansible/parsing/mod_args.py
+++ b/lib/ansible/parsing/mod_args.py
@@ -19,16 +19,18 @@ from __future__ import annotations
import ansible.constants as C
from ansible.errors import AnsibleParserError, AnsibleError, AnsibleAssertionError
-from ansible.module_utils.six import string_types
+from ansible.module_utils._internal._datatag import AnsibleTagHelper
from ansible.module_utils.common.sentinel import Sentinel
from ansible.module_utils.common.text.converters import to_text
from ansible.parsing.splitter import parse_kv, split_args
+from ansible.parsing.vault import EncryptedString
from ansible.plugins.loader import module_loader, action_loader
-from ansible.template import Templar
+from ansible._internal._templating import _jinja_bits
+from ansible.utils.display import Display
from ansible.utils.fqcn import add_internal_fqcns
-# modules formated for user msg
+# modules formatted for user msg
_BUILTIN_RAW_PARAM_MODULES_SIMPLE = set([
'include_vars',
'include_tasks',
@@ -129,9 +131,7 @@ class ModuleArgsParser:
self._task_attrs.update(['local_action', 'static'])
self._task_attrs = frozenset(self._task_attrs)
- self.resolved_action = None
-
- def _split_module_string(self, module_string):
+ def _split_module_string(self, module_string: str) -> tuple[str, str]:
"""
when module names are expressed like:
action: copy src=a dest=b
@@ -141,47 +141,54 @@ class ModuleArgsParser:
tokens = split_args(module_string)
if len(tokens) > 1:
- return (tokens[0].strip(), " ".join(tokens[1:]))
+ result = (tokens[0].strip(), " ".join(tokens[1:]))
else:
- return (tokens[0].strip(), "")
+ result = (tokens[0].strip(), "")
+
+ return AnsibleTagHelper.tag_copy(module_string, result[0]), AnsibleTagHelper.tag_copy(module_string, result[1])
def _normalize_parameters(self, thing, action=None, additional_args=None):
"""
arguments can be fuzzy. Deal with all the forms.
"""
- additional_args = {} if additional_args is None else additional_args
-
# final args are the ones we'll eventually return, so first update
# them with any additional args specified, which have lower priority
# than those which may be parsed/normalized next
final_args = dict()
- if additional_args:
- if isinstance(additional_args, string_types):
- templar = Templar(loader=None)
- if templar.is_template(additional_args):
- final_args['_variable_params'] = additional_args
- else:
- raise AnsibleParserError("Complex args containing variables cannot use bare variables (without Jinja2 delimiters), "
- "and must use the full variable style ('{{var_name}}')")
+
+ if additional_args is not Sentinel:
+ if isinstance(additional_args, str) and _jinja_bits.is_possibly_all_template(additional_args):
+ final_args['_variable_params'] = additional_args
elif isinstance(additional_args, dict):
final_args.update(additional_args)
+ elif additional_args is None:
+ Display().deprecated(
+ msg="Ignoring empty task `args` keyword.",
+ version="2.23",
+ help_text='A mapping or template which resolves to a mapping is required.',
+ obj=self._task_ds,
+ )
else:
- raise AnsibleParserError('Complex args must be a dictionary or variable string ("{{var}}").')
+ raise AnsibleParserError(
+ message='The value of the task `args` keyword is invalid.',
+ help_text='A mapping or template which resolves to a mapping is required.',
+ obj=additional_args,
+ )
# how we normalize depends if we figured out what the module name is
# yet. If we have already figured it out, it's a 'new style' invocation.
# otherwise, it's not
if action is not None:
- args = self._normalize_new_style_args(thing, action)
+ args = self._normalize_new_style_args(thing, action, additional_args)
else:
(action, args) = self._normalize_old_style_args(thing)
# this can occasionally happen, simplify
if args and 'args' in args:
tmp_args = args.pop('args')
- if isinstance(tmp_args, string_types):
+ if isinstance(tmp_args, str):
tmp_args = parse_kv(tmp_args)
args.update(tmp_args)
@@ -204,7 +211,7 @@ class ModuleArgsParser:
return (action, final_args)
- def _normalize_new_style_args(self, thing, action):
+ def _normalize_new_style_args(self, thing, action, additional_args):
"""
deals with fuzziness in new style module invocations
accepting key=value pairs and dictionaries, and returns
@@ -220,10 +227,24 @@ class ModuleArgsParser:
if isinstance(thing, dict):
# form is like: { xyz: { x: 2, y: 3 } }
args = thing
- elif isinstance(thing, string_types):
+ elif isinstance(thing, str):
# form is like: copy: src=a dest=b
check_raw = action in FREEFORM_ACTIONS
args = parse_kv(thing, check_raw=check_raw)
+ args_keys = set(args) - {'_raw_params'}
+
+ if args_keys and additional_args is not Sentinel:
+ kv_args = ', '.join(repr(arg) for arg in sorted(args_keys))
+
+ Display().deprecated(
+ msg=f"Merging legacy k=v args ({kv_args}) into task args.",
+ help_text="Include all task args in the task `args` mapping.",
+ version="2.23",
+ obj=thing,
+ )
+ elif isinstance(thing, EncryptedString):
+ # k=v parsing intentionally omitted
+ args = dict(_raw_params=thing)
elif thing is None:
# this can happen with modules which take no params, like ping:
args = None
@@ -249,6 +270,7 @@ class ModuleArgsParser:
if isinstance(thing, dict):
# form is like: action: { module: 'copy', src: 'a', dest: 'b' }
+ Display().deprecated("Using a mapping for `action` is deprecated.", version='2.23', help_text='Use a string value for `action`.', obj=thing)
thing = thing.copy()
if 'module' in thing:
action, module_args = self._split_module_string(thing['module'])
@@ -257,7 +279,7 @@ class ModuleArgsParser:
args.update(parse_kv(module_args, check_raw=check_raw))
del args['module']
- elif isinstance(thing, string_types):
+ elif isinstance(thing, str):
# form is like: action: copy src=a dest=b
(action, args) = self._split_module_string(thing)
check_raw = action in FREEFORM_ACTIONS
@@ -276,8 +298,6 @@ class ModuleArgsParser:
task, dealing with all sorts of levels of fuzziness.
"""
- thing = None
-
action = None
delegate_to = self._task_ds.get('delegate_to', Sentinel)
args = dict()
@@ -285,14 +305,14 @@ class ModuleArgsParser:
# This is the standard YAML form for command-type modules. We grab
# the args and pass them in as additional arguments, which can/will
# be overwritten via dict updates from the other arg sources below
- additional_args = self._task_ds.get('args', dict())
+ additional_args = self._task_ds.get('args', Sentinel)
# We can have one of action, local_action, or module specified
# action
if 'action' in self._task_ds:
# an old school 'action' statement
thing = self._task_ds['action']
- action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)
+ action, args = self._normalize_parameters(thing, additional_args=additional_args)
# local_action
if 'local_action' in self._task_ds:
@@ -301,12 +321,7 @@ class ModuleArgsParser:
raise AnsibleParserError("action and local_action are mutually exclusive", obj=self._task_ds)
thing = self._task_ds.get('local_action', '')
delegate_to = 'localhost'
- action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)
-
- if action is not None and not skip_action_validation:
- context = _get_action_context(action, self._collection_list)
- if context is not None and context.resolved:
- self.resolved_action = context.resolved_fqcn
+ action, args = self._normalize_parameters(thing, additional_args=additional_args)
# module: is the more new-style invocation
@@ -315,14 +330,13 @@ class ModuleArgsParser:
# walk the filtered input dictionary to see if we recognize a module name
for item, value in non_task_ds.items():
- context = None
- is_action_candidate = False
if item in BUILTIN_TASKS:
is_action_candidate = True
elif skip_action_validation:
is_action_candidate = True
else:
try:
+ # DTFIX-FUTURE: extract to a helper method, shared with Task.post_validate_args
context = _get_action_context(item, self._collection_list)
except AnsibleError as e:
if e.obj is None:
@@ -336,9 +350,6 @@ class ModuleArgsParser:
if action is not None:
raise AnsibleParserError("conflicting action statements: %s, %s" % (action, item), obj=self._task_ds)
- if context is not None and context.resolved:
- self.resolved_action = context.resolved_fqcn
-
action = item
thing = value
action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)
@@ -353,14 +364,5 @@ class ModuleArgsParser:
else:
raise AnsibleParserError("no module/action detected in task.",
obj=self._task_ds)
- elif args.get('_raw_params', '') != '' and action not in RAW_PARAM_MODULES:
- templar = Templar(loader=None)
- raw_params = args.pop('_raw_params')
- if templar.is_template(raw_params):
- args['_variable_params'] = raw_params
- else:
- raise AnsibleParserError(
- "this task '%s' has extra params, which is only allowed in the following modules: %s" % (action, ", ".join(RAW_PARAM_MODULES_SIMPLE)),
- obj=self._task_ds)
- return (action, args, delegate_to)
+ return action, args, delegate_to
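
The switch from `self._task_ds.get('args', dict())` to `self._task_ds.get('args', Sentinel)` is what makes the new deprecation warning possible: a sentinel default distinguishes a task with no `args` keyword from one whose explicit `args:` resolved to None. A toy illustration of the pattern (the `_SENTINEL` stand-in is illustrative; ansible has its own singleton in `module_utils.common.sentinel`):

```python
_SENTINEL = object()  # stand-in for ansible's Sentinel singleton


def normalize(additional_args=_SENTINEL) -> dict:
    if additional_args is _SENTINEL:
        return {}                    # `args` keyword absent: nothing to merge
    if additional_args is None:
        print("deprecation: ignoring empty task `args` keyword")  # args: (null)
        return {}
    if isinstance(additional_args, dict):
        return dict(additional_args)
    raise ValueError("The value of the task `args` keyword is invalid.")
```
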
diff --git a/lib/ansible/parsing/plugin_docs.py b/lib/ansible/parsing/plugin_docs.py
index c18230806b7..e6a44aef90f 100644
--- a/lib/ansible/parsing/plugin_docs.py
+++ b/lib/ansible/parsing/plugin_docs.py
@@ -4,13 +4,15 @@
from __future__ import annotations
import ast
-import tokenize
+
+import yaml
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.common.text.converters import to_text, to_native
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.utils.display import Display
+from ansible._internal._datatag import _tags
display = Display()
@@ -23,13 +25,6 @@ string_to_vars = {
}
-def _var2string(value):
- """ reverse lookup of the dict above """
- for k, v in string_to_vars.items():
- if v == value:
- return k
-
-
def _init_doc_dict():
""" initialize a return dict for docs with the expected structure """
return {k: None for k in string_to_vars.values()}
@@ -43,13 +38,14 @@ def read_docstring_from_yaml_file(filename, verbose=True, ignore_errors=True):
try:
with open(filename, 'rb') as yamlfile:
- file_data = AnsibleLoader(yamlfile.read(), file_name=filename).get_single_data()
- except Exception as e:
- msg = "Unable to parse yaml file '%s': %s" % (filename, to_native(e))
+ file_data = yaml.load(yamlfile, Loader=AnsibleLoader)
+ except Exception as ex:
+ msg = f"Unable to parse yaml file {filename}"
+ # DTFIX-FUTURE: find a better pattern for this (can we use the new optional error behavior?)
if not ignore_errors:
- raise AnsibleParserError(msg, orig_exc=e)
+ raise AnsibleParserError(f'{msg}.') from ex
elif verbose:
- display.error(msg)
+ display.error(f'{msg}: {ex}')
if file_data:
for key in string_to_vars:
@@ -58,74 +54,11 @@ def read_docstring_from_yaml_file(filename, verbose=True, ignore_errors=True):
return data
-def read_docstring_from_python_module(filename, verbose=True, ignore_errors=True):
- """
- Use tokenization to search for assignment of the documentation variables in the given file.
- Parse from YAML and return the resulting python structure or None together with examples as plain text.
- """
-
- seen = set()
- data = _init_doc_dict()
-
- next_string = None
- with tokenize.open(filename) as f:
- tokens = tokenize.generate_tokens(f.readline)
- for token in tokens:
-
- # found label that looks like variable
- if token.type == tokenize.NAME:
-
- # label is expected value, in correct place and has not been seen before
- if token.start == 1 and token.string in string_to_vars and token.string not in seen:
- # next token that is string has the docs
- next_string = string_to_vars[token.string]
- continue
-
- # previous token indicated this string is a doc string
- if next_string is not None and token.type == tokenize.STRING:
-
- # ensure we only process one case of it
- seen.add(token.string)
-
- value = token.string
-
- # strip string modifiers/delimiters
- if value.startswith(('r', 'b')):
- value = value.lstrip('rb')
-
- if value.startswith(("'", '"')):
- value = value.strip("'\"")
-
- # actually use the data
- if next_string == 'plainexamples':
- # keep as string, can be yaml, but we let caller deal with it
- data[next_string] = to_text(value)
- else:
- # yaml load the data
- try:
- data[next_string] = AnsibleLoader(value, file_name=filename).get_single_data()
- except Exception as e:
- msg = "Unable to parse docs '%s' in python file '%s': %s" % (_var2string(next_string), filename, to_native(e))
- if not ignore_errors:
- raise AnsibleParserError(msg, orig_exc=e)
- elif verbose:
- display.error(msg)
-
- next_string = None
-
- # if nothing else worked, fall back to old method
- if not seen:
- data = read_docstring_from_python_file(filename, verbose, ignore_errors)
-
- return data
-
-
def read_docstring_from_python_file(filename, verbose=True, ignore_errors=True):
"""
Use ast to search for assignment of the DOCUMENTATION and EXAMPLES variables in the given file.
Parse DOCUMENTATION from YAML and return the YAML doc or None together with EXAMPLES, as plain text.
"""
-
data = _init_doc_dict()
try:
@@ -153,16 +86,18 @@ def read_docstring_from_python_file(filename, verbose=True, ignore_errors=True):
data[varkey] = to_text(child.value.value)
else:
# string should be yaml if already not a dict
- data[varkey] = AnsibleLoader(child.value.value, file_name=filename).get_single_data()
+ child_value = _tags.Origin(path=filename, line_num=child.value.lineno).tag(child.value.value)
+ data[varkey] = yaml.load(child_value, Loader=AnsibleLoader)
display.debug('Documentation assigned: %s' % varkey)
- except Exception as e:
- msg = "Unable to parse documentation in python file '%s': %s" % (filename, to_native(e))
+ except Exception as ex:
+ msg = f"Unable to parse documentation in python file {filename!r}"
+ # DTFIX-FUTURE: better pattern to conditionally raise/display
if not ignore_errors:
- raise AnsibleParserError(msg, orig_exc=e)
+ raise AnsibleParserError(f'{msg}.') from ex
elif verbose:
- display.error(msg)
+ display.error(f'{msg}: {ex}.')
return data
@@ -174,7 +109,7 @@ def read_docstring(filename, verbose=True, ignore_errors=True):
if filename.endswith(C.YAML_DOC_EXTENSIONS):
docstring = read_docstring_from_yaml_file(filename, verbose=verbose, ignore_errors=ignore_errors)
elif filename.endswith(C.PYTHON_DOC_EXTENSIONS):
- docstring = read_docstring_from_python_module(filename, verbose=verbose, ignore_errors=ignore_errors)
+ docstring = read_docstring_from_python_file(filename, verbose=verbose, ignore_errors=ignore_errors)
elif not ignore_errors:
raise AnsibleError("Unknown documentation format: %s" % to_native(filename))
@@ -221,6 +156,6 @@ def read_docstub(filename):
in_documentation = True
short_description = r''.join(doc_stub).strip().rstrip('.')
- data = AnsibleLoader(short_description, file_name=filename).get_single_data()
+ data = yaml.load(_tags.Origin(path=str(filename)).tag(short_description), Loader=AnsibleLoader)
return data
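
With the tokenize-based reader gone, `read_docstring_from_python_file` is the single path for Python plugins: an `ast` walk finds the documentation assignments, and the new `Origin` tag carries the file path and line number into YAML error messages. A minimal sketch of the `ast` scan it performs (simplified; no tag handling):

```python
import ast

source = 'DOCUMENTATION = "short_description: demo"\nEXAMPLES = "- demo: {}"\n'
wanted = {"DOCUMENTATION", "EXAMPLES", "RETURN", "METADATA"}
found = {}

for node in ast.parse(source).body:
    if isinstance(node, ast.Assign):
        for target in node.targets:
            if isinstance(target, ast.Name) and target.id in wanted:
                # node.value.lineno is what feeds Origin(path=..., line_num=...)
                found[target.id] = node.value.value

assert found["DOCUMENTATION"] == "short_description: demo"
```
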
diff --git a/lib/ansible/parsing/splitter.py b/lib/ansible/parsing/splitter.py
index 3f61347a4ac..18ef976496e 100644
--- a/lib/ansible/parsing/splitter.py
+++ b/lib/ansible/parsing/splitter.py
@@ -22,6 +22,8 @@ import re
from ansible.errors import AnsibleParserError
from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils._internal._datatag import AnsibleTagHelper
+from ansible._internal._datatag._tags import Origin, TrustedAsTemplate
from ansible.parsing.quoting import unquote
@@ -52,6 +54,13 @@ def parse_kv(args, check_raw=False):
they will simply be ignored.
"""
+ tags = []
+ if origin_tag := Origin.get_tag(args):
+ # NB: adjusting the column number is left as an exercise for the reader
+ tags.append(origin_tag)
+ if trusted_tag := TrustedAsTemplate.get_tag(args):
+ tags.append(trusted_tag)
+
args = to_text(args, nonstring='passthru')
options = {}
@@ -90,6 +99,12 @@ def parse_kv(args, check_raw=False):
if len(raw_params) > 0:
options[u'_raw_params'] = join_args(raw_params)
+ if tags:
+ options = {AnsibleTagHelper.tag(k, tags): AnsibleTagHelper.tag(v, tags) for k, v in options.items()}
+
+ if origin_tag:
+ options = origin_tag.tag(options)
+
return options
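
The `parse_kv` change captures `Origin` and `TrustedAsTemplate` tags from the incoming string up front, then re-applies them to every key and value in the result. For readers unfamiliar with what `parse_kv` produces, a rough untagged approximation (the real function also handles quoting and `check_raw`):

```python
import shlex


def tiny_parse_kv(args: str) -> dict:
    options, raw = {}, []
    for token in shlex.split(args):
        if "=" in token:
            key, _, value = token.partition("=")
            options[key] = value
        else:
            raw.append(token)
    if raw:
        options["_raw_params"] = " ".join(raw)
    return options


assert tiny_parse_kv("src=a dest=b") == {"src": "a", "dest": "b"}
```
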
diff --git a/lib/ansible/parsing/utils/addresses.py b/lib/ansible/parsing/utils/addresses.py
index 33982d04852..5485c5f668d 100644
--- a/lib/ansible/parsing/utils/addresses.py
+++ b/lib/ansible/parsing/utils/addresses.py
@@ -126,7 +126,7 @@ patterns = {
'ipv6': re.compile(
r"""^
- (?:{0}:){{7}}{0}| # uncompressed: 1:2:3:4:5:6:7:8
+ ((?:{0}:){{7}}{0}| # uncompressed: 1:2:3:4:5:6:7:8
(?:{0}:){{1,6}}:| # compressed variants, which are all
(?:{0}:)(?::{0}){{1,6}}| # a::b for various lengths of a,b
(?:{0}:){{2}}(?::{0}){{1,5}}|
@@ -139,7 +139,7 @@ patterns = {
# ipv4-in-ipv6 variants
(?:0:){{6}}(?:{0}\.){{3}}{0}|
::(?:ffff:)?(?:{0}\.){{3}}{0}|
- (?:0:){{5}}ffff:(?:{0}\.){{3}}{0}
+ (?:0:){{5}}ffff:(?:{0}\.){{3}}{0})
$
""".format(ipv6_component), re.X | re.I
),
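
The added parentheses in the ipv6 pattern are a correctness fix, not cosmetics: in `^A|B|...|Z$`, the `^` binds only to the first alternative and the `$` only to the last, so most branches matched unanchored substrings. Grouping the alternation makes both anchors apply to every branch. A minimal demonstration of the difference:

```python
import re

loose = re.compile(r"^a|b$")     # anchors bind to single alternatives
tight = re.compile(r"^(a|b)$")   # anchors apply to the whole alternation

assert loose.search("a-with-trailing-junk")        # slips through
assert not tight.search("a-with-trailing-junk")    # correctly rejected
```
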
diff --git a/lib/ansible/parsing/utils/jsonify.py b/lib/ansible/parsing/utils/jsonify.py
index 0ebd7564094..1698b0306d0 100644
--- a/lib/ansible/parsing/utils/jsonify.py
+++ b/lib/ansible/parsing/utils/jsonify.py
@@ -19,9 +19,13 @@ from __future__ import annotations
import json
+from ansible.utils.display import Display
+
+Display().deprecated(f'{__name__!r} is deprecated.', version='2.23', help_text='Call `json.dumps` directly instead.')
+
def jsonify(result, format=False):
- """ format JSON output (uncompressed or uncompressed) """
+ """Format JSON output."""
if result is None:
return "{}"
diff --git a/lib/ansible/parsing/utils/yaml.py b/lib/ansible/parsing/utils/yaml.py
index 9462eba8aa9..eca86412ed9 100644
--- a/lib/ansible/parsing/utils/yaml.py
+++ b/lib/ansible/parsing/utils/yaml.py
@@ -6,77 +6,48 @@
from __future__ import annotations
import json
+import typing as t
-from yaml import YAMLError
+import yaml
-from ansible.errors import AnsibleParserError
-from ansible.errors.yaml_strings import YAML_SYNTAX_ERROR
-from ansible.module_utils.common.text.converters import to_native
+from ansible.errors import AnsibleJSONParserError
+from ansible._internal._errors import _error_utils
+from ansible.parsing.vault import VaultSecret
from ansible.parsing.yaml.loader import AnsibleLoader
-from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
-from ansible.parsing.ajson import AnsibleJSONDecoder
+from ansible._internal._yaml._errors import AnsibleYAMLParserError
+from ansible._internal._datatag._tags import Origin
+from ansible._internal._json._profiles import _legacy
-__all__ = ('from_yaml',)
+def from_yaml(
+ data: str,
+ file_name: str | None = None,
+ show_content: bool = True,
+ vault_secrets: list[tuple[str, VaultSecret]] | None = None, # deprecated: description='Deprecate vault_secrets, it has no effect.' core_version='2.23'
+ json_only: bool = False,
+) -> t.Any:
+ """Creates a Python data structure from the given data, which can be either a JSON or YAML string."""
+ # FUTURE: provide Ansible-specific top-level APIs to expose JSON and YAML serialization/deserialization to hide the error handling logic
+ # once those are in place, defer deprecate this entire function
+ origin = Origin.get_or_create_tag(data, file_name)
-def _handle_error(json_exc, yaml_exc, file_name, show_content):
- """
- Optionally constructs an object (AnsibleBaseYAMLObject) to encapsulate the
- file name/position where a YAML exception occurred, and raises an AnsibleParserError
- to display the syntax exception information.
- """
+ data = origin.tag(data)
- # if the YAML exception contains a problem mark, use it to construct
- # an object the error class can use to display the faulty line
- err_obj = None
- if hasattr(yaml_exc, 'problem_mark'):
- err_obj = AnsibleBaseYAMLObject()
- err_obj.ansible_pos = (file_name, yaml_exc.problem_mark.line + 1, yaml_exc.problem_mark.column + 1)
-
- n_yaml_syntax_error = YAML_SYNTAX_ERROR % to_native(getattr(yaml_exc, 'problem', u''))
- n_err_msg = 'We were unable to read either as JSON nor YAML, these are the errors we got from each:\n' \
- 'JSON: %s\n\n%s' % (to_native(json_exc), n_yaml_syntax_error)
-
- raise AnsibleParserError(n_err_msg, obj=err_obj, show_content=show_content, orig_exc=yaml_exc)
-
-
-def _safe_load(stream, file_name=None, vault_secrets=None):
- """ Implements yaml.safe_load(), except using our custom loader class. """
-
- loader = AnsibleLoader(stream, file_name, vault_secrets)
- try:
- return loader.get_single_data()
- finally:
+ with _error_utils.RedactAnnotatedSourceContext.when(not show_content):
try:
- loader.dispose()
- except AttributeError:
- pass # older versions of yaml don't have dispose function, ignore
-
-
-def from_yaml(data, file_name='', show_content=True, vault_secrets=None, json_only=False):
- """
- Creates a python datastructure from the given data, which can be either
- a JSON or YAML string.
- """
- new_data = None
-
- try:
- # in case we have to deal with vaults
- AnsibleJSONDecoder.set_secrets(vault_secrets)
-
- # we first try to load this data as JSON.
- # Fixes issues with extra vars json strings not being parsed correctly by the yaml parser
- new_data = json.loads(data, cls=AnsibleJSONDecoder)
- except Exception as json_exc:
+ # we first try to load this data as JSON.
+ # Fixes issues with extra vars json strings not being parsed correctly by the yaml parser
+ return json.loads(data, cls=_legacy.Decoder)
+ except Exception as ex:
+ json_ex = ex
if json_only:
- raise AnsibleParserError(to_native(json_exc), orig_exc=json_exc)
+ AnsibleJSONParserError.handle_exception(json_ex, origin=origin)
- # must not be JSON, let the rest try
try:
- new_data = _safe_load(data, file_name=file_name, vault_secrets=vault_secrets)
- except YAMLError as yaml_exc:
- _handle_error(json_exc, yaml_exc, file_name, show_content)
-
- return new_data
+ return yaml.load(data, Loader=AnsibleLoader) # type: ignore[arg-type]
+ except Exception as yaml_ex:
+ # DTFIX-FUTURE: how can we indicate in Origin that the data is in-memory only, to support context information -- is that useful?
+ # we'd need to pass data to handle_exception so it could be used as the content instead of reading from disk
+ AnsibleYAMLParserError.handle_exception(yaml_ex, origin=origin)
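
The rewritten `from_yaml` keeps its long-standing ordering: JSON is attempted first so extra-vars style JSON strings are not mis-parsed by the more permissive YAML grammar, and the JSON failure is only surfaced when `json_only` is set. A standalone sketch of that control flow (using `yaml.safe_load` in place of `AnsibleLoader`):

```python
import json

import yaml  # PyYAML


def tiny_from_yaml(data: str, json_only: bool = False):
    try:
        return json.loads(data)   # JSON first: it is also valid YAML, but stricter
    except Exception:
        if json_only:
            raise
    return yaml.safe_load(data)   # fall back to YAML for everything else


assert tiny_from_yaml('{"a": 1}') == {"a": 1}
assert tiny_from_yaml("a: 1") == {"a": 1}
```
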
diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py
index e3121b5dbb9..0322dd7b2e0 100644
--- a/lib/ansible/parsing/vault/__init__.py
+++ b/lib/ansible/parsing/vault/__init__.py
@@ -17,8 +17,8 @@
from __future__ import annotations
-import errno
import fcntl
+import functools
import os
import random
import shlex
@@ -27,11 +27,18 @@ import subprocess
import sys
import tempfile
import warnings
+import typing as t
from binascii import hexlify
from binascii import unhexlify
from binascii import Error as BinasciiError
+from ansible.module_utils._internal._datatag import (
+ AnsibleTagHelper, AnsibleTaggedObject, _AnsibleTagsMapping, _EmptyROInternalTagsMapping, _EMPTY_INTERNAL_TAGS_MAPPING,
+)
+from ansible._internal._templating import _jinja_common
+from ansible._internal._datatag._tags import Origin, VaultedValue, TrustedAsTemplate
+
HAS_CRYPTOGRAPHY = False
CRYPTOGRAPHY_BACKEND = None
try:
@@ -141,11 +148,13 @@ def _parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None):
vault_id = to_text(b_tmpheader[3].strip())
b_ciphertext = b''.join(b_tmpdata[1:])
+ # DTFIX7: possible candidate for propagate_origin
+ b_ciphertext = AnsibleTagHelper.tag_copy(b_vaulttext_envelope, b_ciphertext)
return b_ciphertext, b_version, cipher_name, vault_id
-def parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None, filename=None):
+def parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None):
"""Parse the vaulttext envelope
When data is saved, it has a header prepended and is formatted into 80
@@ -153,11 +162,8 @@ def parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None, filena
and then removes the header and the inserted newlines. The string returned
is suitable for processing by the Cipher classes.
- :arg b_vaulttext: byte str containing the data from a save file
- :kwarg default_vault_id: The vault_id name to use if the vaulttext does not provide one.
- :kwarg filename: The filename that the data came from. This is only
- used to make better error messages in case the data cannot be
- decrypted. This is optional.
+ :arg b_vaulttext_envelope: byte str containing the data from a save file
+ :arg default_vault_id: The vault_id name to use if the vaulttext does not provide one.
:returns: A tuple of byte str of the vaulttext suitable to pass to parse_vaultext,
a byte str of the vault format version,
the name of the cipher used, and the vault_id.
@@ -168,12 +174,8 @@ def parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None, filena
try:
return _parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id)
- except Exception as exc:
- msg = "Vault envelope format error"
- if filename:
- msg += ' in %s' % (filename)
- msg += ': %s' % exc
- raise AnsibleVaultFormatError(msg)
+ except Exception as ex:
+ raise AnsibleVaultFormatError("Vault envelope format error.", obj=b_vaulttext_envelope) from ex
def format_vaulttext_envelope(b_ciphertext, cipher_name, version=None, vault_id=None):
@@ -219,9 +221,10 @@ def format_vaulttext_envelope(b_ciphertext, cipher_name, version=None, vault_id=
def _unhexlify(b_data):
try:
- return unhexlify(b_data)
- except (BinasciiError, TypeError) as exc:
- raise AnsibleVaultFormatError('Vault format unhexlify error: %s' % exc)
+ # DTFIX7: possible candidate for propagate_origin
+ return AnsibleTagHelper.tag_copy(b_data, unhexlify(b_data))
+ except (BinasciiError, TypeError) as ex:
+ raise AnsibleVaultFormatError('Vault format unhexlify error.', obj=b_data) from ex
def _parse_vaulttext(b_vaulttext):
@@ -247,9 +250,8 @@ def parse_vaulttext(b_vaulttext):
return _parse_vaulttext(b_vaulttext)
except AnsibleVaultFormatError:
raise
- except Exception as exc:
- msg = "Vault vaulttext format error: %s" % exc
- raise AnsibleVaultFormatError(msg)
+ except Exception as ex:
+ raise AnsibleVaultFormatError("Vault vaulttext format error.", obj=b_vaulttext) from ex
def verify_secret_is_not_empty(secret, msg=None):
@@ -411,10 +413,10 @@ class FileVaultSecret(VaultSecret):
try:
with open(filename, "rb") as f:
vault_pass = f.read().strip()
- except (OSError, IOError) as e:
- raise AnsibleError("Could not read vault password file %s: %s" % (filename, e))
+ except OSError as ex:
+ raise AnsibleError(f"Could not read vault password file {filename!r}.") from ex
- b_vault_data, dummy = self.loader._decrypt_if_vault_data(vault_pass, filename)
+ b_vault_data, dummy = self.loader._decrypt_if_vault_data(vault_pass)
vault_pass = b_vault_data.strip(b'\r\n')
@@ -568,8 +570,8 @@ def match_encrypt_secret(secrets, encrypt_vault_id=None):
return match_encrypt_vault_id_secret(secrets,
encrypt_vault_id=encrypt_vault_id)
- # Find the best/first secret from secrets since we didnt specify otherwise
- # ie, consider all of the available secrets as matches
+ # Find the best/first secret from secrets since we didn't specify otherwise
+ # ie, consider all the available secrets as matches
_vault_id_matchers = [_vault_id for _vault_id, dummy in secrets]
best_secret = match_best_secret(secrets, _vault_id_matchers)
@@ -633,58 +635,44 @@ class VaultLib:
vault_id=vault_id)
return b_vaulttext
- def decrypt(self, vaulttext, filename=None, obj=None):
+ def decrypt(self, vaulttext):
"""Decrypt a piece of vault encrypted data.
:arg vaulttext: a string to decrypt. Since vault encrypted data is an
ascii text format this can be either a byte str or unicode string.
- :kwarg filename: a filename that the data came from. This is only
- used to make better error messages in case the data cannot be
- decrypted.
- :returns: a byte string containing the decrypted data and the vault-id that was used
-
+ :returns: a byte string containing the decrypted data
"""
- plaintext, vault_id, vault_secret = self.decrypt_and_get_vault_id(vaulttext, filename=filename, obj=obj)
+ plaintext, vault_id, vault_secret = self.decrypt_and_get_vault_id(vaulttext)
return plaintext
- def decrypt_and_get_vault_id(self, vaulttext, filename=None, obj=None):
+ def decrypt_and_get_vault_id(self, vaulttext):
"""Decrypt a piece of vault encrypted data.
:arg vaulttext: a string to decrypt. Since vault encrypted data is an
ascii text format this can be either a byte str or unicode string.
- :kwarg filename: a filename that the data came from. This is only
- used to make better error messages in case the data cannot be
- decrypted.
:returns: a byte string containing the decrypted data and the vault-id and vault-secret that were used
-
"""
- b_vaulttext = to_bytes(vaulttext, errors='strict', encoding='utf-8')
+ origin = Origin.get_tag(vaulttext)
+
+ b_vaulttext = to_bytes(vaulttext, nonstring='error') # enforce vaulttext is str/bytes, keep type check if removing type conversion
if self.secrets is None:
- msg = "A vault password must be specified to decrypt data"
- if filename:
- msg += " in file %s" % to_native(filename)
- raise AnsibleVaultError(msg)
+ raise AnsibleVaultError("A vault password must be specified to decrypt data.", obj=vaulttext)
if not is_encrypted(b_vaulttext):
- msg = "input is not vault encrypted data. "
- if filename:
- msg += "%s is not a vault encrypted file" % to_native(filename)
- raise AnsibleError(msg)
+ raise AnsibleVaultError("Input is not vault encrypted data.", obj=vaulttext)
- b_vaulttext, dummy, cipher_name, vault_id = parse_vaulttext_envelope(b_vaulttext, filename=filename)
+ b_vaulttext, dummy, cipher_name, vault_id = parse_vaulttext_envelope(b_vaulttext)
# create the cipher object, note that the cipher used for decrypt can
# be different than the cipher used for encrypt
if cipher_name in CIPHER_ALLOWLIST:
this_cipher = CIPHER_MAPPING[cipher_name]()
else:
- raise AnsibleError("{0} cipher could not be found".format(cipher_name))
-
- b_plaintext = None
+ raise AnsibleVaultError(f"Cipher {cipher_name!r} could not be found.", obj=vaulttext)
if not self.secrets:
- raise AnsibleVaultError('Attempting to decrypt but no vault secrets found')
+ raise AnsibleVaultError('Attempting to decrypt but no vault secrets found.', obj=vaulttext)
# WARNING: Currently, the vault id is not required to match the vault id in the vault blob to
# decrypt a vault properly. The vault id in the vault blob is not part of the encrypted
@@ -697,15 +685,13 @@ class VaultLib:
# we check it first.
vault_id_matchers = []
- vault_id_used = None
- vault_secret_used = None
if vault_id:
display.vvvvv(u'Found a vault_id (%s) in the vaulttext' % to_text(vault_id))
vault_id_matchers.append(vault_id)
_matches = match_secrets(self.secrets, vault_id_matchers)
if _matches:
- display.vvvvv(u'We have a secret associated with vault id (%s), will try to use to decrypt %s' % (to_text(vault_id), to_text(filename)))
+ display.vvvvv(u'We have a secret associated with vault id (%s), will try to use to decrypt %s' % (to_text(vault_id), to_text(origin)))
else:
display.vvvvv(u'Found a vault_id (%s) in the vault text, but we do not have a associated secret (--vault-id)' % to_text(vault_id))
@@ -719,45 +705,32 @@ class VaultLib:
# for vault_secret_id in vault_secret_ids:
for vault_secret_id, vault_secret in matched_secrets:
- display.vvvvv(u'Trying to use vault secret=(%s) id=%s to decrypt %s' % (to_text(vault_secret), to_text(vault_secret_id), to_text(filename)))
+ display.vvvvv(u'Trying to use vault secret=(%s) id=%s to decrypt %s' % (to_text(vault_secret), to_text(vault_secret_id), to_text(origin)))
try:
# secret = self.secrets[vault_secret_id]
display.vvvv(u'Trying secret %s for vault_id=%s' % (to_text(vault_secret), to_text(vault_secret_id)))
b_plaintext = this_cipher.decrypt(b_vaulttext, vault_secret)
+ # DTFIX7: possible candidate for propagate_origin
+ b_plaintext = AnsibleTagHelper.tag_copy(vaulttext, b_plaintext)
if b_plaintext is not None:
vault_id_used = vault_secret_id
vault_secret_used = vault_secret
file_slug = ''
- if filename:
- file_slug = ' of "%s"' % filename
+ if origin:
+ file_slug = ' of "%s"' % origin
display.vvvvv(
u'Decrypt%s successful with secret=%s and vault_id=%s' % (to_text(file_slug), to_text(vault_secret), to_text(vault_secret_id))
)
break
- except AnsibleVaultFormatError as exc:
- exc.obj = obj
- msg = u"There was a vault format error"
- if filename:
- msg += u' in %s' % (to_text(filename))
- msg += u': %s' % to_text(exc)
- display.warning(msg, formatted=True)
+ except AnsibleVaultFormatError:
raise
except AnsibleError as e:
display.vvvv(u'Tried to use the vault secret (%s) to decrypt (%s) but it failed. Error: %s' %
- (to_text(vault_secret_id), to_text(filename), e))
+ (to_text(vault_secret_id), to_text(origin), e))
continue
else:
- msg = "Decryption failed (no vault secrets were found that could decrypt)"
- if filename:
- msg += " on %s" % to_native(filename)
- raise AnsibleVaultError(msg)
-
- if b_plaintext is None:
- msg = "Decryption failed"
- if filename:
- msg += " on %s" % to_native(filename)
- raise AnsibleError(msg)
+ raise AnsibleVaultError("Decryption failed (no vault secrets were found that could decrypt).", obj=vaulttext)
return b_plaintext, vault_id_used, vault_secret_used
@@ -916,7 +889,7 @@ class VaultEditor:
ciphertext = self.read_data(filename)
try:
- plaintext = self.vault.decrypt(ciphertext, filename=filename)
+ plaintext = self.vault.decrypt(ciphertext)
except AnsibleError as e:
raise AnsibleError("%s for %s" % (to_native(e), to_native(filename)))
self.write_data(plaintext, output_file or filename, shred=False)
@@ -956,7 +929,7 @@ class VaultEditor:
# Figure out the vault id from the file, to select the right secret to re-encrypt it
# (duplicates parts of decrypt, but alas...)
- dummy, dummy, cipher_name, vault_id = parse_vaulttext_envelope(b_vaulttext, filename=filename)
+ dummy, dummy, cipher_name, vault_id = parse_vaulttext_envelope(b_vaulttext)
# vault id here may not be the vault id actually used for decrypting
# as when the edited file has no vault-id but is decrypted by non-default id in secrets
@@ -974,7 +947,7 @@ class VaultEditor:
vaulttext = to_text(b_vaulttext)
try:
- plaintext = self.vault.decrypt(vaulttext, filename=filename)
+ plaintext = self.vault.decrypt(vaulttext)
return plaintext
except AnsibleError as e:
raise AnsibleVaultError("%s for %s" % (to_native(e), to_native(filename)))
@@ -1024,10 +997,12 @@ class VaultEditor:
try:
if filename == '-':
- data = sys.stdin.buffer.read()
+ data = Origin(description='<stdin>').tag(sys.stdin.buffer.read())
else:
+ filename = os.path.abspath(filename)
+
with open(filename, "rb") as fh:
- data = fh.read()
+ data = Origin(path=filename).tag(fh.read())
except Exception as e:
msg = to_native(e)
if not msg:
@@ -1095,13 +1070,10 @@ class VaultEditor:
try:
# create file with secure permissions
fd = os.open(thefile, os.O_CREAT | os.O_EXCL | os.O_RDWR | os.O_TRUNC, mode)
- except OSError as ose:
- # Want to catch FileExistsError, which doesn't exist in Python 2, so catch OSError
- # and compare the error number to get equivalent behavior in Python 2/3
- if ose.errno == errno.EEXIST:
- raise AnsibleError('Vault file got recreated while we were operating on it: %s' % to_native(ose))
-
- raise AnsibleError('Problem creating temporary vault file: %s' % to_native(ose))
+ except FileExistsError as ex:
+ raise AnsibleError('Vault file got recreated while we were operating on it.') from ex
+ except OSError as ex:
+ raise AnsibleError('Problem creating temporary vault file.') from ex
try:
# now write to the file and ensure ours is only data in it
@@ -1170,6 +1142,7 @@ class VaultAES256:
return b_derivedkey
@classmethod
+ @functools.cache # Concurrent first-use by multiple threads will all execute the method body.
def _gen_key_initctr(cls, b_password, b_salt):
# 16 for AES 128, 32 for AES256
key_length = 32
@@ -1302,3 +1275,258 @@ class VaultAES256:
CIPHER_MAPPING = {
u'AES256': VaultAES256,
}
+
+
+class VaultSecretsContext:
+ """Provides context-style access to vault secrets."""
+ _current: t.ClassVar[t.Self | None] = None
+
+ def __init__(self, secrets: list[tuple[str, VaultSecret]]) -> None:
+ self.secrets = secrets
+
+ @classmethod
+ def initialize(cls, value: t.Self) -> None:
+ """
+ Initialize VaultSecretsContext with the specified instance and secrets (since it's not a lazy or per-thread context).
+ This method will fail if called more than once.
+ """
+ if cls._current:
+ raise RuntimeError(f"The {cls.__name__} context is already initialized.")
+
+ cls._current = value
+
+ @classmethod
+ def current(cls, optional: bool = False) -> t.Self:
+ """Access vault secrets, if initialized, ala `AmbientContextBase.current()`."""
+ if not cls._current and not optional:
+ raise ReferenceError(f"A required {cls.__name__} context is not active.")
+
+ return cls._current
+
+
+@t.final
+class EncryptedString(AnsibleTaggedObject):
+ """
+ An encrypted string which supports tagging and on-demand decryption.
+ All methods provided by Python's built-in `str` are supported, all of which operate on the decrypted value.
+ Any attempt to use this value when it cannot be decrypted will raise an exception.
+ Despite supporting `str` methods, access to an instance of this type through templating is recommended over direct access.
+ """
+
+ __slots__ = ('_ciphertext', '_plaintext', '_ansible_tags_mapping')
+
+ _subclasses_native_type: t.ClassVar[bool] = False
+ _empty_tags_as_native: t.ClassVar[bool] = False
+
+ _ciphertext: str
+ _plaintext: str | None
+ _ansible_tags_mapping: _AnsibleTagsMapping | _EmptyROInternalTagsMapping
+
+ def __init__(self, *, ciphertext: str) -> None:
+ if type(ciphertext) is not str: # pylint: disable=unidiomatic-typecheck
+ raise TypeError(f'ciphertext must be {str} instead of {type(ciphertext)}')
+
+ object.__setattr__(self, '_ciphertext', ciphertext)
+ object.__setattr__(self, '_plaintext', None)
+ object.__setattr__(self, '_ansible_tags_mapping', _EMPTY_INTERNAL_TAGS_MAPPING)
+
+ @classmethod
+ def _instance_factory(cls, value: t.Any, tags_mapping: _AnsibleTagsMapping) -> EncryptedString:
+ instance = EncryptedString.__new__(EncryptedString)
+
+ # In 2.18 and earlier, vaulted values were not trusted.
+ # This maintains backwards compatibility with that.
+ # Additionally, supporting templating on vaulted values could be problematic for a few cases:
+ # 1) There's no way to compose YAML tags, so you can't use `!unsafe` and `!vault` together.
+ # 2) It would make composing `EncryptedString` with a possible future `TemplateString` more difficult.
+ tags_mapping.pop(TrustedAsTemplate, None)
+
+ object.__setattr__(instance, '_ciphertext', value._ciphertext)
+ object.__setattr__(instance, '_plaintext', value._plaintext)
+ object.__setattr__(instance, '_ansible_tags_mapping', tags_mapping)
+
+ return instance
+
+ def __setstate__(self, state: tuple[None, dict[str, t.Any]]) -> None:
+ for key, value in state[1].items():
+ object.__setattr__(self, key, value)
+
+ def __delattr__(self, item: str) -> t.NoReturn:
+ raise AttributeError(f'{self.__class__.__name__!r} object is read-only')
+
+ def __setattr__(self, key: str, value: object) -> t.NoReturn:
+ raise AttributeError(f'{self.__class__.__name__!r} object is read-only')
+
+ @classmethod
+ def _init_class(cls) -> None:
+ """
+ Add proxies for the specified `str` methods.
+ These proxies operate on the plaintext, which is decrypted on-demand.
+ """
+ cls._native_type = cls
+
+ operator_method_names = (
+ '__eq__',
+ '__ge__',
+ '__gt__',
+ '__le__',
+ '__lt__',
+ '__ne__',
+ )
+
+ method_names = (
+ '__add__',
+ '__contains__',
+ '__format__',
+ '__getitem__',
+ '__hash__',
+ '__iter__',
+ '__len__',
+ '__mod__',
+ '__mul__',
+ '__rmod__',
+ '__rmul__',
+ 'capitalize',
+ 'casefold',
+ 'center',
+ 'count',
+ 'encode',
+ 'endswith',
+ 'expandtabs',
+ 'find',
+ 'format',
+ 'format_map',
+ 'index',
+ 'isalnum',
+ 'isalpha',
+ 'isascii',
+ 'isdecimal',
+ 'isdigit',
+ 'isidentifier',
+ 'islower',
+ 'isnumeric',
+ 'isprintable',
+ 'isspace',
+ 'istitle',
+ 'isupper',
+ 'join',
+ 'ljust',
+ 'lower',
+ 'lstrip',
+ 'maketrans', # static, but implemented for simplicity/consistency
+ 'partition',
+ 'removeprefix',
+ 'removesuffix',
+ 'replace',
+ 'rfind',
+ 'rindex',
+ 'rjust',
+ 'rpartition',
+ 'rsplit',
+ 'rstrip',
+ 'split',
+ 'splitlines',
+ 'startswith',
+ 'strip',
+ 'swapcase',
+ 'title',
+ 'translate',
+ 'upper',
+ 'zfill',
+ )
+
+ for method_name in operator_method_names:
+ setattr(cls, method_name, functools.partialmethod(cls._proxy_str_operator_method, getattr(str, method_name)))
+
+ for method_name in method_names:
+ setattr(cls, method_name, functools.partialmethod(cls._proxy_str_method, getattr(str, method_name)))
+
+ def _decrypt(self) -> str:
+ """
+ Attempt to decrypt the ciphertext and return the plaintext, which will be cached.
+ If decryption fails, an exception is raised and no result is cached.
+ """
+ if self._plaintext is None:
+ vault = VaultLib(secrets=VaultSecretsContext.current().secrets)
+ # use the utility method to ensure that origin tags are available
+ plaintext = to_text(vault.decrypt(VaultHelper.get_ciphertext(self, with_tags=True))) # raises if the ciphertext cannot be decrypted
+
+ # propagate source value tags plus VaultedValue for round-tripping ciphertext
+ plaintext = AnsibleTagHelper.tag(plaintext, AnsibleTagHelper.tags(self) | {VaultedValue(ciphertext=self._ciphertext)})
+
+ object.__setattr__(self, '_plaintext', plaintext)
+
+ return self._plaintext
+
+ def _as_dict(self) -> dict[str, t.Any]:
+ return dict(
+ value=self._ciphertext,
+ tags=list(self._ansible_tags_mapping.values()),
+ )
+
+ def _native_copy(self) -> str:
+ return AnsibleTagHelper.untag(self._decrypt())
+
+ def _proxy_str_operator_method(self, method: t.Callable, other) -> t.Any:
+ obj = self._decrypt()
+
+ if type(other) is EncryptedString: # pylint: disable=unidiomatic-typecheck
+ other = other._decrypt()
+
+ return method(obj, other)
+
+ def _proxy_str_method(self, method: t.Callable, *args, **kwargs) -> t.Any:
+ obj = self._decrypt()
+ return method(obj, *args, **kwargs)
+
+ def __repr__(self) -> str:
+ return f'{self.__class__.__name__}(ciphertext={self._ciphertext!r})'
+
+ def __str__(self) -> str:
+ return self._decrypt()
+
+ def __float__(self) -> float:
+ return float(self._decrypt())
+
+ def __int__(self) -> int:
+ return int(self._decrypt())
+
+ def __radd__(self, other: t.Any) -> str:
+ return other + self._decrypt()
+
+ def __fspath__(self) -> str:
+ return self._decrypt()
+
+
+class VaultHelper:
+ """Vault specific utility methods."""
+
+ @staticmethod
+ def get_ciphertext(value: t.Any, *, with_tags: bool) -> str | None:
+ """
+ If the given value is an `EncryptedString`, a `VaultExceptionMarker`, or tagged with `VaultedValue`, return the ciphertext; otherwise return `None`.
+ Tags on the value other than `VaultedValue` will be included on the ciphertext if `with_tags` is `True`; otherwise the ciphertext will be tagless.
+ """
+ value_type = type(value)
+ ciphertext: str | None
+ tags = AnsibleTagHelper.tags(value)
+
+ if value_type is _jinja_common.VaultExceptionMarker:
+ ciphertext = value._marker_undecryptable_ciphertext
+ tags = AnsibleTagHelper.tags(ciphertext) # ciphertext has tags but value does not
+ elif value_type is EncryptedString:
+ ciphertext = value._ciphertext
+ elif value_type in _jinja_common.Marker._concrete_subclasses: # avoid wasteful raise/except of Marker when calling get_tag below
+ ciphertext = None
+ elif vaulted_value := VaultedValue.get_tag(value):
+ ciphertext = vaulted_value.ciphertext
+ else:
+ ciphertext = None
+
+ if ciphertext:
+ if with_tags:
+ ciphertext = VaultedValue.untag(AnsibleTagHelper.tag(ciphertext, tags))
+ else:
+ ciphertext = AnsibleTagHelper.untag(ciphertext)
+
+ return ciphertext
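The `EncryptedString` machinery above defers decryption until a proxied `str` operation actually runs. A minimal sketch of the observable behavior, assuming an initialized `VaultSecretsContext` and a placeholder ciphertext (a real one comes from `ansible-vault encrypt_string`):

```python
from ansible.parsing.vault import EncryptedString

# Placeholder ciphertext; substitute output from `ansible-vault encrypt_string`.
secret = EncryptedString(ciphertext="$ANSIBLE_VAULT;1.1;AES256\n3161...")

# No decryption has happened yet; repr() shows only the ciphertext.
print(repr(secret))

# The first proxied str method decrypts and caches the plaintext. Without an
# active VaultSecretsContext this raises instead of returning a value.
print(secret.upper())
```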
diff --git a/lib/ansible/parsing/yaml/__init__.py b/lib/ansible/parsing/yaml/__init__.py
index 64fee52484f..e69de29bb2d 100644
--- a/lib/ansible/parsing/yaml/__init__.py
+++ b/lib/ansible/parsing/yaml/__init__.py
@@ -1,18 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import annotations
diff --git a/lib/ansible/parsing/yaml/constructor.py b/lib/ansible/parsing/yaml/constructor.py
deleted file mode 100644
index 300dad38ca9..00000000000
--- a/lib/ansible/parsing/yaml/constructor.py
+++ /dev/null
@@ -1,178 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import annotations
-
-from yaml.constructor import SafeConstructor, ConstructorError
-from yaml.nodes import MappingNode
-
-from ansible import constants as C
-from ansible.module_utils.common.text.converters import to_bytes, to_native
-from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode, AnsibleVaultEncryptedUnicode
-from ansible.parsing.vault import VaultLib
-from ansible.utils.display import Display
-from ansible.utils.unsafe_proxy import wrap_var
-
-display = Display()
-
-
-class AnsibleConstructor(SafeConstructor):
- def __init__(self, file_name=None, vault_secrets=None):
- self._ansible_file_name = file_name
- super(AnsibleConstructor, self).__init__()
- self._vaults = {}
- self.vault_secrets = vault_secrets or []
- self._vaults['default'] = VaultLib(secrets=self.vault_secrets)
-
- def construct_yaml_map(self, node):
- data = AnsibleMapping()
- yield data
- value = self.construct_mapping(node)
- data.update(value)
- data.ansible_pos = self._node_position_info(node)
-
- def construct_mapping(self, node, deep=False):
- # Most of this is from yaml.constructor.SafeConstructor. We replicate
- # it here so that we can warn users when they have duplicate dict keys
- # (pyyaml silently allows overwriting keys)
- if not isinstance(node, MappingNode):
- raise ConstructorError(None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- self.flatten_mapping(node)
- mapping = AnsibleMapping()
-
- # Add our extra information to the returned value
- mapping.ansible_pos = self._node_position_info(node)
-
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- try:
- hash(key)
- except TypeError as exc:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "found unacceptable key (%s)" % exc, key_node.start_mark)
-
- if key in mapping:
- msg = (u'While constructing a mapping from {1}, line {2}, column {3}, found a duplicate dict key ({0}).'
- u' Using last defined value only.'.format(key, *mapping.ansible_pos))
- if C.DUPLICATE_YAML_DICT_KEY == 'warn':
- display.warning(msg)
- elif C.DUPLICATE_YAML_DICT_KEY == 'error':
- raise ConstructorError(context=None, context_mark=None,
- problem=to_native(msg),
- problem_mark=node.start_mark,
- note=None)
- else:
- # when 'ignore'
- display.debug(msg)
-
- value = self.construct_object(value_node, deep=deep)
- mapping[key] = value
-
- return mapping
-
- def construct_yaml_str(self, node):
- # Override the default string handling function
- # to always return unicode objects
- value = self.construct_scalar(node)
- ret = AnsibleUnicode(value)
-
- ret.ansible_pos = self._node_position_info(node)
-
- return ret
-
- def construct_vault_encrypted_unicode(self, node):
- value = self.construct_scalar(node)
- b_ciphertext_data = to_bytes(value)
- # could pass in a key id here to choose the vault to associate with
- # TODO/FIXME: plugin vault selector
- vault = self._vaults['default']
- if vault.secrets is None:
- raise ConstructorError(context=None, context_mark=None,
- problem="found !vault but no vault password provided",
- problem_mark=node.start_mark,
- note=None)
- ret = AnsibleVaultEncryptedUnicode(b_ciphertext_data)
- ret.vault = vault
- ret.ansible_pos = self._node_position_info(node)
- return ret
-
- def construct_yaml_seq(self, node):
- data = AnsibleSequence()
- yield data
- data.extend(self.construct_sequence(node))
- data.ansible_pos = self._node_position_info(node)
-
- def construct_yaml_unsafe(self, node):
- try:
- constructor = getattr(node, 'id', 'object')
- if constructor is not None:
- constructor = getattr(self, 'construct_%s' % constructor)
- except AttributeError:
- constructor = self.construct_object
-
- value = constructor(node)
-
- return wrap_var(value)
-
- def _node_position_info(self, node):
- # the line number where the previous token has ended (plus empty lines)
- # Add one so that the first line is line 1 rather than line 0
- column = node.start_mark.column + 1
- line = node.start_mark.line + 1
-
- # in some cases, we may have pre-read the data and then
- # passed it to the load() call for YAML, in which case we
- # want to override the default datasource (which would be
- # '') to the actual filename we read in
- datasource = self._ansible_file_name or node.start_mark.name
-
- return (datasource, line, column)
-
-
-AnsibleConstructor.add_constructor(
- u'tag:yaml.org,2002:map',
- AnsibleConstructor.construct_yaml_map) # type: ignore[type-var]
-
-AnsibleConstructor.add_constructor(
- u'tag:yaml.org,2002:python/dict',
- AnsibleConstructor.construct_yaml_map) # type: ignore[type-var]
-
-AnsibleConstructor.add_constructor(
- u'tag:yaml.org,2002:str',
- AnsibleConstructor.construct_yaml_str) # type: ignore[type-var]
-
-AnsibleConstructor.add_constructor(
- u'tag:yaml.org,2002:python/unicode',
- AnsibleConstructor.construct_yaml_str) # type: ignore[type-var]
-
-AnsibleConstructor.add_constructor(
- u'tag:yaml.org,2002:seq',
- AnsibleConstructor.construct_yaml_seq) # type: ignore[type-var]
-
-AnsibleConstructor.add_constructor(
- u'!unsafe',
- AnsibleConstructor.construct_yaml_unsafe) # type: ignore[type-var]
-
-AnsibleConstructor.add_constructor(
- u'!vault',
- AnsibleConstructor.construct_vault_encrypted_unicode) # type: ignore[type-var]
-
-AnsibleConstructor.add_constructor(
- u'!vault-encrypted',
- AnsibleConstructor.construct_vault_encrypted_unicode) # type: ignore[type-var]
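Worth noting about the removal above: duplicate-key detection lived in `construct_mapping` precisely because PyYAML's `SafeConstructor` silently keeps the last value for a repeated mapping key. A plain-PyYAML illustration of the behavior the `DUPLICATE_YAML_DICT_KEY` setting guarded against:

```python
import yaml

# PyYAML emits no warning for the duplicate key; the last occurrence wins.
data = yaml.safe_load("key: 1\nkey: 2\n")
print(data)  # {'key': 2}
```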
diff --git a/lib/ansible/parsing/yaml/dumper.py b/lib/ansible/parsing/yaml/dumper.py
index 4888e4fd10c..c51ac605e3f 100644
--- a/lib/ansible/parsing/yaml/dumper.py
+++ b/lib/ansible/parsing/yaml/dumper.py
@@ -1,120 +1,10 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import annotations as _annotations
-from __future__ import annotations
+import typing as _t
-import yaml
+from ansible._internal._yaml import _dumper
-from ansible.module_utils.six import text_type, binary_type
-from ansible.module_utils.common.yaml import SafeDumper
-from ansible.parsing.yaml.objects import AnsibleUnicode, AnsibleSequence, AnsibleMapping, AnsibleVaultEncryptedUnicode
-from ansible.utils.unsafe_proxy import AnsibleUnsafeText, AnsibleUnsafeBytes, NativeJinjaUnsafeText, NativeJinjaText
-from ansible.template import AnsibleUndefined
-from ansible.vars.hostvars import HostVars, HostVarsVars
-from ansible.vars.manager import VarsWithSources
-
-class AnsibleDumper(SafeDumper):
- """
- A simple stub class that allows us to add representers
- for our overridden object types.
- """
-
-
-def represent_hostvars(self, data):
- return self.represent_dict(dict(data))
-
-
-# Note: only want to represent the encrypted data
-def represent_vault_encrypted_unicode(self, data):
- return self.represent_scalar(u'!vault', data._ciphertext.decode(), style='|')
-
-
-def represent_unicode(self, data):
- return yaml.representer.SafeRepresenter.represent_str(self, text_type(data))
-
-
-def represent_binary(self, data):
- return yaml.representer.SafeRepresenter.represent_binary(self, binary_type(data))
-
-
-def represent_undefined(self, data):
- # Here bool will ensure _fail_with_undefined_error happens
- # if the value is Undefined.
- # This happens because Jinja sets __bool__ on StrictUndefined
- return bool(data)
-
-
-AnsibleDumper.add_representer(
- AnsibleUnicode,
- represent_unicode,
-)
-
-AnsibleDumper.add_representer(
- AnsibleUnsafeText,
- represent_unicode,
-)
-
-AnsibleDumper.add_representer(
- AnsibleUnsafeBytes,
- represent_binary,
-)
-
-AnsibleDumper.add_representer(
- HostVars,
- represent_hostvars,
-)
-
-AnsibleDumper.add_representer(
- HostVarsVars,
- represent_hostvars,
-)
-
-AnsibleDumper.add_representer(
- VarsWithSources,
- represent_hostvars,
-)
-
-AnsibleDumper.add_representer(
- AnsibleSequence,
- yaml.representer.SafeRepresenter.represent_list,
-)
-
-AnsibleDumper.add_representer(
- AnsibleMapping,
- yaml.representer.SafeRepresenter.represent_dict,
-)
-
-AnsibleDumper.add_representer(
- AnsibleVaultEncryptedUnicode,
- represent_vault_encrypted_unicode,
-)
-
-AnsibleDumper.add_representer(
- AnsibleUndefined,
- represent_undefined,
-)
-
-AnsibleDumper.add_representer(
- NativeJinjaUnsafeText,
- represent_unicode,
-)
-
-AnsibleDumper.add_representer(
- NativeJinjaText,
- represent_unicode,
-)
+def AnsibleDumper(*args, **kwargs) -> _t.Any:
+ """Compatibility factory function; returns an Ansible YAML dumper instance."""
+ return _dumper.AnsibleDumper(*args, **kwargs)
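Since `yaml.dump()` only ever invokes `Dumper(stream, **kwargs)`, swapping the class for this factory function should keep the common call pattern working; only code that subclasses `AnsibleDumper` or runs `isinstance()` checks against it would notice. A sketch of the compatible pattern:

```python
import yaml

from ansible.parsing.yaml.dumper import AnsibleDumper

# yaml.dump() instantiates whatever "Dumper" it is handed, so a factory
# function that returns a dumper instance is a drop-in replacement here.
print(yaml.dump({'answer': 42}, Dumper=AnsibleDumper))
```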
diff --git a/lib/ansible/parsing/yaml/loader.py b/lib/ansible/parsing/yaml/loader.py
index b9bd3e1c6e3..ee878b9fca1 100644
--- a/lib/ansible/parsing/yaml/loader.py
+++ b/lib/ansible/parsing/yaml/loader.py
@@ -1,43 +1,10 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import annotations as _annotations
-from __future__ import annotations
+import typing as _t
-from yaml.resolver import Resolver
+from ansible._internal._yaml import _loader
-from ansible.parsing.yaml.constructor import AnsibleConstructor
-from ansible.module_utils.common.yaml import HAS_LIBYAML, Parser
-if HAS_LIBYAML:
- class AnsibleLoader(Parser, AnsibleConstructor, Resolver): # type: ignore[misc] # pylint: disable=inconsistent-mro
- def __init__(self, stream, file_name=None, vault_secrets=None):
- Parser.__init__(self, stream)
- AnsibleConstructor.__init__(self, file_name=file_name, vault_secrets=vault_secrets)
- Resolver.__init__(self)
-else:
- from yaml.composer import Composer
- from yaml.reader import Reader
- from yaml.scanner import Scanner
-
- class AnsibleLoader(Reader, Scanner, Parser, Composer, AnsibleConstructor, Resolver): # type: ignore[misc,no-redef] # pylint: disable=inconsistent-mro
- def __init__(self, stream, file_name=None, vault_secrets=None):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- AnsibleConstructor.__init__(self, file_name=file_name, vault_secrets=vault_secrets)
- Resolver.__init__(self)
+def AnsibleLoader(*args, **kwargs) -> _t.Any:
+ """Compatibility factory function; returns an Ansible YAML loader instance."""
+ return _loader.AnsibleLoader(*args, **kwargs)
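The loader shim follows the same factory pattern. Assuming the internal loader keeps PyYAML's stream-based constructor, callers that instantiate and drain the loader directly should continue to work:

```python
from ansible.parsing.yaml.loader import AnsibleLoader

loader = AnsibleLoader("retries: 3\n")
try:
    print(loader.get_single_data())  # {'retries': 3}
finally:
    loader.dispose()
```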
diff --git a/lib/ansible/parsing/yaml/objects.py b/lib/ansible/parsing/yaml/objects.py
index f3ebcb8fc07..f90ebfd82af 100644
--- a/lib/ansible/parsing/yaml/objects.py
+++ b/lib/ansible/parsing/yaml/objects.py
@@ -1,359 +1,67 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""Backwards compatibility types, which will be deprecated a future release. Do not use these in new code."""
-from __future__ import annotations
+from __future__ import annotations as _annotations
-import sys as _sys
+import typing as _t
-from collections.abc import Sequence
+from ansible.module_utils._internal import _datatag
+from ansible.module_utils.common.text import converters as _converters
+from ansible.parsing import vault as _vault
-from ansible.module_utils.six import text_type
-from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native
+_UNSET = _t.cast(_t.Any, object())
-class AnsibleBaseYAMLObject(object):
- """
- the base class used to sub-class python built-in objects
- so that we can add attributes to them during yaml parsing
+class _AnsibleMapping(dict):
+ """Backwards compatibility type."""
- """
- _data_source = None
- _line_number = 0
- _column_number = 0
+ def __new__(cls, value=_UNSET, /, **kwargs):
+ if value is _UNSET:
+ return dict(**kwargs)
- def _get_ansible_position(self):
- return (self._data_source, self._line_number, self._column_number)
+ return _datatag.AnsibleTagHelper.tag_copy(value, dict(value, **kwargs))
- def _set_ansible_position(self, obj):
- try:
- (src, line, col) = obj
- except (TypeError, ValueError):
- raise AssertionError(
- 'ansible_pos can only be set with a tuple/list '
- 'of three values: source, line number, column number'
- )
- self._data_source = src
- self._line_number = line
- self._column_number = col
- ansible_pos = property(_get_ansible_position, _set_ansible_position)
+class _AnsibleUnicode(str):
+ """Backwards compatibility type."""
+ def __new__(cls, object=_UNSET, **kwargs):
+ if object is _UNSET:
+ return str(**kwargs)
-class AnsibleMapping(AnsibleBaseYAMLObject, dict):
- """ sub class for dictionaries """
- pass
+ return _datatag.AnsibleTagHelper.tag_copy(object, str(object, **kwargs))
-class AnsibleUnicode(AnsibleBaseYAMLObject, text_type):
- """ sub class for unicode objects """
- pass
+class _AnsibleSequence(list):
+ """Backwards compatibility type."""
+ def __new__(cls, value=_UNSET, /):
+ if value is _UNSET:
+ return list()
-class AnsibleSequence(AnsibleBaseYAMLObject, list):
- """ sub class for lists """
- pass
+ return _datatag.AnsibleTagHelper.tag_copy(value, list(value))
-class AnsibleVaultEncryptedUnicode(Sequence, AnsibleBaseYAMLObject):
- """Unicode like object that is not evaluated (decrypted) until it needs to be"""
- __UNSAFE__ = True
- __ENCRYPTED__ = True
- yaml_tag = u'!vault'
+class _AnsibleVaultEncryptedUnicode:
+ """Backwards compatibility type."""
- @classmethod
- def from_plaintext(cls, seq, vault, secret):
- if not vault:
- raise vault.AnsibleVaultError('Error creating AnsibleVaultEncryptedUnicode, invalid vault (%s) provided' % vault)
+ def __new__(cls, ciphertext: str | bytes):
+ encrypted_string = _vault.EncryptedString(ciphertext=_converters.to_text(_datatag.AnsibleTagHelper.untag(ciphertext)))
- ciphertext = vault.encrypt(seq, secret)
- avu = cls(ciphertext)
- avu.vault = vault
- return avu
+ return _datatag.AnsibleTagHelper.tag_copy(ciphertext, encrypted_string)
- def __init__(self, ciphertext):
- """A AnsibleUnicode with a Vault attribute that can decrypt it.
- ciphertext is a byte string (str on PY2, bytestring on PY3).
+def __getattr__(name: str) -> _t.Any:
+ """Inject import-time deprecation warnings."""
+ if (value := globals().get(f'_{name}', None)) and name.startswith('Ansible'):
+ # deprecated: description='enable deprecation of everything in this module', core_version='2.23'
+ # from ansible.utils.display import Display
+ #
+ # Display().deprecated(
+ # msg=f"Importing {name!r} is deprecated.",
+ # help_text="Instances of this type cannot be created and will not be encountered.",
+ # version="2.27",
+ # )
- The .data attribute is a property that returns the decrypted plaintext
- of the ciphertext as a PY2 unicode or PY3 string object.
- """
- super(AnsibleVaultEncryptedUnicode, self).__init__()
+ return value
- # after construction, calling code has to set the .vault attribute to a vaultlib object
- self.vault = None
- self._ciphertext = to_bytes(ciphertext)
-
- @property
- def data(self):
- if not self.vault:
- return to_text(self._ciphertext)
- return to_text(self.vault.decrypt(self._ciphertext, obj=self))
-
- @data.setter
- def data(self, value):
- self._ciphertext = to_bytes(value)
-
- def is_encrypted(self):
- return self.vault and self.vault.is_encrypted(self._ciphertext)
-
- def __eq__(self, other):
- if self.vault:
- return other == self.data
- return False
-
- def __ne__(self, other):
- if self.vault:
- return other != self.data
- return True
-
- def __reversed__(self):
- # This gets inherited from ``collections.Sequence`` which returns a generator
- # make this act more like the string implementation
- return to_text(self[::-1], errors='surrogate_or_strict')
-
- def __str__(self):
- return to_native(self.data, errors='surrogate_or_strict')
-
- def __unicode__(self):
- return to_text(self.data, errors='surrogate_or_strict')
-
- def encode(self, encoding=None, errors=None):
- return to_bytes(self.data, encoding=encoding, errors=errors)
-
- # Methods below are a copy from ``collections.UserString``
- # Some are copied as is, where others are modified to not
- # auto wrap with ``self.__class__``
- def __repr__(self):
- return repr(self.data)
-
- def __int__(self, base=10):
- return int(self.data, base=base)
-
- def __float__(self):
- return float(self.data)
-
- def __complex__(self):
- return complex(self.data)
-
- def __hash__(self):
- return hash(self.data)
-
- # This breaks vault, do not define it, we cannot satisfy this
- # def __getnewargs__(self):
- # return (self.data[:],)
-
- def __lt__(self, string):
- if isinstance(string, AnsibleVaultEncryptedUnicode):
- return self.data < string.data
- return self.data < string
-
- def __le__(self, string):
- if isinstance(string, AnsibleVaultEncryptedUnicode):
- return self.data <= string.data
- return self.data <= string
-
- def __gt__(self, string):
- if isinstance(string, AnsibleVaultEncryptedUnicode):
- return self.data > string.data
- return self.data > string
-
- def __ge__(self, string):
- if isinstance(string, AnsibleVaultEncryptedUnicode):
- return self.data >= string.data
- return self.data >= string
-
- def __contains__(self, char):
- if isinstance(char, AnsibleVaultEncryptedUnicode):
- char = char.data
- return char in self.data
-
- def __len__(self):
- return len(self.data)
-
- def __getitem__(self, index):
- return self.data[index]
-
- def __getslice__(self, start, end):
- start = max(start, 0)
- end = max(end, 0)
- return self.data[start:end]
-
- def __add__(self, other):
- if isinstance(other, AnsibleVaultEncryptedUnicode):
- return self.data + other.data
- elif isinstance(other, text_type):
- return self.data + other
- return self.data + to_text(other)
-
- def __radd__(self, other):
- if isinstance(other, text_type):
- return other + self.data
- return to_text(other) + self.data
-
- def __mul__(self, n):
- return self.data * n
-
- __rmul__ = __mul__
-
- def __mod__(self, args):
- return self.data % args
-
- def __rmod__(self, template):
- return to_text(template) % self
-
- # the following methods are defined in alphabetical order:
- def capitalize(self):
- return self.data.capitalize()
-
- def casefold(self):
- return self.data.casefold()
-
- def center(self, width, *args):
- return self.data.center(width, *args)
-
- def count(self, sub, start=0, end=_sys.maxsize):
- if isinstance(sub, AnsibleVaultEncryptedUnicode):
- sub = sub.data
- return self.data.count(sub, start, end)
-
- def endswith(self, suffix, start=0, end=_sys.maxsize):
- return self.data.endswith(suffix, start, end)
-
- def expandtabs(self, tabsize=8):
- return self.data.expandtabs(tabsize)
-
- def find(self, sub, start=0, end=_sys.maxsize):
- if isinstance(sub, AnsibleVaultEncryptedUnicode):
- sub = sub.data
- return self.data.find(sub, start, end)
-
- def format(self, *args, **kwds):
- return self.data.format(*args, **kwds)
-
- def format_map(self, mapping):
- return self.data.format_map(mapping)
-
- def index(self, sub, start=0, end=_sys.maxsize):
- return self.data.index(sub, start, end)
-
- def isalpha(self):
- return self.data.isalpha()
-
- def isalnum(self):
- return self.data.isalnum()
-
- def isascii(self):
- return self.data.isascii()
-
- def isdecimal(self):
- return self.data.isdecimal()
-
- def isdigit(self):
- return self.data.isdigit()
-
- def isidentifier(self):
- return self.data.isidentifier()
-
- def islower(self):
- return self.data.islower()
-
- def isnumeric(self):
- return self.data.isnumeric()
-
- def isprintable(self):
- return self.data.isprintable()
-
- def isspace(self):
- return self.data.isspace()
-
- def istitle(self):
- return self.data.istitle()
-
- def isupper(self):
- return self.data.isupper()
-
- def join(self, seq):
- return self.data.join(seq)
-
- def ljust(self, width, *args):
- return self.data.ljust(width, *args)
-
- def lower(self):
- return self.data.lower()
-
- def lstrip(self, chars=None):
- return self.data.lstrip(chars)
-
- maketrans = str.maketrans
-
- def partition(self, sep):
- return self.data.partition(sep)
-
- def replace(self, old, new, maxsplit=-1):
- if isinstance(old, AnsibleVaultEncryptedUnicode):
- old = old.data
- if isinstance(new, AnsibleVaultEncryptedUnicode):
- new = new.data
- return self.data.replace(old, new, maxsplit)
-
- def rfind(self, sub, start=0, end=_sys.maxsize):
- if isinstance(sub, AnsibleVaultEncryptedUnicode):
- sub = sub.data
- return self.data.rfind(sub, start, end)
-
- def rindex(self, sub, start=0, end=_sys.maxsize):
- return self.data.rindex(sub, start, end)
-
- def rjust(self, width, *args):
- return self.data.rjust(width, *args)
-
- def rpartition(self, sep):
- return self.data.rpartition(sep)
-
- def rstrip(self, chars=None):
- return self.data.rstrip(chars)
-
- def split(self, sep=None, maxsplit=-1):
- return self.data.split(sep, maxsplit)
-
- def rsplit(self, sep=None, maxsplit=-1):
- return self.data.rsplit(sep, maxsplit)
-
- def splitlines(self, keepends=False):
- return self.data.splitlines(keepends)
-
- def startswith(self, prefix, start=0, end=_sys.maxsize):
- return self.data.startswith(prefix, start, end)
-
- def strip(self, chars=None):
- return self.data.strip(chars)
-
- def swapcase(self):
- return self.data.swapcase()
-
- def title(self):
- return self.data.title()
-
- def translate(self, *args):
- return self.data.translate(*args)
-
- def upper(self):
- return self.data.upper()
-
- def zfill(self, width):
- return self.data.zfill(width)
+ raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
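The module-level `__getattr__` above relies on PEP 562: when a name is not found in a module's globals, Python calls the module's `__getattr__` on attribute access. A stripped-down sketch of the pattern with a stand-in type:

```python
import typing as t

_AnsibleMapping = dict  # stand-in for the real compatibility type


def __getattr__(name: str) -> t.Any:
    # Called only when `name` is missing from the module's globals, i.e. on
    # `from module import AnsibleMapping`; a real implementation would emit
    # a deprecation warning before returning the private replacement.
    if name.startswith('Ansible') and (value := globals().get(f'_{name}')):
        return value

    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
```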
diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py
index e125df1ba9a..3f28654cced 100644
--- a/lib/ansible/playbook/__init__.py
+++ b/lib/ansible/playbook/__init__.py
@@ -66,7 +66,7 @@ class Playbook:
self._file_name = file_name
try:
- ds = self._loader.load_from_file(os.path.basename(file_name))
+ ds = self._loader.load_from_file(os.path.basename(file_name), trusted_as_template=True)
except UnicodeDecodeError as e:
raise AnsibleParserError("Could not read playbook (%s) due to encoding issues: %s" % (file_name, to_native(e)))
diff --git a/lib/ansible/playbook/attribute.py b/lib/ansible/playbook/attribute.py
index ee797c27ef4..3dbbef555ba 100644
--- a/lib/ansible/playbook/attribute.py
+++ b/lib/ansible/playbook/attribute.py
@@ -17,7 +17,12 @@
from __future__ import annotations
-from ansible.module_utils.common.sentinel import Sentinel
+import typing as t
+
+from ansible.utils.sentinel import Sentinel
+
+if t.TYPE_CHECKING:
+ from ansible.playbook.base import FieldAttributeBase
_CONTAINERS = frozenset(('list', 'dict', 'set'))
@@ -105,7 +110,7 @@ class Attribute:
def __ge__(self, other):
return other.priority >= self.priority
- def __get__(self, obj, obj_type=None):
+ def __get__(self, obj: FieldAttributeBase, obj_type=None):
method = f'_get_attr_{self.name}'
if hasattr(obj, method):
# NOTE this appears to be not used in the codebase,
@@ -127,7 +132,7 @@ class Attribute:
return value
- def __set__(self, obj, value):
+ def __set__(self, obj: FieldAttributeBase, value):
setattr(obj, f'_{self.name}', value)
if self.alias is not None:
setattr(obj, f'_{self.alias}', value)
@@ -180,7 +185,7 @@ class FieldAttribute(Attribute):
class ConnectionFieldAttribute(FieldAttribute):
def __get__(self, obj, obj_type=None):
- from ansible.module_utils.compat.paramiko import paramiko
+ from ansible.module_utils.compat.paramiko import _paramiko as paramiko
from ansible.utils.ssh_functions import check_for_controlpersist
value = super().__get__(obj, obj_type)
diff --git a/lib/ansible/playbook/base.py b/lib/ansible/playbook/base.py
index a762548fddf..955962ea324 100644
--- a/lib/ansible/playbook/base.py
+++ b/lib/ansible/playbook/base.py
@@ -9,14 +9,16 @@ import itertools
import operator
import os
+import typing as t
+
from copy import copy as shallowcopy
from functools import cache
-from jinja2.exceptions import UndefinedError
-
from ansible import constants as C
from ansible import context
-from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleAssertionError
+from ansible.errors import AnsibleError, AnsibleParserError, AnsibleAssertionError, AnsibleValueOmittedError, AnsibleFieldAttributeError
+from ansible.module_utils.datatag import native_type_name
+from ansible._internal._datatag._tags import Origin
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.common.sentinel import Sentinel
@@ -26,7 +28,8 @@ from ansible.playbook.attribute import Attribute, FieldAttribute, ConnectionFiel
from ansible.plugins.loader import module_loader, action_loader
from ansible.utils.collection_loader._collection_finder import _get_collection_metadata, AnsibleCollectionRef
from ansible.utils.display import Display
-from ansible.utils.vars import combine_vars, isidentifier, get_unique_id
+from ansible.utils.vars import combine_vars, get_unique_id, validate_variable_name
+from ansible._internal._templating._engine import TemplateEngine
display = Display()
@@ -80,6 +83,11 @@ class _ClassProperty:
class FieldAttributeBase:
+ _post_validate_object = False
+ """
+ `False` skips FieldAttribute post-validation on intermediate objects and mixins for attributes without `always_post_validate`.
+ Leaf objects (e.g., `Task`) should set this attribute `True` to opt-in to post-validation.
+ """
fattributes = _ClassProperty()
@classmethod
@@ -96,12 +104,13 @@ class FieldAttributeBase:
fattributes[attr.alias] = attr
return fattributes
- def __init__(self):
+ def __init__(self) -> None:
# initialize the data loader and variable manager, which will be provided
# later when the object is actually loaded
self._loader = None
self._variable_manager = None
+ self._origin: Origin | None = None
# other internal params
self._validated = False
@@ -111,9 +120,6 @@ class FieldAttributeBase:
# every object gets a random uuid:
self._uuid = get_unique_id()
- # init vars, avoid using defaults in field declaration as it lives across plays
- self.vars = dict()
-
@property
def finalized(self):
return self._finalized
@@ -148,6 +154,7 @@ class FieldAttributeBase:
# the variable manager class is used to manage and merge variables
# down to a single dictionary for reference in templating, etc.
self._variable_manager = variable_manager
+ self._origin = Origin.get_tag(ds)
# the data loader class is used to parse data from strings and files
if loader is not None:
@@ -191,7 +198,11 @@ class FieldAttributeBase:
return self._variable_manager
def _post_validate_debugger(self, attr, value, templar):
- value = templar.template(value)
+ try:
+ value = templar.template(value)
+ except AnsibleValueOmittedError:
+ value = self.set_to_context(attr.name)
+
valid_values = frozenset(('always', 'on_failed', 'on_unreachable', 'on_skipped', 'never'))
if value and isinstance(value, string_types) and value not in valid_values:
raise AnsibleParserError("'%s' is not a valid value for debugger. Must be one of %s" % (value, ', '.join(valid_values)), obj=self.get_ds())
@@ -206,12 +217,10 @@ class FieldAttributeBase:
valid_attrs = frozenset(self.fattributes)
for key in ds:
if key not in valid_attrs:
- raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (key, self.__class__.__name__), obj=ds)
+ raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (key, self.__class__.__name__), obj=key)
def validate(self, all_vars=None):
""" validation that is done at parse time, not load time """
- all_vars = {} if all_vars is None else all_vars
-
if not self._validated:
# walk all fields in the object
for (name, attribute) in self.fattributes.items():
@@ -244,7 +253,8 @@ class FieldAttributeBase:
raise AnsibleParserError(
"The field 'module_defaults' is supposed to be a dictionary or list of dictionaries, "
"the keys of which must be static action, module, or group names. Only the values may contain "
- "templates. For example: {'ping': \"{{ ping_defaults }}\"}"
+ "templates. For example: {'ping': \"{{ ping_defaults }}\"}",
+ obj=defaults_dict,
)
validated_defaults_dict = {}
@@ -419,14 +429,15 @@ class FieldAttributeBase:
try:
new_me = self.__class__()
- except RuntimeError as e:
- raise AnsibleError("Exceeded maximum object depth. This may have been caused by excessive role recursion", orig_exc=e)
+ except RecursionError as ex:
+ raise AnsibleError("Exceeded maximum object depth. This may have been caused by excessive role recursion.") from ex
for name in self.fattributes:
setattr(new_me, name, shallowcopy(getattr(self, f'_{name}', Sentinel)))
new_me._loader = self._loader
new_me._variable_manager = self._variable_manager
+ new_me._origin = self._origin
new_me._validated = self._validated
new_me._finalized = self._finalized
new_me._uuid = self._uuid
@@ -438,6 +449,12 @@ class FieldAttributeBase:
return new_me
def get_validated_value(self, name, attribute, value, templar):
+ try:
+ return self._get_validated_value(name, attribute, value, templar)
+ except (TypeError, ValueError):
+ raise AnsibleError(f"The value {value!r} could not be converted to {attribute.isa!r}.", obj=value)
+
+ def _get_validated_value(self, name, attribute, value, templar):
if attribute.isa == 'string':
value = to_text(value)
elif attribute.isa == 'int':
@@ -466,28 +483,23 @@ class FieldAttributeBase:
if attribute.listof is not None:
for item in value:
if not isinstance(item, attribute.listof):
- raise AnsibleParserError("the field '%s' should be a list of %s, "
- "but the item '%s' is a %s" % (name, attribute.listof, item, type(item)), obj=self.get_ds())
- elif attribute.required and attribute.listof == string_types:
+ type_names = ' or '.join(f'{native_type_name(attribute_type)!r}' for attribute_type in attribute.listof)
+
+ raise AnsibleParserError(
+ message=f"Keyword {name!r} items must be of type {type_names}, not {native_type_name(item)!r}.",
+ obj=Origin.first_tagged_on(item, value, self.get_ds()),
+ )
+ elif attribute.required and attribute.listof == (str,):
if item is None or item.strip() == "":
- raise AnsibleParserError("the field '%s' is required, and cannot have empty values" % (name,), obj=self.get_ds())
- elif attribute.isa == 'set':
- if value is None:
- value = set()
- elif not isinstance(value, (list, set)):
- if isinstance(value, string_types):
- value = value.split(',')
- else:
- # Making a list like this handles strings of
- # text and bytes properly
- value = [value]
- if not isinstance(value, set):
- value = set(value)
+ raise AnsibleParserError(
+ message=f"Keyword {name!r} is required, and cannot have empty values.",
+ obj=Origin.first_tagged_on(item, value, self.get_ds()),
+ )
elif attribute.isa == 'dict':
if value is None:
value = dict()
elif not isinstance(value, dict):
- raise TypeError("%s is not a dictionary" % value)
+ raise AnsibleError(f"{value!r} is not a dictionary")
elif attribute.isa == 'class':
if not isinstance(value, attribute.class_type):
raise TypeError("%s is not a valid %s (got a %s instead)" % (name, attribute.class_type, type(value)))
@@ -496,19 +508,22 @@ class FieldAttributeBase:
raise AnsibleAssertionError(f"Unknown value for attribute.isa: {attribute.isa}")
return value
- def set_to_context(self, name):
+ def set_to_context(self, name: str) -> t.Any:
""" set to parent inherited value or Sentinel as appropriate"""
attribute = self.fattributes[name]
if isinstance(attribute, NonInheritableFieldAttribute):
# setting to sentinel will trigger 'default/default()' on getter
- setattr(self, name, Sentinel)
+ value = Sentinel
else:
try:
- setattr(self, name, self._get_parent_attribute(name, omit=True))
+ value = self._get_parent_attribute(name, omit=True)
except AttributeError:
# mostly playcontext as only tasks/handlers/blocks really resolve parent
- setattr(self, name, Sentinel)
+ value = Sentinel
+
+ setattr(self, name, value)
+ return value
def post_validate(self, templar):
"""
@@ -517,91 +532,101 @@ class FieldAttributeBase:
any _post_validate_ functions.
"""
- # save the omit value for later checking
- omit_value = templar.available_variables.get('omit')
+ for name in self.fattributes:
+ value = self.post_validate_attribute(name, templar=templar)
- for (name, attribute) in self.fattributes.items():
- if attribute.static:
- value = getattr(self, name)
+ if value is not Sentinel:
+ # and assign the massaged value back to the attribute field
+ setattr(self, name, value)
- # we don't template 'vars' but allow template as values for later use
- if name not in ('vars',) and templar.is_template(value):
- display.warning('"%s" is not templatable, but we found: %s, '
- 'it will not be templated and will be used "as is".' % (name, value))
- continue
+ self._finalized = True
- if getattr(self, name) is None:
- if not attribute.required:
- continue
- else:
- raise AnsibleParserError("the field '%s' is required but was not set" % name)
- elif not attribute.always_post_validate and self.__class__.__name__ not in ('Task', 'Handler', 'PlayContext'):
- # Intermediate objects like Play() won't have their fields validated by
- # default, as their values are often inherited by other objects and validated
- # later, so we don't want them to fail out early
- continue
+ def post_validate_attribute(self, name: str, *, templar: TemplateEngine):
+ attribute: FieldAttribute = self.fattributes[name]
- try:
- # Run the post-validator if present. These methods are responsible for
- # using the given templar to template the values, if required.
- method = getattr(self, '_post_validate_%s' % name, None)
- if method:
- value = method(attribute, getattr(self, name), templar)
- elif attribute.isa == 'class':
- value = getattr(self, name)
- else:
+ # DTFIX-FUTURE: this can probably be used in many getattr cases below, but the value may be out-of-date in some cases
+ original_value = getattr(self, name) # we save this original (likely Origin-tagged) value to pass as `obj` for errors
+
+ if attribute.static:
+ value = getattr(self, name)
+
+ # we don't template 'vars' but allow template as values for later use
+ if name not in ('vars',) and templar.is_template(value):
+ display.warning('"%s" is not templatable, but we found: %s, '
+ 'it will not be templated and will be used "as is".' % (name, value))
+ return Sentinel
+
+ if getattr(self, name) is None:
+ if not attribute.required:
+ return Sentinel
+
+ raise AnsibleFieldAttributeError(f'The field {name!r} is required but was not set.', obj=self.get_ds())
+
+ from .role_include import IncludeRole
+
+ if not attribute.always_post_validate and isinstance(self, IncludeRole) and self.statically_loaded: # import_role
+ # normal field attributes should not go through post validation on import_role/import_tasks
+ # only import_role is checked here because import_tasks never reaches this point
+ return Sentinel
+
+ # Skip post validation unless always_post_validate is True, or the object requires post validation.
+ if not attribute.always_post_validate and not self._post_validate_object:
+ # Intermediate objects like Play() won't have their fields validated by
+ # default, as their values are often inherited by other objects and validated
+ # later, so we don't want them to fail out early
+ return Sentinel
+
+ try:
+ # Run the post-validator if present. These methods are responsible for
+ # using the given templar to template the values, if required.
+ method = getattr(self, '_post_validate_%s' % name, None)
+
+ if method:
+ value = method(attribute, getattr(self, name), templar)
+ elif attribute.isa == 'class':
+ value = getattr(self, name)
+ else:
+ try:
# if the attribute contains a variable, template it now
value = templar.template(getattr(self, name))
+ except AnsibleValueOmittedError:
+ # If this evaluated to the omit value, set the value back to inherited by context
+ # or default specified in the FieldAttribute and move on
+ value = self.set_to_context(name)
- # If this evaluated to the omit value, set the value back to inherited by context
- # or default specified in the FieldAttribute and move on
- if omit_value is not None and value == omit_value:
- self.set_to_context(name)
- continue
+ if value is Sentinel:
+ return value
- # and make sure the attribute is of the type it should be
- if value is not None:
- value = self.get_validated_value(name, attribute, value, templar)
+ # and make sure the attribute is of the type it should be
+ if value is not None:
+ value = self.get_validated_value(name, attribute, value, templar)
- # and assign the massaged value back to the attribute field
- setattr(self, name, value)
- except (TypeError, ValueError) as e:
- value = getattr(self, name)
- raise AnsibleParserError(f"the field '{name}' has an invalid value ({value!r}), and could not be converted to {attribute.isa}.",
- obj=self.get_ds(), orig_exc=e)
- except (AnsibleUndefinedVariable, UndefinedError) as e:
- if templar._fail_on_undefined_errors and name != 'name':
- if name == 'args':
- msg = "The task includes an option with an undefined variable."
- else:
- msg = f"The field '{name}' has an invalid value, which includes an undefined variable."
- raise AnsibleParserError(msg, obj=self.get_ds(), orig_exc=e)
+ # returning the value results in assigning the massaged value back to the attribute field
+ return value
+ except Exception as ex:
+ if name == 'args':
+ raise # no useful information to contribute, raise the original exception
- self._finalized = True
+ raise AnsibleFieldAttributeError(f'Error processing keyword {name!r}.', obj=original_value) from ex
def _load_vars(self, attr, ds):
"""
Vars in a play must be specified as a dictionary.
"""
- def _validate_variable_keys(ds):
- for key in ds:
- if not isidentifier(key):
- raise TypeError("'%s' is not a valid variable name" % key)
-
try:
if isinstance(ds, dict):
- _validate_variable_keys(ds)
+ for key in ds:
+ validate_variable_name(key)
return combine_vars(self.vars, ds)
elif ds is None:
return {}
else:
raise ValueError
- except ValueError as e:
- raise AnsibleParserError("Vars in a %s must be specified as a dictionary" % self.__class__.__name__,
- obj=ds, orig_exc=e)
- except TypeError as e:
- raise AnsibleParserError("Invalid variable name in vars specified for %s: %s" % (self.__class__.__name__, e), obj=ds, orig_exc=e)
+ except ValueError as ex:
+ raise AnsibleParserError(f"Vars in a {self.__class__.__name__} must be specified as a dictionary.", obj=ds) from ex
+ except TypeError as ex:
+ raise AnsibleParserError(f"Invalid variable name in vars specified for {self.__class__.__name__}.", obj=ds) from ex
def _extend_value(self, value, new_value, prepend=False):
"""
@@ -654,6 +679,8 @@ class FieldAttributeBase:
setattr(self, attr, obj)
else:
setattr(self, attr, value)
+ else:
+ setattr(self, attr, value) # overridden dump_attrs in derived types may dump attributes which are not field attributes
# from_attrs is only used to create a finalized task
# from attrs from the Worker/TaskExecutor
@@ -713,7 +740,7 @@ class Base(FieldAttributeBase):
remote_user = FieldAttribute(isa='string', default=context.cliargs_deferred_get('remote_user'))
# variables
- vars = NonInheritableFieldAttribute(isa='dict', priority=100, static=True)
+ vars = NonInheritableFieldAttribute(isa='dict', priority=100, static=True, default=dict)
# module default params
module_defaults = FieldAttribute(isa='list', extend=True, prepend=True)
@@ -743,17 +770,43 @@ class Base(FieldAttributeBase):
# used to hold sudo/su stuff
DEPRECATED_ATTRIBUTES = [] # type: list[str]
- def get_path(self):
+ def update_result_no_log(self, templar: TemplateEngine, result: dict[str, t.Any]) -> None:
+ """Set the post-validated no_log value for the result, falling back to a default on validation/templating failure with a warning."""
+
+ if self.finalized:
+ no_log = self.no_log
+ else:
+ try:
+ no_log = self.post_validate_attribute('no_log', templar=templar)
+ except Exception as ex:
+ display.error_as_warning('Invalid no_log value for task, output will be masked.', exception=ex)
+ no_log = True
+
+ result_no_log = result.get('_ansible_no_log', False)
+
+ if not isinstance(result_no_log, bool):
+ display.warning(f'Invalid _ansible_no_log value of type {type(result_no_log).__name__!r} in task result, output will be masked.')
+ no_log = True
+
+ no_log = no_log or result_no_log
+
+ result.update(_ansible_no_log=no_log)
+
+ def get_path(self) -> str:
""" return the absolute path of the playbook object and its line number """
+ origin = self._origin
- path = ""
- try:
- path = "%s:%s" % (self._ds._data_source, self._ds._line_number)
- except AttributeError:
+ if not origin:
try:
- path = "%s:%s" % (self._parent._play._ds._data_source, self._parent._play._ds._line_number)
+ origin = self._parent._play._origin
except AttributeError:
pass
+
+ if origin and origin.path:
+ path = f"{origin.path}:{origin.line_num or 1}"
+ else:
+ path = ""
+
return path
def get_dep_chain(self):
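A recurring theme in the `base.py` changes: `omit` is no longer detected by comparing rendered values against a sentinel pulled from `available_variables`; the template engine raises `AnsibleValueOmittedError` instead. A simplified sketch of the new control flow (illustrative names, not the real API):

```python
class ValueOmittedError(Exception):
    """Stands in for AnsibleValueOmittedError in this sketch."""


def post_validate_field(render, current_value, fall_back_to_context):
    # Rendering raises when the value resolves to `omit`, so there is no
    # after-the-fact sentinel comparison; the except path mirrors the
    # set_to_context() fallback in post_validate_attribute().
    try:
        return render(current_value)
    except ValueOmittedError:
        return fall_back_to_context()
```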
diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py
index f7dd8994e2e..a47bdc31e45 100644
--- a/lib/ansible/playbook/block.py
+++ b/lib/ansible/playbook/block.py
@@ -113,6 +113,8 @@ class Block(Base, Conditional, CollectionSearch, Taggable, Notifiable, Delegatab
return super(Block, self).preprocess_data(ds)
+ # FIXME: these do nothing but augment the exception message; DRY and nuke
+
def _load_block(self, attr, ds):
try:
return load_list_of_tasks(
@@ -125,8 +127,8 @@ class Block(Base, Conditional, CollectionSearch, Taggable, Notifiable, Delegatab
loader=self._loader,
use_handlers=self._use_handlers,
)
- except AssertionError as e:
- raise AnsibleParserError("A malformed block was encountered while loading a block", obj=self._ds, orig_exc=e)
+ except AssertionError as ex:
+ raise AnsibleParserError("A malformed block was encountered while loading a block", obj=self._ds) from ex
def _load_rescue(self, attr, ds):
try:
@@ -140,8 +142,8 @@ class Block(Base, Conditional, CollectionSearch, Taggable, Notifiable, Delegatab
loader=self._loader,
use_handlers=self._use_handlers,
)
- except AssertionError as e:
- raise AnsibleParserError("A malformed block was encountered while loading rescue.", obj=self._ds, orig_exc=e)
+ except AssertionError as ex:
+ raise AnsibleParserError("A malformed block was encountered while loading rescue.", obj=self._ds) from ex
def _load_always(self, attr, ds):
try:
@@ -155,8 +157,8 @@ class Block(Base, Conditional, CollectionSearch, Taggable, Notifiable, Delegatab
loader=self._loader,
use_handlers=self._use_handlers,
)
- except AssertionError as e:
- raise AnsibleParserError("A malformed block was encountered while loading always", obj=self._ds, orig_exc=e)
+ except AssertionError as ex:
+ raise AnsibleParserError("A malformed block was encountered while loading always", obj=self._ds) from ex
def _validate_always(self, attr, name, value):
if value and not self.block:
@@ -177,7 +179,7 @@ class Block(Base, Conditional, CollectionSearch, Taggable, Notifiable, Delegatab
def _dupe_task_list(task_list, new_block):
new_task_list = []
for task in task_list:
- new_task = task.copy(exclude_parent=True)
+ new_task = task.copy(exclude_parent=True, exclude_tasks=exclude_tasks)
if task._parent:
new_task._parent = task._parent.copy(exclude_tasks=True)
if task._parent == new_block:
diff --git a/lib/ansible/playbook/collectionsearch.py b/lib/ansible/playbook/collectionsearch.py
index c6ab50907bf..d5bc9450ef2 100644
--- a/lib/ansible/playbook/collectionsearch.py
+++ b/lib/ansible/playbook/collectionsearch.py
@@ -6,11 +6,8 @@ from __future__ import annotations
from ansible.module_utils.six import string_types
from ansible.playbook.attribute import FieldAttribute
from ansible.utils.collection_loader import AnsibleCollectionConfig
-from ansible.template import is_template
from ansible.utils.display import Display
-from jinja2.nativetypes import NativeEnvironment
-
display = Display()
@@ -35,8 +32,7 @@ def _ensure_default_collection(collection_list=None):
class CollectionSearch:
# this needs to be populated before we can resolve tasks/roles/etc
- collections = FieldAttribute(isa='list', listof=string_types, priority=100, default=_ensure_default_collection,
- always_post_validate=True, static=True)
+ collections = FieldAttribute(isa='list', listof=string_types, priority=100, default=_ensure_default_collection, always_post_validate=True, static=True)
def _load_collections(self, attr, ds):
# We are always a mixin with Base, so we can validate this untemplated
@@ -49,14 +45,4 @@ class CollectionSearch:
if not ds: # don't return an empty collection list, just return None
return None
- # This duplicates static attr checking logic from post_validate()
- # because if the user attempts to template a collection name, it may
- # error before it ever gets to the post_validate() warning (e.g. trying
- # to import a role from the collection).
- env = NativeEnvironment()
- for collection_name in ds:
- if is_template(collection_name, env):
- display.warning('"collections" is not templatable, but we found: %s, '
- 'it will not be templated and will be used "as is".' % (collection_name))
-
return ds
diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py
index 21a9cf4c17c..ac59259acb3 100644
--- a/lib/ansible/playbook/conditional.py
+++ b/lib/ansible/playbook/conditional.py
@@ -17,12 +17,7 @@
from __future__ import annotations
-import typing as t
-
-from ansible.errors import AnsibleError, AnsibleUndefinedVariable
-from ansible.module_utils.common.text.converters import to_native
from ansible.playbook.attribute import FieldAttribute
-from ansible.template import Templar
from ansible.utils.display import Display
display = Display()
@@ -36,78 +31,9 @@ class Conditional:
when = FieldAttribute(isa='list', default=list, extend=True, prepend=True)
- def __init__(self, loader=None):
- # when used directly, this class needs a loader, but we want to
- # make sure we don't trample on the existing one if this class
- # is used as a mix-in with a playbook base class
- if not hasattr(self, '_loader'):
- if loader is None:
- raise AnsibleError("a loader must be specified when using Conditional() directly")
- else:
- self._loader = loader
+ def __init__(self, *args, **kwargs):
super().__init__()
def _validate_when(self, attr, name, value):
if not isinstance(value, list):
setattr(self, name, [value])
-
- def evaluate_conditional(self, templar: Templar, all_vars: dict[str, t.Any]) -> bool:
- """
- Loops through the conditionals set on this object, returning
- False if any of them evaluate as such.
- """
- return self.evaluate_conditional_with_result(templar, all_vars)[0]
-
- def evaluate_conditional_with_result(self, templar: Templar, all_vars: dict[str, t.Any]) -> tuple[bool, t.Optional[str]]:
- """Loops through the conditionals set on this object, returning
- False if any of them evaluate as such as well as the condition
- that was false.
- """
- for conditional in self.when:
- if conditional is None or conditional == "":
- res = True
- elif isinstance(conditional, bool):
- res = conditional
- else:
- try:
- res = self._check_conditional(conditional, templar, all_vars)
- except AnsibleError as e:
- raise AnsibleError(
- "The conditional check '%s' failed. The error was: %s" % (to_native(conditional), to_native(e)),
- obj=getattr(self, '_ds', None)
- )
-
- display.debug("Evaluated conditional (%s): %s" % (conditional, res))
- if not res:
- return res, conditional
-
- return True, None
-
- def _check_conditional(self, conditional: str, templar: Templar, all_vars: dict[str, t.Any]) -> bool:
- original = conditional
- templar.available_variables = all_vars
- try:
- if templar.is_template(conditional):
- display.warning(
- "conditional statements should not include jinja2 "
- "templating delimiters such as {{ }} or {%% %%}. "
- "Found: %s" % conditional
- )
- conditional = templar.template(conditional)
- if isinstance(conditional, bool):
- return conditional
- elif conditional == "":
- return False
-
- # If the result of the first-pass template render (to resolve inline templates) is marked unsafe,
- # explicitly disable lookups on the final pass to prevent evaluation of untrusted content in the
- # constructed template.
- disable_lookups = hasattr(conditional, '__UNSAFE__')
-
- # NOTE The spaces around True and False are intentional to short-circuit literal_eval for
- # jinja2_native=False and avoid its expensive calls.
- return templar.template(
- "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional,
- disable_lookups=disable_lookups).strip() == "True"
- except AnsibleUndefinedVariable as e:
- raise AnsibleUndefinedVariable("error while evaluating conditional (%s): %s" % (original, e))
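The removed `_check_conditional` also documents why `when:` expressions must not contain `{{ }}` delimiters: the bare expression was interpolated verbatim into an `{% if %}` template, so pre-rendered values would be evaluated twice. An illustration with plain Jinja2:

```python
import jinja2

env = jinja2.Environment()
conditional = "x > 1"  # a bare Jinja expression, as `when:` expects

# Mirrors the removed wrapping; note the %% escapes for printf-style formatting.
template = env.from_string("{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional)
print(template.render(x=2).strip() == "True")  # True
```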
diff --git a/lib/ansible/playbook/helpers.py b/lib/ansible/playbook/helpers.py
index 6686d4f2423..f4d7a82a8ec 100644
--- a/lib/ansible/playbook/helpers.py
+++ b/lib/ansible/playbook/helpers.py
@@ -21,9 +21,9 @@ import os
from ansible import constants as C
from ansible.errors import AnsibleParserError, AnsibleUndefinedVariable, AnsibleAssertionError
-from ansible.module_utils.common.text.converters import to_native
from ansible.parsing.mod_args import ModuleArgsParser
from ansible.utils.display import Display
+from ansible._internal._templating._engine import TemplateEngine
display = Display()
@@ -92,7 +92,6 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role_include import IncludeRole
from ansible.playbook.handler_task_include import HandlerTaskInclude
- from ansible.template import Templar
if not isinstance(ds, list):
raise AnsibleAssertionError('The ds (%s) should be a list but was a %s' % (ds, type(ds)))
@@ -105,7 +104,7 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
if 'block' in task_ds:
if use_handlers:
raise AnsibleParserError("Using a block as a handler is not supported.", obj=task_ds)
- t = Block.load(
+ task = Block.load(
task_ds,
play=play,
parent_block=block,
@@ -115,18 +114,20 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
variable_manager=variable_manager,
loader=loader,
)
- task_list.append(t)
+ task_list.append(task)
else:
args_parser = ModuleArgsParser(task_ds)
try:
(action, args, delegate_to) = args_parser.parse(skip_action_validation=True)
- except AnsibleParserError as e:
+ except AnsibleParserError as ex:
# if the raised exception was created with obj=ds args, then it includes the detail
# so we don't need to add it and can just re-raise.
- if e.obj:
+ if ex.obj is not None:
raise
# But if it wasn't, we can add the yaml object now to get more detail
- raise AnsibleParserError(to_native(e), obj=task_ds, orig_exc=e)
+            # DTFIX-FUTURE: this *should* be unnecessary; check code coverage.
+ # Will definitely be unnecessary once we have proper contexts to consult.
+ raise AnsibleParserError("Error loading tasks.", obj=task_ds) from ex
if action in C._ACTION_ALL_INCLUDE_IMPORT_TASKS:
@@ -135,7 +136,7 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
else:
include_class = TaskInclude
- t = include_class.load(
+ task = include_class.load(
task_ds,
block=block,
role=role,
@@ -144,16 +145,16 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
loader=loader
)
- all_vars = variable_manager.get_vars(play=play, task=t)
- templar = Templar(loader=loader, variables=all_vars)
+ all_vars = variable_manager.get_vars(play=play, task=task)
+ templar = TemplateEngine(loader=loader, variables=all_vars)
# check to see if this include is dynamic or static:
if action in C._ACTION_IMPORT_TASKS:
- if t.loop is not None:
+ if task.loop is not None:
raise AnsibleParserError("You cannot use loops on 'import_tasks' statements. You should use 'include_tasks' instead.", obj=task_ds)
# we set a flag to indicate this include was static
- t.statically_loaded = True
+ task.statically_loaded = True
# handle relative includes by walking up the list of parent include
# tasks and checking the relative result to see if it exists
@@ -168,26 +169,14 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
if not isinstance(parent_include, TaskInclude):
parent_include = parent_include._parent
continue
- try:
- parent_include_dir = os.path.dirname(templar.template(parent_include.args.get('_raw_params')))
- except AnsibleUndefinedVariable as e:
- if not parent_include.statically_loaded:
- raise AnsibleParserError(
- "Error when evaluating variable in dynamic parent include path: %s. "
- "When using static imports, the parent dynamic include cannot utilize host facts "
- "or variables from inventory" % parent_include.args.get('_raw_params'),
- obj=task_ds,
- suppress_extended_error=True,
- orig_exc=e
- )
- raise
+ parent_include_dir = os.path.dirname(parent_include.args.get('_raw_params'))
if cumulative_path is None:
cumulative_path = parent_include_dir
elif not os.path.isabs(cumulative_path):
cumulative_path = os.path.join(parent_include_dir, cumulative_path)
- include_target = templar.template(t.args['_raw_params'])
- if t._role:
- new_basedir = os.path.join(t._role._role_path, subdir, cumulative_path)
+ include_target = templar.template(task.args['_raw_params'])
+ if task._role:
+ new_basedir = os.path.join(task._role._role_path, subdir, cumulative_path)
include_file = loader.path_dwim_relative(new_basedir, subdir, include_target)
else:
include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)
@@ -200,22 +189,21 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
if not found:
try:
- include_target = templar.template(t.args['_raw_params'])
- except AnsibleUndefinedVariable as e:
+ include_target = templar.template(task.args['_raw_params'])
+ except AnsibleUndefinedVariable as ex:
raise AnsibleParserError(
- "Error when evaluating variable in import path: %s.\n\n"
- "When using static imports, ensure that any variables used in their names are defined in vars/vars_files\n"
+ message=f"Error when evaluating variable in import path {task.args['_raw_params']!r}.",
+ help_text="When using static imports, ensure that any variables used in their names are defined in vars/vars_files\n"
"or extra-vars passed in from the command line. Static imports cannot use variables from facts or inventory\n"
- "sources like group or host vars." % t.args['_raw_params'],
+ "sources like group or host vars.",
obj=task_ds,
- suppress_extended_error=True,
- orig_exc=e)
- if t._role:
- include_file = loader.path_dwim_relative(t._role._role_path, subdir, include_target)
+ ) from ex
+ if task._role:
+ include_file = loader.path_dwim_relative(task._role._role_path, subdir, include_target)
else:
include_file = loader.path_dwim(include_target)
- data = loader.load_from_file(include_file)
+ data = loader.load_from_file(include_file, trusted_as_template=True)
if not data:
display.warning('file %s is empty and had no tasks to include' % include_file)
continue
@@ -228,7 +216,7 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
# nested includes, and we want the include order printed correctly
display.vv("statically imported: %s" % include_file)
- ti_copy = t.copy(exclude_parent=True)
+ ti_copy = task.copy(exclude_parent=True)
ti_copy._parent = block
included_blocks = load_list_of_blocks(
data,
@@ -246,7 +234,7 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
# now we extend the tags on each of the included blocks
for b in included_blocks:
b.tags = list(set(b.tags).union(tags))
- # END FIXME
+ # FIXME - END
# FIXME: handlers shouldn't need this special handling, but do
# right now because they don't iterate blocks correctly
@@ -256,7 +244,7 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
else:
task_list.extend(included_blocks)
else:
- task_list.append(t)
+ task_list.append(task)
elif action in C._ACTION_ALL_PROPER_INCLUDE_IMPORT_ROLES:
if use_handlers:
@@ -280,7 +268,7 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
# template the role name now, if needed
all_vars = variable_manager.get_vars(play=play, task=ir)
- templar = Templar(loader=loader, variables=all_vars)
+ templar = TemplateEngine(loader=loader, variables=all_vars)
ir.post_validate(templar=templar)
ir._role_name = templar.template(ir._role_name)
@@ -292,15 +280,15 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h
task_list.append(ir)
else:
if use_handlers:
- t = Handler.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
- if t.action in C._ACTION_META and t.args.get('_raw_params') == "end_role":
- raise AnsibleParserError("Cannot execute 'end_role' from a handler")
+ task = Handler.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
+ if task._get_meta() == "end_role":
+ raise AnsibleParserError("Cannot execute 'end_role' from a handler", obj=task)
else:
- t = Task.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
- if t.action in C._ACTION_META and t.args.get('_raw_params') == "end_role" and role is None:
- raise AnsibleParserError("Cannot execute 'end_role' from outside of a role")
+ task = Task.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
+ if task._get_meta() == "end_role" and role is None:
+ raise AnsibleParserError("Cannot execute 'end_role' from outside of a role", obj=task)
- task_list.append(t)
+ task_list.append(task)
return task_list
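
A rough sketch of the parent-include path walk above: the dirname of each enclosing include file accumulates (innermost first) until an absolute path forms, then the target resolves against the role or playbook basedir. Names are hypothetical; the real code defers to DataLoader.path_dwim_relative:

    import os

    def resolve_static_include(parent_include_files: list[str], include_target: str, basedir: str) -> str:
        cumulative_path = None
        for parent_file in parent_include_files:  # innermost parent first
            parent_dir = os.path.dirname(parent_file)
            if cumulative_path is None:
                cumulative_path = parent_dir
            elif not os.path.isabs(cumulative_path):
                cumulative_path = os.path.join(parent_dir, cumulative_path)
        return os.path.join(basedir, cumulative_path or "", include_target)

    print(resolve_static_include(["includes/a.yml"], "b.yml", "/playbooks"))  # /playbooks/includes/b.yml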
diff --git a/lib/ansible/playbook/included_file.py b/lib/ansible/playbook/included_file.py
index d2fdb76364d..ace611d86f2 100644
--- a/lib/ansible/playbook/included_file.py
+++ b/lib/ansible/playbook/included_file.py
@@ -21,35 +21,42 @@ import os
from ansible import constants as C
from ansible.errors import AnsibleError
-from ansible.executor.task_executor import remove_omit
+from ansible.executor.task_result import _RawTaskResult
+from ansible.inventory.host import Host
from ansible.module_utils.common.text.converters import to_text
+from ansible.parsing.dataloader import DataLoader
from ansible.playbook.handler import Handler
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role_include import IncludeRole
-from ansible.template import Templar
+from ansible._internal._templating._engine import TemplateEngine
from ansible.utils.display import Display
+from ansible.vars.manager import VariableManager
display = Display()
class IncludedFile:
- def __init__(self, filename, args, vars, task, is_role=False):
+ def __init__(self, filename, args, vars, task, is_role: bool = False) -> None:
self._filename = filename
self._args = args
self._vars = vars
self._task = task
- self._hosts = []
+ self._hosts: list[Host] = []
self._is_role = is_role
- self._results = []
+ self._results: list[_RawTaskResult] = []
- def add_host(self, host):
+ def add_host(self, host: Host) -> None:
if host not in self._hosts:
self._hosts.append(host)
return
+
raise ValueError()
def __eq__(self, other):
+ if not isinstance(other, IncludedFile):
+ return False
+
return (other._filename == self._filename and
other._args == self._args and
other._vars == self._vars and
@@ -60,23 +67,28 @@ class IncludedFile:
return "%s (args=%s vars=%s): %s" % (self._filename, self._args, self._vars, self._hosts)
@staticmethod
- def process_include_results(results, iterator, loader, variable_manager):
- included_files = []
- task_vars_cache = {}
+ def process_include_results(
+ results: list[_RawTaskResult],
+ iterator,
+ loader: DataLoader,
+ variable_manager: VariableManager,
+ ) -> list[IncludedFile]:
+ included_files: list[IncludedFile] = []
+ task_vars_cache: dict[tuple, dict] = {}
for res in results:
- original_host = res._host
- original_task = res._task
+ original_host = res.host
+ original_task = res.task
if original_task.action in C._ACTION_ALL_INCLUDES:
if original_task.loop:
- if 'results' not in res._result:
+ if 'results' not in res._return_data:
continue
- include_results = res._result['results']
+ include_results = res._loop_results
else:
- include_results = [res._result]
+ include_results = [res._return_data]
for include_result in include_results:
# if the task result was skipped or failed, continue
@@ -114,7 +126,7 @@ class IncludedFile:
if loader.get_basedir() not in task_vars['ansible_search_path']:
task_vars['ansible_search_path'].append(loader.get_basedir())
- templar = Templar(loader=loader, variables=task_vars)
+ templar = TemplateEngine(loader=loader, variables=task_vars)
if original_task.action in C._ACTION_INCLUDE_TASKS:
include_file = None
@@ -132,6 +144,8 @@ class IncludedFile:
parent_include_dir = parent_include._role_path
else:
try:
+ # FUTURE: Since the parent include path has already been resolved, it should be used here.
+ # Unfortunately it's not currently stored anywhere, so it must be calculated again.
parent_include_dir = os.path.dirname(templar.template(parent_include.args.get('_raw_params')))
except AnsibleError as e:
parent_include_dir = ''
@@ -144,7 +158,7 @@ class IncludedFile:
cumulative_path = os.path.join(parent_include_dir, cumulative_path)
else:
cumulative_path = parent_include_dir
- include_target = templar.template(include_result['include'])
+ include_target = include_result['include']
if original_task._role:
dirname = 'handlers' if isinstance(original_task, Handler) else 'tasks'
new_basedir = os.path.join(original_task._role._role_path, dirname, cumulative_path)
@@ -170,7 +184,7 @@ class IncludedFile:
if include_file is None:
if original_task._role:
- include_target = templar.template(include_result['include'])
+ include_target = include_result['include']
include_file = loader.path_dwim_relative(
original_task._role._role_path,
'handlers' if isinstance(original_task, Handler) else 'tasks',
@@ -179,25 +193,17 @@ class IncludedFile:
else:
include_file = loader.path_dwim(include_result['include'])
- include_file = templar.template(include_file)
inc_file = IncludedFile(include_file, include_args, special_vars, original_task)
else:
# template the included role's name here
role_name = include_args.pop('name', include_args.pop('role', None))
- if role_name is not None:
- role_name = templar.template(role_name)
-
new_task = original_task.copy()
new_task.post_validate(templar=templar)
new_task._role_name = role_name
for from_arg in new_task.FROM_ARGS:
if from_arg in include_args:
from_key = from_arg.removesuffix('_from')
- new_task._from_files[from_key] = templar.template(include_args.pop(from_arg))
-
- omit_token = task_vars.get('omit')
- if omit_token:
- new_task._from_files = remove_omit(new_task._from_files, omit_token)
+ new_task._from_files[from_key] = include_args.pop(from_arg)
inc_file = IncludedFile(role_name, include_args, special_vars, new_task, is_role=True)
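
The grouping relies on the __eq__ above: task results from different hosts that resolve to the same filename/args/vars collapse into a single IncludedFile whose host list grows. A simplified standalone illustration (dataclass stand-ins for the real types):

    from dataclasses import dataclass, field

    @dataclass
    class SimpleIncludedFile:
        filename: str
        args: dict
        hosts: list[str] = field(default_factory=list)

    def group_results(results: list[tuple[str, str, dict]]) -> list[SimpleIncludedFile]:
        included: list[SimpleIncludedFile] = []
        for host, filename, args in results:
            for inc in included:
                if inc.filename == filename and inc.args == args:
                    break
            else:
                inc = SimpleIncludedFile(filename, args)
                included.append(inc)
            if host not in inc.hosts:
                inc.hosts.append(host)
        return included

    grouped = group_results([("h1", "a.yml", {}), ("h2", "a.yml", {}), ("h1", "b.yml", {})])
    assert [inc.hosts for inc in grouped] == [["h1", "h2"], ["h1"]]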
diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py
index a76365bfcc3..461a0a39258 100644
--- a/lib/ansible/playbook/play.py
+++ b/lib/ansible/playbook/play.py
@@ -19,8 +19,8 @@ from __future__ import annotations
from ansible import constants as C
from ansible import context
-from ansible.errors import AnsibleParserError, AnsibleAssertionError, AnsibleError
-from ansible.module_utils.common.text.converters import to_native
+from ansible.errors import AnsibleError
+from ansible.errors import AnsibleParserError, AnsibleAssertionError
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.six import binary_type, string_types, text_type
from ansible.playbook.attribute import NonInheritableFieldAttribute
@@ -31,7 +31,7 @@ from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
from ansible.playbook.role import Role
from ansible.playbook.task import Task
from ansible.playbook.taggable import Taggable
-from ansible.vars.manager import preprocess_vars
+from ansible.parsing.vault import EncryptedString
from ansible.utils.display import Display
display = Display()
@@ -123,7 +123,7 @@ class Play(Base, Taggable, CollectionSearch):
elif not isinstance(entry, (binary_type, text_type)):
raise AnsibleParserError("Hosts list contains an invalid host value: '{host!s}'".format(host=entry))
- elif not isinstance(value, (binary_type, text_type)):
+ elif not isinstance(value, (binary_type, text_type, EncryptedString)):
raise AnsibleParserError("Hosts list must be a sequence or string. Please check your playbook.")
def get_name(self):
@@ -168,6 +168,8 @@ class Play(Base, Taggable, CollectionSearch):
return super(Play, self).preprocess_data(ds)
+ # DTFIX-FUTURE: these do nothing but augment the exception message; DRY and nuke
+
def _load_tasks(self, attr, ds):
"""
Loads a list of blocks from a list which may be mixed tasks/blocks.
@@ -175,8 +177,8 @@ class Play(Base, Taggable, CollectionSearch):
"""
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
- except AssertionError as e:
- raise AnsibleParserError("A malformed block was encountered while loading tasks: %s" % to_native(e), obj=self._ds, orig_exc=e)
+ except AssertionError as ex:
+ raise AnsibleParserError("A malformed block was encountered while loading tasks.", obj=self._ds) from ex
def _load_pre_tasks(self, attr, ds):
"""
@@ -185,8 +187,8 @@ class Play(Base, Taggable, CollectionSearch):
"""
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
- except AssertionError as e:
- raise AnsibleParserError("A malformed block was encountered while loading pre_tasks", obj=self._ds, orig_exc=e)
+ except AssertionError as ex:
+ raise AnsibleParserError("A malformed block was encountered while loading pre_tasks.", obj=self._ds) from ex
def _load_post_tasks(self, attr, ds):
"""
@@ -195,8 +197,8 @@ class Play(Base, Taggable, CollectionSearch):
"""
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
- except AssertionError as e:
- raise AnsibleParserError("A malformed block was encountered while loading post_tasks", obj=self._ds, orig_exc=e)
+ except AssertionError as ex:
+ raise AnsibleParserError("A malformed block was encountered while loading post_tasks.", obj=self._ds) from ex
def _load_handlers(self, attr, ds):
"""
@@ -209,8 +211,8 @@ class Play(Base, Taggable, CollectionSearch):
load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader),
prepend=True
)
- except AssertionError as e:
- raise AnsibleParserError("A malformed block was encountered while loading handlers", obj=self._ds, orig_exc=e)
+ except AssertionError as ex:
+ raise AnsibleParserError("A malformed block was encountered while loading handlers.", obj=self._ds) from ex
def _load_roles(self, attr, ds):
"""
@@ -224,8 +226,8 @@ class Play(Base, Taggable, CollectionSearch):
try:
role_includes = load_list_of_roles(ds, play=self, variable_manager=self._variable_manager,
loader=self._loader, collection_search_list=self.collections)
- except AssertionError as e:
- raise AnsibleParserError("A malformed role declaration was encountered.", obj=self._ds, orig_exc=e)
+ except AssertionError as ex:
+ raise AnsibleParserError("A malformed role declaration was encountered.", obj=self._ds) from ex
roles = []
for ri in role_includes:
@@ -236,6 +238,9 @@ class Play(Base, Taggable, CollectionSearch):
return self.roles
def _load_vars_prompt(self, attr, ds):
+ # avoid circular dep
+ from ansible.vars.manager import preprocess_vars
+
new_ds = preprocess_vars(ds)
vars_prompts = []
if new_ds is not None:
@@ -296,7 +301,7 @@ class Play(Base, Taggable, CollectionSearch):
# of the playbook execution
flush_block = Block(play=self)
- t = Task()
+ t = Task(block=flush_block)
t.action = 'meta'
t.resolved_action = 'ansible.builtin.meta'
t.args['_raw_params'] = 'flush_handlers'
@@ -316,6 +321,9 @@ class Play(Base, Taggable, CollectionSearch):
else:
flush_block.block = [t]
+        # NOTE: keep flush_handlers tasks even if a section has no regular tasks;
+        # there may be notified handlers from the previous section
+        # (typically when a handler notifies a handler defined earlier)
block_list = []
if self.force_handlers:
noop_task = Task()
@@ -325,18 +333,33 @@ class Play(Base, Taggable, CollectionSearch):
noop_task.set_loader(self._loader)
b = Block(play=self)
- b.block = self.pre_tasks or [noop_task]
+ if self.pre_tasks:
+ b.block = self.pre_tasks
+ else:
+ nt = noop_task.copy(exclude_parent=True)
+ nt._parent = b
+ b.block = [nt]
b.always = [flush_block]
block_list.append(b)
tasks = self._compile_roles() + self.tasks
b = Block(play=self)
- b.block = tasks or [noop_task]
+ if tasks:
+ b.block = tasks
+ else:
+ nt = noop_task.copy(exclude_parent=True)
+ nt._parent = b
+ b.block = [nt]
b.always = [flush_block]
block_list.append(b)
b = Block(play=self)
- b.block = self.post_tasks or [noop_task]
+ if self.post_tasks:
+ b.block = self.post_tasks
+ else:
+ nt = noop_task.copy(exclude_parent=True)
+ nt._parent = b
+ b.block = [nt]
b.always = [flush_block]
block_list.append(b)
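
What the compile step above builds, in outline: pre_tasks, roles+tasks, and post_tasks each become a block whose always section carries an implicit meta: flush_handlers task, so notified handlers fire at section boundaries even when a section is empty or fails. A simplified data sketch, with dicts standing in for Block/Task objects:

    def compile_play(pre_tasks: list, tasks: list, post_tasks: list) -> list[dict]:
        flush = {"action": "meta", "raw_params": "flush_handlers"}
        noop = {"action": "meta", "raw_params": "noop"}

        def section(section_tasks: list) -> dict:
            # an empty section still gets a noop task plus the handler flush
            return {"block": section_tasks or [dict(noop)], "always": [flush]}

        return [section(pre_tasks), section(tasks), section(post_tasks)]

    for blk in compile_play([], [{"action": "debug"}], []):
        print(blk)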
diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py
index e384ce0fb2f..42ffa56a153 100644
--- a/lib/ansible/playbook/play_context.py
+++ b/lib/ansible/playbook/play_context.py
@@ -71,6 +71,8 @@ class PlayContext(Base):
connection/authentication information.
"""
+ _post_validate_object = True
+
# base
module_compression = FieldAttribute(isa='string', default=C.DEFAULT_MODULE_COMPRESSION)
shell = FieldAttribute(isa='string')
@@ -88,7 +90,7 @@ class PlayContext(Base):
# networking modules
network_os = FieldAttribute(isa='string')
- # docker FIXME: remove these
+ # FIXME: docker - remove these
docker_extra_args = FieldAttribute(isa='string')
# ???
@@ -103,10 +105,6 @@ class PlayContext(Base):
become_flags = FieldAttribute(isa='string', default=C.DEFAULT_BECOME_FLAGS)
prompt = FieldAttribute(isa='string')
- # general flags
- only_tags = FieldAttribute(isa='set', default=set)
- skip_tags = FieldAttribute(isa='set', default=set)
-
start_at_task = FieldAttribute(isa='string')
step = FieldAttribute(isa='bool', default=False)
@@ -201,8 +199,7 @@ class PlayContext(Base):
# In the case of a loop, the delegated_to host may have been
# templated based on the loop variable, so we try and locate
# the host name in the delegated variable dictionary here
- delegated_host_name = templar.template(task.delegate_to)
- delegated_vars = variables.get('ansible_delegated_vars', dict()).get(delegated_host_name, dict())
+ delegated_vars = variables.get('ansible_delegated_vars', dict()).get(task.delegate_to, dict())
delegated_transport = C.DEFAULT_TRANSPORT
for transport_var in C.MAGIC_VARIABLE_MAPPING.get('connection'):
@@ -218,8 +215,8 @@ class PlayContext(Base):
if address_var in delegated_vars:
break
else:
- display.debug("no remote address found for delegated host %s\nusing its name, so success depends on DNS resolution" % delegated_host_name)
- delegated_vars['ansible_host'] = delegated_host_name
+ display.debug("no remote address found for delegated host %s\nusing its name, so success depends on DNS resolution" % task.delegate_to)
+ delegated_vars['ansible_host'] = task.delegate_to
# reset the port back to the default if none was specified, to prevent
# the delegated host from inheriting the original host's setting
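
The address selection above walks alias lists from C.MAGIC_VARIABLE_MAPPING in priority order and, failing that, falls back to the delegated host's name (relying on DNS). A stripped-down stand-in with a hypothetical alias tuple:

    ADDRESS_ALIASES = ("ansible_host", "ansible_ssh_host")

    def delegated_address(delegated_vars: dict, delegate_to: str) -> str:
        for var in ADDRESS_ALIASES:
            if var in delegated_vars:
                return delegated_vars[var]
        # no remote address found; fall back to the delegated host's name
        return delegate_to

    assert delegated_address({"ansible_host": "10.0.0.5"}, "db01") == "10.0.0.5"
    assert delegated_address({}, "db01") == "db01"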
diff --git a/lib/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py
index 8e7c6c05082..4ec9090de65 100644
--- a/lib/ansible/playbook/playbook_include.py
+++ b/lib/ansible/playbook/playbook_include.py
@@ -19,28 +19,35 @@ from __future__ import annotations
import os
-import ansible.constants as C
-from ansible.errors import AnsibleParserError, AnsibleAssertionError
from ansible.module_utils.common.text.converters import to_bytes
-from ansible.module_utils.six import string_types
-from ansible.parsing.splitter import split_args
-from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.playbook.attribute import NonInheritableFieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.conditional import Conditional
from ansible.playbook.taggable import Taggable
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path, _get_collection_playbook_path
-from ansible.template import Templar
-from ansible.utils.display import Display
-
-display = Display()
+from ansible._internal._templating._engine import TemplateEngine
+from ansible.errors import AnsibleError
+from ansible import constants as C
class PlaybookInclude(Base, Conditional, Taggable):
- import_playbook = NonInheritableFieldAttribute(isa='string')
- vars_val = NonInheritableFieldAttribute(isa='dict', default=dict, alias='vars')
+ import_playbook = NonInheritableFieldAttribute(isa='string', required=True)
+
+ _post_validate_object = True # manually post_validate to get free arg validation/coercion
+
+ def preprocess_data(self, ds):
+ keys = {action for action in C._ACTION_IMPORT_PLAYBOOK if action in ds}
+
+ if len(keys) != 1:
+ raise AnsibleError(f'Found conflicting import_playbook actions: {", ".join(sorted(keys))}')
+
+ key = next(iter(keys))
+
+ ds['import_playbook'] = ds.pop(key)
+
+ return ds
@staticmethod
def load(data, basedir, variable_manager=None, loader=None):
@@ -62,18 +69,22 @@ class PlaybookInclude(Base, Conditional, Taggable):
new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)
all_vars = self.vars.copy()
+
if variable_manager:
all_vars |= variable_manager.get_vars()
- templar = Templar(loader=loader, variables=all_vars)
+ templar = TemplateEngine(loader=loader, variables=all_vars)
+
+ new_obj.post_validate(templar)
# then we use the object to load a Playbook
pb = Playbook(loader=loader)
- file_name = templar.template(new_obj.import_playbook)
+ file_name = new_obj.import_playbook
# check for FQCN
resource = _get_collection_playbook_path(file_name)
+
if resource is not None:
playbook = resource[1]
playbook_collection = resource[2]
@@ -92,6 +103,7 @@ class PlaybookInclude(Base, Conditional, Taggable):
else:
# it is NOT a collection playbook, setup adjacent paths
AnsibleCollectionConfig.playbook_paths.append(os.path.dirname(os.path.abspath(to_bytes(playbook, errors='surrogate_or_strict'))))
+ # broken, see: https://github.com/ansible/ansible/issues/85357
pb._load_playbook_data(file_name=playbook, variable_manager=variable_manager, vars=self.vars.copy())
@@ -120,50 +132,3 @@ class PlaybookInclude(Base, Conditional, Taggable):
task_block._when = new_obj.when[:] + task_block.when[:]
return pb
-
- def preprocess_data(self, ds):
- """
- Reorganizes the data for a PlaybookInclude datastructure to line
- up with what we expect the proper attributes to be
- """
-
- if not isinstance(ds, dict):
- raise AnsibleAssertionError('ds (%s) should be a dict but was a %s' % (ds, type(ds)))
-
- # the new, cleaned datastructure, which will have legacy
- # items reduced to a standard structure
- new_ds = AnsibleMapping()
- if isinstance(ds, AnsibleBaseYAMLObject):
- new_ds.ansible_pos = ds.ansible_pos
-
- for (k, v) in ds.items():
- if k in C._ACTION_IMPORT_PLAYBOOK:
- self._preprocess_import(ds, new_ds, k, v)
- else:
- # some basic error checking, to make sure vars are properly
- # formatted and do not conflict with k=v parameters
- if k == 'vars':
- if 'vars' in new_ds:
- raise AnsibleParserError("import_playbook parameters cannot be mixed with 'vars' entries for import statements", obj=ds)
- elif not isinstance(v, dict):
- raise AnsibleParserError("vars for import_playbook statements must be specified as a dictionary", obj=ds)
- new_ds[k] = v
-
- return super(PlaybookInclude, self).preprocess_data(new_ds)
-
- def _preprocess_import(self, ds, new_ds, k, v):
- """
- Splits the playbook import line up into filename and parameters
- """
- if v is None:
- raise AnsibleParserError("playbook import parameter is missing", obj=ds)
- elif not isinstance(v, string_types):
- raise AnsibleParserError("playbook import parameter must be a string indicating a file path, got %s instead" % type(v), obj=ds)
-
- # The import_playbook line must include at least one item, which is the filename
- # to import. Anything after that should be regarded as a parameter to the import
- items = split_args(v)
- if len(items) == 0:
- raise AnsibleParserError("import_playbook statements must specify the file name to import", obj=ds)
-
- new_ds['import_playbook'] = items[0].strip()
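
The replacement preprocess_data above reduces whichever import_playbook alias was used to a single canonical key. A standalone sketch, with the alias set as an assumption standing in for C._ACTION_IMPORT_PLAYBOOK:

    IMPORT_PLAYBOOK_ACTIONS = frozenset({"import_playbook", "ansible.builtin.import_playbook"})

    def normalize_import(ds: dict) -> dict:
        keys = {action for action in IMPORT_PLAYBOOK_ACTIONS if action in ds}
        if len(keys) != 1:
            raise ValueError(f"expected exactly one import_playbook action, found: {sorted(keys)}")
        ds["import_playbook"] = ds.pop(next(iter(keys)))
        return ds

    print(normalize_import({"ansible.builtin.import_playbook": "site.yml", "vars": {"x": 1}}))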
diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py
index 0887a77d7ab..a86bcd9234a 100644
--- a/lib/ansible/playbook/role/__init__.py
+++ b/lib/ansible/playbook/role/__init__.py
@@ -18,6 +18,7 @@
from __future__ import annotations
import os
+import typing as _t
from collections.abc import Container, Mapping, Set, Sequence
from types import MappingProxyType
@@ -27,7 +28,6 @@ from ansible.errors import AnsibleError, AnsibleParserError, AnsibleAssertionErr
from ansible.module_utils.common.sentinel import Sentinel
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.six import binary_type, text_type
-from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.collectionsearch import CollectionSearch
from ansible.playbook.conditional import Conditional
@@ -40,6 +40,16 @@ from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.path import is_subpath
from ansible.utils.vars import combine_vars
+# NOTE: This import is only needed for type-checking in __init__. While there's an alternative
+# available using forward references, it does not seem to work well with commonly used IDEs.
+# Therefore the TYPE_CHECKING hack seems to be the more universal approach, even if it is not very elegant.
+# References:
+# * https://stackoverflow.com/q/39740632/199513
+# * https://peps.python.org/pep-0484/#forward-references
+if _t.TYPE_CHECKING:
+ from ansible.playbook.block import Block
+ from ansible.playbook.play import Play
+
__all__ = ['Role', 'hash_params']
# TODO: this should be a utility function, but can't be a member of
@@ -98,13 +108,19 @@ def hash_params(params):
class Role(Base, Conditional, Taggable, CollectionSearch, Delegatable):
- def __init__(self, play=None, from_files=None, from_include=False, validate=True, public=None, static=True):
- self._role_name = None
- self._role_path = None
- self._role_collection = None
- self._role_params = dict()
+ def __init__(self,
+ play: Play = None,
+ from_files: dict[str, list[str]] = None,
+ from_include: bool = False,
+ validate: bool = True,
+ public: bool = None,
+ static: bool = True) -> None:
+ self._role_name: str = None
+ self._role_path: str = None
+ self._role_collection: str = None
+ self._role_params: dict[str, dict[str, str]] = dict()
self._loader = None
- self.static = static
+ self.static: bool = static
# includes (static=false) default to private, while imports (static=true) default to public
# but both can be overridden by global config if set
@@ -117,26 +133,26 @@ class Role(Base, Conditional, Taggable, CollectionSearch, Delegatable):
else:
self.public = public
- self._metadata = RoleMetadata()
- self._play = play
- self._parents = []
- self._dependencies = []
- self._all_dependencies = None
- self._task_blocks = []
- self._handler_blocks = []
- self._compiled_handler_blocks = None
- self._default_vars = dict()
- self._role_vars = dict()
- self._had_task_run = dict()
- self._completed = dict()
- self._should_validate = validate
+ self._metadata: RoleMetadata = RoleMetadata()
+ self._play: Play = play
+ self._parents: list[Role] = []
+ self._dependencies: list[Role] = []
+ self._all_dependencies: list[Role] | None = None
+ self._task_blocks: list[Block] = []
+ self._handler_blocks: list[Block] = []
+ self._compiled_handler_blocks: list[Block] | None = None
+ self._default_vars: dict[str, str] | None = dict()
+ self._role_vars: dict[str, str] | None = dict()
+ self._had_task_run: dict[str, bool] = dict()
+ self._completed: dict[str, bool] = dict()
+ self._should_validate: bool = validate
if from_files is None:
from_files = {}
- self._from_files = from_files
+ self._from_files: dict[str, list[str]] = from_files
# Indicates whether this role was included via include/import_role
- self.from_include = from_include
+ self.from_include: bool = from_include
self._hash = None
@@ -200,9 +216,9 @@ class Role(Base, Conditional, Taggable, CollectionSearch, Delegatable):
return r
- except RuntimeError:
+ except RecursionError as ex:
raise AnsibleError("A recursion loop was detected with the roles specified. Make sure child roles do not have dependencies on parent roles",
- obj=role_include._ds)
+ obj=role_include._ds) from ex
def _load_role_data(self, role_include, parent_role=None):
self._role_name = role_include.role
@@ -274,18 +290,17 @@ class Role(Base, Conditional, Taggable, CollectionSearch, Delegatable):
if task_data:
try:
self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader, variable_manager=self._variable_manager)
- except AssertionError as e:
- raise AnsibleParserError("The tasks/main.yml file for role '%s' must contain a list of tasks" % self._role_name,
- obj=task_data, orig_exc=e)
+ except AssertionError as ex:
+ raise AnsibleParserError(f"The tasks/main.yml file for role {self._role_name!r} must contain a list of tasks.", obj=task_data) from ex
handler_data = self._load_role_yaml('handlers', main=self._from_files.get('handlers'))
if handler_data:
try:
self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader,
variable_manager=self._variable_manager)
- except AssertionError as e:
- raise AnsibleParserError("The handlers/main.yml file for role '%s' must contain a list of tasks" % self._role_name,
- obj=handler_data, orig_exc=e)
+ except AssertionError as ex:
+ raise AnsibleParserError(f"The handlers/main.yml file for role {self._role_name!r} must contain a list of tasks.",
+ obj=handler_data) from ex
def _get_role_argspecs(self):
"""Get the role argument spec data.
@@ -359,8 +374,8 @@ class Role(Base, Conditional, Taggable, CollectionSearch, Delegatable):
task_name = task_name + ' - ' + argument_spec['short_description']
return {
- 'action': {
- 'module': 'ansible.builtin.validate_argument_spec',
+ 'action': 'ansible.builtin.validate_argument_spec',
+ 'args': {
# Pass only the 'options' portion of the arg spec to the module.
'argument_spec': argument_spec.get('options', {}),
'provided_arguments': self._role_params,
@@ -412,7 +427,7 @@ class Role(Base, Conditional, Taggable, CollectionSearch, Delegatable):
raise AnsibleParserError("Failed loading '%s' for role (%s) as it is not inside the expected role path: '%s'" %
(to_text(found), self._role_name, to_text(file_path)))
- new_data = self._loader.load_from_file(found)
+ new_data = self._loader.load_from_file(found, trusted_as_template=True)
if new_data:
if data is not None and isinstance(new_data, Mapping):
data = combine_vars(data, new_data)
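
The TYPE_CHECKING guard added above, shown in isolation: the import exists only for annotations, so no runtime import (and hence no cycle) occurs, while type checkers and IDEs still resolve the name. Here decimal stands in for a cycle-prone module:

    from __future__ import annotations

    import typing as t

    if t.TYPE_CHECKING:
        from decimal import Decimal  # never imported at runtime

    def scale(value: Decimal, factor: int) -> Decimal:
        # annotations are strings under `from __future__ import annotations`,
        # so Decimal is never evaluated here
        return value * factor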
diff --git a/lib/ansible/playbook/role/definition.py b/lib/ansible/playbook/role/definition.py
index 50758869b3b..670a4e101ca 100644
--- a/lib/ansible/playbook/role/definition.py
+++ b/lib/ansible/playbook/role/definition.py
@@ -21,14 +21,14 @@ import os
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.module_utils._internal._datatag import AnsibleTagHelper
from ansible.module_utils.six import string_types
-from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.playbook.attribute import NonInheritableFieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.collectionsearch import CollectionSearch
from ansible.playbook.conditional import Conditional
from ansible.playbook.taggable import Taggable
-from ansible.template import Templar
+from ansible._internal._templating._engine import TemplateEngine
from ansible.utils.collection_loader import AnsibleCollectionRef
from ansible.utils.collection_loader._collection_finder import _get_collection_role_path
from ansible.utils.path import unfrackpath
@@ -70,7 +70,7 @@ class RoleDefinition(Base, Conditional, Taggable, CollectionSearch):
if isinstance(ds, int):
ds = "%s" % ds
- if not isinstance(ds, dict) and not isinstance(ds, string_types) and not isinstance(ds, AnsibleBaseYAMLObject):
+ if not isinstance(ds, dict) and not isinstance(ds, string_types):
raise AnsibleAssertionError()
if isinstance(ds, dict):
@@ -79,12 +79,9 @@ class RoleDefinition(Base, Conditional, Taggable, CollectionSearch):
# save the original ds for use later
self._ds = ds
- # we create a new data structure here, using the same
- # object used internally by the YAML parsing code so we
- # can preserve file:line:column information if it exists
- new_ds = AnsibleMapping()
- if isinstance(ds, AnsibleBaseYAMLObject):
- new_ds.ansible_pos = ds.ansible_pos
+ # the new, cleaned datastructure, which will have legacy items reduced to a standard structure suitable for the
+ # attributes of the task class; copy any tagged data to preserve things like origin
+ new_ds = AnsibleTagHelper.tag_copy(ds, {})
# first we pull the role name out of the data structure,
# and then use that to determine the role path (which may
@@ -127,7 +124,7 @@ class RoleDefinition(Base, Conditional, Taggable, CollectionSearch):
# contains a variable, try and template it now
if self._variable_manager:
all_vars = self._variable_manager.get_vars(play=self._play)
- templar = Templar(loader=self._loader, variables=all_vars)
+ templar = TemplateEngine(loader=self._loader, variables=all_vars)
role_name = templar.template(role_name)
return role_name
@@ -147,7 +144,7 @@ class RoleDefinition(Base, Conditional, Taggable, CollectionSearch):
else:
all_vars = dict()
- templar = Templar(loader=self._loader, variables=all_vars)
+ templar = TemplateEngine(loader=self._loader, variables=all_vars)
role_name = templar.template(role_name)
role_tuple = None
@@ -198,6 +195,7 @@ class RoleDefinition(Base, Conditional, Taggable, CollectionSearch):
return (role_name, role_path)
searches = (self._collection_list or []) + role_search_paths
+
raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(searches)), obj=self._ds)
def _split_role_params(self, ds):
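
AnsibleTagHelper.tag_copy(ds, {}) above transplants datatag metadata (for example the origin file/line) from the parsed node onto a fresh dict. A crude analogue, with a hypothetical side-table in place of Ansible's internal tag types:

    _TAG_TABLE: dict[int, dict] = {}  # id(value) -> tags; illustration only

    def tag(value, **tags):
        _TAG_TABLE[id(value)] = dict(tags)
        return value

    def tag_copy(source, new_value):
        # give new_value whatever tags source carries
        _TAG_TABLE[id(new_value)] = dict(_TAG_TABLE.get(id(source), {}))
        return new_value

    ds = tag({"role": "common", "vars": {"x": 1}}, origin="site.yml:12")
    new_ds = tag_copy(ds, {})
    assert _TAG_TABLE[id(new_ds)] == {"origin": "site.yml:12"}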
diff --git a/lib/ansible/playbook/role/include.py b/lib/ansible/playbook/role/include.py
index 934b53ce9b4..3ab3d153a39 100644
--- a/lib/ansible/playbook/role/include.py
+++ b/lib/ansible/playbook/role/include.py
@@ -19,10 +19,8 @@ from __future__ import annotations
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.six import string_types
-from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
from ansible.playbook.delegatable import Delegatable
from ansible.playbook.role.definition import RoleDefinition
-from ansible.module_utils.common.text.converters import to_native
__all__ = ['RoleInclude']
@@ -42,8 +40,8 @@ class RoleInclude(RoleDefinition, Delegatable):
@staticmethod
def load(data, play, current_role_path=None, parent_role=None, variable_manager=None, loader=None, collection_list=None):
- if not (isinstance(data, string_types) or isinstance(data, dict) or isinstance(data, AnsibleBaseYAMLObject)):
- raise AnsibleParserError("Invalid role definition: %s" % to_native(data))
+ if not (isinstance(data, string_types) or isinstance(data, dict)):
+ raise AnsibleParserError("Invalid role definition.", obj=data)
if isinstance(data, string_types) and ',' in data:
raise AnsibleError("Invalid old style role requirement: %s" % data)
diff --git a/lib/ansible/playbook/role/metadata.py b/lib/ansible/playbook/role/metadata.py
index 6606d862c9f..0125ae2e084 100644
--- a/lib/ansible/playbook/role/metadata.py
+++ b/lib/ansible/playbook/role/metadata.py
@@ -20,7 +20,6 @@ from __future__ import annotations
import os
from ansible.errors import AnsibleParserError, AnsibleError
-from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six import string_types
from ansible.playbook.attribute import NonInheritableFieldAttribute
from ansible.playbook.base import Base
@@ -80,8 +79,8 @@ class RoleMetadata(Base, CollectionSearch):
if def_parsed.get('name'):
role_def['name'] = def_parsed['name']
roles.append(role_def)
- except AnsibleError as exc:
- raise AnsibleParserError(to_native(exc), obj=role_def, orig_exc=exc)
+ except AnsibleError as ex:
+ raise AnsibleParserError("Error parsing role dependencies.", obj=role_def) from ex
current_role_path = None
collection_search_list = None
@@ -105,8 +104,8 @@ class RoleMetadata(Base, CollectionSearch):
return load_list_of_roles(roles, play=self._owner._play, current_role_path=current_role_path,
variable_manager=self._variable_manager, loader=self._loader,
collection_search_list=collection_search_list)
- except AssertionError as e:
- raise AnsibleParserError("A malformed list of role dependencies was encountered.", obj=self._ds, orig_exc=e)
+ except AssertionError as ex:
+ raise AnsibleParserError("A malformed list of role dependencies was encountered.", obj=self._ds) from ex
def serialize(self):
return dict(
diff --git a/lib/ansible/playbook/role_include.py b/lib/ansible/playbook/role_include.py
index 1894d6df8f9..48003db7dff 100644
--- a/lib/ansible/playbook/role_include.py
+++ b/lib/ansible/playbook/role_include.py
@@ -24,7 +24,7 @@ from ansible.playbook.role import Role
from ansible.playbook.role.include import RoleInclude
from ansible.utils.display import Display
from ansible.module_utils.six import string_types
-from ansible.template import Templar
+from ansible._internal._templating._engine import TemplateEngine
__all__ = ['IncludeRole']
@@ -79,7 +79,7 @@ class IncludeRole(TaskInclude):
available_variables = variable_manager.get_vars(play=myplay, task=self)
else:
available_variables = {}
- templar = Templar(loader=loader, variables=available_variables)
+ templar = TemplateEngine(loader=loader, variables=available_variables)
from_files = templar.template(self._from_files)
# build role
diff --git a/lib/ansible/playbook/taggable.py b/lib/ansible/playbook/taggable.py
index 79810a41eaf..5823b775947 100644
--- a/lib/ansible/playbook/taggable.py
+++ b/lib/ansible/playbook/taggable.py
@@ -17,14 +17,16 @@
from __future__ import annotations
+import typing as t
+
from ansible.errors import AnsibleError
-from ansible.module_utils.six import string_types
from ansible.module_utils.common.sentinel import Sentinel
+from ansible.module_utils._internal._datatag import AnsibleTagHelper
from ansible.playbook.attribute import FieldAttribute
-from ansible.template import Templar
+from ansible._internal._templating._engine import TemplateEngine
-def _flatten_tags(tags: list) -> list:
+def _flatten_tags(tags: list[str | int]) -> list[str | int]:
rv = set()
for tag in tags:
if isinstance(tag, list):
@@ -37,26 +39,39 @@ def _flatten_tags(tags: list) -> list:
class Taggable:
untagged = frozenset(['untagged'])
- tags = FieldAttribute(isa='list', default=list, listof=(string_types, int), extend=True)
+ tags = FieldAttribute(isa='list', default=list, listof=(str, int), extend=True)
def _load_tags(self, attr, ds):
if isinstance(ds, list):
return ds
- elif isinstance(ds, string_types):
- return [x.strip() for x in ds.split(',')]
- else:
- raise AnsibleError('tags must be specified as a list', obj=ds)
+
+ if isinstance(ds, str):
+ return [AnsibleTagHelper.tag_copy(ds, item.strip()) for item in ds.split(',')]
+
+ raise AnsibleError('tags must be specified as a list', obj=ds)
+
+ def _get_all_taggable_objects(self) -> t.Iterable[Taggable]:
+ obj = self
+ while obj is not None:
+ yield obj
+
+ if (role := getattr(obj, "_role", Sentinel)) is not Sentinel:
+ yield role # type: ignore[misc]
+
+ obj = obj._parent
+
+ yield self.get_play()
def evaluate_tags(self, only_tags, skip_tags, all_vars):
- """ this checks if the current item should be executed depending on tag options """
+ """Check if the current item should be executed depending on the specified tags.
+        NOTE: this method is assumed to be called only on Task objects.
+ """
if self.tags:
- templar = Templar(loader=self._loader, variables=all_vars)
- obj = self
- while obj is not None:
+ templar = TemplateEngine(loader=self._loader, variables=all_vars)
+ for obj in self._get_all_taggable_objects():
if (_tags := getattr(obj, "_tags", Sentinel)) is not Sentinel:
obj._tags = _flatten_tags(templar.template(_tags))
- obj = obj._parent
tags = set(self.tags)
else:
# this makes isdisjoint work for untagged
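
The net effect of evaluate_tags, simplified (the special 'all' and 'always' tags are handled more subtly upstream): a task runs when its tags clear skip_tags and intersect only_tags, with untagged tasks getting the implicit 'untagged' tag seen above:

    def should_run(task_tags: set[str], only_tags: set[str], skip_tags: set[str]) -> bool:
        tags = task_tags or {"untagged"}
        if skip_tags and not tags.isdisjoint(skip_tags):
            return False
        if only_tags and "all" not in only_tags:
            return not tags.isdisjoint(only_tags)
        return True

    assert should_run({"setup"}, {"setup"}, set()) is True
    assert should_run(set(), set(), {"untagged"}) is False
    assert should_run({"deploy"}, {"all"}, set()) is True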
diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py
index 3f43bfbe7ca..4f97e268194 100644
--- a/lib/ansible/playbook/task.py
+++ b/lib/ansible/playbook/task.py
@@ -17,14 +17,18 @@
from __future__ import annotations
+import typing as t
+
from ansible import constants as C
-from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleAssertionError
from ansible.module_utils.common.sentinel import Sentinel
+from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleAssertionError, AnsibleValueOmittedError
+from ansible.executor.module_common import _get_action_arg_defaults
from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils._internal._datatag import AnsibleTagHelper
from ansible.module_utils.six import string_types
-from ansible.parsing.mod_args import ModuleArgsParser
-from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
-from ansible.plugins.loader import lookup_loader
+from ansible.parsing.mod_args import ModuleArgsParser, RAW_PARAM_MODULES
+from ansible.plugins.action import ActionBase
+from ansible.plugins.loader import action_loader, module_loader, lookup_loader
from ansible.playbook.attribute import NonInheritableFieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.block import Block
@@ -35,10 +39,14 @@ from ansible.playbook.loop_control import LoopControl
from ansible.playbook.notifiable import Notifiable
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
+from ansible._internal import _task
+from ansible._internal._templating import _marker_behaviors
+from ansible._internal._templating._jinja_bits import is_possibly_all_template
+from ansible._internal._templating._engine import TemplateEngine, TemplateOptions
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.display import Display
-from ansible.utils.vars import isidentifier
+from ansible.utils.vars import validate_variable_name
__all__ = ['Task']
@@ -57,6 +65,8 @@ class Task(Base, Conditional, Taggable, CollectionSearch, Notifiable, Delegatabl
Task.something(...)
"""
+ _post_validate_object = True
+
# =================================================================================
# ATTRIBUTES
# load_ and
@@ -68,8 +78,8 @@ class Task(Base, Conditional, Taggable, CollectionSearch, Notifiable, Delegatabl
# inheritance is only triggered if the 'current value' is Sentinel,
# default can be set at play/top level object and inheritance will take its course.
- args = NonInheritableFieldAttribute(isa='dict', default=dict)
- action = NonInheritableFieldAttribute(isa='string')
+ args = t.cast(dict, NonInheritableFieldAttribute(isa='dict', default=dict))
+ action = t.cast(str, NonInheritableFieldAttribute(isa='string'))
async_val = NonInheritableFieldAttribute(isa='int', default=0, alias='async')
changed_when = NonInheritableFieldAttribute(isa='list', default=list)
@@ -85,13 +95,13 @@ class Task(Base, Conditional, Taggable, CollectionSearch, Notifiable, Delegatabl
# deprecated, used to be loop and loop_args but loop has been repurposed
loop_with = NonInheritableFieldAttribute(isa='string', private=True)
- def __init__(self, block=None, role=None, task_include=None):
+    def __init__(self, block=None, role=None, task_include=None) -> None:
        """ constructs a task; without the Task.load classmethod, it will be pretty blank """
self._role = role
self._parent = None
self.implicit = False
- self.resolved_action = None
+ self.resolved_action: str | None = None
if task_include:
self._parent = task_include
@@ -132,13 +142,80 @@ class Task(Base, Conditional, Taggable, CollectionSearch, Notifiable, Delegatabl
@staticmethod
def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
- t = Task(block=block, role=role, task_include=task_include)
- return t.load_data(data, variable_manager=variable_manager, loader=loader)
+ task = Task(block=block, role=role, task_include=task_include)
+ return task.load_data(data, variable_manager=variable_manager, loader=loader)
+
+ def _post_validate_module_defaults(self, attr: str, value: t.Any, templar: TemplateEngine) -> t.Any:
+ """Override module_defaults post validation to disable templating, which is handled by args post validation."""
+ return value
+
+ def _post_validate_args(self, attr: str, value: t.Any, templar: TemplateEngine) -> dict[str, t.Any]:
+ try:
+ self.action = templar.template(self.action)
+ except AnsibleValueOmittedError:
+ # some strategies may trigger this error when templating task.action, but backstop here if not
+ raise AnsibleParserError("Omit is not valid for the `action` keyword.", obj=self.action) from None
+
+ action_context = action_loader.get_with_context(self.action, collection_list=self.collections, class_only=True)
+
+ if not action_context.plugin_load_context.resolved:
+ module_or_action_context = module_loader.find_plugin_with_context(self.action, collection_list=self.collections)
+
+ if not module_or_action_context.resolved:
+ raise AnsibleError(f"Cannot resolve {self.action!r} to an action or module.", obj=self.action)
+
+ action_context = action_loader.get_with_context('ansible.legacy.normal', collection_list=self.collections, class_only=True)
+ else:
+ module_or_action_context = action_context.plugin_load_context
+
+ self.resolved_action = module_or_action_context.resolved_fqcn
+
+ action_type: type[ActionBase] = action_context.object
+
+ vp = value.pop('_variable_params', None)
+
+ supports_raw_params = action_type.supports_raw_params or module_or_action_context.resolved_fqcn in RAW_PARAM_MODULES
+
+ if supports_raw_params:
+ raw_params_to_finalize = None
+ else:
+ raw_params_to_finalize = value.pop('_raw_params', None) # always str or None
+
+ # TaskArgsFinalizer performs more thorough type checking, but this provides a friendlier error message for a subset of detected cases.
+ if raw_params_to_finalize and not is_possibly_all_template(raw_params_to_finalize):
+ raise AnsibleError(f'Action {module_or_action_context.resolved_fqcn!r} does not support raw params.', obj=self.action)
+
+ args_finalizer = _task.TaskArgsFinalizer(
+ _get_action_arg_defaults(module_or_action_context.resolved_fqcn, self, templar),
+ vp,
+ raw_params_to_finalize,
+ value,
+ templar=templar,
+ )
+
+ try:
+ with action_type.get_finalize_task_args_context() as finalize_context:
+ args = args_finalizer.finalize(action_type.finalize_task_arg, context=finalize_context)
+ except Exception as ex:
+ raise AnsibleError(f'Finalization of task args for {module_or_action_context.resolved_fqcn!r} failed.', obj=self.action) from ex
+
+ if self._origin:
+ args = self._origin.tag(args)
+
+ return args
+
+ def _get_meta(self) -> str | None:
+ # FUTURE: validate meta and return an enum instead of a str
+ # meta currently does not support being templated, so we can cheat
+ if self.action in C._ACTION_META:
+ return self.args.get('_raw_params')
+
+ return None
def __repr__(self):
""" returns a human-readable representation of the task """
- if self.action in C._ACTION_META:
- return "TASK: meta (%s)" % self.args['_raw_params']
+ if meta := self._get_meta():
+ return f"TASK: meta ({meta})"
else:
return "TASK: %s" % self.get_name()
@@ -152,8 +229,6 @@ class Task(Base, Conditional, Taggable, CollectionSearch, Notifiable, Delegatabl
raise AnsibleError("you must specify a value when using %s" % k, obj=ds)
new_ds['loop_with'] = loop_name
new_ds['loop'] = v
- # display.deprecated("with_ type loops are being phased out, use the 'loop' keyword instead",
- # version="2.10", collection_name='ansible.builtin')
def preprocess_data(self, ds):
"""
@@ -164,12 +239,9 @@ class Task(Base, Conditional, Taggable, CollectionSearch, Notifiable, Delegatabl
if not isinstance(ds, dict):
raise AnsibleAssertionError('ds (%s) should be a dict but was a %s' % (ds, type(ds)))
- # the new, cleaned datastructure, which will have legacy
- # items reduced to a standard structure suitable for the
- # attributes of the task class
- new_ds = AnsibleMapping()
- if isinstance(ds, AnsibleBaseYAMLObject):
- new_ds.ansible_pos = ds.ansible_pos
+ # the new, cleaned datastructure, which will have legacy items reduced to a standard structure suitable for the
+ # attributes of the task class; copy any tagged data to preserve things like origin
+ new_ds = AnsibleTagHelper.tag_copy(ds, {})
# since this affects the task action parsing, we have to resolve in preprocess instead of in typical validator
default_collection = AnsibleCollectionConfig.default_collection
@@ -202,26 +274,13 @@ class Task(Base, Conditional, Taggable, CollectionSearch, Notifiable, Delegatabl
args_parser = ModuleArgsParser(task_ds=ds, collection_list=collections_list)
try:
(action, args, delegate_to) = args_parser.parse()
- except AnsibleParserError as e:
+ except AnsibleParserError as ex:
# if the raised exception was created with obj=ds args, then it includes the detail
# so we don't need to add it and can just re-raise.
- if e.obj:
+ if ex.obj:
raise
# But if it wasn't, we can add the yaml object now to get more detail
- raise AnsibleParserError(to_native(e), obj=ds, orig_exc=e)
- else:
- # Set the resolved action plugin (or if it does not exist, module) for callbacks.
- self.resolved_action = args_parser.resolved_action
-
- # the command/shell/script modules used to support the `cmd` arg,
- # which corresponds to what we now call _raw_params, so move that
- # value over to _raw_params (assuming it is empty)
- if action in C._ACTION_HAS_CMD:
- if 'cmd' in args:
- if args.get('_raw_params', '') != '':
- raise AnsibleError("The 'cmd' argument cannot be used when other raw parameters are specified."
- " Please put everything in one or the other place.", obj=ds)
- args['_raw_params'] = args.pop('cmd')
+ raise AnsibleParserError("Error parsing task arguments.", obj=ds) from ex
new_ds['action'] = action
new_ds['args'] = args
@@ -277,8 +336,11 @@ class Task(Base, Conditional, Taggable, CollectionSearch, Notifiable, Delegatabl
setattr(self, name, [value])
def _validate_register(self, attr, name, value):
- if value is not None and not isidentifier(value):
- raise AnsibleParserError(f"Invalid variable name in 'register' specified: '{value}'")
+ if value is not None:
+ try:
+ validate_variable_name(value)
+ except Exception as ex:
+ raise AnsibleParserError("Invalid 'register' specified.", obj=value) from ex
def post_validate(self, templar):
"""
@@ -289,9 +351,6 @@ class Task(Base, Conditional, Taggable, CollectionSearch, Notifiable, Delegatabl
if self._parent:
self._parent.post_validate(templar)
- if AnsibleCollectionConfig.default_collection:
- pass
-
super(Task, self).post_validate(templar)
def _post_validate_loop(self, attr, value, templar):
@@ -301,44 +360,53 @@ class Task(Base, Conditional, Taggable, CollectionSearch, Notifiable, Delegatabl
"""
return value
+ def _post_validate_name(self, attr, value, templar):
+ """
+ Override post-validation behavior for `name` to be best-effort for the vars available.
+ Direct access via `post_validate_attribute` writes the value back to provide a stable value.
+ This value is individually post-validated early by strategies for the benefit of callbacks.
+ """
+ with _marker_behaviors.ReplacingMarkerBehavior.warning_context() as replacing_behavior:
+ self.name = templar.extend(marker_behavior=replacing_behavior).template(value, options=TemplateOptions(value_for_omit=None))
+
+ return self.name
+
def _post_validate_environment(self, attr, value, templar):
"""
Override post validation of vars on the play, as we don't want to
template these too early.
"""
env = {}
- if value is not None:
- def _parse_env_kv(k, v):
- try:
- env[k] = templar.template(v, convert_bare=False)
- except AnsibleUndefinedVariable as e:
- error = to_native(e)
- if self.action in C._ACTION_FACT_GATHERING and 'ansible_facts.env' in error or 'ansible_env' in error:
- # ignore as fact gathering is required for 'env' facts
- return
- raise
-
- if isinstance(value, list):
- for env_item in value:
- if isinstance(env_item, dict):
- for k in env_item:
- _parse_env_kv(k, env_item[k])
- else:
- isdict = templar.template(env_item, convert_bare=False)
- if isinstance(isdict, dict):
- env |= isdict
- else:
- display.warning("could not parse environment value, skipping: %s" % value)
-
- elif isinstance(value, dict):
- # should not really happen
- env = dict()
- for env_item in value:
- _parse_env_kv(env_item, value[env_item])
+ # FUTURE: kill this with fire
+ def _parse_env_kv(k, v):
+ try:
+ env[k] = templar.template(v)
+ except AnsibleValueOmittedError:
+ # skip this value
+ return
+ except AnsibleUndefinedVariable as e:
+ error = to_native(e)
+                if self.action in C._ACTION_FACT_GATHERING and ('ansible_facts.env' in error or 'ansible_env' in error):
+ # ignore as fact gathering is required for 'env' facts
+ return
+ raise
+
+ # NB: the environment FieldAttribute definition ensures that value is always a list
+ for env_item in value:
+ if isinstance(env_item, dict):
+ for k in env_item:
+ _parse_env_kv(k, env_item[k])
else:
- # at this point it should be a simple string, also should not happen
- env = templar.template(value, convert_bare=False)
+ try:
+ isdict = templar.template(env_item)
+ except AnsibleValueOmittedError:
+ continue
+
+ if isinstance(isdict, dict):
+ env |= isdict
+ else:
+ display.warning("could not parse environment value, skipping: %s" % value)
return env
@@ -385,7 +453,7 @@ class Task(Base, Conditional, Taggable, CollectionSearch, Notifiable, Delegatabl
all_vars |= self.vars
return all_vars
- def copy(self, exclude_parent=False, exclude_tasks=False):
+ def copy(self, exclude_parent: bool = False, exclude_tasks: bool = False) -> Task:
new_me = super(Task, self).copy()
new_me._parent = None
@@ -519,3 +587,28 @@ class Task(Base, Conditional, Taggable, CollectionSearch, Notifiable, Delegatabl
while not isinstance(parent, Block):
parent = parent._parent
return parent._play
+
+ def dump_attrs(self):
+ """Override to smuggle important non-FieldAttribute values back to the controller."""
+ attrs = super().dump_attrs()
+ attrs.update(resolved_action=self.resolved_action)
+ return attrs
+
+ def _resolve_conditional(
+ self,
+ conditional: list[str | bool],
+ variables: dict[str, t.Any],
+ *,
+ result_context: dict[str, t.Any] | None = None,
+ ) -> bool:
+ """Loops through the conditionals set on this object, returning False if any of them evaluate as such, as well as the condition that was False."""
+ engine = TemplateEngine(self._loader, variables=variables)
+
+ for item in conditional:
+ if not engine.evaluate_conditional(item):
+ if result_context is not None:
+ result_context.update(false_condition=item)
+
+ return False
+
+ return True
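`_resolve_conditional` short-circuits on the first false conditional and, when a `result_context` dict is passed, records which condition failed. A minimal sketch of that contract, with a plain callable standing in for the internal `TemplateEngine` (an assumption for illustration only):

    from __future__ import annotations

    import typing as t

    def resolve_conditional(
        conditional: list[str | bool],
        evaluate: t.Callable[[str | bool], bool],
        *,
        result_context: dict[str, t.Any] | None = None,
    ) -> bool:
        for item in conditional:
            if not evaluate(item):
                if result_context is not None:
                    result_context.update(false_condition=item)
                return False  # stop at the first false condition
        return True

    ctx: dict[str, t.Any] = {}
    assert resolve_conditional([True, "1 == 2"], lambda c: c is True, result_context=ctx) is False
    assert ctx["false_condition"] == "1 == 2"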
diff --git a/lib/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py
index ff9068effa6..833f18e34e6 100644
--- a/lib/ansible/plugins/__init__.py
+++ b/lib/ansible/plugins/__init__.py
@@ -19,36 +19,56 @@
from __future__ import annotations
-from abc import ABC
-
+import abc
+import functools
import types
import typing as t
from ansible import constants as C
from ansible.errors import AnsibleError
-from ansible.module_utils.common.text.converters import to_native
-from ansible.module_utils.six import string_types
from ansible.utils.display import Display
+from ansible.utils import display as _display
+
+from ansible.module_utils._internal import _plugin_info
display = Display()
if t.TYPE_CHECKING:
- from .loader import PluginPathContext
+ from . import loader as _t_loader
# Global so that all instances of a PluginLoader will share the caches
MODULE_CACHE = {} # type: dict[str, dict[str, types.ModuleType]]
-PATH_CACHE = {} # type: dict[str, list[PluginPathContext] | None]
-PLUGIN_PATH_CACHE = {} # type: dict[str, dict[str, dict[str, PluginPathContext]]]
+PATH_CACHE = {} # type: dict[str, list[_t_loader.PluginPathContext] | None]
+PLUGIN_PATH_CACHE = {} # type: dict[str, dict[str, dict[str, _t_loader.PluginPathContext]]]
def get_plugin_class(obj):
- if isinstance(obj, string_types):
+ if isinstance(obj, str):
return obj.lower().replace('module', '')
else:
return obj.__class__.__name__.lower().replace('module', '')
-class AnsiblePlugin(ABC):
+class _ConfigurablePlugin(t.Protocol):
+ """Protocol to provide type-safe access to config for plugin-related mixins."""
+
+ def get_option(self, option: str, hostvars: dict[str, object] | None = None) -> t.Any: ...
+
+
+class _AnsiblePluginInfoMixin(_plugin_info.HasPluginInfo):
+ """Mixin to provide type annotations and default values for existing PluginLoader-set load-time attrs."""
+ _original_path: str | None = None
+ _load_name: str | None = None
+ _redirected_names: list[str] | None = None
+ ansible_aliases: list[str] | None = None
+ ansible_name: str | None = None
+
+ @property
+ def plugin_type(self) -> str:
+ return self.__class__.__name__.lower().replace('module', '')
+
+
+class AnsiblePlugin(_AnsiblePluginInfoMixin, _ConfigurablePlugin, metaclass=abc.ABCMeta):
# Set by plugin loader
_load_name: str
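For readers new to `typing.Protocol` (used by `_ConfigurablePlugin` above): a protocol lets a mixin declare the shape it expects from its host class without supplying an implementation, so static checkers can verify calls like `self.get_option(...)`. A self-contained sketch with illustrative names, not from this patch:

    from __future__ import annotations

    import typing as t

    class Configurable(t.Protocol):
        def get_option(self, option: str) -> t.Any: ...  # structural requirement only

    class TimeoutMixin:
        # annotating `self` with the protocol type-checks the get_option() call
        def get_timeout(self: Configurable) -> int:
            return int(self.get_option('timeout'))

    class FakePlugin(TimeoutMixin):
        def get_option(self, option: str) -> t.Any:
            return {'timeout': '30'}[option]

    assert FakePlugin().get_timeout() == 30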
@@ -81,9 +101,17 @@ class AnsiblePlugin(ABC):
try:
option_value, origin = C.config.get_config_value_and_origin(option, plugin_type=self.plugin_type, plugin_name=self._load_name, variables=hostvars)
except AnsibleError as e:
- raise KeyError(to_native(e))
+ raise KeyError(str(e))
return option_value, origin
+ @functools.cached_property
+ def __plugin_info(self):
+ """
+ Internal cached property to retrieve `PluginInfo` for this plugin instance.
+ Only for use by the `AnsiblePlugin` base class.
+ """
+ return _plugin_info.get_plugin_info(self)
+
def get_option(self, option, hostvars=None):
if option not in self._options:
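The private `__plugin_info` property above combines `functools.cached_property` (compute once, cache on the instance) with double-underscore name mangling (the attribute is stored under the class-qualified name, keeping it out of subclasses' way). A small demonstration of both behaviors:

    import functools

    class Probe:
        calls = 0

        @functools.cached_property
        def __expensive(self):  # stored as _Probe__expensive on the instance
            type(self).calls += 1
            return object()

        def value(self):
            return self.__expensive

    p = Probe()
    assert p.value() is p.value()  # second access is served from the instance cache
    assert Probe.calls == 1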
@@ -99,7 +127,7 @@ class AnsiblePlugin(ABC):
def set_option(self, option, value):
self._options[option] = C.config.get_config_value(option, plugin_type=self.plugin_type, plugin_name=self._load_name, direct={option: value})
- C.handle_config_noise(display)
+ _display._report_config_warnings(self.__plugin_info)
def set_options(self, task_keys=None, var_options=None, direct=None):
"""
@@ -116,20 +144,16 @@ class AnsiblePlugin(ABC):
if self.allow_extras and var_options and '_extras' in var_options:
# these are largely unvalidated passthroughs, either plugin or underlying API will validate
self._options['_extras'] = var_options['_extras']
- C.handle_config_noise(display)
+ _display._report_config_warnings(self.__plugin_info)
def has_option(self, option):
if not self._options:
self.set_options()
return option in self._options
- @property
- def plugin_type(self):
- return self.__class__.__name__.lower().replace('module', '')
-
@property
def option_definitions(self):
- if self._defs is None:
+ if (not hasattr(self, "_defs")) or self._defs is None:
self._defs = C.config.get_configuration_definitions(plugin_type=self.plugin_type, name=self._load_name)
return self._defs
@@ -137,23 +161,31 @@ class AnsiblePlugin(ABC):
# FIXME: standardize required check based on config
pass
+ def __repr__(self):
+ ansible_name = getattr(self, 'ansible_name', '(unknown)')
+ load_name = getattr(self, '_load_name', '(unknown)')
+ return f'{type(self).__name__}(plugin_type={self.plugin_type!r}, {ansible_name=!r}, {load_name=!r})'
-class AnsibleJinja2Plugin(AnsiblePlugin):
-
- def __init__(self, function):
+class AnsibleJinja2Plugin(AnsiblePlugin, metaclass=abc.ABCMeta):
+ def __init__(self, function: t.Callable) -> None:
super(AnsibleJinja2Plugin, self).__init__()
self._function = function
+ # Declare support for markers. Plugins with `False` here will never be invoked with markers for top-level arguments.
+ self.accept_args_markers = getattr(self._function, 'accept_args_markers', False)
+ self.accept_lazy_markers = getattr(self._function, 'accept_lazy_markers', False)
+
@property
- def plugin_type(self):
- return self.__class__.__name__.lower().replace('ansiblejinja2', '')
+ @abc.abstractmethod
+ def plugin_type(self) -> str:
+ ...
- def _no_options(self, *args, **kwargs):
+ def _no_options(self, *args, **kwargs) -> t.NoReturn:
raise NotImplementedError()
has_option = get_option = get_options = option_definitions = set_option = set_options = _no_options
@property
- def j2_function(self):
+ def j2_function(self) -> t.Callable:
return self._function
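The `accept_args_markers` / `accept_lazy_markers` lookup in `AnsibleJinja2Plugin.__init__` relies on plain function objects accepting arbitrary attributes: a decorator can tag a filter or test function, and the wrapper reads the tag with `getattr`, defaulting to False. A sketch using a hypothetical decorator name:

    import typing as t

    def accepts_args_markers(func: t.Callable) -> t.Callable:
        func.accept_args_markers = True  # hypothetical opt-in tag on the function object
        return func

    @accepts_args_markers
    def my_filter(value):
        return value

    def supports_markers(function: t.Callable) -> bool:
        # mirrors the __init__ above: a missing attribute means no marker support
        return getattr(function, 'accept_args_markers', False)

    assert supports_markers(my_filter) is True
    assert supports_markers(len) is False  # untagged callables default to False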
diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py
index e0d500a8a8a..b719000f66a 100644
--- a/lib/ansible/plugins/action/__init__.py
+++ b/lib/ansible/plugins/action/__init__.py
@@ -6,6 +6,7 @@
from __future__ import annotations
import base64
+import contextlib
import json
import os
import re
@@ -13,29 +14,42 @@ import secrets
import shlex
import stat
import tempfile
+import typing as t
from abc import ABC, abstractmethod
from collections.abc import Sequence
from ansible import constants as C
+from ansible._internal._errors import _captured, _error_utils
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleActionSkip, AnsibleActionFail, AnsibleAuthenticationFailure
-from ansible.executor.module_common import modify_module
+from ansible.executor.module_common import modify_module, _BuiltModule
from ansible.executor.interpreter_discovery import discover_interpreter, InterpreterDiscoveryRequiredError
+from ansible.module_utils._internal import _traceback
from ansible.module_utils.common.arg_spec import ArgumentSpecValidator
from ansible.module_utils.errors import UnsupportedError
from ansible.module_utils.json_utils import _filter_non_json_lines
+from ansible.module_utils.common.json import Direction, get_module_encoder, get_module_decoder
from ansible.module_utils.six import binary_type, string_types, text_type
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
-from ansible.parsing.utils.jsonify import jsonify
from ansible.release import __version__
from ansible.utils.collection_loader import resource_from_fqcr
from ansible.utils.display import Display
-from ansible.utils.unsafe_proxy import wrap_var, AnsibleUnsafeText
from ansible.vars.clean import remove_internal_keys
from ansible.utils.plugin_docs import get_versioned_doclink
+from ansible import _internal
+from ansible._internal._templating import _engine
+
+from .. import _AnsiblePluginInfoMixin
display = Display()
+if t.TYPE_CHECKING:
+ from ansible.parsing.dataloader import DataLoader
+ from ansible.playbook.play_context import PlayContext
+ from ansible.playbook.task import Task
+ from ansible.plugins.connection import ConnectionBase
+ from ansible.template import Templar
+
def _validate_utf8_json(d):
if isinstance(d, text_type):
@@ -49,8 +63,7 @@ def _validate_utf8_json(d):
_validate_utf8_json(o)
-class ActionBase(ABC):
-
+class ActionBase(ABC, _AnsiblePluginInfoMixin):
"""
This class is the base class for all action plugins, and defines
code common to all actions. The base class handles the connection
@@ -67,28 +80,30 @@ class ActionBase(ABC):
_requires_connection = True
_supports_check_mode = True
_supports_async = False
+ supports_raw_params = False
- def __init__(self, task, connection, play_context, loader, templar, shared_loader_obj):
+ def __init__(self, task: Task, connection: ConnectionBase, play_context: PlayContext, loader: DataLoader, templar: Templar, shared_loader_obj=None):
self._task = task
self._connection = connection
self._play_context = play_context
self._loader = loader
self._templar = templar
- self._shared_loader_obj = shared_loader_obj
+
+ from ansible.plugins import loader as plugin_loaders # avoid circular global import since PluginLoader needs ActionBase
+
+ self._shared_loader_obj = plugin_loaders # shared_loader_obj was just a ref to `ansible.plugins.loader` anyway; this lets us inherit its type
self._cleanup_remote_tmp = False
# interpreter discovery state
- self._discovered_interpreter_key = None
+ self._discovered_interpreter_key: str | None = None
self._discovered_interpreter = False
- self._discovery_deprecation_warnings = []
- self._discovery_warnings = []
- self._used_interpreter = None
+ self._used_interpreter: str | None = None
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
@abstractmethod
- def run(self, tmp=None, task_vars=None):
+ def run(self, tmp: str | None = None, task_vars: dict[str, t.Any] | None = None) -> dict[str, t.Any]:
""" Action Plugins should implement this method to perform their
tasks. Everything else in this base class is a helper method for the
action plugin to do that.
@@ -104,14 +119,13 @@ class ActionBase(ABC):
* Module parameters. These are stored in self._task.args
"""
-
- # does not default to {'changed': False, 'failed': False}, as it breaks async
- result = {}
+ # does not default to {'changed': False, 'failed': False}, as it used to break async
+ result: dict[str, t.Any] = {}
if tmp is not None:
- result['warning'] = ['ActionModule.run() no longer honors the tmp parameter. Action'
- ' plugins should set self._connection._shell.tmpdir to share'
- ' the tmpdir']
+ display.warning('ActionModule.run() no longer honors the tmp parameter. Action'
+ ' plugins should set self._connection._shell.tmpdir to share'
+ ' the tmpdir.')
del tmp
if self._task.async_val and not self._supports_async:
@@ -177,7 +191,7 @@ class ActionBase(ABC):
if isinstance(error, UnsupportedError):
msg = f"Unsupported parameters for ({self._load_name}) module: {msg}"
- raise AnsibleActionFail(msg)
+ raise AnsibleActionFail(msg, obj=self._task.args)
return validation_result, new_module_args
@@ -193,6 +207,28 @@ class ActionBase(ABC):
if force or not self._task.async_val:
self._remove_tmp_path(self._connection._shell.tmpdir)
+ @classmethod
+ @contextlib.contextmanager
+ @_internal.experimental
+ def get_finalize_task_args_context(cls) -> t.Any:
+ """
+ EXPERIMENTAL: Unstable API subject to change at any time without notice.
+ Wraps task arg finalization with (optional) stateful context.
+ The context manager is entered during `Task.post_validate_args`, and may yield a single value to be passed
+ as `context` to `ActionBase.finalize_task_arg` for each task arg.
+ """
+ yield None
+
+ @classmethod
+ @_internal.experimental
+ def finalize_task_arg(cls, name: str, value: t.Any, templar: _engine.TemplateEngine, context: t.Any) -> t.Any:
+ """
+ EXPERIMENTAL: Unstable API subject to change at any time without notice.
+ Called for each task arg to allow for custom templating.
+ The optional `context` value is sourced from `ActionBase.get_finalize_task_args_context`.
+ """
+ return templar.template(value)
+
def get_plugin_option(self, plugin, option, default=None):
"""Helper to get an option from a plugin without having to use
the try/except dance everywhere to set a default
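The two experimental hooks above are designed as a pair: the context manager is entered once per task-arg finalization pass, and whatever it yields is handed to each `finalize_task_arg` call. A minimal sketch of that flow with illustrative stand-ins (the `templar` parameter is omitted for brevity):

    from __future__ import annotations

    import contextlib
    import typing as t

    class Finalizer:
        @classmethod
        @contextlib.contextmanager
        def get_finalize_task_args_context(cls) -> t.Iterator[t.Any]:
            yield {'seen': []}  # one shared, stateful context per finalization pass

        @classmethod
        def finalize_task_arg(cls, name: str, value: t.Any, context: t.Any) -> t.Any:
            context['seen'].append(name)
            return value

    args = {'src': 'a', 'dest': 'b'}
    with Finalizer.get_finalize_task_args_context() as ctx:
        finalized = {k: Finalizer.finalize_task_arg(k, v, ctx) for k, v in args.items()}

    assert finalized == args and ctx['seen'] == ['src', 'dest']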
@@ -218,7 +254,7 @@ class ActionBase(ABC):
return True
return False
- def _configure_module(self, module_name, module_args, task_vars):
+ def _configure_module(self, module_name, module_args, task_vars) -> tuple[_BuiltModule, str]:
"""
Handles the loading and templating of the module code through the
modify_module() function.
@@ -276,38 +312,29 @@ class ActionBase(ABC):
raise AnsibleError("The module %s was not found in configured module paths" % (module_name))
# insert shared code and arguments into the module
- final_environment = dict()
+ final_environment: dict[str, t.Any] = {}
self._compute_environment_string(final_environment)
- become_kwargs = {}
- if self._connection.become:
- become_kwargs['become'] = True
- become_kwargs['become_method'] = self._connection.become.name
- become_kwargs['become_user'] = self._connection.become.get_option('become_user',
- playcontext=self._play_context)
- become_kwargs['become_password'] = self._connection.become.get_option('become_pass',
- playcontext=self._play_context)
- become_kwargs['become_flags'] = self._connection.become.get_option('become_flags',
- playcontext=self._play_context)
-
# modify_module will exit early if interpreter discovery is required; re-run after if necessary
- for dummy in (1, 2):
+ for _dummy in (1, 2):
try:
- (module_data, module_style, module_shebang) = modify_module(module_name, module_path, module_args, self._templar,
- task_vars=use_vars,
- module_compression=C.config.get_config_value('DEFAULT_MODULE_COMPRESSION',
- variables=task_vars),
- async_timeout=self._task.async_val,
- environment=final_environment,
- remote_is_local=bool(getattr(self._connection, '_remote_is_local', False)),
- **become_kwargs)
+ module_bits = modify_module(
+ module_name=module_name,
+ module_path=module_path,
+ module_args=module_args,
+ templar=self._templar,
+ task_vars=use_vars,
+ module_compression=C.config.get_config_value('DEFAULT_MODULE_COMPRESSION', variables=task_vars),
+ async_timeout=self._task.async_val,
+ environment=final_environment,
+ remote_is_local=bool(getattr(self._connection, '_remote_is_local', False)),
+ become_plugin=self._connection.become,
+ )
+
break
except InterpreterDiscoveryRequiredError as idre:
- self._discovered_interpreter = AnsibleUnsafeText(discover_interpreter(
- action=self,
- interpreter_name=idre.interpreter_name,
- discovery_mode=idre.discovery_mode,
- task_vars=use_vars))
+ self._discovered_interpreter = discover_interpreter(action=self, interpreter_name=idre.interpreter_name,
+ discovery_mode=idre.discovery_mode, task_vars=use_vars)
# update the local task_vars with the discovered interpreter (which might be None);
# we'll propagate back to the controller in the task result
@@ -327,7 +354,7 @@ class ActionBase(ABC):
else:
task_vars['ansible_delegated_vars'][self._task.delegate_to]['ansible_facts'][discovered_key] = self._discovered_interpreter
- return (module_style, module_shebang, module_data, module_path)
+ return module_bits, module_path
def _compute_environment_string(self, raw_environment_out=None):
"""
@@ -369,35 +396,11 @@ class ActionBase(ABC):
return getattr(self, 'TRANSFERS_FILES', False)
- def _is_pipelining_enabled(self, module_style, wrap_async=False):
+ def _is_pipelining_enabled(self, module_style: str, wrap_async: bool = False) -> bool:
"""
- Determines if we are required and can do pipelining
+ Determines whether pipelining is required and possible; only 'new' style modules support pipelining
"""
-
- try:
- is_enabled = self._connection.get_option('pipelining')
- except (KeyError, AttributeError, ValueError):
- is_enabled = self._play_context.pipelining
-
- # winrm supports async pipeline
- # TODO: make other class property 'has_async_pipelining' to separate cases
- always_pipeline = self._connection.always_pipeline_modules
-
- # su does not work with pipelining
- # TODO: add has_pipelining class prop to become plugins
- become_exception = (self._connection.become.name if self._connection.become else '') != 'su'
-
- # any of these require a true
- conditions = [
- self._connection.has_pipelining, # connection class supports it
- is_enabled or always_pipeline, # enabled via config or forced via connection (eg winrm)
- module_style == "new", # old style modules do not support pipelining
- not C.DEFAULT_KEEP_REMOTE_FILES, # user wants remote files
- not wrap_async or always_pipeline, # async does not normally support pipelining unless it does (eg winrm)
- become_exception,
- ]
-
- return all(conditions)
+ return bool(module_style == 'new' and self._connection.is_pipelining_enabled(wrap_async))
def _get_admin_users(self):
"""
@@ -470,8 +473,8 @@ class ActionBase(ABC):
become_unprivileged = self._is_become_unprivileged()
basefile = self._connection._shell._generate_temp_dir_name()
- cmd = self._connection._shell.mkdtemp(basefile=basefile, system=become_unprivileged, tmpdir=tmpdir)
- result = self._low_level_execute_command(cmd, sudoable=False)
+ cmd = self._connection._shell._mkdtemp2(basefile=basefile, system=become_unprivileged, tmpdir=tmpdir)
+ result = self._low_level_execute_command(cmd.command, in_data=cmd.input_data, sudoable=False)
# error handling on this seems a little aggressive?
if result['rc'] != 0:
@@ -556,18 +559,19 @@ class ActionBase(ABC):
self._connection.put_file(local_path, remote_path)
return remote_path
- def _transfer_data(self, remote_path, data):
+ def _transfer_data(self, remote_path: str | bytes, data: str | bytes) -> str | bytes:
"""
Copies the module data out to the temporary module path.
"""
- if isinstance(data, dict):
- data = jsonify(data)
+ if isinstance(data, str):
+ data = data.encode(errors='surrogateescape')
+ elif not isinstance(data, bytes):
+ raise TypeError('data must be either a string or bytes')
afd, afile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
afo = os.fdopen(afd, 'wb')
try:
- data = to_bytes(data, errors='surrogate_or_strict')
afo.write(data)
except Exception as e:
raise AnsibleError("failure writing module data to temporary file for transfer: %s" % to_native(e))
@@ -634,12 +638,12 @@ class ActionBase(ABC):
# done. Make the files +x if we're asked to, and return.
if not self._is_become_unprivileged():
if execute:
- # Can't depend on the file being transferred with execute permissions.
+ # Can't depend on the file being transferred with required permissions.
# Only need user perms because no become was used here
- res = self._remote_chmod(remote_paths, 'u+x')
+ res = self._remote_chmod(remote_paths, 'u+rwx')
if res['rc'] != 0:
raise AnsibleError(
- 'Failed to set execute bit on remote files '
+ 'Failed to set permissions on remote files '
'(rc: {0}, err: {1})'.format(
res['rc'],
to_native(res['stderr'])))
@@ -680,10 +684,10 @@ class ActionBase(ABC):
return remote_paths
# Step 3b: Set execute if we need to. We do this before anything else
- # because some of the methods below might work but not let us set +x
- # as part of them.
+ # because some of the methods below might work but not let us set
+ # permissions as part of them.
if execute:
- res = self._remote_chmod(remote_paths, 'u+x')
+ res = self._remote_chmod(remote_paths, 'u+rwx')
if res['rc'] != 0:
raise AnsibleError(
'Failed to set file mode or acl on remote temporary files '
@@ -901,8 +905,8 @@ class ActionBase(ABC):
expand_path = '~%s' % (self._get_remote_user() or '')
# use shell to construct appropriate command and execute
- cmd = self._connection._shell.expand_user(expand_path)
- data = self._low_level_execute_command(cmd, sudoable=False)
+ cmd = self._connection._shell._expand_user2(expand_path)
+ data = self._low_level_execute_command(cmd.command, in_data=cmd.input_data, sudoable=False)
try:
initial_fragment = data['stdout'].strip().splitlines()[-1]
@@ -972,9 +976,6 @@ class ActionBase(ABC):
# let module know about filesystems that selinux treats specially
module_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS
- # what to do when parameter values are converted to strings
- module_args['_ansible_string_conversion_action'] = C.STRING_CONVERSION_ACTION
-
# give the module the socket for persistent connections
module_args['_ansible_socket'] = getattr(self._connection, 'socket_path')
if not module_args['_ansible_socket']:
@@ -998,9 +999,11 @@ class ActionBase(ABC):
# tells the module to ignore options that are not in its argspec.
module_args['_ansible_ignore_unknown_opts'] = ignore_unknown_opts
- # allow user to insert string to add context to remote loggging
+ # allow user to insert string to add context to remote logging
module_args['_ansible_target_log_info'] = C.config.get_config_value('TARGET_LOG_INFO', variables=task_vars)
+ module_args['_ansible_tracebacks_for'] = _traceback.traceback_for()
+
def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=None, wrap_async=False,
ignore_unknown_opts: bool = False):
"""
@@ -1047,7 +1050,8 @@ class ActionBase(ABC):
self._task.environment.append({"ANSIBLE_ASYNC_DIR": async_dir})
# FUTURE: refactor this along with module build process to better encapsulate "smart wrapper" functionality
- (module_style, shebang, module_data, module_path) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
+ module_bits, module_path = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
+ (module_style, shebang, module_data) = (module_bits.module_style, module_bits.shebang, module_bits.b_module_data)
display.vvv("Using module file %s" % module_path)
if not shebang and module_style != 'binary':
raise AnsibleError("module (%s) is missing interpreter line" % module_name)
@@ -1083,7 +1087,8 @@ class ActionBase(ABC):
args_data += '%s=%s ' % (k, shlex.quote(text_type(v)))
self._transfer_data(args_file_path, args_data)
elif module_style in ('non_native_want_json', 'binary'):
- self._transfer_data(args_file_path, json.dumps(module_args))
+ profile_encoder = get_module_encoder(module_bits.serialization_profile, Direction.CONTROLLER_TO_MODULE)
+ self._transfer_data(args_file_path, json.dumps(module_args, cls=profile_encoder))
display.debug("done transferring module to remote")
environment_string = self._compute_environment_string()
@@ -1106,8 +1111,8 @@ class ActionBase(ABC):
if wrap_async and not self._connection.always_pipeline_modules:
# configure, upload, and chmod the async_wrapper module
- (async_module_style, shebang, async_module_data, async_module_path) = self._configure_module(
- module_name='ansible.legacy.async_wrapper', module_args=dict(), task_vars=task_vars)
+ (async_module_bits, async_module_path) = self._configure_module(module_name='ansible.legacy.async_wrapper', module_args=dict(), task_vars=task_vars)
+ (shebang, async_module_data) = (async_module_bits.shebang, async_module_bits.b_module_data)
async_module_remote_filename = self._connection._shell.get_remote_filename(async_module_path)
remote_async_module_path = self._connection._shell.join_path(tmpdir, async_module_remote_filename)
self._transfer_data(remote_async_module_path, async_module_data)
@@ -1156,7 +1161,7 @@ class ActionBase(ABC):
res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)
# parse the main result
- data = self._parse_returned_data(res)
+ data = self._parse_returned_data(res, module_bits.serialization_profile)
# NOTE: INTERNAL KEYS ONLY ACCESSIBLE HERE
# get internal info before cleaning
@@ -1197,71 +1202,66 @@ class ActionBase(ABC):
data['ansible_facts'][self._discovered_interpreter_key] = self._discovered_interpreter
- if self._discovery_warnings:
- if data.get('warnings') is None:
- data['warnings'] = []
- data['warnings'].extend(self._discovery_warnings)
-
- if self._discovery_deprecation_warnings:
- if data.get('deprecations') is None:
- data['deprecations'] = []
- data['deprecations'].extend(self._discovery_deprecation_warnings)
-
- # mark the entire module results untrusted as a template right here, since the current action could
- # possibly template one of these values.
- data = wrap_var(data)
-
display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
return data
- def _parse_returned_data(self, res):
+ def _parse_returned_data(self, res: dict[str, t.Any], profile: str) -> dict[str, t.Any]:
try:
- filtered_output, warnings = _filter_non_json_lines(res.get('stdout', u''), objects_only=True)
+ filtered_output, warnings = _filter_non_json_lines(res.get('stdout', ''), objects_only=True)
+
for w in warnings:
display.warning(w)
- data = json.loads(filtered_output)
-
- if C.MODULE_STRICT_UTF8_RESPONSE and not data.pop('_ansible_trusted_utf8', None):
- try:
- _validate_utf8_json(data)
- except UnicodeEncodeError:
- # When removing this, also remove the loop and latin-1 from ansible.module_utils.common.text.converters.jsonify
- display.deprecated(
- f'Module "{self._task.resolved_action or self._task.action}" returned non UTF-8 data in '
- 'the JSON response. This will become an error in the future',
- version='2.18',
- )
-
- data['_ansible_parsed'] = True
- except ValueError:
- # not valid json, lets try to capture error
- data = dict(failed=True, _ansible_parsed=False)
- data['module_stdout'] = res.get('stdout', u'')
- if 'stderr' in res:
- data['module_stderr'] = res['stderr']
- if res['stderr'].startswith(u'Traceback'):
- data['exception'] = res['stderr']
-
- # in some cases a traceback will arrive on stdout instead of stderr, such as when using ssh with -tt
- if 'exception' not in data and data['module_stdout'].startswith(u'Traceback'):
- data['exception'] = data['module_stdout']
-
- # The default
- data['msg'] = "MODULE FAILURE"
-
- # try to figure out if we are missing interpreter
+ decoder = get_module_decoder(profile, Direction.MODULE_TO_CONTROLLER)
+
+ data = json.loads(filtered_output, cls=decoder)
+
+ _captured.AnsibleModuleCapturedError.normalize_result_exception(data)
+
+ data.update(_ansible_parsed=True) # this must occur after normalize_result_exception, since it checks the type of data to ensure it's a dict
+ except ValueError as ex:
+ message = "Module result deserialization failed."
+ help_text = ""
+ include_cause_message = True
+
if self._used_interpreter is not None:
- interpreter = re.escape(self._used_interpreter.lstrip('!#'))
- match = re.compile('%s: (?:No such file or directory|not found)' % interpreter)
- if match.search(data['module_stderr']) or match.search(data['module_stdout']):
- data['msg'] = "The module failed to execute correctly, you probably need to set the interpreter."
+ interpreter = self._used_interpreter.lstrip('!#')
+ # "not found" case is currently not tested; it was once reproducible
+ # see: https://github.com/ansible/ansible/pull/53534
+ not_found_err_re = re.compile(rf'{re.escape(interpreter)}: (?:No such file or directory|not found|command not found)')
+
+ if not_found_err_re.search(res.get('stderr', '')) or not_found_err_re.search(res.get('stdout', '')):
+ message = f"The module interpreter {interpreter!r} was not found."
+ help_text = 'Consider overriding the configured interpreter path for this host. '
+ include_cause_message = False # cause context *might* be useful in the traceback, but the JSON deserialization failure message is not
+
+ try:
+ # Because the underlying action API is built on result dicts instead of exceptions (for all but the most catastrophic failures),
+ # we're using a tweaked version of the module exception handler to get new ErrorDetail-backed errors from this part of the code.
+ # Ideally this would raise immediately on failure, but this would likely break actions that assume `ActionBase._execute_module()`
+ # does not raise on module failure.
+
+ error = AnsibleError(
+ message=message,
+ help_text=help_text + "See stdout/stderr for the returned output.",
+ )
+
+ error._include_cause_message = include_cause_message
+
+ raise error from ex
+ except AnsibleError as ansible_ex:
+ sentinel = object()
+
+ data = _error_utils.result_dict_from_exception(ansible_ex)
+ data.update(
+ _ansible_parsed=False,
+ module_stdout=res.get('stdout', ''),
+ module_stderr=res.get('stderr', sentinel),
+ rc=res.get('rc', sentinel),
+ )
- # always append hint
- data['msg'] += '\nSee stdout/stderr for the exact error'
+ data = {k: v for k, v in data.items() if v is not sentinel}
- if 'rc' in res:
- data['rc'] = res['rc']
return data
# FIXME: move to connection base
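The interpreter-not-found detection in `_parse_returned_data` escapes the interpreter path and searches both output streams for common shell error shapes. A standalone sketch with an illustrative path:

    import re

    interpreter = '/usr/bin/python3.99'  # illustrative, not from the patch
    not_found_err_re = re.compile(
        rf'{re.escape(interpreter)}: (?:No such file or directory|not found|command not found)'
    )

    stderr = '/bin/sh: /usr/bin/python3.99: No such file or directory'
    assert not_found_err_re.search(stderr)
    assert not not_found_err_re.search('Traceback (most recent call last):')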
@@ -1375,7 +1375,7 @@ class ActionBase(ABC):
elif peek_result.get('size') and C.MAX_FILE_SIZE_FOR_DIFF > 0 and peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF:
diff['dst_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
else:
- display.debug(u"Slurping the file %s" % source)
+ display.debug(u"Slurping the file %s" % destination)
dest_result = self._execute_module(
module_name='ansible.legacy.slurp', module_args=dict(path=destination),
task_vars=task_vars, persist_files=True)
diff --git a/lib/ansible/plugins/action/add_host.py b/lib/ansible/plugins/action/add_host.py
index 7ed64c8166c..1e80fa68e24 100644
--- a/lib/ansible/plugins/action/add_host.py
+++ b/lib/ansible/plugins/action/add_host.py
@@ -77,7 +77,7 @@ class ActionModule(ActionBase):
elif isinstance(groups, string_types):
group_list = groups.split(",")
else:
- raise AnsibleActionFail("Groups must be specified as a list.", obj=self._task)
+ raise AnsibleActionFail("Groups must be specified as a list.", obj=groups)
for group_name in group_list:
if group_name not in new_groups:
diff --git a/lib/ansible/plugins/action/assemble.py b/lib/ansible/plugins/action/assemble.py
index bedf8191093..4d8bdc00df4 100644
--- a/lib/ansible/plugins/action/assemble.py
+++ b/lib/ansible/plugins/action/assemble.py
@@ -25,8 +25,8 @@ import re
import tempfile
from ansible import constants as C
-from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail
-from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.errors import AnsibleActionFail
+from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum_s
@@ -81,9 +81,10 @@ class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
- self._supports_check_mode = False
+ self._supports_check_mode = True
+
+ super(ActionModule, self).run(tmp, task_vars)
- result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if task_vars is None:
@@ -104,13 +105,9 @@ class ActionModule(ActionBase):
if boolean(remote_src, strict=False):
# call assemble via ansible.legacy to allow library/ overrides of the module without collection search
- result.update(self._execute_module(module_name='ansible.legacy.assemble', task_vars=task_vars))
- raise _AnsibleActionDone()
- else:
- try:
- src = self._find_needle('files', src)
- except AnsibleError as e:
- raise AnsibleActionFail(to_native(e))
+ return self._execute_module(module_name='ansible.legacy.assemble', task_vars=task_vars)
+
+ src = self._find_needle('files', src)
if not os.path.isdir(src):
raise AnsibleActionFail(u"Source (%s) is not a directory" % src)
@@ -153,13 +150,9 @@ class ActionModule(ActionBase):
res = self._execute_module(module_name='ansible.legacy.copy', module_args=new_module_args, task_vars=task_vars)
if diff:
res['diff'] = diff
- result.update(res)
+ return res
else:
- result.update(self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars))
+ return self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars)
- except AnsibleAction as e:
- result.update(e.result)
finally:
self._remove_tmp_path(self._connection._shell.tmpdir)
-
- return result
diff --git a/lib/ansible/plugins/action/assert.py b/lib/ansible/plugins/action/assert.py
index 5e18749af04..912e5d2c1e2 100644
--- a/lib/ansible/plugins/action/assert.py
+++ b/lib/ansible/plugins/action/assert.py
@@ -16,19 +16,41 @@
# along with Ansible. If not, see .
from __future__ import annotations
-from ansible.errors import AnsibleError
-from ansible.playbook.conditional import Conditional
+import typing as t
+
+from ansible._internal._templating import _jinja_bits
+from ansible.errors import AnsibleTemplateError
+from ansible.module_utils.common.validation import _check_type_list_strict
from ansible.plugins.action import ActionBase
-from ansible.module_utils.six import string_types
-from ansible.module_utils.parsing.convert_bool import boolean
+from ansible._internal._templating._engine import TemplateEngine
class ActionModule(ActionBase):
- """ Fail with custom message """
+ """Assert that one or more conditional expressions evaluate to true."""
_requires_connection = False
- _VALID_ARGS = frozenset(('fail_msg', 'msg', 'quiet', 'success_msg', 'that'))
+ @classmethod
+ def finalize_task_arg(cls, name: str, value: t.Any, templar: TemplateEngine, context: t.Any) -> t.Any:
+ if name != 'that':
+ # `that` is the only key requiring special handling; delegate to base handling otherwise
+ return super().finalize_task_arg(name, value, templar, context)
+
+ if not isinstance(value, str):
+ # if `that` is not a string, we don't need to attempt to resolve it as a template before validation (which will also listify it)
+ return value
+
+ # if `that` is entirely a string template, we only want to resolve to the container and avoid templating the container contents
+ if _jinja_bits.is_possibly_all_template(value):
+ try:
+ templated_that = templar.resolve_to_container(value)
+ except AnsibleTemplateError:
+ pass
+ else:
+ if isinstance(templated_that, list): # only use `templated_that` if it is a list
+ return templated_that
+
+ return value
def run(self, tmp=None, task_vars=None):
if task_vars is None:
@@ -37,49 +59,26 @@ class ActionModule(ActionBase):
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
- if 'that' not in self._task.args:
- raise AnsibleError('conditional required in "that" string')
-
- fail_msg = None
- success_msg = None
-
- fail_msg = self._task.args.get('fail_msg', self._task.args.get('msg'))
- if fail_msg is None:
- fail_msg = 'Assertion failed'
- elif isinstance(fail_msg, list):
- if not all(isinstance(x, string_types) for x in fail_msg):
- raise AnsibleError('Type of one of the elements in fail_msg or msg list is not string type')
- elif not isinstance(fail_msg, (string_types, list)):
- raise AnsibleError('Incorrect type for fail_msg or msg, expected a string or list and got %s' % type(fail_msg))
-
- success_msg = self._task.args.get('success_msg')
- if success_msg is None:
- success_msg = 'All assertions passed'
- elif isinstance(success_msg, list):
- if not all(isinstance(x, string_types) for x in success_msg):
- raise AnsibleError('Type of one of the elements in success_msg list is not string type')
- elif not isinstance(success_msg, (string_types, list)):
- raise AnsibleError('Incorrect type for success_msg, expected a string or list and got %s' % type(success_msg))
-
- quiet = boolean(self._task.args.get('quiet', False), strict=False)
-
- # make sure the 'that' items are a list
- thats = self._task.args['that']
- if not isinstance(thats, list):
- thats = [thats]
-
- # Now we iterate over the that items, temporarily assigning them
- # to the task's when value so we can evaluate the conditional using
- # the built in evaluate function. The when has already been evaluated
- # by this point, and is not used again, so we don't care about mangling
- # that value now
- cond = Conditional(loader=self._loader)
+ validation_result, new_module_args = self.validate_argument_spec(
+ argument_spec=dict(
+ fail_msg=dict(type=str_or_list_of_str, aliases=['msg'], default='Assertion failed'),
+ success_msg=dict(type=str_or_list_of_str, default='All assertions passed'),
+ quiet=dict(type='bool', default=False),
+ # explicitly not validating types `elements` here to let type rules for conditionals apply
+ that=dict(type=_check_type_list_strict, required=True),
+ ),
+ )
+
+ fail_msg = new_module_args['fail_msg']
+ success_msg = new_module_args['success_msg']
+ quiet = new_module_args['quiet']
+ that_list = new_module_args['that']
+
if not quiet:
result['_ansible_verbose_always'] = True
- for that in thats:
- cond.when = [that]
- test_result = cond.evaluate_conditional(templar=self._templar, all_vars=task_vars)
+ for that in that_list:
+ test_result = self._templar.evaluate_conditional(conditional=that)
if not test_result:
result['failed'] = True
result['evaluated_to'] = test_result
@@ -92,3 +91,13 @@ class ActionModule(ActionBase):
result['changed'] = False
result['msg'] = success_msg
return result
+
+
+def str_or_list_of_str(value: t.Any) -> str | list[str]:
+ if isinstance(value, str):
+ return value
+
+ if not isinstance(value, list) or any(not isinstance(item, str) for item in value):
+ raise TypeError("a string or list of strings is required")
+
+ return value
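Usage-wise, this validator passes strings and homogeneous string lists through unchanged and raises `TypeError` for anything else, which the argspec machinery surfaces as a validation failure:

    assert str_or_list_of_str('assertion failed') == 'assertion failed'
    assert str_or_list_of_str(['a', 'b']) == ['a', 'b']

    try:
        str_or_list_of_str(['a', 1])  # mixed element types are rejected
    except TypeError as ex:
        assert 'string or list of strings' in str(ex)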
diff --git a/lib/ansible/plugins/action/async_status.py b/lib/ansible/plugins/action/async_status.py
index a0fe11eb59d..676fc9324ec 100644
--- a/lib/ansible/plugins/action/async_status.py
+++ b/lib/ansible/plugins/action/async_status.py
@@ -28,7 +28,7 @@ class ActionModule(ActionBase):
)
# initialize response
- results['started'] = results['finished'] = 0
+ results['started'] = results['finished'] = False
results['stdout'] = results['stderr'] = ''
results['stdout_lines'] = results['stderr_lines'] = []
@@ -43,9 +43,14 @@ class ActionModule(ActionBase):
results['erased'] = log_path
else:
results['results_file'] = log_path
- results['started'] = 1
+ results['started'] = True
new_module_args['_async_dir'] = async_dir
results = merge_hash(results, self._execute_module(module_name='ansible.legacy.async_status', task_vars=task_vars, module_args=new_module_args))
+ # Backwards compat shim for when started/finished were ints,
+ # mostly to work with ansible.windows.async_status
+ for convert in ('started', 'finished'):
+ results[convert] = bool(results[convert])
+
return results
diff --git a/lib/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py
index 2047671b47c..89a6a8f1f95 100644
--- a/lib/ansible/plugins/action/copy.py
+++ b/lib/ansible/plugins/action/copy.py
@@ -23,12 +23,11 @@ import os
import os.path
import stat
import tempfile
-import traceback
from ansible import constants as C
-from ansible.errors import AnsibleError, AnsibleFileNotFound
+from ansible.errors import AnsibleError, AnsibleActionFail, AnsibleFileNotFound
from ansible.module_utils.basic import FILE_COMMON_ARGUMENTS
-from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum
@@ -410,8 +409,14 @@ class ActionModule(ActionBase):
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
+
del tmp # tmp no longer has any effect
+ # ensure user is not setting internal parameters
+ for internal in ('_original_basename', '_diff_peek'):
+ if self._task.args.get(internal, None) is not None:
+ raise AnsibleActionFail(f'Invalid parameter specified: "{internal}"')
+
source = self._task.args.get('src', None)
content = self._task.args.get('content', None)
dest = self._task.args.get('dest', None)
@@ -446,10 +451,10 @@ class ActionModule(ActionBase):
else:
content_tempfile = self._create_content_tempfile(content)
source = content_tempfile
- except Exception as err:
- result['failed'] = True
- result['msg'] = "could not write content temp file: %s" % to_native(err)
- return self._ensure_invocation(result)
+ except Exception as ex:
+ self._ensure_invocation(result)
+
+ raise AnsibleActionFail(message="could not write content temp file", result=result) from ex
# if we have first_available_file in our vars
# look up the files and use the first one we find as src
@@ -465,11 +470,10 @@ class ActionModule(ActionBase):
try:
# find in expected paths
source = self._find_needle('files', source)
- except AnsibleError as e:
- result['failed'] = True
- result['msg'] = to_text(e)
- result['exception'] = traceback.format_exc()
- return self._ensure_invocation(result)
+ except AnsibleError as ex:
+ self._ensure_invocation(result)
+
+ raise AnsibleActionFail(result=result) from ex
if trailing_slash != source.endswith(os.path.sep):
if source[-1] == os.path.sep:
diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py
index eefc2b74a33..55016e5b0b5 100644
--- a/lib/ansible/plugins/action/debug.py
+++ b/lib/ansible/plugins/action/debug.py
@@ -17,29 +17,32 @@
# along with Ansible. If not, see .
from __future__ import annotations
-from ansible.errors import AnsibleUndefinedVariable
-from ansible.module_utils.six import string_types
-from ansible.module_utils.common.text.converters import to_text
+from ansible.errors import AnsibleValueOmittedError, AnsibleError
+from ansible.module_utils.common.validation import _check_type_str_no_conversion
from ansible.plugins.action import ActionBase
+from ansible._internal._templating._jinja_common import UndefinedMarker, TruncationMarker
+from ansible._internal._templating._utils import Omit
+from ansible._internal._templating._marker_behaviors import ReplacingMarkerBehavior, RoutingMarkerBehavior
+from ansible.utils.display import Display
+
+display = Display()
class ActionModule(ActionBase):
- """ Print statements during execution """
+ """
+ Emits informational messages, with special diagnostic handling of some templating failures.
+ """
TRANSFERS_FILES = False
- _VALID_ARGS = frozenset(('msg', 'var', 'verbosity'))
_requires_connection = False
def run(self, tmp=None, task_vars=None):
- if task_vars is None:
- task_vars = dict()
-
validation_result, new_module_args = self.validate_argument_spec(
- argument_spec={
- 'msg': {'type': 'raw', 'default': 'Hello world!'},
- 'var': {'type': 'raw'},
- 'verbosity': {'type': 'int', 'default': 0},
- },
+ argument_spec=dict(
+ msg=dict(type='raw', default='Hello world!'),
+ var=dict(type=_check_type_str_no_conversion),
+ verbosity=dict(type='int', default=0),
+ ),
mutually_exclusive=(
('msg', 'var'),
),
@@ -51,31 +54,34 @@ class ActionModule(ActionBase):
# get task verbosity
verbosity = new_module_args['verbosity']
+ replacing_behavior = ReplacingMarkerBehavior()
+
+ var_behavior = RoutingMarkerBehavior({
+ UndefinedMarker: replacing_behavior,
+ TruncationMarker: replacing_behavior,
+ })
+
if verbosity <= self._display.verbosity:
- if new_module_args['var']:
+ if raw_var_arg := new_module_args['var']:
+ # If var name is same as result, try to template it
try:
- results = self._templar.template(new_module_args['var'], convert_bare=True, fail_on_undefined=True)
- if results == new_module_args['var']:
- # if results is not str/unicode type, raise an exception
- if not isinstance(results, string_types):
- raise AnsibleUndefinedVariable
- # If var name is same as result, try to template it
- results = self._templar.template("{{" + results + "}}", convert_bare=True, fail_on_undefined=True)
- except AnsibleUndefinedVariable as e:
- results = u"VARIABLE IS NOT DEFINED!"
- if self._display.verbosity > 0:
- results += u": %s" % to_text(e)
-
- if isinstance(new_module_args['var'], (list, dict)):
- # If var is a list or dict, use the type as key to display
- result[to_text(type(new_module_args['var']))] = results
- else:
- result[new_module_args['var']] = results
+ results = self._templar._engine.extend(marker_behavior=var_behavior).evaluate_expression(raw_var_arg)
+ except AnsibleValueOmittedError as ex:
+ results = repr(Omit)
+ display.warning("The result of the `var` expression could not be omitted; a placeholder was used instead.", obj=ex.obj)
+ except Exception as ex:
+ raise AnsibleError('Error while resolving `var` expression.', obj=raw_var_arg) from ex
+
+ result[raw_var_arg] = results
else:
result['msg'] = new_module_args['msg']
# force flag to make debug output module always verbose
result['_ansible_verbose_always'] = True
+
+ # propagate any warnings in the task result unless we're skipping the task
+ replacing_behavior.emit_warnings()
+
else:
result['skipped_reason'] = "Verbosity threshold not met."
result['skipped'] = True
diff --git a/lib/ansible/plugins/action/dnf.py b/lib/ansible/plugins/action/dnf.py
index 137fb13086c..3d36ae2e34e 100644
--- a/lib/ansible/plugins/action/dnf.py
+++ b/lib/ansible/plugins/action/dnf.py
@@ -30,10 +30,9 @@ class ActionModule(ActionBase):
if module in {'yum', 'auto'}:
try:
- if self._task.delegate_to: # if we delegate, we should use delegated host's facts
- module = self._templar.template("{{hostvars['%s']['ansible_facts']['pkg_mgr']}}" % self._task.delegate_to)
- else:
- module = self._templar.template("{{ansible_facts.pkg_mgr}}")
+ # if we delegate, we should use delegated host's facts
+ expr = "hostvars[delegate_to].ansible_facts.pkg_mgr" if self._task.delegate_to else "ansible_facts.pkg_mgr"
+ module = self._templar.resolve_variable_expression(expr, local_variables=dict(delegate_to=self._task.delegate_to))
except Exception:
pass # could not get it from template!
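The rewrite above passes `delegate_to` into the expression as a local variable instead of interpolating the hostname into template text, so unusual host names travel as data rather than syntax. The same idea in stock Jinja2 (Ansible's internal engine differs; this is only a sketch):

    import jinja2

    expr = jinja2.Environment().compile_expression('hostvars[delegate_to].pkg_mgr')

    hostvars = {'web1': {'pkg_mgr': 'dnf'}}
    # the hostname is supplied as data, never spliced into the expression source
    assert expr(hostvars=hostvars, delegate_to='web1') == 'dnf'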
diff --git a/lib/ansible/plugins/action/fetch.py b/lib/ansible/plugins/action/fetch.py
index 533cab93ec8..3fb21fbe3c5 100644
--- a/lib/ansible/plugins/action/fetch.py
+++ b/lib/ansible/plugins/action/fetch.py
@@ -51,7 +51,7 @@ class ActionModule(ActionBase):
validate_checksum = boolean(self._task.args.get('validate_checksum', True), strict=False)
msg = ''
- # validate source and dest are strings FIXME: use basic.py and module specs
+ # FIXME: validate source and dest are strings; use basic.py and module specs
if not isinstance(source, string_types):
msg = "Invalid type supplied for source option, it must be a string"
@@ -119,7 +119,7 @@ class ActionModule(ActionBase):
if 'not found' in slurpres.get('msg', ''):
result['msg'] = "the remote file does not exist, not transferring, ignored"
- elif slurpres.get('msg', '').startswith('source is a directory'):
+ elif slurpres.get('msg', '').lower().startswith('source is a directory'):
result['msg'] = "remote file is a directory, fetch cannot work on directories"
return result
@@ -180,8 +180,8 @@ class ActionModule(ActionBase):
try:
with open(to_bytes(dest, errors='surrogate_or_strict'), 'wb') as f:
f.write(remote_data)
- except (IOError, OSError) as e:
- raise AnsibleActionFail("Failed to fetch the file: %s" % e)
+ except OSError as ex:
+ raise AnsibleActionFail("Failed to fetch the file.") from ex
new_checksum = secure_hash(dest)
# For backwards compatibility. We'll return None on FIPS enabled systems
try:
diff --git a/lib/ansible/plugins/action/gather_facts.py b/lib/ansible/plugins/action/gather_facts.py
index 31210ec724d..d9389247daf 100644
--- a/lib/ansible/plugins/action/gather_facts.py
+++ b/lib/ansible/plugins/action/gather_facts.py
@@ -8,10 +8,12 @@ import time
import typing as t
from ansible import constants as C
-from ansible.executor.module_common import get_action_args_with_defaults
+from ansible.errors import AnsibleActionFail
+from ansible.executor.module_common import _apply_action_arg_defaults
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash
+from ansible._internal._errors import _error_utils
class ActionModule(ActionBase):
@@ -27,10 +29,8 @@ class ActionModule(ActionBase):
# TODO: remove in favor of controller side argspec detecting valid arguments
# network facts modules must support gather_subset
- try:
- name = self._connection.ansible_name.removeprefix('ansible.netcommon.')
- except AttributeError:
- name = self._connection._load_name.split('.')[-1]
+ name = self._connection.ansible_name.removeprefix('ansible.netcommon.')
+
if name not in ('network_cli', 'httpapi', 'netconf'):
subset = mod_args.pop('gather_subset', None)
if subset not in ('all', ['all'], None):
@@ -53,14 +53,12 @@ class ActionModule(ActionBase):
fact_module, collection_list=self._task.collections
).resolved_fqcn
- mod_args = get_action_args_with_defaults(
- resolved_fact_module, mod_args, self._task.module_defaults, self._templar,
- action_groups=self._task._parent._play._action_groups
- )
+ mod_args = _apply_action_arg_defaults(resolved_fact_module, self._task, mod_args, self._templar)
return mod_args
def _combine_task_result(self, result: dict[str, t.Any], task_result: dict[str, t.Any]) -> dict[str, t.Any]:
+ """ builds the final result to return """
filtered_res = {
'ansible_facts': task_result.get('ansible_facts', {}),
'warnings': task_result.get('warnings', []),
@@ -70,6 +68,33 @@ class ActionModule(ActionBase):
# on conflict the last plugin processed wins, but try to do deep merge and append to lists.
return merge_hash(result, filtered_res, list_merge='append_rp')
+ def _handle_smart(self, modules: list, task_vars: dict[str, t.Any]):
+ """ Updates the module list when 'smart' is used, lookup network os mappings or use setup, warn when things seem inconsistent """
+
+ if 'smart' not in modules:
+ return
+
+ modules.pop(modules.index('smart')) # remove as this will cause 'module not found' errors
+ network_os = self._task.args.get('network_os', task_vars.get('ansible_network_os', task_vars.get('ansible_facts', {}).get('network_os')))
+
+ if network_os:
+
+ connection_map = C.config.get_config_value('CONNECTION_FACTS_MODULES', variables=task_vars)
+ if network_os in connection_map:
+ modules.append(connection_map[network_os])
+ elif not modules:
+ raise AnsibleActionFail(f"No fact modules available and we could not find a fact module for your network OS ({network_os}), "
+ "try setting one via the `FACTS_MODULES` configuration.")
+
+ if set(modules).intersection(set(C._ACTION_SETUP)):
+ # most don't realize how setup works with networking connection plugins (forced_local)
+ self._display.warning("Detected 'setup' module and a network OS is set, the output when running it will reflect 'localhost'"
+ " and not the target when a networking connection plugin is used.")
+
+ elif not set(modules).intersection(set(C._ACTION_SETUP)):
+ # no network OS and setup not in list, add setup by default since 'smart'
+ modules.append('ansible.legacy.setup')
+
def run(self, tmp: t.Optional[str] = None, task_vars: t.Optional[dict[str, t.Any]] = None) -> dict[str, t.Any]:
result = super(ActionModule, self).run(tmp, task_vars)
@@ -77,13 +102,9 @@ class ActionModule(ActionBase):
# copy the value with list() so we don't mutate the config
modules = list(C.config.get_config_value('FACTS_MODULES', variables=task_vars))
+ self._handle_smart(modules, task_vars)
parallel = task_vars.pop('ansible_facts_parallel', self._task.args.pop('parallel', None))
- if 'smart' in modules:
- connection_map = C.config.get_config_value('CONNECTION_FACTS_MODULES', variables=task_vars)
- network_os = self._task.args.get('network_os', task_vars.get('ansible_network_os', task_vars.get('ansible_facts', {}).get('network_os')))
- modules.extend([connection_map.get(network_os or self._connection.ansible_name, 'ansible.legacy.setup')])
- modules.pop(modules.index('smart'))
failed = {}
skipped = {}
@@ -137,7 +158,7 @@ class ActionModule(ActionBase):
for module in jobs:
poll_args = {'jid': jobs[module]['ansible_job_id'], '_async_dir': os.path.dirname(jobs[module]['results_file'])}
res = self._execute_module(module_name='ansible.legacy.async_status', module_args=poll_args, task_vars=task_vars, wrap_async=False)
- if res.get('finished', 0) == 1:
+ if res.get('finished', False):
if res.get('failed', False):
failed[module] = res
elif res.get('skipped', False):
@@ -156,16 +177,19 @@ class ActionModule(ActionBase):
self._task.async_val = async_val
if skipped:
- result['msg'] = "The following modules were skipped: %s\n" % (', '.join(skipped.keys()))
+ result['msg'] = f"The following modules were skipped: {', '.join(skipped.keys())}."
result['skipped_modules'] = skipped
if len(skipped) == len(modules):
result['skipped'] = True
if failed:
- result['failed'] = True
- result['msg'] = "The following modules failed to execute: %s\n" % (', '.join(failed.keys()))
result['failed_modules'] = failed
+ result.update(_error_utils.result_dict_from_captured_errors(
+ msg=f"The following modules failed to execute: {', '.join(failed.keys())}.",
+ errors=[r['exception'] for r in failed.values()],
+ ))
+
# tell executor facts were gathered
result['ansible_facts']['_ansible_facts_gathered'] = True
diff --git a/lib/ansible/plugins/action/include_vars.py b/lib/ansible/plugins/action/include_vars.py
index c32e6227dbf..3eeef2d9c8d 100644
--- a/lib/ansible/plugins/action/include_vars.py
+++ b/lib/ansible/plugins/action/include_vars.py
@@ -9,8 +9,9 @@ import pathlib
import ansible.constants as C
from ansible.errors import AnsibleError
+from ansible._internal._datatag._tags import SourceWasEncrypted
from ansible.module_utils.six import string_types
-from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.module_utils.common.text.converters import to_native
from ansible.plugins.action import ActionBase
from ansible.utils.vars import combine_vars
@@ -142,9 +143,8 @@ class ActionModule(ActionBase):
result['message'] = err_msg
elif self.hash_behaviour is not None and self.hash_behaviour != C.DEFAULT_HASH_BEHAVIOUR:
merge_hashes = self.hash_behaviour == 'merge'
- for key, value in results.items():
- old_value = task_vars.get(key, None)
- results[key] = combine_vars(old_value, value, merge=merge_hashes)
+ existing_variables = {k: v for k, v in task_vars.items() if k in results}
+ results = combine_vars(existing_variables, results, merge=merge_hashes)
result['ansible_included_var_files'] = self.included_files
result['ansible_facts'] = results
@@ -168,9 +168,9 @@ class ActionModule(ActionBase):
)
self.source_dir = path_to_use
else:
- if hasattr(self._task._ds, '_data_source'):
+ if (origin := self._task._origin) and origin.path: # origin.path is not present for ad-hoc tasks
current_dir = (
- "/".join(self._task._ds._data_source.split('/')[:-1])
+ "/".join(origin.path.split('/')[:-1])
)
self.source_dir = path.join(current_dir, self.source_dir)
@@ -234,13 +234,13 @@ class ActionModule(ActionBase):
failed = True
err_msg = ('{0} does not have a valid extension: {1}'.format(to_native(filename), ', '.join(self.valid_extensions)))
else:
- b_data, show_content = self._loader._get_file_contents(filename)
- data = to_text(b_data, errors='surrogate_or_strict')
+ data = self._loader.load_from_file(filename, cache='none', trusted_as_template=True)
- self.show_content = show_content
- data = self._loader.load(data, file_name=filename, show_content=show_content)
- if not data:
+ self.show_content &= not SourceWasEncrypted.is_tagged_on(data)
+
+ if data is None: # support empty files, but not falsey values
data = dict()
+
if not isinstance(data, dict):
failed = True
err_msg = ('{0} must be stored as a dictionary/hash'.format(to_native(filename)))
diff --git a/lib/ansible/plugins/action/package.py b/lib/ansible/plugins/action/package.py
index 13b2cdf7766..e5042c95f27 100644
--- a/lib/ansible/plugins/action/package.py
+++ b/lib/ansible/plugins/action/package.py
@@ -16,8 +16,8 @@
# along with Ansible. If not, see .
from __future__ import annotations
-from ansible.errors import AnsibleAction, AnsibleActionFail
-from ansible.executor.module_common import get_action_args_with_defaults
+from ansible.errors import AnsibleActionFail
+from ansible.executor.module_common import _apply_action_arg_defaults
from ansible.module_utils.facts.system.pkg_mgr import PKG_MGRS
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
@@ -38,7 +38,7 @@ class ActionModule(ActionBase):
self._supports_check_mode = True
self._supports_async = True
- result = super(ActionModule, self).run(tmp, task_vars)
+ super(ActionModule, self).run(tmp, task_vars)
module = self._task.args.get('use', 'auto')
@@ -92,21 +92,15 @@ class ActionModule(ActionBase):
# get defaults for specific module
context = self._shared_loader_obj.module_loader.find_plugin_with_context(module, collection_list=self._task.collections)
- new_module_args = get_action_args_with_defaults(
- context.resolved_fqcn, new_module_args, self._task.module_defaults, self._templar,
- action_groups=self._task._parent._play._action_groups
- )
+ new_module_args = _apply_action_arg_defaults(context.resolved_fqcn, self._task, new_module_args, self._templar)
if module in self.BUILTIN_PKG_MGR_MODULES:
# prefix with ansible.legacy to eliminate external collisions while still allowing library/ override
module = 'ansible.legacy.' + module
display.vvvv("Running %s" % module)
- result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
+ return self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val)
else:
raise AnsibleActionFail('Could not detect which package manager to use. Try gathering facts or setting the "use" option.')
-
- except AnsibleAction as e:
- result.update(e.result)
-
- return result
+ finally:
+ pass # keep the try block to avoid re-indenting everything in this refactor
diff --git a/lib/ansible/plugins/action/script.py b/lib/ansible/plugins/action/script.py
index c22a66cada5..0f2b2d49892 100644
--- a/lib/ansible/plugins/action/script.py
+++ b/lib/ansible/plugins/action/script.py
@@ -17,10 +17,12 @@
from __future__ import annotations
import os
+import pathlib
import re
import shlex
+import typing as _t
-from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail, AnsibleActionSkip
+from ansible.errors import AnsibleError, AnsibleActionFail, AnsibleActionSkip
from ansible.executor.powershell import module_manifest as ps_manifest
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.plugins.action import ActionBase
@@ -34,7 +36,7 @@ class ActionModule(ActionBase):
# after chopping off a potential drive letter.
windows_absolute_path_detection = re.compile(r'^(?:[a-zA-Z]\:)?(\\|\/)')
- def run(self, tmp=None, task_vars=None):
+ def run(self, tmp: str | None = None, task_vars: dict[str, _t.Any] | None = None) -> dict[str, _t.Any]:
""" handler for file transfer operations """
if task_vars is None:
task_vars = dict()
@@ -48,12 +50,11 @@ class ActionModule(ActionBase):
'chdir': {'type': 'str'},
'executable': {'type': 'str'},
},
- required_one_of=[
- ['_raw_params', 'cmd']
- ]
+ required_one_of=[['_raw_params', 'cmd']],
+ mutually_exclusive=[['_raw_params', 'cmd']],
)
- result = super(ActionModule, self).run(tmp, task_vars)
+ super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
try:
@@ -88,7 +89,7 @@ class ActionModule(ActionBase):
# Split out the script as the first item in raw_params using
# shlex.split() in order to support paths and files with spaces in the name.
# Any arguments passed to the script will be added back later.
- raw_params = to_native(new_module_args.get('_raw_params', ''), errors='surrogate_or_strict')
+ raw_params = new_module_args['_raw_params'] or new_module_args['cmd']
parts = [to_text(s, errors='surrogate_or_strict') for s in shlex.split(raw_params.strip())]
source = parts[0]
@@ -105,16 +106,11 @@ class ActionModule(ActionBase):
# check mode is supported if 'creates' or 'removes' are provided
# the task has already been skipped if a change would not occur
if new_module_args['creates'] or new_module_args['removes']:
- result['changed'] = True
- raise _AnsibleActionDone(result=result)
+ return dict(changed=True)
# If the script doesn't return changed in the result, it defaults to True,
# but since the script may override 'changed', just skip instead of guessing.
else:
- result['changed'] = False
- raise AnsibleActionSkip('Check mode is not supported for this task.', result=result)
-
- # now we execute script, always assume changed.
- result['changed'] = True
+ raise AnsibleActionSkip('Check mode is not supported for this task.', result=dict(changed=False))
# transfer the file to a remote tmp location
tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir,
@@ -135,7 +131,7 @@ class ActionModule(ActionBase):
self._fixup_perms2((self._connection._shell.tmpdir, tmp_src), execute=True)
# add preparation steps to one ssh roundtrip executing the script
- env_dict = dict()
+ env_dict: dict[str, _t.Any] = {}
env_string = self._compute_environment_string(env_dict)
if executable:
@@ -152,23 +148,28 @@ class ActionModule(ActionBase):
# FUTURE: use a more public method to get the exec payload
pc = self._task
exec_data = ps_manifest._create_powershell_wrapper(
- to_bytes(script_cmd), source, {}, env_dict, self._task.async_val,
- pc.become, pc.become_method, pc.become_user,
- self._play_context.become_pass, pc.become_flags, "script", task_vars, None
+ name=f"ansible.builtin.script.{pathlib.Path(source).stem}",
+ module_data=to_bytes(script_cmd),
+ module_path=source,
+ module_args={},
+ environment=env_dict,
+ async_timeout=self._task.async_val,
+ become_plugin=self._connection.become,
+ substyle="script",
+ task_vars=task_vars,
+ profile='legacy', # the profile doesn't really matter since the module args dict is empty
)
# build the necessary exec wrapper command
# FUTURE: this still doesn't let script work on Windows with non-pipelined connections or
# full manual exec of KEEP_REMOTE_FILES
script_cmd = self._connection._shell.build_module_command(env_string='', shebang='#!powershell', cmd='')
- result.update(self._low_level_execute_command(cmd=script_cmd, in_data=exec_data, sudoable=True, chdir=chdir))
+ # now we execute script, always assume changed.
+ result: dict[str, object] = dict(self._low_level_execute_command(cmd=script_cmd, in_data=exec_data, sudoable=True, chdir=chdir), changed=True)
if 'rc' in result and result['rc'] != 0:
- raise AnsibleActionFail('non-zero return code')
+ result.update(msg='non-zero return code', failed=True)
- except AnsibleAction as e:
- result.update(e.result)
+ return result
finally:
self._remove_tmp_path(self._connection._shell.tmpdir)
-
- return result
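
As the comments above note, the script path is split out of the raw parameters with shlex.split so quoted paths containing spaces survive intact; for example:

import shlex

raw_params = "'/opt/my scripts/deploy.sh' --env prod"
parts = shlex.split(raw_params.strip())
source, script_args = parts[0], parts[1:]
assert source == '/opt/my scripts/deploy.sh'
assert script_args == ['--env', 'prod']
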
diff --git a/lib/ansible/plugins/action/service.py b/lib/ansible/plugins/action/service.py
index 2b00d10b9d3..c444132ac7b 100644
--- a/lib/ansible/plugins/action/service.py
+++ b/lib/ansible/plugins/action/service.py
@@ -16,9 +16,8 @@
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
from __future__ import annotations
-
-from ansible.errors import AnsibleAction, AnsibleActionFail
-from ansible.executor.module_common import get_action_args_with_defaults
+from ansible.errors import AnsibleActionFail
+from ansible.executor.module_common import _apply_action_arg_defaults
from ansible.plugins.action import ActionBase
@@ -40,17 +39,16 @@ class ActionModule(ActionBase):
self._supports_check_mode = True
self._supports_async = True
- result = super(ActionModule, self).run(tmp, task_vars)
+ super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
module = self._task.args.get('use', 'auto').lower()
if module == 'auto':
try:
- if self._task.delegate_to: # if we delegate, we should use delegated host's facts
- module = self._templar.template("{{hostvars['%s']['ansible_facts']['service_mgr']}}" % self._task.delegate_to)
- else:
- module = self._templar.template('{{ansible_facts.service_mgr}}')
+ # if we delegate, we should use delegated host's facts
+ expr = "hostvars[delegate_to].ansible_facts.service_mgr" if self._task.delegate_to else "ansible_facts.service_mgr"
+ module = self._templar.resolve_variable_expression(expr, local_variables=dict(delegate_to=self._task.delegate_to))
except Exception:
pass # could not get it from template!
@@ -79,24 +77,17 @@ class ActionModule(ActionBase):
# get defaults for specific module
context = self._shared_loader_obj.module_loader.find_plugin_with_context(module, collection_list=self._task.collections)
- new_module_args = get_action_args_with_defaults(
- context.resolved_fqcn, new_module_args, self._task.module_defaults, self._templar,
- action_groups=self._task._parent._play._action_groups
- )
+ new_module_args = _apply_action_arg_defaults(context.resolved_fqcn, self._task, new_module_args, self._templar)
# collection prefix known internal modules to avoid collisions from collections search, while still allowing library/ overrides
if module in self.BUILTIN_SVC_MGR_MODULES:
module = 'ansible.legacy.' + module
self._display.vvvv("Running %s" % module)
- result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
+ return self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val)
else:
raise AnsibleActionFail('Could not detect which service manager to use. Try gathering facts or setting the "use" option.')
- except AnsibleAction as e:
- result.update(e.result)
finally:
if not self._task.async_val:
self._remove_tmp_path(self._connection._shell.tmpdir)
-
- return result
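
The service_mgr lookup now binds the delegated host name as a local variable for resolve_variable_expression instead of pasting it into Jinja template text. A standalone sketch of why that matters (plain dict lookups standing in for the Templar API):

hostvars = {"host'01": {'ansible_facts': {'service_mgr': 'systemd'}}}

# old style: the host name becomes template source, so quoting can break it
template = "{{hostvars['%s']['ansible_facts']['service_mgr']}}" % "host'01"
# -> {{hostvars['host'01']...}} is no longer valid Jinja

# new style: the name stays data; the expression text never changes
local_variables = {'delegate_to': "host'01", 'hostvars': hostvars}
service_mgr = local_variables['hostvars'][local_variables['delegate_to']]['ansible_facts']['service_mgr']
assert service_mgr == 'systemd'
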
diff --git a/lib/ansible/plugins/action/set_fact.py b/lib/ansible/plugins/action/set_fact.py
index b95ec4940f9..62921aed676 100644
--- a/lib/ansible/plugins/action/set_fact.py
+++ b/lib/ansible/plugins/action/set_fact.py
@@ -18,12 +18,9 @@
from __future__ import annotations
from ansible.errors import AnsibleActionFail
-from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
-from ansible.utils.vars import isidentifier
-
-import ansible.constants as C
+from ansible.utils.vars import validate_variable_name
class ActionModule(ActionBase):
@@ -43,16 +40,10 @@ class ActionModule(ActionBase):
if self._task.args:
for (k, v) in self._task.args.items():
- k = self._templar.template(k)
+ k = self._templar.template(k) # a rare case where key templating is allowed; kept for backward compatibility with dynamically-named variables
- if not isidentifier(k):
- raise AnsibleActionFail("The variable name '%s' is not valid. Variables must start with a letter or underscore character, "
- "and contain only letters, numbers and underscores." % k)
+ validate_variable_name(k)
- # NOTE: this should really use BOOLEANS from convert_bool, but only in the k=v case,
- # right now it converts matching explicit YAML strings also when 'jinja2_native' is disabled.
- if not C.DEFAULT_JINJA2_NATIVE and isinstance(v, string_types) and v.lower() in ('true', 'false', 'yes', 'no'):
- v = boolean(v, strict=False)
facts[k] = v
else:
raise AnsibleActionFail('No key/value pairs provided, at least one is required for this action to succeed')
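
validate_variable_name replaces the old isidentifier check and raises instead of returning a boolean, so callers no longer build their own error messages. A rough approximation of the rule it enforces (the real helper raises an Ansible-specific error and is not public API):

import keyword

def validate_variable_name(name: str) -> None:
    # a valid variable name is a non-keyword Python identifier
    if not isinstance(name, str) or not name.isidentifier() or keyword.iskeyword(name):
        raise ValueError(f"{name!r} is not a valid variable name")

validate_variable_name('my_fact')   # passes silently
try:
    validate_variable_name('2bad')
except ValueError as ex:
    print(ex)  # '2bad' is not a valid variable name
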
diff --git a/lib/ansible/plugins/action/set_stats.py b/lib/ansible/plugins/action/set_stats.py
index 309180f7a3d..bb312000ec3 100644
--- a/lib/ansible/plugins/action/set_stats.py
+++ b/lib/ansible/plugins/action/set_stats.py
@@ -19,7 +19,7 @@ from __future__ import annotations
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
-from ansible.utils.vars import isidentifier
+from ansible.utils.vars import validate_variable_name
class ActionModule(ActionBase):
@@ -42,7 +42,7 @@ class ActionModule(ActionBase):
data = self._task.args.get('data', {})
if not isinstance(data, dict):
- data = self._templar.template(data, convert_bare=False, fail_on_undefined=True)
+ data = self._templar.template(data)
if not isinstance(data, dict):
result['failed'] = True
@@ -59,14 +59,9 @@ class ActionModule(ActionBase):
stats[opt] = val
for (k, v) in data.items():
-
k = self._templar.template(k)
- if not isidentifier(k):
- result['failed'] = True
- result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only "
- "letters, numbers and underscores." % k)
- return result
+ validate_variable_name(k)
stats['data'][k] = self._templar.template(v)
diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py
index f83522dd70d..19844827341 100644
--- a/lib/ansible/plugins/action/template.py
+++ b/lib/ansible/plugins/action/template.py
@@ -20,12 +20,13 @@ from jinja2.defaults import (
from ansible import constants as C
from ansible.config.manager import ensure_type
-from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail
+from ansible.errors import AnsibleError, AnsibleActionFail
from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import string_types
from ansible.plugins.action import ActionBase
-from ansible.template import generate_ansible_template_vars, AnsibleEnvironment
+from ansible.template import trust_as_template
+from ansible._internal._templating import _template_vars
class ActionModule(ActionBase):
@@ -39,11 +40,11 @@ class ActionModule(ActionBase):
if task_vars is None:
task_vars = dict()
- result = super(ActionModule, self).run(tmp, task_vars)
+ super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
# Options type validation
- # stings
+ # strings
for s_type in ('src', 'dest', 'state', 'newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string',
'block_end_string', 'comment_start_string', 'comment_end_string'):
if s_type in self._task.args:
@@ -98,63 +99,47 @@ class ActionModule(ActionBase):
if mode == 'preserve':
mode = '0%03o' % stat.S_IMODE(os.stat(source).st_mode)
- # Get vault decrypted tmp file
- try:
- tmp_source = self._loader.get_real_file(source)
- except AnsibleFileNotFound as e:
- raise AnsibleActionFail("could not find src=%s, %s" % (source, to_text(e)))
- b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')
-
# template the source data locally & get ready to transfer
- try:
- with open(b_tmp_source, 'rb') as f:
- try:
- template_data = to_text(f.read(), errors='surrogate_or_strict')
- except UnicodeError:
- raise AnsibleActionFail("Template source files must be utf-8 encoded")
-
- # set jinja2 internal search path for includes
- searchpath = task_vars.get('ansible_search_path', [])
- searchpath.extend([self._loader._basedir, os.path.dirname(source)])
-
- # We want to search into the 'templates' subdir of each search path in
- # addition to our original search paths.
- newsearchpath = []
- for p in searchpath:
- newsearchpath.append(os.path.join(p, 'templates'))
- newsearchpath.append(p)
- searchpath = newsearchpath
-
- # add ansible 'template' vars
- temp_vars = task_vars.copy()
- # NOTE in the case of ANSIBLE_DEBUG=1 task_vars is VarsWithSources(MutableMapping)
- # so | operator cannot be used as it can be used only on dicts
- # https://peps.python.org/pep-0584/#what-about-mapping-and-mutablemapping
- temp_vars.update(generate_ansible_template_vars(self._task.args.get('src', None), source, dest))
-
- # force templar to use AnsibleEnvironment to prevent issues with native types
- # https://github.com/ansible/ansible/issues/46169
- templar = self._templar.copy_with_new_env(environment_class=AnsibleEnvironment,
- searchpath=searchpath,
- newline_sequence=newline_sequence,
- available_variables=temp_vars)
- overrides = dict(
- block_start_string=block_start_string,
- block_end_string=block_end_string,
- variable_start_string=variable_start_string,
- variable_end_string=variable_end_string,
- comment_start_string=comment_start_string,
- comment_end_string=comment_end_string,
- trim_blocks=trim_blocks,
- lstrip_blocks=lstrip_blocks
- )
- resultant = templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False, overrides=overrides)
- except AnsibleAction:
- raise
- except Exception as e:
- raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e)))
- finally:
- self._loader.cleanup_tmp_file(b_tmp_source)
+ template_data = trust_as_template(self._loader.get_text_file_contents(source))
+
+ # set jinja2 internal search path for includes
+ searchpath = task_vars.get('ansible_search_path', [])
+ searchpath.extend([self._loader._basedir, os.path.dirname(source)])
+
+ # We want to search into the 'templates' subdir of each search path in
+ # addition to our original search paths.
+ newsearchpath = []
+ for p in searchpath:
+ newsearchpath.append(os.path.join(p, 'templates'))
+ newsearchpath.append(p)
+ searchpath = newsearchpath
+
+ # add ansible 'template' vars
+ temp_vars = task_vars.copy()
+ temp_vars.update(_template_vars.generate_ansible_template_vars(
+ path=self._task.args.get('src', None),
+ fullpath=source,
+ dest_path=dest,
+ include_ansible_managed='ansible_managed' not in temp_vars, # do not clobber ansible_managed when set by the user
+ ))
+
+ overrides = dict(
+ block_start_string=block_start_string,
+ block_end_string=block_end_string,
+ variable_start_string=variable_start_string,
+ variable_end_string=variable_end_string,
+ comment_start_string=comment_start_string,
+ comment_end_string=comment_end_string,
+ trim_blocks=trim_blocks,
+ lstrip_blocks=lstrip_blocks,
+ newline_sequence=newline_sequence,
+ )
+
+ data_templar = self._templar.copy_with_new_env(searchpath=searchpath, available_variables=temp_vars)
+ resultant = data_templar.template(template_data, escape_backslashes=False, overrides=overrides)
+
+ if resultant is None:
+ resultant = ''
new_task = self._task.copy()
# mode is either the mode from task.args or the mode of the source file if the task.args
@@ -188,13 +173,8 @@ class ActionModule(ActionBase):
loader=self._loader,
templar=self._templar,
shared_loader_obj=self._shared_loader_obj)
- result.update(copy_action.run(task_vars=task_vars))
+ return copy_action.run(task_vars=task_vars)
finally:
shutil.rmtree(to_bytes(local_tempdir, errors='surrogate_or_strict'))
-
- except AnsibleAction as e:
- result.update(e.result)
finally:
self._remove_tmp_path(self._connection._shell.tmpdir)
-
- return result
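
The search-path expansion kept by this refactor checks each directory's templates subdirectory before the directory itself; mirrored standalone:

import os

def expand_search_path(searchpath: list[str]) -> list[str]:
    expanded = []
    for p in searchpath:
        expanded.append(os.path.join(p, 'templates'))  # searched first
        expanded.append(p)
    return expanded

assert expand_search_path(['/srv/play', '/srv/role']) == [
    '/srv/play/templates', '/srv/play',
    '/srv/role/templates', '/srv/role',
]
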
diff --git a/lib/ansible/plugins/action/unarchive.py b/lib/ansible/plugins/action/unarchive.py
index ece2597adaf..be2740a81d6 100644
--- a/lib/ansible/plugins/action/unarchive.py
+++ b/lib/ansible/plugins/action/unarchive.py
@@ -19,8 +19,7 @@ from __future__ import annotations
import os
-from ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleActionSkip
-from ansible.module_utils.common.text.converters import to_text
+from ansible.errors import AnsibleActionFail, AnsibleActionSkip
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
@@ -34,7 +33,7 @@ class ActionModule(ActionBase):
if task_vars is None:
task_vars = dict()
- result = super(ActionModule, self).run(tmp, task_vars)
+ super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
source = self._task.args.get('src', None)
@@ -68,15 +67,9 @@ class ActionModule(ActionBase):
source = os.path.expanduser(source)
if not remote_src:
- try:
- source = self._loader.get_real_file(self._find_needle('files', source), decrypt=decrypt)
- except AnsibleError as e:
- raise AnsibleActionFail(to_text(e))
+ source = self._loader.get_real_file(self._find_needle('files', source), decrypt=decrypt)
- try:
- remote_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=True)
- except AnsibleError as e:
- raise AnsibleActionFail(to_text(e))
+ remote_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=True)
if not remote_stat['exists'] or not remote_stat['isdir']:
raise AnsibleActionFail("dest '%s' must be an existing dir" % dest)
@@ -102,9 +95,6 @@ class ActionModule(ActionBase):
# execute the unarchive module now, with the updated args (using ansible.legacy prefix to eliminate collections
# collisions with local override
- result.update(self._execute_module(module_name='ansible.legacy.unarchive', module_args=new_module_args, task_vars=task_vars))
- except AnsibleAction as e:
- result.update(e.result)
+ return self._execute_module(module_name='ansible.legacy.unarchive', module_args=new_module_args, task_vars=task_vars)
finally:
self._remove_tmp_path(self._connection._shell.tmpdir)
- return result
diff --git a/lib/ansible/plugins/action/uri.py b/lib/ansible/plugins/action/uri.py
index 9860f2683b3..851340c1d0c 100644
--- a/lib/ansible/plugins/action/uri.py
+++ b/lib/ansible/plugins/action/uri.py
@@ -5,11 +5,10 @@
from __future__ import annotations
+import collections.abc as _c
import os
-from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail
-from ansible.module_utils.common.text.converters import to_native
-from ansible.module_utils.common.collections import Mapping, MutableMapping
+from ansible.errors import AnsibleActionFail
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
@@ -25,7 +24,7 @@ class ActionModule(ActionBase):
if task_vars is None:
task_vars = dict()
- result = super(ActionModule, self).run(tmp, task_vars)
+ super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
body_format = self._task.args.get('body_format', 'raw')
@@ -38,38 +37,31 @@ class ActionModule(ActionBase):
# everything is remote, so we just execute the module
# without changing any of the module arguments
# call with ansible.legacy prefix to prevent collections collisions while allowing local override
- raise _AnsibleActionDone(result=self._execute_module(module_name='ansible.legacy.uri',
- task_vars=task_vars, wrap_async=self._task.async_val))
+ return self._execute_module(module_name='ansible.legacy.uri', task_vars=task_vars, wrap_async=self._task.async_val)
kwargs = {}
if src:
- try:
- src = self._find_needle('files', src)
- except AnsibleError as e:
- raise AnsibleActionFail(to_native(e))
+ src = self._find_needle('files', src)
tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, os.path.basename(src))
kwargs['src'] = tmp_src
self._transfer_file(src, tmp_src)
self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
elif body_format == 'form-multipart':
- if not isinstance(body, Mapping):
+ if not isinstance(body, _c.Mapping):
raise AnsibleActionFail(
'body must be mapping, cannot be type %s' % body.__class__.__name__
)
for field, value in body.items():
- if not isinstance(value, MutableMapping):
+ if not isinstance(value, _c.MutableMapping):
continue
content = value.get('content')
filename = value.get('filename')
if not filename or content:
continue
- try:
- filename = self._find_needle('files', filename)
- except AnsibleError as e:
- raise AnsibleActionFail(to_native(e))
+ filename = self._find_needle('files', filename)
tmp_src = self._connection._shell.join_path(
self._connection._shell.tmpdir,
@@ -83,10 +75,7 @@ class ActionModule(ActionBase):
new_module_args = self._task.args | kwargs
# call with ansible.legacy prefix to prevent collections collisions while allowing local override
- result.update(self._execute_module('ansible.legacy.uri', module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
- except AnsibleAction as e:
- result.update(e.result)
+ return self._execute_module('ansible.legacy.uri', module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val)
finally:
if not self._task.async_val:
self._remove_tmp_path(self._connection._shell.tmpdir)
- return result
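
In the form-multipart branch, only body parts that are mutable mappings naming a filename without inline content are transferred to the remote temp dir. The filtering, replayed standalone with the same collections.abc checks:

import collections.abc as _c

body = {
    'upload': {'filename': 'notes.txt'},    # file part: needs transfer
    'inline': {'content': 'raw data'},      # inline content: left alone
    'plain': 'just a string',               # not a mapping: skipped
}

assert isinstance(body, _c.Mapping)
for field, value in body.items():
    if not isinstance(value, _c.MutableMapping):
        continue
    if not value.get('filename') or value.get('content'):
        continue
    print(f'{field}: would transfer {value["filename"]!r}')  # only 'upload'
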
diff --git a/lib/ansible/plugins/become/__init__.py b/lib/ansible/plugins/become/__init__.py
index 6f7a2b88abf..235287c07f3 100644
--- a/lib/ansible/plugins/become/__init__.py
+++ b/lib/ansible/plugins/become/__init__.py
@@ -3,6 +3,7 @@
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
+import re
import shlex
from abc import abstractmethod
@@ -13,6 +14,7 @@ from gettext import dgettext
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_bytes
from ansible.plugins import AnsiblePlugin
+from ansible.utils import display as _display
def _gen_id(length=32):
@@ -32,6 +34,9 @@ class BecomeBase(AnsiblePlugin):
# plugin requires a tty, i.e su
require_tty = False
+ # plugin allows for pipelining execution
+ pipelining = True
+
# prompt to match
prompt = ''
@@ -53,11 +58,11 @@ class BecomeBase(AnsiblePlugin):
return getattr(playcontext, option, None)
- def expect_prompt(self):
+ def expect_prompt(self) -> bool:
"""This function assists connection plugins in determining if they need to wait for
a prompt. Both a prompt and a password are required.
"""
- return self.prompt and self.get_option('become_pass')
+ return bool(self.prompt and self.get_option('become_pass'))
def _build_success_command(self, cmd, shell, noexe=False):
if not all((cmd, shell, self.success)):
@@ -65,9 +70,8 @@ class BecomeBase(AnsiblePlugin):
try:
cmd = shlex.quote('%s %s %s %s' % (shell.ECHO, self.success, shell.COMMAND_SEP, cmd))
- except AttributeError:
- # TODO: This should probably become some more robust functionality used to detect incompat
- raise AnsibleError('The %s shell family is incompatible with the %s become plugin' % (shell.SHELL_FAMILY, self.name))
+ except AttributeError as ex:
+ raise AnsibleError(f'The {shell._load_name!r} shell plugin does not support become. It is missing the {ex.name!r} attribute.')
exe = getattr(shell, 'executable', None)
if exe and not noexe:
cmd = '%s -c %s' % (exe, cmd)
@@ -78,6 +82,25 @@ class BecomeBase(AnsiblePlugin):
self._id = _gen_id()
self.success = 'BECOME-SUCCESS-%s' % self._id
+ def strip_become_prompt(self, data: bytes) -> bytes:
+ """
+ Strips the first occurrence of the configured become prompt from `data`, along with trailing whitespace and anything preceding the prompt, then returns the result.
+ If no prompt is expected, or the prompt is not `str` or `bytes`, `data` will be returned as-is.
+ """
+ if not self.prompt or not isinstance(self.prompt, (str, bytes)) or not self.expect_prompt():
+ return data
+
+ return self._strip_through_prefix(self.prompt, data)
+
+ def strip_become_success(self, data: bytes) -> bytes:
+ """Strips the first found success marker from `data`, trailing whitespace and anything that precedes the success marker, then returns the result."""
+ return self._strip_through_prefix(self.success, data)
+
+ @staticmethod
+ def _strip_through_prefix(match: str | bytes, data: bytes) -> bytes:
+ """Strips the first occurrence of `match` from `data`, trailing whitespace and anything that precedes `match`, then returns the result."""
+ return re.sub(br'^.*?' + re.escape(to_bytes(match)) + br'\s*', b'', data, count=1, flags=re.DOTALL)
+
def check_success(self, b_output):
b_success = to_bytes(self.success)
return any(b_success in l.rstrip() for l in b_output.splitlines(True))
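
The new _strip_through_prefix helper is a single non-greedy, DOTALL substitution; exercised on its own, it shows how everything up to and including the marker (plus trailing whitespace) is dropped:

import re

def strip_through_prefix(match: bytes, data: bytes) -> bytes:
    return re.sub(br'^.*?' + re.escape(match) + br'\s*', b'', data, count=1, flags=re.DOTALL)

output = b'motd banner\nBECOME-SUCCESS-abc123\n{"changed": false}'
assert strip_through_prefix(b'BECOME-SUCCESS-abc123', output) == b'{"changed": false}'
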
diff --git a/lib/ansible/plugins/become/runas.py b/lib/ansible/plugins/become/runas.py
index 3094c46c4b0..0389e1ed42f 100644
--- a/lib/ansible/plugins/become/runas.py
+++ b/lib/ansible/plugins/become/runas.py
@@ -61,6 +61,8 @@ DOCUMENTATION = """
- The Secondary Logon service (seclogon) must be running to use runas
"""
+from ansible.errors import AnsibleError
+from ansible.parsing.splitter import split_args
from ansible.plugins.become import BecomeBase
@@ -72,3 +74,72 @@ class BecomeModule(BecomeBase):
# this is a noop, the 'real' runas is implemented
# inside the windows powershell execution subsystem
return cmd
+
+ def _build_powershell_wrapper_action(self) -> tuple[str, dict[str, object], dict[str, object]]:
+ # See ansible.executor.powershell.become_wrapper.ps1 for the
+ # parameter names
+ params = {
+ 'BecomeUser': self.get_option('become_user'),
+ }
+ secure_params = {}
+
+ password = self.get_option('become_pass')
+ if password:
+ secure_params['BecomePassword'] = password
+
+ flags = self.get_option('become_flags')
+ if flags:
+ split_flags = split_args(flags)
+ for flag in split_flags:
+ if '=' not in flag:
+ raise ValueError(f"become_flags entry '{flag}' is in an invalid format, must be a key=value pair")
+
+ k, v = flag.split('=', 1)
+
+ param_name, param_value = self._parse_flag(k, v)
+ params[param_name] = param_value
+
+ return 'become_wrapper.ps1', params, secure_params
+
+ def _parse_flag(self, name: str, value: str) -> tuple[str, str]:
+ logon_types = {
+ 'interactive': 'Interactive',
+ 'network': 'Network',
+ 'batch': 'Batch',
+ 'service': 'Service',
+ 'unlock': 'Unlock',
+ 'network_cleartext': 'NetworkCleartext',
+ 'new_credentials': 'NewCredentials',
+ }
+ logon_flags = {
+ 'none': 'None',
+ 'with_profile': 'WithProfile',
+ 'netcredentials_only': 'NetCredentialsOnly',
+ }
+
+ match name.lower():
+ case 'logon_type':
+ param_name = 'LogonType'
+ if param_value := logon_types.get(value.lower(), None):
+ return param_name, param_value
+ else:
+ raise AnsibleError(f"become_flags logon_type value '{value}' is not valid, valid values are: {', '.join(logon_types.keys())}")
+
+ case 'logon_flags':
+ param_name = 'LogonFlags'
+ flags = value.split(',')
+
+ param_values: list[str] = []
+ for flag in flags:
+ if not flag:
+ continue
+
+ if flag_value := logon_flags.get(flag.lower(), None):
+ param_values.append(flag_value)
+ else:
+ raise AnsibleError(f"become_flags logon_flags value '{flag}' is not valid, valid values are: {', '.join(logon_flags.keys())}")
+
+ return param_name, ", ".join(param_values)
+
+ case _:
+ raise AnsibleError(f"become_flags key '{name}' is not a valid runas flag, must be 'logon_type' or 'logon_flags'")
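
become_flags arrives as a single string of key=value pairs; the split step can be approximated with shlex (the plugin itself uses ansible.parsing.splitter.split_args):

import shlex

flags = 'logon_type=new_credentials logon_flags=with_profile,netcredentials_only'
pairs = dict(item.split('=', 1) for item in shlex.split(flags))
assert pairs == {
    'logon_type': 'new_credentials',
    'logon_flags': 'with_profile,netcredentials_only',
}
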
diff --git a/lib/ansible/plugins/become/su.py b/lib/ansible/plugins/become/su.py
index b8a7f0be993..381e5e7fe5e 100644
--- a/lib/ansible/plugins/become/su.py
+++ b/lib/ansible/plugins/become/su.py
@@ -93,7 +93,7 @@ DOCUMENTATION = """
import re
import shlex
-from ansible.module_utils.common.text.converters import to_bytes
+from ansible.module_utils.common.text.converters import to_text
from ansible.plugins.become import BecomeBase
@@ -101,6 +101,8 @@ class BecomeModule(BecomeBase):
name = 'su'
+ pipelining = False
+
# messages for detecting prompted password issues
fail = ('Authentication failure',)
@@ -139,15 +141,18 @@ class BecomeModule(BecomeBase):
'口令',
]
- def check_password_prompt(self, b_output):
+ def check_password_prompt(self, b_output: bytes) -> bool:
""" checks if the expected password prompt exists in b_output """
-
prompts = self.get_option('prompt_l10n') or self.SU_PROMPT_LOCALIZATIONS
- b_password_string = b"|".join((br'(\w+\'s )?' + to_bytes(p)) for p in prompts)
+ password_prompt_strings = "|".join(re.escape(p) for p in prompts)
# Colon or unicode fullwidth colon
- b_password_string = b_password_string + to_bytes(u' ?(:|:) ?')
- b_su_prompt_localizations_re = re.compile(b_password_string, flags=re.IGNORECASE)
- return bool(b_su_prompt_localizations_re.match(b_output))
+ prompt_pattern = rf"(?:{password_prompt_strings})\s*[::]"
+ match = re.search(prompt_pattern, to_text(b_output), flags=re.IGNORECASE)
+
+ if match:
+ self.prompt = match.group(0) # preserve the actual matched string so we can scrub the output
+
+ return bool(match)
def build_become_command(self, cmd, shell):
super(BecomeModule, self).build_become_command(cmd, shell)
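
The rewritten su prompt detection builds one alternation of escaped localized prompts followed by optional whitespace and an ASCII or fullwidth colon; roughly:

import re

prompts = ['Password', 'Mot de passe', '密码']
pattern = rf"(?:{'|'.join(re.escape(p) for p in prompts)})\s*[::]"
assert re.search(pattern, 'Mot de passe :', flags=re.IGNORECASE)
assert re.search(pattern, '密码:', flags=re.IGNORECASE)
assert not re.search(pattern, 'Last login: Mon Jan 6', flags=re.IGNORECASE)
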
diff --git a/lib/ansible/plugins/become/sudo.py b/lib/ansible/plugins/become/sudo.py
index 6a33c987c04..13a86607503 100644
--- a/lib/ansible/plugins/become/sudo.py
+++ b/lib/ansible/plugins/become/sudo.py
@@ -72,12 +72,25 @@ DOCUMENTATION = """
ini:
- section: sudo_become_plugin
key: password
+ sudo_chdir:
+ description: Directory to change to before invoking sudo; can avoid permission errors when dropping privileges.
+ type: string
+ required: False
+ version_added: '2.19'
+ vars:
+ - name: ansible_sudo_chdir
+ env:
+ - name: ANSIBLE_SUDO_CHDIR
+ ini:
+ - section: sudo_become_plugin
+ key: chdir
"""
import re
import shlex
from ansible.plugins.become import BecomeBase
+from ansible.errors import AnsibleError
class BecomeModule(BecomeBase):
@@ -117,4 +130,10 @@ class BecomeModule(BecomeBase):
if user:
user = '-u %s' % (user)
+ if chdir := self.get_option('sudo_chdir'):
+ try:
+ becomecmd = f'{shell.CD} {shlex.quote(chdir)} {shell._SHELL_AND} {becomecmd}'
+ except AttributeError as ex:
+ raise AnsibleError(f'The {shell._load_name!r} shell plugin does not support sudo chdir. It is missing the {ex.name!r} attribute.')
+
return ' '.join([becomecmd, flags, prompt, user, self._build_success_command(cmd, shell)])
diff --git a/lib/ansible/plugins/cache/__init__.py b/lib/ansible/plugins/cache/__init__.py
index 3bc5a16f303..4ec276bca67 100644
--- a/lib/ansible/plugins/cache/__init__.py
+++ b/lib/ansible/plugins/cache/__init__.py
@@ -18,18 +18,18 @@
from __future__ import annotations
import copy
-import errno
import os
import tempfile
import time
+import typing as t
from abc import abstractmethod
-from collections.abc import MutableMapping
+from collections import abc as c
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.common.file import S_IRWU_RG_RO
-from ansible.module_utils.common.text.converters import to_bytes, to_text
+from ansible.module_utils.common.text.converters import to_bytes
from ansible.plugins import AnsiblePlugin
from ansible.plugins.loader import cache_loader
from ansible.utils.collection_loader import resource_from_fqcr
@@ -42,37 +42,36 @@ class BaseCacheModule(AnsiblePlugin):
# Backwards compat only. Just import the global display instead
_display = display
+ _persistent = True
+ """Plugins that do not persist data between runs can set False to bypass schema-version key munging and JSON serialization wrapper."""
- def __init__(self, *args, **kwargs):
- super(BaseCacheModule, self).__init__()
- self.set_options(var_options=args, direct=kwargs)
+ def __init__(self, *args, **kwargs) -> None:
+ super().__init__()
- @abstractmethod
- def get(self, key):
- pass
+ self.set_options(var_options=args, direct=kwargs)
@abstractmethod
- def set(self, key, value):
+ def get(self, key: str) -> dict[str, object]:
pass
@abstractmethod
- def keys(self):
+ def set(self, key: str, value: dict[str, object]) -> None:
pass
@abstractmethod
- def contains(self, key):
+ def keys(self) -> t.Sequence[str]:
pass
@abstractmethod
- def delete(self, key):
+ def contains(self, key: object) -> bool:
pass
@abstractmethod
- def flush(self):
+ def delete(self, key: str) -> None:
pass
@abstractmethod
- def copy(self):
+ def flush(self) -> None:
pass
@@ -108,15 +107,15 @@ class BaseFileCacheModule(BaseCacheModule):
if not os.path.exists(self._cache_dir):
try:
os.makedirs(self._cache_dir)
- except (OSError, IOError) as e:
- raise AnsibleError("error in '%s' cache plugin while trying to create cache dir %s : %s" % (self.plugin_name, self._cache_dir, to_bytes(e)))
+ except OSError as ex:
+ raise AnsibleError(f"Error in {self.plugin_name!r} cache plugin while trying to create cache dir {self._cache_dir!r}.") from ex
else:
for x in (os.R_OK, os.W_OK, os.X_OK):
if not os.access(self._cache_dir, x):
raise AnsibleError("error in '%s' cache, configured path (%s) does not have necessary permissions (rwx), disabling plugin" % (
self.plugin_name, self._cache_dir))
- def _get_cache_file_name(self, key):
+ def _get_cache_file_name(self, key: str) -> str:
prefix = self.get_option('_prefix')
if prefix:
cachefile = "%s/%s%s" % (self._cache_dir, prefix, key)
@@ -144,11 +143,10 @@ class BaseFileCacheModule(BaseCacheModule):
self.delete(key)
raise AnsibleError("The cache file %s was corrupt, or did not otherwise contain valid data. "
"It has been removed, so you can re-run your command now." % cachefile)
- except (OSError, IOError) as e:
- display.warning("error in '%s' cache plugin while trying to read %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
+ except FileNotFoundError:
raise KeyError
- except Exception as e:
- raise AnsibleError("Error while decoding the cache file %s: %s" % (cachefile, to_bytes(e)))
+ except Exception as ex:
+ raise AnsibleError(f"Error while accessing the cache file {cachefile!r}.") from ex
return self._cache.get(key)
@@ -161,13 +159,13 @@ class BaseFileCacheModule(BaseCacheModule):
try:
try:
self._dump(value, tmpfile_path)
- except (OSError, IOError) as e:
- display.warning("error in '%s' cache plugin while trying to write to '%s' : %s" % (self.plugin_name, tmpfile_path, to_bytes(e)))
+ except OSError as ex:
+ display.error_as_warning(f"Error in {self.plugin_name!r} cache plugin while trying to write to {tmpfile_path!r}.", exception=ex)
try:
os.rename(tmpfile_path, cachefile)
os.chmod(cachefile, mode=S_IRWU_RG_RO)
- except (OSError, IOError) as e:
- display.warning("error in '%s' cache plugin while trying to move '%s' to '%s' : %s" % (self.plugin_name, tmpfile_path, cachefile, to_bytes(e)))
+ except OSError as ex:
+ display.error_as_warning(f"Error in {self.plugin_name!r} cache plugin while trying to move {tmpfile_path!r} to {cachefile!r}.", exception=ex)
finally:
try:
os.unlink(tmpfile_path)
@@ -182,12 +180,12 @@ class BaseFileCacheModule(BaseCacheModule):
cachefile = self._get_cache_file_name(key)
try:
st = os.stat(cachefile)
- except (OSError, IOError) as e:
- if e.errno == errno.ENOENT:
- return False
- else:
- display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
- return False
+ except FileNotFoundError:
+ return False
+ except OSError as ex:
+ display.error_as_warning(f"Error in {self.plugin_name!r} cache plugin while trying to stat {cachefile!r}.", exception=ex)
+
+ return False
if time.time() - st.st_mtime <= self._timeout:
return False
@@ -224,11 +222,10 @@ class BaseFileCacheModule(BaseCacheModule):
try:
os.stat(cachefile)
return True
- except (OSError, IOError) as e:
- if e.errno == errno.ENOENT:
- return False
- else:
- display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
+ except FileNotFoundError:
+ return False
+ except OSError as ex:
+ display.error_as_warning(f"Error in {self.plugin_name!r} cache plugin while trying to stat {cachefile!r}.", exception=ex)
def delete(self, key):
try:
@@ -237,7 +234,7 @@ class BaseFileCacheModule(BaseCacheModule):
pass
try:
os.remove(self._get_cache_file_name(key))
- except (OSError, IOError):
+ except OSError:
pass # TODO: only pass on non existing?
def flush(self):
@@ -245,14 +242,8 @@ class BaseFileCacheModule(BaseCacheModule):
for key in self.keys():
self.delete(key)
- def copy(self):
- ret = dict()
- for key in self.keys():
- ret[key] = self.get(key)
- return ret
-
@abstractmethod
- def _load(self, filepath):
+ def _load(self, filepath: str) -> object:
"""
Read data from a filepath and return it as a value
@@ -271,7 +262,7 @@ class BaseFileCacheModule(BaseCacheModule):
pass
@abstractmethod
- def _dump(self, value, filepath):
+ def _dump(self, value: object, filepath: str) -> None:
"""
Write data to a filepath
@@ -281,19 +272,13 @@ class BaseFileCacheModule(BaseCacheModule):
pass
-class CachePluginAdjudicator(MutableMapping):
- """
- Intermediary between a cache dictionary and a CacheModule
- """
+class CachePluginAdjudicator(c.MutableMapping):
+ """Batch update wrapper around a cache plugin."""
+
def __init__(self, plugin_name='memory', **kwargs):
self._cache = {}
self._retrieved = {}
-
self._plugin = cache_loader.get(plugin_name, **kwargs)
- if not self._plugin:
- raise AnsibleError('Unable to load the cache plugin (%s).' % plugin_name)
-
- self._plugin_name = plugin_name
def update_cache_if_changed(self):
if self._retrieved != self._cache:
@@ -302,6 +287,7 @@ class CachePluginAdjudicator(MutableMapping):
def set_cache(self):
for top_level_cache_key in self._cache.keys():
self._plugin.set(top_level_cache_key, self._cache[top_level_cache_key])
+
self._retrieved = copy.deepcopy(self._cache)
def load_whole_cache(self):
@@ -309,7 +295,7 @@ class CachePluginAdjudicator(MutableMapping):
self._cache[key] = self._plugin.get(key)
def __repr__(self):
- return to_text(self._cache)
+ return repr(self._cache)
def __iter__(self):
return iter(self.keys())
@@ -319,13 +305,10 @@ class CachePluginAdjudicator(MutableMapping):
def _do_load_key(self, key):
load = False
- if all([
- key not in self._cache,
- key not in self._retrieved,
- self._plugin_name != 'memory',
- self._plugin.contains(key),
- ]):
+
+ if key not in self._cache and key not in self._retrieved and self._plugin._persistent and self._plugin.contains(key):
load = True
+
return load
def __getitem__(self, key):
@@ -336,16 +319,18 @@ class CachePluginAdjudicator(MutableMapping):
pass
else:
self._retrieved[key] = self._cache[key]
+
return self._cache[key]
def get(self, key, default=None):
if self._do_load_key(key):
try:
self._cache[key] = self._plugin.get(key)
- except KeyError as e:
+ except KeyError:
pass
else:
self._retrieved[key] = self._cache[key]
+
return self._cache.get(key, default)
def items(self):
@@ -360,6 +345,7 @@ class CachePluginAdjudicator(MutableMapping):
def pop(self, key, *args):
if args:
return self._cache.pop(key, args[0])
+
return self._cache.pop(key)
def __delitem__(self, key):
@@ -368,6 +354,9 @@ class CachePluginAdjudicator(MutableMapping):
def __setitem__(self, key, value):
self._cache[key] = value
+ def clear(self):
+ self.flush()
+
def flush(self):
self._plugin.flush()
self._cache = {}
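
CachePluginAdjudicator's contract — mutate freely in memory, then write back only when something actually changed — can be sketched without the plugin machinery (a simplified stand-in, not the real class):

import copy

class BatchCache:
    """Simplified stand-in for the adjudicator: a dict front-end over a backend store."""

    def __init__(self, backend: dict):
        self._backend = backend
        self._cache: dict = {}       # working copy, mutated freely
        self._retrieved: dict = {}   # snapshot of what was read from the backend

    def __getitem__(self, key):
        if key not in self._cache and key in self._backend:
            self._cache[key] = self._backend[key]
            self._retrieved[key] = copy.deepcopy(self._cache[key])
        return self._cache[key]

    def __setitem__(self, key, value):
        self._cache[key] = value

    def update_cache_if_changed(self):
        if self._retrieved != self._cache:  # write back only on change
            self._backend.update(copy.deepcopy(self._cache))
            self._retrieved = copy.deepcopy(self._cache)

backend = {'host1': {'uptime': 42}}
cache = BatchCache(backend)
cache['host2'] = {'uptime': 7}
cache.update_cache_if_changed()
assert backend['host2'] == {'uptime': 7}
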
diff --git a/lib/ansible/plugins/cache/base.py b/lib/ansible/plugins/cache/base.py
index a7c7468b820..837365d9b4a 100644
--- a/lib/ansible/plugins/cache/base.py
+++ b/lib/ansible/plugins/cache/base.py
@@ -18,3 +18,11 @@ from __future__ import annotations
# moved actual classes to __init__ kept here for backward compat with 3rd parties
from ansible.plugins.cache import BaseCacheModule, BaseFileCacheModule # pylint: disable=unused-import
+
+from ansible.utils.display import Display as _Display
+
+_Display().deprecated(
+ msg="The `ansible.plugins.cache.base` Python module is deprecated.",
+ help_text="Import from `ansible.plugins.cache` instead.",
+ version="2.23",
+)
diff --git a/lib/ansible/plugins/cache/jsonfile.py b/lib/ansible/plugins/cache/jsonfile.py
index 6184947b6c9..00ead7c77c6 100644
--- a/lib/ansible/plugins/cache/jsonfile.py
+++ b/lib/ansible/plugins/cache/jsonfile.py
@@ -40,23 +40,17 @@ DOCUMENTATION = """
type: integer
"""
-import codecs
import json
+import pathlib
-from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
from ansible.plugins.cache import BaseFileCacheModule
class CacheModule(BaseFileCacheModule):
- """
- A caching module backed by json files.
- """
-
- def _load(self, filepath):
- # Valid JSON is always UTF-8 encoded.
- with codecs.open(filepath, 'r', encoding='utf-8') as f:
- return json.load(f, cls=AnsibleJSONDecoder)
-
- def _dump(self, value, filepath):
- with codecs.open(filepath, 'w', encoding='utf-8') as f:
- f.write(json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4))
+ """A caching module backed by json files."""
+
+ def _load(self, filepath: str) -> object:
+ return json.loads(pathlib.Path(filepath).read_text())
+
+ def _dump(self, value: object, filepath: str) -> None:
+ pathlib.Path(filepath).write_text(json.dumps(value))
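
The plugin now round-trips plain JSON through pathlib, dropping the Ansible-specific encoder/decoder; the standalone equivalent:

import json
import pathlib
import tempfile

cachefile = pathlib.Path(tempfile.mkdtemp()) / 'host1'
cachefile.write_text(json.dumps({'ansible_facts': {'os_family': 'Debian'}}))
assert json.loads(cachefile.read_text())['ansible_facts']['os_family'] == 'Debian'
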
diff --git a/lib/ansible/plugins/cache/memory.py b/lib/ansible/plugins/cache/memory.py
index 780a643f151..055860da6ef 100644
--- a/lib/ansible/plugins/cache/memory.py
+++ b/lib/ansible/plugins/cache/memory.py
@@ -20,12 +20,15 @@ from ansible.plugins.cache import BaseCacheModule
class CacheModule(BaseCacheModule):
+ _persistent = False # prevent unnecessary JSON serialization and key munging
def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
self._cache = {}
def get(self, key):
- return self._cache.get(key)
+ return self._cache[key]
def set(self, key, value):
self._cache[key] = value
@@ -41,12 +44,3 @@ class CacheModule(BaseCacheModule):
def flush(self):
self._cache = {}
-
- def copy(self):
- return self._cache.copy()
-
- def __getstate__(self):
- return self.copy()
-
- def __setstate__(self, data):
- self._cache = data
diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py
index c88fbd55724..2fc52c45c74 100644
--- a/lib/ansible/plugins/callback/__init__.py
+++ b/lib/ansible/plugins/callback/__init__.py
@@ -18,32 +18,34 @@
from __future__ import annotations
import difflib
+import functools
+import inspect
import json
import re
import sys
import textwrap
+import typing as t
+import collections.abc as _c
+
from typing import TYPE_CHECKING
-from collections import OrderedDict
-from collections.abc import MutableMapping
from copy import deepcopy
from ansible import constants as C
-from ansible.module_utils.common.text.converters import to_text
-from ansible.module_utils.six import text_type
-from ansible.parsing.ajson import AnsibleJSONEncoder
-from ansible.parsing.yaml.dumper import AnsibleDumper
-from ansible.parsing.yaml.objects import AnsibleUnicode
+from ansible.module_utils._internal import _datatag
+from ansible._internal._yaml import _dumper
from ansible.plugins import AnsiblePlugin
from ansible.utils.color import stringc
from ansible.utils.display import Display
-from ansible.utils.unsafe_proxy import AnsibleUnsafeText, NativeJinjaUnsafeText
from ansible.vars.clean import strip_internal_keys, module_response_deepcopy
+from ansible.module_utils._internal._json._profiles import _fallback_to_str
+from ansible._internal._templating import _engine
+from ansible.module_utils._internal import _deprecator
import yaml
if TYPE_CHECKING:
- from ansible.executor.task_result import TaskResult
+ from ansible.executor.task_result import CallbackTaskResult
global_display = Display()
@@ -52,23 +54,46 @@ __all__ = ["CallbackBase"]
_DEBUG_ALLOWED_KEYS = frozenset(('msg', 'exception', 'warnings', 'deprecations'))
-_YAML_TEXT_TYPES = (text_type, AnsibleUnicode, AnsibleUnsafeText, NativeJinjaUnsafeText)
# Characters that libyaml/pyyaml consider breaks
_YAML_BREAK_CHARS = '\n\x85\u2028\u2029' # NL, NEL, LS, PS
# regex representation of libyaml/pyyaml of a space followed by a break character
_SPACE_BREAK_RE = re.compile(fr' +([{_YAML_BREAK_CHARS}])')
-class _AnsibleCallbackDumper(AnsibleDumper):
- def __init__(self, lossy=False):
- self._lossy = lossy
+_T_callable = t.TypeVar("_T_callable", bound=t.Callable)
+
- def __call__(self, *args, **kwargs):
- # pyyaml expects that we are passing an object that can be instantiated, but to
- # smuggle the ``lossy`` configuration, we do that in ``__init__`` and then
- # define this ``__call__`` that will mimic the ability for pyyaml to instantiate class
+class _AnsibleCallbackDumper(_dumper.AnsibleDumper):
+ def __init__(self, *args, lossy: bool = False, **kwargs):
super().__init__(*args, **kwargs)
- return self
+
+ self._lossy = lossy
+
+ def _pretty_represent_str(self, data):
+ """Uses block style for multi-line strings"""
+ data = _datatag.AnsibleTagHelper.as_native_type(data)
+
+ if _should_use_block(data):
+ style = '|'
+ if self._lossy:
+ data = _munge_data_for_lossy_yaml(data)
+ else:
+ style = self.default_style
+
+ node = yaml.representer.ScalarNode('tag:yaml.org,2002:str', data, style=style)
+
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+
+ return node
+
+ @classmethod
+ def _register_representers(cls) -> None:
+ super()._register_representers()
+
+ # exact type checks occur first against representers, then subclasses against multi-representers
+ cls.add_representer(str, cls._pretty_represent_str)
+ cls.add_multi_representer(str, cls._pretty_represent_str)
def _should_use_block(scalar):
@@ -77,6 +102,7 @@ def _should_use_block(scalar):
for ch in _YAML_BREAK_CHARS:
if ch in scalar:
return True
+
return False
@@ -95,12 +121,12 @@ class _SpecialCharacterTranslator:
return None
-def _filter_yaml_special(scalar):
+def _filter_yaml_special(scalar: str) -> str:
"""Filter a string removing any character that libyaml/pyyaml declare as special"""
return scalar.translate(_SpecialCharacterTranslator())
-def _munge_data_for_lossy_yaml(scalar):
+def _munge_data_for_lossy_yaml(scalar: str) -> str:
"""Modify a string so that analyze_scalar in libyaml/pyyaml will allow block formatting"""
# we care more about readability than accuracy, so...
# ...libyaml/pyyaml does not permit trailing spaces for block scalars
@@ -113,43 +139,27 @@ def _munge_data_for_lossy_yaml(scalar):
return _SPACE_BREAK_RE.sub(r'\1', scalar)
-def _pretty_represent_str(self, data):
- """Uses block style for multi-line strings"""
- data = text_type(data)
- if _should_use_block(data):
- style = '|'
- if self._lossy:
- data = _munge_data_for_lossy_yaml(data)
- else:
- style = self.default_style
-
- node = yaml.representer.ScalarNode('tag:yaml.org,2002:str', data, style=style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- return node
-
-
-for data_type in _YAML_TEXT_TYPES:
- _AnsibleCallbackDumper.add_representer(
- data_type,
- _pretty_represent_str
- )
-
-
class CallbackBase(AnsiblePlugin):
-
"""
This is a base ansible callback class that does nothing. New callbacks should
use this class as a base and override any callback methods they wish to execute
custom actions.
"""
- def __init__(self, display=None, options=None):
+ _implemented_callback_methods: frozenset[str] = frozenset()
+ """Set of callback methods overridden by each subclass; used by TQM to bypass callback dispatch on no-op methods."""
+
+ def __init__(self, display: Display | None = None, options: dict[str, t.Any] | None = None) -> None:
+ super().__init__()
+
if display:
self._display = display
else:
self._display = global_display
+ # FUTURE: fix double-loading of non-collection stdout callback plugins that don't set CALLBACK_NEEDS_ENABLED
+
+ # FUTURE: this code is broken for 2.x - it should just use the type names and always assume 2.0+ for normal cases
if self._display.verbosity >= 4:
name = getattr(self, 'CALLBACK_NAME', 'unnamed')
ctype = getattr(self, 'CALLBACK_TYPE', 'old')
@@ -159,21 +169,72 @@ class CallbackBase(AnsiblePlugin):
self.disabled = False
self.wants_implicit_tasks = False
- self._plugin_options = {}
+ self._plugin_options: dict[str, t.Any] = {}
+
if options is not None:
self.set_options(options)
- self._hide_in_debug = ('changed', 'failed', 'skipped', 'invocation', 'skip_reason')
+ self._hide_in_debug = (
+ 'changed', 'failed', 'skipped', 'invocation', 'skip_reason',
+ 'ansible_loop_var', 'ansible_index_var', 'ansible_loop',
+ )
+
+ self._current_task_result: CallbackTaskResult | None = None
# helper for callbacks, so they don't all have to include deepcopy
_copy_result = deepcopy
+ def _init_callback_methods(self) -> None:
+ """Record analysis of callback methods on each callback instance for dispatch optimization and deprecation warnings."""
+ implemented_callback_methods: set[str] = set()
+ deprecated_v1_method_overrides: set[str] = set()
+ plugin_file = sys.modules[type(self).__module__].__file__
+
+ if plugin_info := _deprecator._path_as_plugininfo(plugin_file):
+ plugin_name = plugin_info.resolved_name
+ else:
+ plugin_name = plugin_file
+
+ for base_v2_method, base_v1_method in CallbackBase._v2_v1_method_map.items():
+ method_name = None
+
+ if not inspect.ismethod(method := getattr(self, (v2_method_name := base_v2_method.__name__))) or method.__func__ is not base_v2_method:
+ implemented_callback_methods.add(v2_method_name) # v2 method directly implemented by subclass
+ method_name = v2_method_name
+ elif base_v1_method is None:
+ pass # no corresponding v1 method
+ elif not inspect.ismethod(method := getattr(self, (v1_method_name := base_v1_method.__name__))) or method.__func__ is not base_v1_method:
+ implemented_callback_methods.add(v2_method_name) # v1 method directly implemented by subclass
+ deprecated_v1_method_overrides.add(v1_method_name)
+ method_name = v1_method_name
+
+ if method_name and v2_method_name == 'v2_on_any':
+ deprecated_v1_method_overrides.discard(method_name) # avoid including v1 on_any in the v1 deprecation below
+
+ global_display.deprecated(
+ msg=f'The {plugin_name!r} callback plugin implements deprecated method {method_name!r}.',
+ version='2.23',
+ help_text='Use event-specific callback methods instead.',
+ )
+
+ self._implemented_callback_methods = frozenset(implemented_callback_methods)
+
+ if deprecated_v1_method_overrides:
+ global_display.deprecated(
+ msg=f'The {plugin_name!r} callback plugin implements the following deprecated method(s): {", ".join(sorted(deprecated_v1_method_overrides))}',
+ version='2.23',
+ help_text='Implement the `v2_*` equivalent callback method(s) instead.',
+ )
+
def set_option(self, k, v):
self._plugin_options[k] = C.config.get_config_value(k, plugin_type=self.plugin_type, plugin_name=self._load_name, direct={k: v})
- def get_option(self, k):
+ def get_option(self, k, hostvars=None):
return self._plugin_options[k]
+ def has_option(self, option):
+ return (option in self._plugin_options)
+
def set_options(self, task_keys=None, var_options=None, direct=None):
""" This is different than the normal plugin method as callbacks get called early and really don't accept keywords.
Also _options was already taken for CLI args and callbacks use _plugin_options instead.
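
_init_callback_methods decides whether a subclass overrides a base callback method by comparing the bound method's __func__ against the base class function; the core comparison in isolation:

import inspect

class Base:
    def v2_runner_on_ok(self, result):
        pass

class Custom(Base):
    def v2_runner_on_ok(self, result):
        print(result)

def overrides(instance, base_func) -> bool:
    method = getattr(instance, base_func.__name__)
    return not inspect.ismethod(method) or method.__func__ is not base_func

assert overrides(Custom(), Base.v2_runner_on_ok)
assert not overrides(Base(), Base.v2_runner_on_ok)
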
@@ -183,25 +244,30 @@ class CallbackBase(AnsiblePlugin):
self._plugin_options = C.config.get_plugin_options(self.plugin_type, self._load_name, keys=task_keys, variables=var_options, direct=direct)
@staticmethod
- def host_label(result):
- """Return label for the hostname (& delegated hostname) of a task
- result.
- """
- label = "%s" % result._host.get_name()
- if result._task.delegate_to and result._task.delegate_to != result._host.get_name():
+ def host_label(result: CallbackTaskResult) -> str:
+ """Return label for the hostname (& delegated hostname) of a task result."""
+ label = result.host.get_name()
+ if result.task.delegate_to and result.task.delegate_to != result.host.get_name():
# show delegated host
- label += " -> %s" % result._task.delegate_to
+ label += " -> %s" % result.task.delegate_to
# in case we have 'extra resolution'
- ahost = result._result.get('_ansible_delegated_vars', {}).get('ansible_host', result._task.delegate_to)
- if result._task.delegate_to != ahost:
+ ahost = result.result.get('_ansible_delegated_vars', {}).get('ansible_host', result.task.delegate_to)
+ if result.task.delegate_to != ahost:
label += "(%s)" % ahost
return label
- def _run_is_verbose(self, result, verbosity=0):
- return ((self._display.verbosity > verbosity or result._result.get('_ansible_verbose_always', False) is True)
- and result._result.get('_ansible_verbose_override', False) is False)
-
- def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False, serialize=True):
+ def _run_is_verbose(self, result: CallbackTaskResult, verbosity: int = 0) -> bool:
+ return ((self._display.verbosity > verbosity or result.result.get('_ansible_verbose_always', False) is True)
+ and result.result.get('_ansible_verbose_override', False) is False)
+
+ def _dump_results(
+ self,
+ result: _c.Mapping[str, t.Any],
+ indent: int | None = None,
+ sort_keys: bool = True,
+ keep_invocation: bool = False,
+ serialize: bool = True,
+ ) -> str:
try:
result_format = self.get_option('result_format')
except KeyError:
@@ -238,9 +304,12 @@ class CallbackBase(AnsiblePlugin):
if self._display.verbosity < 3 and 'diff' in result:
del abridged_result['diff']
- # remove exception from screen output
- if 'exception' in abridged_result:
- del abridged_result['exception']
+ # remove error/warning values; the stdout callback should have already handled them
+ abridged_result.pop('exception', None)
+ abridged_result.pop('warnings', None)
+ abridged_result.pop('deprecations', None)
+
+ abridged_result = _engine.TemplateEngine().transform(abridged_result) # ensure the dumped view matches the transformed view a playbook sees
if not serialize:
# Just return ``abridged_result`` without going through serialization
@@ -249,18 +318,9 @@ class CallbackBase(AnsiblePlugin):
return abridged_result
if result_format == 'json':
- try:
- return json.dumps(abridged_result, cls=AnsibleJSONEncoder, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
- except TypeError:
- # Python3 bug: throws an exception when keys are non-homogenous types:
- # https://bugs.python.org/issue25457
- # sort into an OrderedDict and then json.dumps() that instead
- if not OrderedDict:
- raise
- return json.dumps(OrderedDict(sorted(abridged_result.items(), key=to_text)),
- cls=AnsibleJSONEncoder, indent=indent,
- ensure_ascii=False, sort_keys=False)
- elif result_format == 'yaml':
+ return json.dumps(abridged_result, cls=_fallback_to_str.Encoder, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
+
+ if result_format == 'yaml':
# None is a sentinel in this case that indicates default behavior
# default behavior for yaml is to prettify results
lossy = pretty_results in (None, True)
@@ -277,7 +337,7 @@ class CallbackBase(AnsiblePlugin):
yaml.dump(
abridged_result,
allow_unicode=True,
- Dumper=_AnsibleCallbackDumper(lossy=lossy),
+ Dumper=functools.partial(_AnsibleCallbackDumper, lossy=lossy),
default_flow_style=False,
indent=indent,
# sort_keys=sort_keys # This requires PyYAML>=5.1
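
functools.partial works here because PyYAML instantiates the Dumper class itself as Dumper(stream, **kwargs), so extra constructor arguments have to be pre-bound rather than passed by the caller. A minimal demonstration with a hypothetical SafeDumper subclass (assumes PyYAML is available):

import functools
import yaml

class LossyDumper(yaml.SafeDumper):
    def __init__(self, *args, lossy: bool = False, **kwargs):
        super().__init__(*args, **kwargs)
        self._lossy = lossy  # available to representers during dumping

print(yaml.dump({'a': 1}, Dumper=functools.partial(LossyDumper, lossy=True)))
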
@@ -285,32 +345,36 @@ class CallbackBase(AnsiblePlugin):
' ' * (indent or 4)
)
- def _handle_warnings(self, res):
- """ display warnings, if enabled and any exist in the result """
- if C.ACTION_WARNINGS:
- if 'warnings' in res and res['warnings']:
- for warning in res['warnings']:
- self._display.warning(warning)
- del res['warnings']
- if 'deprecations' in res and res['deprecations']:
- for warning in res['deprecations']:
- self._display.deprecated(**warning)
- del res['deprecations']
-
- def _handle_exception(self, result, use_stderr=False):
-
- if 'exception' in result:
- msg = "An exception occurred during task execution. "
- exception_str = to_text(result['exception'])
- if self._display.verbosity < 3:
- # extract just the actual error message from the exception text
- error = exception_str.strip().split('\n')[-1]
- msg += "To see the full traceback, use -vvv. The error was: %s" % error
- else:
- msg = "The full traceback is:\n" + exception_str
- del result['exception']
+ # DTFIX5: add test to exercise this case
+ raise ValueError(f'Unsupported result_format {result_format!r}.')
+
+ def _handle_warnings(self, res: _c.MutableMapping[str, t.Any]) -> None:
+ """Display warnings and deprecation warnings sourced by task execution."""
+ if res.pop('warnings', None) and self._current_task_result and (warnings := self._current_task_result.warnings):
+ # display warnings from the current task result if `warnings` was not removed from `result` (or made falsey)
+ for warning in warnings:
+ self._display._warning(warning)
+
+ if res.pop('deprecations', None) and self._current_task_result and (deprecations := self._current_task_result.deprecations):
+ # display deprecations from the current task result if `deprecations` was not removed from `result` (or made falsey)
+ for deprecation in deprecations:
+ self._display._deprecated(deprecation)
+
+ def _handle_exception(self, result: _c.MutableMapping[str, t.Any], use_stderr: bool = False) -> None:
+ if result.pop('exception', None) and self._current_task_result and (exception := self._current_task_result.exception):
+ # display exception from the current task result if `exception` was not removed from `result` (or made falsey)
+ self._display._error(exception, stderr=use_stderr)
+
+ def _handle_warnings_and_exception(self, result: CallbackTaskResult) -> None:
+ """Standardized handling of warnings/deprecations and exceptions from a task/item result."""
+ # DTFIX5: make/doc/porting-guide a public version of this method?
+ try:
+ use_stderr = self.get_option('display_failed_stderr')
+ except KeyError:
+ use_stderr = False
- self._display.display(msg, color=C.COLOR_ERROR, stderr=use_stderr)
+ self._handle_warnings(result.result)
+ self._handle_exception(result.result, use_stderr=use_stderr)
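# Sketch of a hypothetical third-party stdout callback adopting the new
# consolidated handler: a single _handle_warnings_and_exception() call replaces
# the separate _handle_exception()/_handle_warnings() pair shown removed above.
from ansible.plugins.callback import CallbackBase


class CallbackModule(CallbackBase):
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'example_stdout'

    def v2_runner_on_failed(self, result, ignore_errors=False):
        self._handle_warnings_and_exception(result)  # warnings, deprecations, exception
        msg = "fatal: [%s] => %s" % (result.host.get_name(), self._dump_results(result.result))
        self._display.display(msg)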
def _serialize_diff(self, diff):
try:
@@ -327,7 +391,8 @@ class CallbackBase(AnsiblePlugin):
if result_format == 'json':
return json.dumps(diff, sort_keys=True, indent=4, separators=(u',', u': ')) + u'\n'
- elif result_format == 'yaml':
+
+ if result_format == 'yaml':
# None is a sentinel in this case that indicates default behavior
# default behavior for yaml is to prettify results
lossy = pretty_results in (None, True)
@@ -335,7 +400,7 @@ class CallbackBase(AnsiblePlugin):
yaml.dump(
diff,
allow_unicode=True,
- Dumper=_AnsibleCallbackDumper(lossy=lossy),
+ Dumper=functools.partial(_AnsibleCallbackDumper, lossy=lossy),
default_flow_style=False,
indent=4,
# sort_keys=sort_keys # This requires PyYAML>=5.1
@@ -343,6 +408,9 @@ class CallbackBase(AnsiblePlugin):
' '
)
+ # DTFIX5: add test to exercise this case
+ raise ValueError(f'Unsupported result_format {result_format!r}.')
+
def _get_diff(self, difflist):
if not isinstance(difflist, list):
@@ -361,7 +429,7 @@ class CallbackBase(AnsiblePlugin):
if 'before' in diff and 'after' in diff:
# format complex structures into 'files'
for x in ['before', 'after']:
- if isinstance(diff[x], MutableMapping):
+ if isinstance(diff[x], _c.Mapping):
diff[x] = self._serialize_diff(diff[x])
elif diff[x] is None:
diff[x] = ''
@@ -403,7 +471,7 @@ class CallbackBase(AnsiblePlugin):
ret.append(diff['prepared'])
return u''.join(ret)
- def _get_item_label(self, result):
+ def _get_item_label(self, result: _c.Mapping[str, t.Any]) -> t.Any:
""" retrieves the value to be displayed as a label for an item entry from a result object"""
if result.get('_ansible_no_log', False):
item = "(censored due to no_log)"
@@ -411,14 +479,15 @@ class CallbackBase(AnsiblePlugin):
item = result.get('_ansible_item_label', result.get('item'))
return item
- def _process_items(self, result):
+ def _process_items(self, result: CallbackTaskResult) -> None:
# just remove them as now they get handled by individual callbacks
- del result._result['results']
+ del result.result['results']
def _clean_results(self, result, task_name):
""" removes data from results for display """
# mostly controls that debug only outputs what it was meant to
+ # FIXME: this is a terrible heuristic to format debug's output; it masks exception detail
if task_name in C._ACTION_DEBUG:
if 'msg' in result:
# msg should be alone
@@ -453,9 +522,6 @@ class CallbackBase(AnsiblePlugin):
def runner_on_unreachable(self, host, res):
pass
- def runner_on_no_hosts(self):
- pass
-
def runner_on_async_poll(self, host, res, jid, clock):
pass
@@ -483,15 +549,6 @@ class CallbackBase(AnsiblePlugin):
def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
pass
- def playbook_on_setup(self):
- pass
-
- def playbook_on_import_for_host(self, host, imported_file):
- pass
-
- def playbook_on_not_import_for_host(self, host, missing_file):
- pass
-
def playbook_on_play_start(self, name):
pass
@@ -505,80 +562,104 @@ class CallbackBase(AnsiblePlugin):
def v2_on_any(self, *args, **kwargs):
self.on_any(args, kwargs)
- def v2_runner_on_failed(self, result: TaskResult, ignore_errors: bool = False) -> None:
- """Get details about a failed task and whether or not Ansible should continue
- running tasks on the host where the failure occurred, then process the details
- as required by the callback (output, profiling, logging, notifications, etc.)
-
- Note: The 'ignore_errors' directive only works when the task can run and returns
- a value of 'failed'. It does not make Ansible ignore undefined variable errors,
- connection failures, execution issues (for example, missing packages), or syntax errors.
+ def v2_runner_on_failed(self, result: CallbackTaskResult, ignore_errors: bool = False) -> None:
+ """Process results of a failed task.
- Customization note: For more information about the attributes and methods of the
- TaskResult class, see lib/ansible/executor/task_result.py.
+ Note: The value of 'ignore_errors' tells Ansible whether to
+ continue running tasks on the host where this task failed.
+ But the 'ignore_errors' directive only works when the task can
+ run and returns a value of 'failed'. It does not make Ansible
+ ignore undefined variable errors, connection failures, execution
+ issues (for example, missing packages), or syntax errors.
- :param TaskResult result: An object that contains details about the task
- :param bool ignore_errors: Whether or not Ansible should continue running tasks on the host
- where the failure occurred
+ :param result: The parameters of the task and its results.
+ :type result: CallbackTaskResult
+ :param ignore_errors: Whether Ansible should continue \
+ running tasks on the host where the task failed.
+ :type ignore_errors: bool
:return: None
+ :rtype: None
"""
- host = result._host.get_name()
- self.runner_on_failed(host, result._result, ignore_errors)
+ host = result.host.get_name()
+ self.runner_on_failed(host, result.result, ignore_errors)
- def v2_runner_on_ok(self, result: TaskResult) -> None:
- """Get details about a successful task and process them as required by the callback
- (output, profiling, logging, notifications, etc.)
+ def v2_runner_on_ok(self, result: CallbackTaskResult) -> None:
+ """Process results of a successful task.
- Customization note: For more information about the attributes and methods of the
- TaskResult class, see lib/ansible/executor/task_result.py.
-
- :param TaskResult result: An object that contains details about the task
+ :param result: The parameters of the task and its results.
+ :type result: CallbackTaskResult
:return: None
+ :rtype: None
"""
- host = result._host.get_name()
- self.runner_on_ok(host, result._result)
-
- def v2_runner_on_skipped(self, result: TaskResult) -> None:
- """Get details about a skipped task and process them as required by the callback
- (output, profiling, logging, notifications, etc.)
+ host = result.host.get_name()
+ self.runner_on_ok(host, result.result)
- Customization note: For more information about the attributes and methods of the
- TaskResult class, see lib/ansible/executor/task_result.py.
+ def v2_runner_on_skipped(self, result: CallbackTaskResult) -> None:
+ """Process results of a skipped task.
- :param TaskResult result: An object that contains details about the task
+ :param result: The parameters of the task and its results.
+ :type result: CallbackTaskResult
:return: None
+ :rtype: None
"""
if C.DISPLAY_SKIPPED_HOSTS:
- host = result._host.get_name()
- self.runner_on_skipped(host, self._get_item_label(getattr(result._result, 'results', {})))
+ host = result.host.get_name()
+ self.runner_on_skipped(host, self._get_item_label(getattr(result.result, 'results', {})))
- def v2_runner_on_unreachable(self, result):
- host = result._host.get_name()
- self.runner_on_unreachable(host, result._result)
+ def v2_runner_on_unreachable(self, result: CallbackTaskResult) -> None:
+ """Process results of a task if a target node is unreachable.
- def v2_runner_on_async_poll(self, result):
- host = result._host.get_name()
- jid = result._result.get('ansible_job_id')
+ :param result: The parameters of the task and its results.
+ :type result: CallbackTaskResult
+
+ :return: None
+ :rtype: None
+ """
+ host = result.host.get_name()
+ self.runner_on_unreachable(host, result.result)
+
+ def v2_runner_on_async_poll(self, result: CallbackTaskResult) -> None:
+ """Get details about an unfinished task running in async mode.
+
+ Note: The value of the `poll` keyword in the task determines
+ the interval at which polling occurs and this method is run.
+
+ :param result: The parameters of the task and its status.
+ :type result: CallbackTaskResult
+
+ :return: None
+ :rtype: None
+ """
+ host = result.host.get_name()
+ jid = result.result.get('ansible_job_id')
# FIXME, get real clock
clock = 0
- self.runner_on_async_poll(host, result._result, jid, clock)
+ self.runner_on_async_poll(host, result.result, jid, clock)
+
+ def v2_runner_on_async_ok(self, result: CallbackTaskResult) -> None:
+ """Process results of a successful task that ran in async mode.
- def v2_runner_on_async_ok(self, result):
- host = result._host.get_name()
- jid = result._result.get('ansible_job_id')
- self.runner_on_async_ok(host, result._result, jid)
+ :param result: The parameters of the task and its results.
+ :type result: CallbackTaskResult
- def v2_runner_on_async_failed(self, result):
- host = result._host.get_name()
+ :return: None
+ :rtype: None
+ """
+ host = result.host.get_name()
+ jid = result.result.get('ansible_job_id')
+ self.runner_on_async_ok(host, result.result, jid)
+
+ def v2_runner_on_async_failed(self, result: CallbackTaskResult) -> None:
+ host = result.host.get_name()
# Attempt to get the async job ID. If the job does not finish before the
# async timeout value, the ID may be within the unparsed 'async_result' dict.
- jid = result._result.get('ansible_job_id')
- if not jid and 'async_result' in result._result:
- jid = result._result['async_result'].get('ansible_job_id')
- self.runner_on_async_failed(host, result._result, jid)
+ jid = result.result.get('ansible_job_id')
+ if not jid and 'async_result' in result.result:
+ jid = result.result['async_result'].get('ansible_job_id')
+ self.runner_on_async_failed(host, result.result, jid)
def v2_playbook_on_start(self, playbook):
self.playbook_on_start()
@@ -595,50 +676,36 @@ class CallbackBase(AnsiblePlugin):
def v2_playbook_on_task_start(self, task, is_conditional):
self.playbook_on_task_start(task.name, is_conditional)
- # FIXME: not called
- def v2_playbook_on_cleanup_task_start(self, task):
- pass # no v1 correspondence
-
def v2_playbook_on_handler_task_start(self, task):
pass # no v1 correspondence
def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
self.playbook_on_vars_prompt(varname, private, prompt, encrypt, confirm, salt_size, salt, default, unsafe)
- # FIXME: not called
- def v2_playbook_on_import_for_host(self, result, imported_file):
- host = result._host.get_name()
- self.playbook_on_import_for_host(host, imported_file)
-
- # FIXME: not called
- def v2_playbook_on_not_import_for_host(self, result, missing_file):
- host = result._host.get_name()
- self.playbook_on_not_import_for_host(host, missing_file)
-
def v2_playbook_on_play_start(self, play):
self.playbook_on_play_start(play.name)
def v2_playbook_on_stats(self, stats):
self.playbook_on_stats(stats)
- def v2_on_file_diff(self, result):
- if 'diff' in result._result:
- host = result._host.get_name()
- self.on_file_diff(host, result._result['diff'])
+ def v2_on_file_diff(self, result: CallbackTaskResult) -> None:
+ if 'diff' in result.result:
+ host = result.host.get_name()
+ self.on_file_diff(host, result.result['diff'])
def v2_playbook_on_include(self, included_file):
pass # no v1 correspondence
- def v2_runner_item_on_ok(self, result):
+ def v2_runner_item_on_ok(self, result: CallbackTaskResult) -> None:
pass
- def v2_runner_item_on_failed(self, result):
+ def v2_runner_item_on_failed(self, result: CallbackTaskResult) -> None:
pass
- def v2_runner_item_on_skipped(self, result):
+ def v2_runner_item_on_skipped(self, result: CallbackTaskResult) -> None:
pass
- def v2_runner_retry(self, result):
+ def v2_runner_retry(self, result: CallbackTaskResult) -> None:
pass
def v2_runner_on_start(self, host, task):
@@ -647,3 +714,31 @@ class CallbackBase(AnsiblePlugin):
.. versionadded:: 2.8
"""
pass
+
+ _v2_v1_method_map = {
+ v2_on_any: on_any,
+ v2_on_file_diff: on_file_diff,
+ v2_playbook_on_handler_task_start: None,
+ v2_playbook_on_include: None,
+ v2_playbook_on_no_hosts_matched: playbook_on_no_hosts_matched,
+ v2_playbook_on_no_hosts_remaining: playbook_on_no_hosts_remaining,
+ v2_playbook_on_notify: playbook_on_notify,
+ v2_playbook_on_play_start: playbook_on_play_start,
+ v2_playbook_on_start: playbook_on_start,
+ v2_playbook_on_stats: playbook_on_stats,
+ v2_playbook_on_task_start: playbook_on_task_start,
+ v2_playbook_on_vars_prompt: playbook_on_vars_prompt,
+ v2_runner_item_on_failed: None,
+ v2_runner_item_on_ok: None,
+ v2_runner_item_on_skipped: None,
+ v2_runner_on_async_failed: runner_on_async_failed,
+ v2_runner_on_async_ok: runner_on_async_ok,
+ v2_runner_on_async_poll: runner_on_async_poll,
+ v2_runner_on_failed: runner_on_failed,
+ v2_runner_on_ok: runner_on_ok,
+ v2_runner_on_skipped: runner_on_skipped,
+ v2_runner_on_start: None,
+ v2_runner_on_unreachable: runner_on_unreachable,
+ v2_runner_retry: None,
+ }
+ """Internal mapping of v2 callback methods with v1 counterparts; populated after type init for deprecation warnings and bypass calculation."""
diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py
index 39bd5a45f39..5032d917c42 100644
--- a/lib/ansible/plugins/callback/default.py
+++ b/lib/ansible/plugins/callback/default.py
@@ -21,6 +21,7 @@ DOCUMENTATION = """
from ansible import constants as C
from ansible import context
+from ansible.executor.task_result import CallbackTaskResult
from ansible.playbook.task_include import TaskInclude
from ansible.plugins.callback import CallbackBase
from ansible.utils.color import colorize, hostcolor
@@ -46,40 +47,39 @@ class CallbackModule(CallbackBase):
self._task_type_cache = {}
super(CallbackModule, self).__init__()
- def v2_runner_on_failed(self, result, ignore_errors=False):
-
+ def v2_runner_on_failed(self, result: CallbackTaskResult, ignore_errors: bool = False) -> None:
host_label = self.host_label(result)
- self._clean_results(result._result, result._task.action)
- if self._last_task_banner != result._task._uuid:
- self._print_task_banner(result._task)
+ if self._last_task_banner != result.task._uuid:
+ self._print_task_banner(result.task)
- self._handle_exception(result._result, use_stderr=self.get_option('display_failed_stderr'))
- self._handle_warnings(result._result)
+ self._handle_warnings_and_exception(result)
- if result._task.loop and 'results' in result._result:
- self._process_items(result)
+ # FIXME: this method should not exist, delegate "suggested keys to display" to the plugin or something... As-is, the placement of this
+ # call obliterates `results`, which causes a task summary to be printed on loop failures, which we don't do anywhere else.
+ self._clean_results(result.result, result.task.action)
+ if result.task.loop and 'results' in result.result:
+ self._process_items(result)
else:
if self._display.verbosity < 2 and self.get_option('show_task_path_on_failure'):
- self._print_task_path(result._task)
- msg = "fatal: [%s]: FAILED! => %s" % (host_label, self._dump_results(result._result))
+ self._print_task_path(result.task)
+ msg = "fatal: [%s]: FAILED! => %s" % (host_label, self._dump_results(result.result))
self._display.display(msg, color=C.COLOR_ERROR, stderr=self.get_option('display_failed_stderr'))
if ignore_errors:
self._display.display("...ignoring", color=C.COLOR_SKIP)
- def v2_runner_on_ok(self, result):
-
+ def v2_runner_on_ok(self, result: CallbackTaskResult) -> None:
host_label = self.host_label(result)
- if isinstance(result._task, TaskInclude):
- if self._last_task_banner != result._task._uuid:
- self._print_task_banner(result._task)
+ if isinstance(result.task, TaskInclude):
+ if self._last_task_banner != result.task._uuid:
+ self._print_task_banner(result.task)
return
- elif result._result.get('changed', False):
- if self._last_task_banner != result._task._uuid:
- self._print_task_banner(result._task)
+ elif result.result.get('changed', False):
+ if self._last_task_banner != result.task._uuid:
+ self._print_task_banner(result.task)
msg = "changed: [%s]" % (host_label,)
color = C.COLOR_CHANGED
@@ -87,49 +87,52 @@ class CallbackModule(CallbackBase):
if not self.get_option('display_ok_hosts'):
return
- if self._last_task_banner != result._task._uuid:
- self._print_task_banner(result._task)
+ if self._last_task_banner != result.task._uuid:
+ self._print_task_banner(result.task)
msg = "ok: [%s]" % (host_label,)
color = C.COLOR_OK
- self._handle_warnings(result._result)
+ self._handle_warnings_and_exception(result)
- if result._task.loop and 'results' in result._result:
+ if result.task.loop and 'results' in result.result:
self._process_items(result)
else:
- self._clean_results(result._result, result._task.action)
+ self._clean_results(result.result, result.task.action)
if self._run_is_verbose(result):
- msg += " => %s" % (self._dump_results(result._result),)
+ msg += " => %s" % (self._dump_results(result.result),)
self._display.display(msg, color=color)
- def v2_runner_on_skipped(self, result):
-
+ def v2_runner_on_skipped(self, result: CallbackTaskResult) -> None:
if self.get_option('display_skipped_hosts'):
- self._clean_results(result._result, result._task.action)
+ self._clean_results(result.result, result.task.action)
+
+ if self._last_task_banner != result.task._uuid:
+ self._print_task_banner(result.task)
- if self._last_task_banner != result._task._uuid:
- self._print_task_banner(result._task)
+ self._handle_warnings_and_exception(result)
- if result._task.loop is not None and 'results' in result._result:
+ if result.task.loop is not None and 'results' in result.result:
self._process_items(result)
- msg = "skipping: [%s]" % result._host.get_name()
+ msg = "skipping: [%s]" % result.host.get_name()
if self._run_is_verbose(result):
- msg += " => %s" % self._dump_results(result._result)
+ msg += " => %s" % self._dump_results(result.result)
self._display.display(msg, color=C.COLOR_SKIP)
- def v2_runner_on_unreachable(self, result):
- if self._last_task_banner != result._task._uuid:
- self._print_task_banner(result._task)
+ def v2_runner_on_unreachable(self, result: CallbackTaskResult) -> None:
+ if self._last_task_banner != result.task._uuid:
+ self._print_task_banner(result.task)
+
+ self._handle_warnings_and_exception(result)
host_label = self.host_label(result)
- msg = "fatal: [%s]: UNREACHABLE! => %s" % (host_label, self._dump_results(result._result))
+ msg = "fatal: [%s]: UNREACHABLE! => %s" % (host_label, self._dump_results(result.result))
self._display.display(msg, color=C.COLOR_UNREACHABLE, stderr=self.get_option('display_failed_stderr'))
- if result._task.ignore_unreachable:
+ if result.task.ignore_unreachable:
self._display.display("...ignoring", color=C.COLOR_SKIP)
def v2_playbook_on_no_hosts_matched(self):
@@ -171,6 +174,7 @@ class CallbackModule(CallbackBase):
# that they can secure this if they feel that their stdout is insecure
# (shoulder surfing, logging stdout straight to a file, etc).
args = ''
+ # FIXME: the no_log value is not templated at this point, so any template will be considered truthy
if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
args = u', '.join(u'%s=%s' % a for a in task.args.items())
args = u' %s' % args
@@ -193,9 +197,6 @@ class CallbackModule(CallbackBase):
self._last_task_banner = task._uuid
- def v2_playbook_on_cleanup_task_start(self, task):
- self._task_start(task, prefix='CLEANUP TASK')
-
def v2_playbook_on_handler_task_start(self, task):
self._task_start(task, prefix='RUNNING HANDLER')
@@ -218,30 +219,29 @@ class CallbackModule(CallbackBase):
self._display.banner(msg)
- def v2_on_file_diff(self, result):
- if result._task.loop and 'results' in result._result:
- for res in result._result['results']:
+ def v2_on_file_diff(self, result: CallbackTaskResult) -> None:
+ if result.task.loop and 'results' in result.result:
+ for res in result.result['results']:
if 'diff' in res and res['diff'] and res.get('changed', False):
diff = self._get_diff(res['diff'])
if diff:
- if self._last_task_banner != result._task._uuid:
- self._print_task_banner(result._task)
+ if self._last_task_banner != result.task._uuid:
+ self._print_task_banner(result.task)
self._display.display(diff)
- elif 'diff' in result._result and result._result['diff'] and result._result.get('changed', False):
- diff = self._get_diff(result._result['diff'])
+ elif 'diff' in result.result and result.result['diff'] and result.result.get('changed', False):
+ diff = self._get_diff(result.result['diff'])
if diff:
- if self._last_task_banner != result._task._uuid:
- self._print_task_banner(result._task)
+ if self._last_task_banner != result.task._uuid:
+ self._print_task_banner(result.task)
self._display.display(diff)
- def v2_runner_item_on_ok(self, result):
-
+ def v2_runner_item_on_ok(self, result: CallbackTaskResult) -> None:
host_label = self.host_label(result)
- if isinstance(result._task, TaskInclude):
+ if isinstance(result.task, TaskInclude):
return
- elif result._result.get('changed', False):
- if self._last_task_banner != result._task._uuid:
- self._print_task_banner(result._task)
+ elif result.result.get('changed', False):
+ if self._last_task_banner != result.task._uuid:
+ self._print_task_banner(result.task)
msg = 'changed'
color = C.COLOR_CHANGED
@@ -249,43 +249,47 @@ class CallbackModule(CallbackBase):
if not self.get_option('display_ok_hosts'):
return
- if self._last_task_banner != result._task._uuid:
- self._print_task_banner(result._task)
+ if self._last_task_banner != result.task._uuid:
+ self._print_task_banner(result.task)
msg = 'ok'
color = C.COLOR_OK
- msg = "%s: [%s] => (item=%s)" % (msg, host_label, self._get_item_label(result._result))
- self._clean_results(result._result, result._task.action)
+ self._handle_warnings_and_exception(result)
+
+ msg = "%s: [%s] => (item=%s)" % (msg, host_label, self._get_item_label(result.result))
+ self._clean_results(result.result, result.task.action)
if self._run_is_verbose(result):
- msg += " => %s" % self._dump_results(result._result)
+ msg += " => %s" % self._dump_results(result.result)
self._display.display(msg, color=color)
- def v2_runner_item_on_failed(self, result):
- if self._last_task_banner != result._task._uuid:
- self._print_task_banner(result._task)
+ def v2_runner_item_on_failed(self, result: CallbackTaskResult) -> None:
+ if self._last_task_banner != result.task._uuid:
+ self._print_task_banner(result.task)
+
+ self._handle_warnings_and_exception(result)
host_label = self.host_label(result)
- self._clean_results(result._result, result._task.action)
- self._handle_exception(result._result, use_stderr=self.get_option('display_failed_stderr'))
msg = "failed: [%s]" % (host_label,)
- self._handle_warnings(result._result)
+ self._clean_results(result.result, result.task.action)
self._display.display(
- msg + " (item=%s) => %s" % (self._get_item_label(result._result), self._dump_results(result._result)),
+ msg + " (item=%s) => %s" % (self._get_item_label(result.result), self._dump_results(result.result)),
color=C.COLOR_ERROR,
stderr=self.get_option('display_failed_stderr')
)
- def v2_runner_item_on_skipped(self, result):
+ def v2_runner_item_on_skipped(self, result: CallbackTaskResult) -> None:
if self.get_option('display_skipped_hosts'):
- if self._last_task_banner != result._task._uuid:
- self._print_task_banner(result._task)
+ if self._last_task_banner != result.task._uuid:
+ self._print_task_banner(result.task)
+
+ self._handle_warnings_and_exception(result)
- self._clean_results(result._result, result._task.action)
- msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), self._get_item_label(result._result))
+ self._clean_results(result.result, result.task.action)
+ msg = "skipping: [%s] => (item=%s) " % (result.host.get_name(), self._get_item_label(result.result))
if self._run_is_verbose(result):
- msg += " => %s" % self._dump_results(result._result)
+ msg += " => %s" % self._dump_results(result.result)
self._display.display(msg, color=C.COLOR_SKIP)
def v2_playbook_on_include(self, included_file):
@@ -370,37 +374,37 @@ class CallbackModule(CallbackBase):
if context.CLIARGS['check'] and self.get_option('check_mode_markers'):
self._display.banner("DRY RUN")
- def v2_runner_retry(self, result):
- task_name = result.task_name or result._task
+ def v2_runner_retry(self, result: CallbackTaskResult) -> None:
+ task_name = result.task_name or result.task
host_label = self.host_label(result)
- msg = "FAILED - RETRYING: [%s]: %s (%d retries left)." % (host_label, task_name, result._result['retries'] - result._result['attempts'])
+ msg = "FAILED - RETRYING: [%s]: %s (%d retries left)." % (host_label, task_name, result.result['retries'] - result.result['attempts'])
if self._run_is_verbose(result, verbosity=2):
- msg += "Result was: %s" % self._dump_results(result._result)
+ msg += "Result was: %s" % self._dump_results(result.result)
self._display.display(msg, color=C.COLOR_DEBUG)
- def v2_runner_on_async_poll(self, result):
- host = result._host.get_name()
- jid = result._result.get('ansible_job_id')
- started = result._result.get('started')
- finished = result._result.get('finished')
+ def v2_runner_on_async_poll(self, result: CallbackTaskResult) -> None:
+ host = result.host.get_name()
+ jid = result.result.get('ansible_job_id')
+ started = result.result.get('started')
+ finished = result.result.get('finished')
self._display.display(
'ASYNC POLL on %s: jid=%s started=%s finished=%s' % (host, jid, started, finished),
color=C.COLOR_DEBUG
)
- def v2_runner_on_async_ok(self, result):
- host = result._host.get_name()
- jid = result._result.get('ansible_job_id')
+ def v2_runner_on_async_ok(self, result: CallbackTaskResult) -> None:
+ host = result.host.get_name()
+ jid = result.result.get('ansible_job_id')
self._display.display("ASYNC OK on %s: jid=%s" % (host, jid), color=C.COLOR_DEBUG)
- def v2_runner_on_async_failed(self, result):
- host = result._host.get_name()
+ def v2_runner_on_async_failed(self, result: CallbackTaskResult) -> None:
+ host = result.host.get_name()
# Attempt to get the async job ID. If the job does not finish before the
# async timeout value, the ID may be within the unparsed 'async_result' dict.
- jid = result._result.get('ansible_job_id')
- if not jid and 'async_result' in result._result:
- jid = result._result['async_result'].get('ansible_job_id')
+ jid = result.result.get('ansible_job_id')
+ if not jid and 'async_result' in result.result:
+ jid = result.result['async_result'].get('ansible_job_id')
self._display.display("ASYNC FAILED on %s: jid=%s" % (host, jid), color=C.COLOR_DEBUG)
def v2_playbook_on_notify(self, handler, host):
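# Sketch of the banner-deduplication pattern repeated throughout this plugin:
# remembering the uuid of the last task whose banner was printed keeps
# multi-host and per-item events from reprinting it (values below are fake).
class BannerDemo:
    def __init__(self):
        self._last_task_banner = None

    def maybe_print_banner(self, task_uuid, task_name):
        if self._last_task_banner != task_uuid:
            print('TASK [%s] %s' % (task_name, '*' * 20))
            self._last_task_banner = task_uuid


demo = BannerDemo()
demo.maybe_print_banner('uuid-1', 'ping')  # prints the banner
demo.maybe_print_banner('uuid-1', 'ping')  # suppressed: same task uuid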
diff --git a/lib/ansible/plugins/callback/junit.py b/lib/ansible/plugins/callback/junit.py
index e164902474f..b351312fd23 100644
--- a/lib/ansible/plugins/callback/junit.py
+++ b/lib/ansible/plugins/callback/junit.py
@@ -82,13 +82,20 @@ DOCUMENTATION = """
- enable in configuration
"""
+import decimal
import os
import time
import re
+import typing as t
-from ansible import constants as C
+from ansible import constants
from ansible.module_utils.common.text.converters import to_bytes, to_text
+from ansible.module_utils._internal import _event_utils
+from ansible._internal import _event_formatting
+from ansible.playbook.task import Task
from ansible.plugins.callback import CallbackBase
+from ansible.executor.task_result import CallbackTaskResult
+from ansible.playbook.included_file import IncludedFile
from ansible.utils._junit_xml import (
TestCase,
TestError,
@@ -126,7 +133,7 @@ class CallbackModule(CallbackBase):
Default: True
JUNIT_HIDE_TASK_ARGUMENTS (optional): Hide the arguments for a task
Default: False
- JUNIT_TEST_CASE_PREFIX (optional): Consider a task only as test case if it has this value as prefix. Additionally failing tasks are recorded as failed
+ JUNIT_TEST_CASE_PREFIX (optional): Consider a task a test case only if its name has this value as a prefix. Additionally, failing tasks are recorded as failed
test cases.
Default:
"""
@@ -136,7 +143,7 @@ class CallbackModule(CallbackBase):
CALLBACK_NAME = 'junit'
CALLBACK_NEEDS_ENABLED = True
- def __init__(self):
+ def __init__(self) -> None:
super(CallbackModule, self).__init__()
self._output_dir = os.getenv('JUNIT_OUTPUT_DIR', os.path.expanduser('~/.ansible.log'))
@@ -150,20 +157,18 @@ class CallbackModule(CallbackBase):
self._replace_out_of_tree_path = os.getenv('JUNIT_REPLACE_OUT_OF_TREE_PATH', None)
self._playbook_path = None
self._playbook_name = None
- self._play_name = None
- self._task_data = None
+ self._play_name: str | None = None
+ self._task_data: dict[str, TaskData] = {}
self.disabled = False
- self._task_data = {}
-
if self._replace_out_of_tree_path is not None:
self._replace_out_of_tree_path = to_text(self._replace_out_of_tree_path)
if not os.path.exists(self._output_dir):
os.makedirs(self._output_dir)
- def _start_task(self, task):
+ def _start_task(self, task: Task) -> None:
""" record the start of a task for one or more hosts """
uuid = task._uuid
@@ -183,23 +188,23 @@ class CallbackModule(CallbackBase):
self._task_data[uuid] = TaskData(uuid, name, path, play, action)
- def _finish_task(self, status, result):
+ def _finish_task(self, status: str, result: IncludedFile | CallbackTaskResult) -> None:
""" record the results of a task for a single host """
- task_uuid = result._task._uuid
+ if isinstance(result, CallbackTaskResult):
+ task_uuid = result.task._uuid
+ host_uuid = result.host._uuid
+ host_name = result.host.name
- if hasattr(result, '_host'):
- host_uuid = result._host._uuid
- host_name = result._host.name
+ if self._fail_on_change == 'true' and status == 'ok' and result.result.get('changed', False):
+ status = 'failed'
else:
+ task_uuid = result._task._uuid
host_uuid = 'include'
host_name = 'include'
task_data = self._task_data[task_uuid]
- if self._fail_on_change == 'true' and status == 'ok' and result._result.get('changed', False):
- status = 'failed'
-
# ignore failure if expected and toggle result if asked for
if status == 'failed' and 'EXPECTED FAILURE' in task_data.name:
status = 'ok'
@@ -212,11 +217,11 @@ class CallbackModule(CallbackBase):
if task_data.name.startswith(self._test_case_prefix) or status == 'failed':
task_data.add_host(HostData(host_uuid, host_name, status, result))
- def _build_test_case(self, task_data, host_data):
+ def _build_test_case(self, task_data: TaskData, host_data: HostData) -> TestCase:
""" build a TestCase from the given TaskData and HostData """
name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
- duration = host_data.finish - task_data.start
+ duration = decimal.Decimal(host_data.finish - task_data.start)
if self._task_relative_path and task_data.path:
junit_classname = to_text(os.path.relpath(to_bytes(task_data.path), to_bytes(self._task_relative_path)))
@@ -232,7 +237,8 @@ class CallbackModule(CallbackBase):
if host_data.status == 'included':
return TestCase(name=name, classname=junit_classname, time=duration, system_out=str(host_data.result))
- res = host_data.result._result
+ task_result = t.cast(CallbackTaskResult, host_data.result)
+ res = task_result.result
rc = res.get('rc', 0)
dump = self._dump_results(res, indent=0)
dump = self._cleanse_string(dump)
@@ -243,9 +249,9 @@ class CallbackModule(CallbackBase):
test_case = TestCase(name=name, classname=junit_classname, time=duration)
if host_data.status == 'failed':
- if 'exception' in res:
- message = res['exception'].strip().split('\n')[-1]
- output = res['exception']
+ if error_summary := task_result.exception:
+ message = _event_utils.format_event_brief_message(error_summary.event)
+ output = _event_formatting.format_event_traceback(error_summary.event)
test_case.errors.append(TestError(message=message, output=output))
elif 'msg' in res:
message = res['msg']
@@ -261,7 +267,8 @@ class CallbackModule(CallbackBase):
return test_case
- def _cleanse_string(self, value):
+ @staticmethod
+ def _cleanse_string(value):
""" convert surrogate escapes to the unicode replacement character to avoid XML encoding errors """
return to_text(to_bytes(value, errors='surrogateescape'), errors='replace')
@@ -271,7 +278,7 @@ class CallbackModule(CallbackBase):
test_cases = []
for task_uuid, task_data in self._task_data.items():
- if task_data.action in C._ACTION_SETUP and self._include_setup_tasks_in_report == 'false':
+ if task_data.action in constants._ACTION_SETUP and self._include_setup_tasks_in_report == 'false':
continue
for host_uuid, host_data in task_data.host_data.items():
@@ -293,31 +300,25 @@ class CallbackModule(CallbackBase):
def v2_playbook_on_play_start(self, play):
self._play_name = play.get_name()
- def v2_runner_on_no_hosts(self, task):
- self._start_task(task)
-
- def v2_playbook_on_task_start(self, task, is_conditional):
- self._start_task(task)
-
- def v2_playbook_on_cleanup_task_start(self, task):
+ def v2_playbook_on_task_start(self, task: Task, is_conditional: bool) -> None:
self._start_task(task)
- def v2_playbook_on_handler_task_start(self, task):
+ def v2_playbook_on_handler_task_start(self, task: Task) -> None:
self._start_task(task)
- def v2_runner_on_failed(self, result, ignore_errors=False):
+ def v2_runner_on_failed(self, result: CallbackTaskResult, ignore_errors=False) -> None:
if ignore_errors and self._fail_on_ignore != 'true':
self._finish_task('ok', result)
else:
self._finish_task('failed', result)
- def v2_runner_on_ok(self, result):
+ def v2_runner_on_ok(self, result: CallbackTaskResult) -> None:
self._finish_task('ok', result)
- def v2_runner_on_skipped(self, result):
+ def v2_runner_on_skipped(self, result: CallbackTaskResult) -> None:
self._finish_task('skipped', result)
- def v2_playbook_on_include(self, included_file):
+ def v2_playbook_on_include(self, included_file: IncludedFile) -> None:
self._finish_task('included', included_file)
def v2_playbook_on_stats(self, stats):
@@ -329,21 +330,21 @@ class TaskData:
Data about an individual task.
"""
- def __init__(self, uuid, name, path, play, action):
+ def __init__(self, uuid: str, name: str, path: str, play: str, action: str) -> None:
self.uuid = uuid
self.name = name
self.path = path
self.play = play
self.start = None
- self.host_data = {}
+ self.host_data: dict[str, HostData] = {}
self.start = time.time()
self.action = action
- def add_host(self, host):
+ def add_host(self, host: HostData) -> None:
if host.uuid in self.host_data:
if host.status == 'included':
# concatenate task include output from multiple items
- host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
+ host.result = f'{self.host_data[host.uuid].result}\n{host.result}'
else:
raise Exception('%s: %s: %s: duplicate host callback: %s' % (self.path, self.play, self.name, host.name))
@@ -355,7 +356,7 @@ class HostData:
Data about an individual host.
"""
- def __init__(self, uuid, name, status, result):
+ def __init__(self, uuid: str, name: str, status: str, result: IncludedFile | CallbackTaskResult | str) -> None:
self.uuid = uuid
self.name = name
self.status = status
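# The test case duration above is now a Decimal built from a float delta;
# Decimal(float) is exact but verbose for values with no finite binary
# representation, so a writer that wants tidy output would typically quantize:
import decimal

start, finish = 100.0, 100.25
print(decimal.Decimal(finish - start))  # 0.25 (exactly representable)
print(decimal.Decimal(0.1))             # 0.1000000000000000055511151231257827...
print(decimal.Decimal(0.1).quantize(decimal.Decimal('0.000001')))  # 0.100000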
diff --git a/lib/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py
index 181e90eba9a..cff4048bd6f 100644
--- a/lib/ansible/plugins/callback/minimal.py
+++ b/lib/ansible/plugins/callback/minimal.py
@@ -15,6 +15,7 @@ DOCUMENTATION = """
- result_format_callback
"""
+from ansible.executor.task_result import CallbackTaskResult
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
@@ -40,39 +41,41 @@ class CallbackModule(CallbackBase):
return buf + "\n"
- def v2_runner_on_failed(self, result, ignore_errors=False):
+ def v2_runner_on_failed(self, result: CallbackTaskResult, ignore_errors: bool = False) -> None:
+ self._handle_warnings_and_exception(result)
- self._handle_exception(result._result)
- self._handle_warnings(result._result)
-
- if result._task.action in C.MODULE_NO_JSON and 'module_stderr' not in result._result:
- self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "FAILED"), color=C.COLOR_ERROR)
+ if result.task.action in C.MODULE_NO_JSON and 'module_stderr' not in result.result:
+ self._display.display(self._command_generic_msg(result.host.get_name(), result.result, "FAILED"), color=C.COLOR_ERROR)
else:
- self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_ERROR)
+ self._display.display("%s | FAILED! => %s" % (result.host.get_name(), self._dump_results(result.result, indent=4)), color=C.COLOR_ERROR)
- def v2_runner_on_ok(self, result):
- self._clean_results(result._result, result._task.action)
+ def v2_runner_on_ok(self, result: CallbackTaskResult) -> None:
+ self._handle_warnings_and_exception(result)
- self._handle_warnings(result._result)
+ self._clean_results(result.result, result.task.action)
- if result._result.get('changed', False):
+ if result.result.get('changed', False):
color = C.COLOR_CHANGED
state = 'CHANGED'
else:
color = C.COLOR_OK
state = 'SUCCESS'
- if result._task.action in C.MODULE_NO_JSON and 'ansible_job_id' not in result._result:
- self._display.display(self._command_generic_msg(result._host.get_name(), result._result, state), color=color)
+ if result.task.action in C.MODULE_NO_JSON and 'ansible_job_id' not in result.result:
+ self._display.display(self._command_generic_msg(result.host.get_name(), result.result, state), color=color)
else:
- self._display.display("%s | %s => %s" % (result._host.get_name(), state, self._dump_results(result._result, indent=4)), color=color)
+ self._display.display("%s | %s => %s" % (result.host.get_name(), state, self._dump_results(result.result, indent=4)), color=color)
+
+ def v2_runner_on_skipped(self, result: CallbackTaskResult) -> None:
+ self._handle_warnings_and_exception(result)
+
+ self._display.display("%s | SKIPPED" % (result.host.get_name()), color=C.COLOR_SKIP)
- def v2_runner_on_skipped(self, result):
- self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP)
+ def v2_runner_on_unreachable(self, result: CallbackTaskResult) -> None:
+ self._handle_warnings_and_exception(result)
- def v2_runner_on_unreachable(self, result):
- self._display.display("%s | UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_UNREACHABLE)
+ self._display.display("%s | UNREACHABLE! => %s" % (result.host.get_name(), self._dump_results(result.result, indent=4)), color=C.COLOR_UNREACHABLE)
def v2_on_file_diff(self, result):
- if 'diff' in result._result and result._result['diff']:
- self._display.display(self._get_diff(result._result['diff']))
+ if 'diff' in result.result and result.result['diff']:
+ self._display.display(self._get_diff(result.result['diff']))
diff --git a/lib/ansible/plugins/callback/oneline.py b/lib/ansible/plugins/callback/oneline.py
index 4ac74d61629..38c2cc5cfd3 100644
--- a/lib/ansible/plugins/callback/oneline.py
+++ b/lib/ansible/plugins/callback/oneline.py
@@ -13,8 +13,11 @@ DOCUMENTATION = """
- This is the output callback used by the C(-o)/C(--one-line) command line option.
"""
-from ansible.plugins.callback import CallbackBase
from ansible import constants as C
+from ansible.plugins.callback import CallbackBase
+from ansible.template import Templar
+from ansible.executor.task_result import CallbackTaskResult
+from ansible.module_utils._internal import _deprecator
class CallbackModule(CallbackBase):
@@ -28,6 +31,15 @@ class CallbackModule(CallbackBase):
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'oneline'
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ self._display.deprecated( # pylint: disable=ansible-deprecated-unnecessary-collection-name
+ msg='The oneline callback plugin is deprecated.',
+ version='2.23',
+ deprecator=_deprecator.ANSIBLE_CORE_DEPRECATOR, # entire plugin being removed; this improves the messaging
+ )
+
def _command_generic_msg(self, hostname, result, caption):
stdout = result.get('stdout', '').replace('\n', '\\n').replace('\r', '\\r')
if 'stderr' in result and result['stderr']:
@@ -36,40 +48,41 @@ class CallbackModule(CallbackBase):
else:
return "%s | %s | rc=%s | (stdout) %s" % (hostname, caption, result.get('rc', -1), stdout)
- def v2_runner_on_failed(self, result, ignore_errors=False):
- if 'exception' in result._result:
+ def v2_runner_on_failed(self, result: CallbackTaskResult, ignore_errors: bool = False) -> None:
+ if 'exception' in result.result:
+ error_text = Templar().template(result.result['exception']) # transform to a string
if self._display.verbosity < 3:
# extract just the actual error message from the exception text
- error = result._result['exception'].strip().split('\n')[-1]
+ error = error_text.strip().split('\n')[-1]
msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
else:
- msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'].replace('\n', '')
+ msg = "An exception occurred during task execution. The full traceback is:\n" + error_text.replace('\n', '')
- if result._task.action in C.MODULE_NO_JSON and 'module_stderr' not in result._result:
- self._display.display(self._command_generic_msg(result._host.get_name(), result._result, 'FAILED'), color=C.COLOR_ERROR)
+ if result.task.action in C.MODULE_NO_JSON and 'module_stderr' not in result.result:
+ self._display.display(self._command_generic_msg(result.host.get_name(), result.result, 'FAILED'), color=C.COLOR_ERROR)
else:
self._display.display(msg, color=C.COLOR_ERROR)
- self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n', '')),
+ self._display.display("%s | FAILED! => %s" % (result.host.get_name(), self._dump_results(result.result, indent=0).replace('\n', '')),
color=C.COLOR_ERROR)
- def v2_runner_on_ok(self, result):
+ def v2_runner_on_ok(self, result: CallbackTaskResult) -> None:
- if result._result.get('changed', False):
+ if result.result.get('changed', False):
color = C.COLOR_CHANGED
state = 'CHANGED'
else:
color = C.COLOR_OK
state = 'SUCCESS'
- if result._task.action in C.MODULE_NO_JSON and 'ansible_job_id' not in result._result:
- self._display.display(self._command_generic_msg(result._host.get_name(), result._result, state), color=color)
+ if result.task.action in C.MODULE_NO_JSON and 'ansible_job_id' not in result.result:
+ self._display.display(self._command_generic_msg(result.host.get_name(), result.result, state), color=color)
else:
- self._display.display("%s | %s => %s" % (result._host.get_name(), state, self._dump_results(result._result, indent=0).replace('\n', '')),
+ self._display.display("%s | %s => %s" % (result.host.get_name(), state, self._dump_results(result.result, indent=0).replace('\n', '')),
color=color)
- def v2_runner_on_unreachable(self, result):
- self._display.display("%s | UNREACHABLE!: %s" % (result._host.get_name(), result._result.get('msg', '')), color=C.COLOR_UNREACHABLE)
+ def v2_runner_on_unreachable(self, result: CallbackTaskResult) -> None:
+ self._display.display("%s | UNREACHABLE!: %s" % (result.host.get_name(), result.result.get('msg', '')), color=C.COLOR_UNREACHABLE)
- def v2_runner_on_skipped(self, result):
- self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP)
+ def v2_runner_on_skipped(self, result: CallbackTaskResult) -> None:
+ self._display.display("%s | SKIPPED" % (result.host.get_name()), color=C.COLOR_SKIP)
diff --git a/lib/ansible/plugins/callback/tree.py b/lib/ansible/plugins/callback/tree.py
index 9618f8ec8c7..96cf0c33a76 100644
--- a/lib/ansible/plugins/callback/tree.py
+++ b/lib/ansible/plugins/callback/tree.py
@@ -30,9 +30,11 @@ DOCUMENTATION = """
import os
from ansible.constants import TREE_DIR
-from ansible.module_utils.common.text.converters import to_bytes, to_text
+from ansible.executor.task_result import CallbackTaskResult
+from ansible.module_utils.common.text.converters import to_bytes
from ansible.plugins.callback import CallbackBase
from ansible.utils.path import makedirs_safe, unfrackpath
+from ansible.module_utils._internal import _deprecator
class CallbackModule(CallbackBase):
@@ -45,6 +47,15 @@ class CallbackModule(CallbackBase):
CALLBACK_NAME = 'tree'
CALLBACK_NEEDS_ENABLED = True
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ self._display.deprecated( # pylint: disable=ansible-deprecated-unnecessary-collection-name
+ msg='The tree callback plugin is deprecated.',
+ version='2.23',
+ deprecator=_deprecator.ANSIBLE_CORE_DEPRECATOR, # entire plugin being removed; this improves the messaging
+ )
+
def set_options(self, task_keys=None, var_options=None, direct=None):
""" override to set self.tree """
@@ -62,24 +73,24 @@ class CallbackModule(CallbackBase):
buf = to_bytes(buf)
try:
makedirs_safe(self.tree)
- except (OSError, IOError) as e:
- self._display.warning(u"Unable to access or create the configured directory (%s): %s" % (to_text(self.tree), to_text(e)))
+ except OSError as ex:
+ self._display.error_as_warning(f"Unable to access or create the configured directory {self.tree!r}.", exception=ex)
try:
path = to_bytes(os.path.join(self.tree, hostname))
with open(path, 'wb+') as fd:
fd.write(buf)
- except (OSError, IOError) as e:
- self._display.warning(u"Unable to write to %s's file: %s" % (hostname, to_text(e)))
+ except OSError as ex:
+ self._display.error_as_warning(f"Unable to write to {hostname!r}'s file.", exception=ex)
- def result_to_tree(self, result):
- self.write_tree_file(result._host.get_name(), self._dump_results(result._result))
+ def result_to_tree(self, result: CallbackTaskResult) -> None:
+ self.write_tree_file(result.host.get_name(), self._dump_results(result.result))
- def v2_runner_on_ok(self, result):
+ def v2_runner_on_ok(self, result: CallbackTaskResult) -> None:
self.result_to_tree(result)
- def v2_runner_on_failed(self, result, ignore_errors=False):
+ def v2_runner_on_failed(self, result: CallbackTaskResult, ignore_errors: bool = False) -> None:
self.result_to_tree(result)
- def v2_runner_on_unreachable(self, result):
+ def v2_runner_on_unreachable(self, result: CallbackTaskResult) -> None:
self.result_to_tree(result)
diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py
index de4a79e9818..553235884fd 100644
--- a/lib/ansible/plugins/connection/__init__.py
+++ b/lib/ansible/plugins/connection/__init__.py
@@ -15,6 +15,7 @@ from abc import abstractmethod
from functools import wraps
from ansible import constants as C
+from ansible.errors import AnsibleValueOmittedError
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.playbook.play_context import PlayContext
from ansible.plugins import AnsiblePlugin
@@ -35,6 +36,12 @@ P = t.ParamSpec('P')
T = t.TypeVar('T')
+class ConnectionKwargs(t.TypedDict):
+ task_uuid: str
+ ansible_playbook_pid: str
+ shell: t.NotRequired[ShellBase]
+
+
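# Standalone sketch of the TypedDict + t.Unpack pattern adopted below for
# ConnectionBase.__init__ (typing.Unpack and NotRequired require Python 3.11+);
# the greet() example is illustrative only.
import typing as t


class GreetKwargs(t.TypedDict):
    name: str
    loud: t.NotRequired[bool]


def greet(**kwargs: t.Unpack[GreetKwargs]) -> str:
    msg = f"hello {kwargs['name']}"
    return msg.upper() if kwargs.get('loud') else msg


print(greet(name='ansible'))             # hello ansible
print(greet(name='ansible', loud=True))  # HELLO ANSIBLE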
def ensure_connect(
func: c.Callable[t.Concatenate[ConnectionBase, P], T],
) -> c.Callable[t.Concatenate[ConnectionBase, P], T]:
@@ -71,10 +78,8 @@ class ConnectionBase(AnsiblePlugin):
def __init__(
self,
play_context: PlayContext,
- new_stdin: io.TextIOWrapper | None = None,
- shell: ShellBase | None = None,
*args: t.Any,
- **kwargs: t.Any,
+ **kwargs: t.Unpack[ConnectionKwargs],
) -> None:
super(ConnectionBase, self).__init__()
@@ -83,9 +88,6 @@ class ConnectionBase(AnsiblePlugin):
if not hasattr(self, '_play_context'):
# Backwards compat: self._play_context isn't really needed, using set_options/get_option
self._play_context = play_context
- # Delete once the deprecation period is over for WorkerProcess._new_stdin
- if not hasattr(self, '__new_stdin'):
- self.__new_stdin = new_stdin
if not hasattr(self, '_display'):
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
@@ -95,25 +97,14 @@ class ConnectionBase(AnsiblePlugin):
self._connected = False
self._socket_path: str | None = None
- # helper plugins
- self._shell = shell
-
# we always must have shell
- if not self._shell:
+ if not (shell := kwargs.get('shell')):
shell_type = play_context.shell if play_context.shell else getattr(self, '_shell_type', None)
- self._shell = get_shell_plugin(shell_type=shell_type, executable=self._play_context.executable)
+ shell = get_shell_plugin(shell_type=shell_type, executable=self._play_context.executable)
+ self._shell = shell
self.become: BecomeBase | None = None
- @property
- def _new_stdin(self) -> io.TextIOWrapper | None:
- display.deprecated(
- "The connection's stdin object is deprecated. "
- "Call display.prompt_until(msg) instead.",
- version='2.19',
- )
- return self.__new_stdin
-
def set_become_plugin(self, plugin: BecomeBase) -> None:
self.become = plugin
@@ -285,6 +276,50 @@ class ConnectionBase(AnsiblePlugin):
display.debug('Set connection var {0} to {1}'.format(varname, value))
variables[varname] = value
+ def _resolve_option_variables(self, variables, templar):
+ """
+ Return a dict of variable -> templated value, for any variables
+ that match options registered by this plugin.
+ """
+ # create dict of 'templated vars'
+ var_options = {
+ '_extras': {},
+ }
+ for var_name in C.config.get_plugin_vars('connection', self._load_name):
+ if var_name in variables:
+ try:
+ var_options[var_name] = templar.template(variables[var_name])
+ except AnsibleValueOmittedError:
+ pass
+
+ # add extras if plugin supports them
+ if getattr(self, 'allow_extras', False):
+ for var_name in variables:
+ if var_name.startswith(f'ansible_{self.extras_prefix}_') and var_name not in var_options:
+ try:
+ var_options['_extras'][var_name] = templar.template(variables[var_name])
+ except AnsibleValueOmittedError:
+ pass
+
+ return var_options
+
+ def is_pipelining_enabled(self, wrap_async: bool = False) -> bool:
+
+ is_enabled = False
+ if self.has_pipelining and (not self.become or self.become.pipelining):
+ try:
+ is_enabled = self.get_option('pipelining')
+ except KeyError:
+ is_enabled = getattr(self._play_context, 'pipelining', False)
+
+ # TODO: deprecate always_pipeline_modules and has_native_async in favor of each plugin overriding this function
+ conditions = [
+ is_enabled or self.always_pipeline_modules, # enabled via config or forced via connection (eg winrm)
+ not C.DEFAULT_KEEP_REMOTE_FILES, # disabled when the user wants to keep remote files
+ not wrap_async or self.has_native_async, # async does not normally support pipelining unless it does (eg winrm)
+ ]
+ return all(conditions)
+
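# The decision above reduces to all() over three independent gates; a sketch
# with bare flags showing how any single gate disables pipelining:
def pipelining_enabled(is_enabled, always_pipeline, keep_remote_files, wrap_async, has_native_async):
    return all([
        is_enabled or always_pipeline,       # enabled via config or forced by the connection
        not keep_remote_files,               # keeping remote files defeats pipelining
        not wrap_async or has_native_async,  # async requires native support (eg winrm)
    ])


print(pipelining_enabled(True, False, False, False, False))  # True
print(pipelining_enabled(True, False, True, False, False))   # False: remote files kept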
class NetworkConnectionBase(ConnectionBase):
"""
@@ -298,11 +333,10 @@ class NetworkConnectionBase(ConnectionBase):
def __init__(
self,
play_context: PlayContext,
- new_stdin: io.TextIOWrapper | None = None,
*args: t.Any,
**kwargs: t.Any,
) -> None:
- super(NetworkConnectionBase, self).__init__(play_context, new_stdin, *args, **kwargs)
+ super(NetworkConnectionBase, self).__init__(play_context, *args, **kwargs)
self._messages: list[tuple[str, str]] = []
self._conn_closed = False
diff --git a/lib/ansible/plugins/connection/local.py b/lib/ansible/plugins/connection/local.py
index 2fa8f491a08..0e650fd14f0 100644
--- a/lib/ansible/plugins/connection/local.py
+++ b/lib/ansible/plugins/connection/local.py
@@ -11,23 +11,42 @@ DOCUMENTATION = """
- This connection plugin allows ansible to execute tasks on the Ansible 'controller' instead of on a remote host.
author: ansible (@core)
version_added: historical
+ options:
+ become_success_timeout:
+ version_added: '2.19'
+ type: int
+ default: 10
+ description:
+ - Number of seconds to wait for become to succeed when enabled.
+ - The default will be used if the configured value is less than 1.
+ vars:
+ - name: ansible_local_become_success_timeout
+ become_strip_preamble:
+ version_added: '2.19'
+ type: bool
+ default: true
+ description:
+ - Strip internal become output preceding command execution. Disable for additional diagnostics.
+ vars:
+ - name: ansible_local_become_strip_preamble
extends_documentation_fragment:
- connection_pipelining
notes:
- The remote user is ignored, the user with which the ansible CLI was executed is used instead.
"""
-import fcntl
+import functools
import getpass
import os
import pty
import selectors
import shutil
import subprocess
+import time
import typing as t
import ansible.constants as C
-from ansible.errors import AnsibleError, AnsibleFileNotFound
+from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleConnectionFailure
from ansible.module_utils.six import text_type, binary_type
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase
@@ -82,11 +101,11 @@ class Connection(ConnectionBase):
display.debug("opening command with Popen()")
if isinstance(cmd, (text_type, binary_type)):
- cmd = to_bytes(cmd)
+ cmd = to_text(cmd)
else:
- cmd = map(to_bytes, cmd)
+ cmd = map(to_text, cmd)
- master = None
+ pty_primary = None
stdin = subprocess.PIPE
if sudoable and self.become and self.become.expect_prompt() and not self.get_option('pipelining'):
# Create a pty if sudoable for privilege escalation that needs it.
@@ -94,9 +113,9 @@ class Connection(ConnectionBase):
# cause the command to fail in certain situations where we are escalating
# privileges or the command otherwise needs a pty.
try:
- master, stdin = pty.openpty()
- except (IOError, OSError) as e:
- display.debug("Unable to open pty: %s" % to_native(e))
+ pty_primary, stdin = pty.openpty()
+ except OSError as ex:
+ display.debug(f"Unable to open pty: {ex}")
p = subprocess.Popen(
cmd,
@@ -108,60 +127,134 @@ class Connection(ConnectionBase):
stderr=subprocess.PIPE,
)
- # if we created a master, we can close the other half of the pty now, otherwise master is stdin
- if master is not None:
+ # if we created a pty, we can close the other half of the pty now, otherwise primary is stdin
+ if pty_primary is not None:
os.close(stdin)
display.debug("done running command with Popen()")
- if self.become and self.become.expect_prompt() and sudoable:
- fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
- fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
- selector = selectors.DefaultSelector()
- selector.register(p.stdout, selectors.EVENT_READ)
- selector.register(p.stderr, selectors.EVENT_READ)
-
- become_output = b''
- try:
- while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
- events = selector.select(self._play_context.timeout)
- if not events:
- stdout, stderr = p.communicate()
- raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
-
- for key, event in events:
- if key.fileobj == p.stdout:
- chunk = p.stdout.read()
- elif key.fileobj == p.stderr:
- chunk = p.stderr.read()
-
- if not chunk:
- stdout, stderr = p.communicate()
- raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
- become_output += chunk
- finally:
- selector.close()
-
- if not self.become.check_success(become_output):
- become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
- if master is None:
- p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
- else:
- os.write(master, to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
-
- fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
- fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+ become_stdout_bytes, become_stderr_bytes = self._ensure_become_success(p, pty_primary, sudoable)
display.debug("getting output with communicate()")
stdout, stderr = p.communicate(in_data)
display.debug("done communicating")
+ # preserve output from the privilege escalation stage as `bytes`; it may contain actual output (e.g. `raw`) or error messages
+ stdout = become_stdout_bytes + stdout
+ stderr = become_stderr_bytes + stderr
+
# finally, close the other half of the pty, if it was created
- if master:
- os.close(master)
+ if pty_primary:
+ os.close(pty_primary)
display.debug("done with local.exec_command()")
- return (p.returncode, stdout, stderr)
+ return p.returncode, stdout, stderr
+
+ def _ensure_become_success(self, p: subprocess.Popen, pty_primary: int, sudoable: bool) -> tuple[bytes, bytes]:
+ """
+ Ensure that become succeeds, returning a tuple containing stdout captured after success and all stderr.
+ Returns immediately, without performing any additional checks, if `self.become` or `sudoable` is false, or if `build_become_command` was not called.
+ """
+ if not self.become or not sudoable or not self.become._id: # _id is set by build_become_command, if it was not called, assume no become
+ return b'', b''
+
+ start_seconds = time.monotonic()
+ become_stdout = bytearray()
+ become_stderr = bytearray()
+ last_stdout_prompt_offset = 0
+ last_stderr_prompt_offset = 0
+
+ # map the buffers to their associated stream for the selector reads
+ become_capture = {
+ p.stdout: become_stdout,
+ p.stderr: become_stderr,
+ }
+
+ expect_password_prompt = self.become.expect_prompt()
+ sent_password = False
+
+ def become_error_msg(reason: str) -> str:
+ error_message = f'{reason} waiting for become success'
+
+ if expect_password_prompt and not sent_password:
+ error_message += ' or become password prompt'
+
+ error_message += '.'
+
+ if become_stdout:
+ error_message += f'\n>>> Standard Output\n{to_text(bytes(become_stdout))}'
+
+ if become_stderr:
+ error_message += f'\n>>> Standard Error\n{to_text(bytes(become_stderr))}'
+
+ return error_message
+
+ os.set_blocking(p.stdout.fileno(), False)
+ os.set_blocking(p.stderr.fileno(), False)
+
+ with selectors.DefaultSelector() as selector:
+ selector.register(p.stdout, selectors.EVENT_READ, 'stdout')
+ selector.register(p.stderr, selectors.EVENT_READ, 'stderr')
+
+ while not self.become.check_success(become_stdout):
+ if not selector.get_map(): # we only reach end of stream after all descriptors are EOF
+ raise AnsibleError(become_error_msg('Premature end of stream'))
+
+ if expect_password_prompt and (
+ self.become.check_password_prompt(become_stdout[last_stdout_prompt_offset:]) or
+ self.become.check_password_prompt(become_stderr[last_stderr_prompt_offset:])
+ ):
+ if sent_password:
+ raise AnsibleError(become_error_msg('Duplicate become password prompt encountered'))
+
+ last_stdout_prompt_offset = len(become_stdout)
+ last_stderr_prompt_offset = len(become_stderr)
+
+ password_to_send = to_bytes(self.become.get_option('become_pass') or '') + b'\n'
+
+ if pty_primary is None:
+ p.stdin.write(password_to_send)
+ p.stdin.flush()
+ else:
+ os.write(pty_primary, password_to_send)
+
+ sent_password = True
+
+ remaining_timeout_seconds = self._become_success_timeout - (time.monotonic() - start_seconds)
+ events = selector.select(remaining_timeout_seconds) if remaining_timeout_seconds > 0 else []
+
+ if not events:
+ # ignoring remaining output after timeout to prevent hanging
+ raise AnsibleConnectionFailure(become_error_msg('Timed out'))
+
+ # read all content (non-blocking) from streams that signaled available input and append to the associated buffer
+ for key, event in events:
+ obj = t.cast(t.BinaryIO, key.fileobj)
+
+ if chunk := obj.read():
+ become_capture[obj] += chunk
+ else:
+ selector.unregister(obj) # EOF on this obj, stop polling it
+
+ os.set_blocking(p.stdout.fileno(), True)
+ os.set_blocking(p.stderr.fileno(), True)
+
+ become_stdout_bytes = bytes(become_stdout)
+ become_stderr_bytes = bytes(become_stderr)
+
+ if self.get_option('become_strip_preamble'):
+ become_stdout_bytes = self.become.strip_become_success(self.become.strip_become_prompt(become_stdout_bytes))
+ become_stderr_bytes = self.become.strip_become_prompt(become_stderr_bytes)
+
+ return become_stdout_bytes, become_stderr_bytes
+
+ @functools.cached_property
+ def _become_success_timeout(self) -> int:
+ """Timeout value for become success in seconds."""
+ if (timeout := self.get_option('become_success_timeout')) < 1:
+ timeout = C.config.get_config_default('become_success_timeout', plugin_type='connection', plugin_name='local')
+
+ return timeout
def put_file(self, in_path: str, out_path: str) -> None:
""" transfer a file from local to local """
@@ -178,8 +271,8 @@ class Connection(ConnectionBase):
shutil.copyfile(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict'))
except shutil.Error:
raise AnsibleError("failed to copy: {0} and {1} are the same".format(to_native(in_path), to_native(out_path)))
- except IOError as e:
- raise AnsibleError("failed to transfer file to {0}: {1}".format(to_native(out_path), to_native(e)))
+ except OSError as ex:
+ raise AnsibleError(f"Failed to transfer file to {out_path!r}.") from ex
def fetch_file(self, in_path: str, out_path: str) -> None:
""" fetch a file from local to local -- for compatibility """
@@ -189,6 +282,9 @@ class Connection(ConnectionBase):
display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._play_context.remote_addr)
self.put_file(in_path, out_path)
+ def reset(self) -> None:
+ pass
+
def close(self) -> None:
""" terminate the connection; nothing to do here """
self._connected = False
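For orientation on the new become flow above: a minimal standalone sketch (not ansible code; the child command, prompt strings, password, and timeout are all illustrative) of the selector-driven, non-blocking read loop that `_ensure_become_success` is built on:

import os
import selectors
import subprocess
import time

TIMEOUT = 10  # seconds, mirroring the become_success_timeout default

# hypothetical child that prompts on stdout before producing real output
p = subprocess.Popen(
    ['sh', '-c', 'printf "Password: "; read pw; echo "BECOME-SUCCESS"'],
    stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
)

os.set_blocking(p.stdout.fileno(), False)  # replaces the old fcntl O_NONBLOCK dance
os.set_blocking(p.stderr.fileno(), False)

captured = {p.stdout: bytearray(), p.stderr: bytearray()}
deadline = time.monotonic() + TIMEOUT
sent_password = False

with selectors.DefaultSelector() as selector:
    selector.register(p.stdout, selectors.EVENT_READ)
    selector.register(p.stderr, selectors.EVENT_READ)

    while b'BECOME-SUCCESS' not in captured[p.stdout]:
        if not selector.get_map():  # every stream hit EOF before success
            raise RuntimeError('premature end of stream')

        if not sent_password and b'Password:' in captured[p.stdout]:
            p.stdin.write(b'secret\n')  # answer the prompt exactly once
            p.stdin.flush()
            sent_password = True

        remaining = deadline - time.monotonic()
        events = selector.select(remaining) if remaining > 0 else []
        if not events:
            raise TimeoutError('timed out waiting for become success')

        for key, _ in events:
            if chunk := key.fileobj.read():  # non-blocking; returns buffered data
                captured[key.fileobj] += chunk
            else:
                selector.unregister(key.fileobj)  # EOF on this stream

os.set_blocking(p.stdout.fileno(), True)
os.set_blocking(p.stderr.fileno(), True)
p.communicate()  # reap the child; real command output would follow from here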
diff --git a/lib/ansible/plugins/connection/paramiko_ssh.py b/lib/ansible/plugins/connection/paramiko_ssh.py
index 239c1bdd5f8..3355f11f02a 100644
--- a/lib/ansible/plugins/connection/paramiko_ssh.py
+++ b/lib/ansible/plugins/connection/paramiko_ssh.py
@@ -248,11 +248,13 @@ from ansible.errors import (
AnsibleError,
AnsibleFileNotFound,
)
-from ansible.module_utils.compat.paramiko import PARAMIKO_IMPORT_ERR, paramiko
+
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.module_utils.compat.paramiko import _PARAMIKO_IMPORT_ERR as PARAMIKO_IMPORT_ERR, _paramiko as paramiko
from ansible.plugins.connection import ConnectionBase
from ansible.utils.display import Display
from ansible.utils.path import makedirs_safe
-from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.module_utils._internal import _deprecator
display = Display()
@@ -326,6 +328,15 @@ class Connection(ConnectionBase):
transport = 'paramiko'
_log_channel: str | None = None
+ def __init__(self, *args, **kwargs):
+ display.deprecated( # pylint: disable=ansible-deprecated-unnecessary-collection-name
+ msg='The paramiko connection plugin is deprecated.',
+ version='2.21',
+ deprecator=_deprecator.ANSIBLE_CORE_DEPRECATOR, # entire plugin being removed; this improves the messaging
+ )
+
+ super().__init__(*args, **kwargs)
+
def _cache_key(self) -> str:
return "%s__%s__" % (self.get_option('remote_addr'), self.get_option('remote_user'))
@@ -402,7 +413,7 @@ class Connection(ConnectionBase):
# TODO: check if we need to look at several possible locations, possible for loop
ssh.load_system_host_keys(ssh_known_hosts)
break
- except IOError:
+ except OSError:
pass # file was not found, but not required to function
ssh.load_system_host_keys()
@@ -444,19 +455,18 @@ class Connection(ConnectionBase):
)
except paramiko.ssh_exception.BadHostKeyException as e:
raise AnsibleConnectionFailure('host key mismatch for %s' % e.hostname)
- except paramiko.ssh_exception.AuthenticationException as e:
- msg = 'Failed to authenticate: {0}'.format(to_text(e))
- raise AnsibleAuthenticationFailure(msg)
- except Exception as e:
- msg = to_text(e)
+ except paramiko.ssh_exception.AuthenticationException as ex:
+ raise AnsibleAuthenticationFailure() from ex
+ except Exception as ex:
+ msg = str(ex)
if u"PID check failed" in msg:
- raise AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible")
+ raise AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible") from ex
elif u"Private key file is encrypted" in msg:
msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u <username>.' % (
self.get_option('remote_user'), self.get_option('remote_addr'), port, msg)
- raise AnsibleConnectionFailure(msg)
+ raise AnsibleConnectionFailure(msg) from ex
else:
- raise AnsibleConnectionFailure(msg)
+ raise AnsibleConnectionFailure(msg) from ex
return ssh
@@ -557,8 +567,8 @@ class Connection(ConnectionBase):
try:
self.sftp.put(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict'))
- except IOError:
- raise AnsibleError("failed to transfer file to %s" % out_path)
+ except OSError as ex:
+ raise AnsibleError(f"Failed to transfer file to {out_path!r}.") from ex
def _connect_sftp(self) -> paramiko.sftp_client.SFTPClient:
@@ -583,8 +593,8 @@ class Connection(ConnectionBase):
try:
self.sftp.get(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict'))
- except IOError:
- raise AnsibleError("failed to transfer file from %s" % in_path)
+ except OSError as ex:
+ raise AnsibleError(f"Failed to transfer file from {in_path!r}.") from ex
def _any_keys_added(self) -> bool:
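A side note on the recurring error-handling change in these hunks: IOError has been an alias of OSError since Python 3.3, and chaining with `raise ... from ex` keeps the root cause visible in the traceback while presenting a clean message. A tiny illustration (paths and message are made up):

try:
    with open('/nonexistent/input') as src:
        data = src.read()
except OSError as ex:
    # the original exception rides along as __cause__ in the traceback
    raise RuntimeError("Failed to transfer file to '/tmp/out'.") from ex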
diff --git a/lib/ansible/plugins/connection/psrp.py b/lib/ansible/plugins/connection/psrp.py
index 95348d61079..cef9b4346d7 100644
--- a/lib/ansible/plugins/connection/psrp.py
+++ b/lib/ansible/plugins/connection/psrp.py
@@ -308,11 +308,13 @@ import base64
import json
import logging
import os
+import shlex
import typing as t
from ansible import constants as C
from ansible.errors import AnsibleConnectionFailure, AnsibleError
from ansible.errors import AnsibleFileNotFound
+from ansible.executor.powershell.module_manifest import _bootstrap_powershell_script
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase
@@ -431,8 +433,10 @@ class Connection(ConnectionBase):
sudoable=sudoable)
pwsh_in_data: bytes | str | None = None
+ script_args: list[str] | None = None
- if cmd.startswith(" ".join(_common_args) + " -EncodedCommand"):
+ common_args_prefix = " ".join(_common_args)
+ if cmd.startswith(f"{common_args_prefix} -EncodedCommand"):
# This is a PowerShell script encoded by the shell plugin, we will
# decode the script and execute it in the runspace instead of
# starting a new interpreter to save on time
@@ -457,6 +461,17 @@ class Connection(ConnectionBase):
display.vvv("PSRP: EXEC (via pipeline wrapper)")
else:
display.vvv("PSRP: EXEC %s" % script, host=self._psrp_host)
+
+ elif cmd.startswith(f"{common_args_prefix} -File "): # trailing space is on purpose
+ # Used when executing a script file; we will execute it in the runspace process
+ # instead of in a new subprocess
+ script = 'param([string]$Path, [Parameter(ValueFromRemainingArguments)][string[]]$ScriptArgs) & $Path @ScriptArgs'
+
+ # Using shlex isn't perfect but it's good enough.
+ cmd = cmd[len(common_args_prefix) + 7:]
+ script_args = shlex.split(cmd)
+ display.vvv(f"PSRP: EXEC {cmd}")
+
else:
# In other cases we want to execute the cmd as the script. We add on the 'exit $LASTEXITCODE' to ensure the
# rc is propagated back to the connection plugin.
@@ -464,7 +479,11 @@ class Connection(ConnectionBase):
pwsh_in_data = in_data
display.vvv(u"PSRP: EXEC %s" % script, host=self._psrp_host)
- rc, stdout, stderr = self._exec_psrp_script(script, pwsh_in_data)
+ rc, stdout, stderr = self._exec_psrp_script(
+ script=script,
+ input_data=pwsh_in_data.splitlines() if pwsh_in_data else None,
+ arguments=script_args,
+ )
return rc, stdout, stderr
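A quick sketch of the argument splitting in the new `-File` branch (the prefix and command line below are made up; as the comment in the diff notes, shlex only approximates PowerShell's quoting rules):

import shlex

common_args_prefix = 'powershell.exe -NoProfile -NonInteractive'  # illustrative
cmd = common_args_prefix + " -File 'C:\\scripts\\my script.ps1' -Verbose"

# len(' -File ') == 7, matching the offset used in the plugin
rest = cmd[len(common_args_prefix) + 7:]
print(shlex.split(rest))  # ['C:\\scripts\\my script.ps1', '-Verbose']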
def put_file(self, in_path: str, out_path: str) -> None:
@@ -473,101 +492,9 @@ class Connection(ConnectionBase):
out_path = self._shell._unquote(out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._psrp_host)
- copy_script = """begin {
- $ErrorActionPreference = "Stop"
- $WarningPreference = "Continue"
- $path = $MyInvocation.UnboundArguments[0]
- $fd = [System.IO.File]::Create($path)
- $algo = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()
- $bytes = @()
-
- $bindingFlags = [System.Reflection.BindingFlags]'NonPublic, Instance'
- Function Get-Property {
- <#
- .SYNOPSIS
- Gets the private/internal property specified of the object passed in.
- #>
- Param (
- [Parameter(Mandatory=$true, ValueFromPipeline=$true)]
- [System.Object]
- $Object,
-
- [Parameter(Mandatory=$true, Position=1)]
- [System.String]
- $Name
- )
-
- $Object.GetType().GetProperty($Name, $bindingFlags).GetValue($Object, $null)
- }
-
- Function Set-Property {
- <#
- .SYNOPSIS
- Sets the private/internal property specified on the object passed in.
- #>
- Param (
- [Parameter(Mandatory=$true, ValueFromPipeline=$true)]
- [System.Object]
- $Object,
-
- [Parameter(Mandatory=$true, Position=1)]
- [System.String]
- $Name,
-
- [Parameter(Mandatory=$true, Position=2)]
- [AllowNull()]
- [System.Object]
- $Value
- )
-
- $Object.GetType().GetProperty($Name, $bindingFlags).SetValue($Object, $Value, $null)
- }
-
- Function Get-Field {
- <#
- .SYNOPSIS
- Gets the private/internal field specified of the object passed in.
- #>
- Param (
- [Parameter(Mandatory=$true, ValueFromPipeline=$true)]
- [System.Object]
- $Object,
-
- [Parameter(Mandatory=$true, Position=1)]
- [System.String]
- $Name
- )
-
- $Object.GetType().GetField($Name, $bindingFlags).GetValue($Object)
- }
-
- # MaximumAllowedMemory is required to be set to so we can send input data that exceeds the limit on a PS
- # Runspace. We use reflection to access/set this property as it is not accessible publicly. This is not ideal
- # but works on all PowerShell versions I've tested with. We originally used WinRS to send the raw bytes to the
- # host but this falls flat if someone is using a custom PS configuration name so this is a workaround. This
- # isn't required for smaller files so if it fails we ignore the error and hope it wasn't needed.
- # https://github.com/PowerShell/PowerShell/blob/c8e72d1e664b1ee04a14f226adf655cced24e5f0/src/System.Management.Automation/engine/serialization.cs#L325
- try {
- $Host | Get-Property 'ExternalHost' | `
- Get-Field '_transportManager' | `
- Get-Property 'Fragmentor' | `
- Get-Property 'DeserializationContext' | `
- Set-Property 'MaximumAllowedMemory' $null
- } catch {}
-}
-process {
- $bytes = [System.Convert]::FromBase64String($input)
- $algo.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) > $null
- $fd.Write($bytes, 0, $bytes.Length)
-}
-end {
- $fd.Close()
-
- $algo.TransformFinalBlock($bytes, 0, 0) > $null
- $hash = [System.BitConverter]::ToString($algo.Hash).Replace('-', '').ToLowerInvariant()
- Write-Output -InputObject "{`"sha1`":`"$hash`"}"
-}
-"""
+ script, in_data = _bootstrap_powershell_script('psrp_put_file.ps1', {
+ 'Path': out_path,
+ }, has_input=True)
# Get the buffer size of each fragment to send, subtract 82 for the fragment, message, and other header info
# fields that PSRP adds. Adjust to size of the base64 encoded bytes length.
@@ -580,6 +507,8 @@ end {
raise AnsibleFileNotFound('file or module does not exist: "%s"' % to_native(in_path))
def read_gen():
+ yield from in_data.decode().splitlines()
+
offset = 0
with open(b_in_path, 'rb') as src_fd:
@@ -598,7 +527,7 @@ end {
if offset == 0: # empty file
yield [""]
- rc, stdout, stderr = self._exec_psrp_script(copy_script, read_gen(), arguments=[out_path])
+ rc, stdout, stderr = self._exec_psrp_script(script, read_gen())
if rc != 0:
raise AnsibleError(to_native(stderr))
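The reworked `read_gen` above chains two streams into a single pipeline input: first the bootstrap script's stdin lines, then the file itself as base64 chunks. A simplified sketch of that shape (the chunk size here is illustrative; the plugin derives its buffer size from the PSRP fragment size):

import base64

def read_gen(bootstrap_stdin: bytes, path: str, chunk_size: int = 3 * 1024):
    # stream the bootstrap script's input lines first...
    yield from bootstrap_stdin.decode().splitlines()

    # ...then the file, base64-encoded chunk by chunk; keeping the chunk
    # size a multiple of 3 avoids interior '=' padding between chunks
    with open(path, 'rb') as src:
        while chunk := src.read(chunk_size):
            yield base64.b64encode(chunk).decode()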
@@ -622,6 +551,7 @@ end {
in_path = self._shell._unquote(in_path)
out_path = out_path.replace('\\', '/')
+ b_out_path = to_bytes(out_path, errors='surrogate_or_strict')
# because we are dealing with base64 data we need to get the max size
# of the bytes that the base64 size would equal
@@ -629,74 +559,38 @@ end {
(self.runspace.connection.max_payload_size / 4 * 3))
buffer_size = max_b64_size - (max_b64_size % 1024)
- # setup the file stream with read only mode
- setup_script = """param([string]$Path)
-$ErrorActionPreference = "Stop"
-
-if (Test-Path -LiteralPath $path -PathType Leaf) {
- $fs = New-Object -TypeName System.IO.FileStream -ArgumentList @(
- $path,
- [System.IO.FileMode]::Open,
- [System.IO.FileAccess]::Read,
- [System.IO.FileShare]::Read
- )
-} elseif (Test-Path -Path $path -PathType Container) {
- Write-Output -InputObject "[DIR]"
-} else {
- Write-Error -Message "$path does not exist"
- $host.SetShouldExit(1)
-}"""
-
- # read the file stream at the offset and return the b64 string
- read_script = """param([int64]$Offset, [int]$BufferSize)
-$ErrorActionPreference = "Stop"
-$fs.Seek($Offset, [System.IO.SeekOrigin]::Begin) > $null
-$buffer = New-Object -TypeName byte[] -ArgumentList $BufferSize
-$read = $fs.Read($buffer, 0, $buffer.Length)
-
-if ($read -gt 0) {
- [System.Convert]::ToBase64String($buffer, 0, $read)
-}"""
-
- # need to run the setup script outside of the local scope so the
- # file stream stays active between fetch operations
- rc, stdout, stderr = self._exec_psrp_script(
- setup_script,
- use_local_scope=False,
- arguments=[in_path],
- )
- if rc != 0:
- raise AnsibleError("failed to setup file stream for fetch '%s': %s"
- % (out_path, to_native(stderr)))
- elif stdout.strip() == '[DIR]':
- # to be consistent with other connection plugins, we assume the caller has created the target dir
- return
+ script, in_data = _bootstrap_powershell_script('psrp_fetch_file.ps1', {
+ 'Path': in_path,
+ 'BufferSize': buffer_size,
+ })
- b_out_path = to_bytes(out_path, errors='surrogate_or_strict')
- # to be consistent with other connection plugins, we assume the caller has created the target dir
- offset = 0
- with open(b_out_path, 'wb') as out_file:
- while True:
- display.vvvvv("PSRP FETCH %s to %s (offset=%d" %
- (in_path, out_path, offset), host=self._psrp_host)
- rc, stdout, stderr = self._exec_psrp_script(
- read_script,
- arguments=[offset, buffer_size],
- )
- if rc != 0:
- raise AnsibleError("failed to transfer file to '%s': %s"
- % (out_path, to_native(stderr)))
+ ps = PowerShell(self.runspace)
+ ps.add_script(script)
+ ps.begin_invoke(in_data.decode().splitlines())
- data = base64.b64decode(stdout.strip())
- out_file.write(data)
- if len(data) < buffer_size:
- break
- offset += len(data)
+ # Call poll once to get the first output telling us if it's a file/dir/failure
+ ps.poll_invoke()
- rc, stdout, stderr = self._exec_psrp_script("$fs.Close()")
- if rc != 0:
- display.warning("failed to close remote file stream of file "
- "'%s': %s" % (in_path, to_native(stderr)))
+ if ps.output:
+ if ps.output.pop(0) == '[DIR]':
+ # to be consistent with other connection plugins, we assume the caller has created the target dir
+ return
+
+ with open(b_out_path, 'wb') as out_file:
+ while True:
+ while ps.output:
+ data = base64.b64decode(ps.output.pop(0))
+ out_file.write(data)
+
+ if ps.state == PSInvocationState.RUNNING:
+ ps.poll_invoke()
+ else:
+ break
+
+ ps.end_invoke()
+ rc, stdout, stderr = self._parse_pipeline_result(ps)
+ if rc != 0:
+ raise AnsibleError(f"failed to transfer file to '{out_path}': {to_text(stderr)}")
def close(self) -> None:
if self.runspace and self.runspace.state == RunspacePoolState.OPENED:
@@ -837,6 +731,23 @@ if ($read -gt 0) {
for error in pipeline.streams.error:
# the error record is not as fully fleshed out like we usually get
# in PS, we will manually create it here
+ # NativeCommandError and NativeCommandErrorMessage are special
+ # cases used for stderr from a subprocess; we will just print the
+ # error message
+ if error.fq_error == 'NativeCommandErrorMessage' and not error.target_name:
+ # This can be removed once Server 2016 is EOL and no longer
+ # supported. PS 5.1 on 2016 emits the first line as one error
+ # record under NativeCommandError; subsequent records are the raw
+ # stderr in chunks of up to 4096 chars. Each entry is the raw
+ # stderr value without any newlines appended, so we use the
+ # value as-is. We know it's 2016 because the target_name is
+ # empty in this scenario.
+ stderr_list.append(str(error))
+ continue
+ elif error.fq_error in ['NativeCommandError', 'NativeCommandErrorMessage']:
+ stderr_list.append(f"{error}\r\n")
+ continue
+
command_name = "%s : " % error.command_name if error.command_name else ''
position = "%s\r\n" % error.invocation_position_message if error.invocation_position_message else ''
error_msg = "%s%s\r\n%s" \
@@ -847,11 +758,11 @@ if ($read -gt 0) {
stacktrace = error.script_stacktrace
if display.verbosity >= 3 and stacktrace is not None:
error_msg += "\r\nStackTrace:\r\n%s" % stacktrace
- stderr_list.append(error_msg)
+ stderr_list.append(f"{error_msg}\r\n")
if len(self.host.ui.stderr) > 0:
stderr_list += self.host.ui.stderr
- stderr = u"\r\n".join([to_text(o) for o in stderr_list])
+ stderr = "".join([to_text(o) for o in stderr_list])
display.vvvvv("PSRP RC: %d" % rc, host=self._psrp_host)
display.vvvvv("PSRP STDOUT: %s" % stdout, host=self._psrp_host)
diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py
index 299039faa5b..08ff188cf6c 100644
--- a/lib/ansible/plugins/connection/ssh.py
+++ b/lib/ansible/plugins/connection/ssh.py
@@ -62,10 +62,27 @@ DOCUMENTATION = """
- name: ansible_password
- name: ansible_ssh_pass
- name: ansible_ssh_password
+ password_mechanism:
+ description: Mechanism to use for handling the SSH password prompt.
+ type: string
+ default: ssh_askpass
+ choices:
+ - ssh_askpass
+ - sshpass
+ - disable
+ version_added: '2.19'
+ env:
+ - name: ANSIBLE_SSH_PASSWORD_MECHANISM
+ ini:
+ - {key: password_mechanism, section: ssh_connection}
+ vars:
+ - name: ansible_ssh_password_mechanism
sshpass_prompt:
description:
- - Password prompt that sshpass should search for. Supported by sshpass 1.06 and up.
+ - Password prompt that C(sshpass)/C(SSH_ASKPASS) should search for.
+ - Supported by sshpass 1.06 and up when O(password_mechanism) is set to V(sshpass).
- Defaults to C(Enter PIN for) when pkcs11_provider is set.
+ - Defaults to C(assword) when O(password_mechanism) is set to V(ssh_askpass).
default: ''
type: string
ini:
@@ -248,7 +265,6 @@ DOCUMENTATION = """
vars:
- name: ansible_pipelining
- name: ansible_ssh_pipelining
-
private_key_file:
description:
- Path to private key file to use for authentication.
@@ -264,7 +280,27 @@ DOCUMENTATION = """
cli:
- name: private_key_file
option: '--private-key'
-
+ private_key:
+ description:
+ - Private key contents in PEM format. Requires the C(SSH_AGENT) configuration to be enabled.
+ type: string
+ env:
+ - name: ANSIBLE_PRIVATE_KEY
+ vars:
+ - name: ansible_private_key
+ - name: ansible_ssh_private_key
+ version_added: '2.19'
+ private_key_passphrase:
+ description:
+ - Private key passphrase, dependent on O(private_key).
+ - This does NOT have any effect when used with O(private_key_file).
+ type: string
+ env:
+ - name: ANSIBLE_PRIVATE_KEY_PASSPHRASE
+ vars:
+ - name: ansible_private_key_passphrase
+ - name: ansible_ssh_private_key_passphrase
+ version_added: '2.19'
control_path:
description:
- This is the location to save SSH's ControlPath sockets, it uses SSH's variable substitution.
@@ -296,7 +332,9 @@ DOCUMENTATION = """
version_added: '2.7'
sftp_batch_mode:
default: true
- description: 'TODO: write it'
+ description:
+ - When set to C(True), sftp will be run in batch mode, allowing detection of transfer errors.
+ - When set to C(False), sftp will not be run in batch mode, preventing detection of transfer errors.
env: [{name: ANSIBLE_SFTP_BATCH_MODE}]
ini:
- {key: sftp_batch_mode, section: ssh_connection}
@@ -357,41 +395,69 @@ DOCUMENTATION = """
type: string
description:
- "PKCS11 SmartCard provider such as opensc, example: /usr/local/lib/opensc-pkcs11.so"
- - Requires sshpass version 1.06+, sshpass must support the -P option.
env: [{name: ANSIBLE_PKCS11_PROVIDER}]
ini:
- {key: pkcs11_provider, section: ssh_connection}
vars:
- name: ansible_ssh_pkcs11_provider
+ verbosity:
+ version_added: '2.19'
+ default: 0
+ type: int
+ description:
+ - Requested verbosity level for the SSH CLI.
+ env: [{name: ANSIBLE_SSH_VERBOSITY}]
+ ini:
+ - {key: verbosity, section: ssh_connection}
+ vars:
+ - name: ansible_ssh_verbosity
"""
import collections.abc as c
+import argparse
import errno
+import contextlib
import fcntl
import hashlib
import io
+import json
import os
+import pathlib
import pty
import re
import selectors
import shlex
+import shutil
import subprocess
+import sys
+import tempfile
import time
import typing as t
-
from functools import wraps
+from multiprocessing.shared_memory import SharedMemory
+
+from ansible import constants as C
from ansible.errors import (
AnsibleAuthenticationFailure,
AnsibleConnectionFailure,
AnsibleError,
AnsibleFileNotFound,
)
-from ansible.module_utils.six import PY3, text_type, binary_type
+from ansible.module_utils.six import text_type, binary_type
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase, BUFSIZE
-from ansible.plugins.shell.powershell import _parse_clixml
+from ansible.plugins.shell.powershell import _replace_stderr_clixml
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath, makedirs_safe
+from ansible._internal._ssh import _ssh_agent
+
+try:
+ from cryptography.hazmat.primitives import serialization
+except ImportError:
+ HAS_CRYPTOGRAPHY = False
+else:
+ HAS_CRYPTOGRAPHY = True
+
display = Display()
@@ -408,6 +474,11 @@ b_NOT_SSH_ERRORS = (b'Traceback (most recent call last):', # Python-2.6 when th
SSHPASS_AVAILABLE = None
SSH_DEBUG = re.compile(r'^debug\d+: .*')
+_HAS_RESOURCE_TRACK = sys.version_info[:2] >= (3, 13)
+
+PKCS11_DEFAULT_PROMPT = 'Enter PIN for '
+SSH_ASKPASS_DEFAULT_PROMPT = 'assword'
+
class AnsibleControlPersistBrokenPipeError(AnsibleError):
""" ControlPersist broken pipe """
@@ -450,6 +521,7 @@ def _handle_error(
'Upgrade sshpass to use sshpass_prompt, or otherwise switch to ssh keys.'
raise AnsibleError('{0} {1}'.format(msg, details))
msg = '{0} {1}'.format(msg, details)
+ raise AnsibleConnectionFailure(msg)
if return_tuple[0] == 255:
SSH_ERROR = True
@@ -496,9 +568,10 @@ def _ssh_retry(
remaining_tries = int(self.get_option('reconnection_retries')) + 1
cmd_summary = u"%s..." % to_text(args[0])
conn_password = self.get_option('password') or self._play_context.password
+ is_sshpass = self.get_option('password_mechanism') == 'sshpass'
for attempt in range(remaining_tries):
cmd = t.cast(list[bytes], args[0])
- if attempt != 0 and conn_password and isinstance(cmd, list):
+ if attempt != 0 and is_sshpass and conn_password and isinstance(cmd, list):
# If this is a retry, the fd/pipe for sshpass is closed, and we need a new one
self.sshpass_pipe = os.pipe()
cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')
@@ -517,7 +590,7 @@ def _ssh_retry(
except (AnsibleControlPersistBrokenPipeError):
# Retry one more time because of the ControlPersist broken pipe (see #16731)
cmd = t.cast(list[bytes], args[0])
- if conn_password and isinstance(cmd, list):
+ if is_sshpass and conn_password and isinstance(cmd, list):
# This is a retry, so the fd/pipe for sshpass is closed, and we need a new one
self.sshpass_pipe = os.pipe()
cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')
@@ -558,6 +631,24 @@ def _ssh_retry(
return wrapped
+def _clean_shm(func):
+ def inner(self, *args, **kwargs):
+ try:
+ ret = func(self, *args, **kwargs)
+ finally:
+ if self.shm:
+ self.shm.close()
+ with contextlib.suppress(FileNotFoundError):
+ self.shm.unlink()
+ if not _HAS_RESOURCE_TRACK:
+ # deprecated: description='unneeded due to track argument for SharedMemory' python_version='3.12'
+ # There is a resource tracking issue where the resource is deleted, but tracking still has a record
+ # This will effectively overwrite the record and remove it
+ SharedMemory(name=self.shm.name, create=True, size=1).unlink()
+ return ret
+ return inner
+
+
class Connection(ConnectionBase):
""" ssh based connections """
@@ -573,6 +664,8 @@ class Connection(ConnectionBase):
self.user = self._play_context.remote_user
self.control_path: str | None = None
self.control_path_dir: str | None = None
+ self.shm: SharedMemory | None = None
+ self.sshpass_pipe: tuple[int, int] | None = None
# Windows operates differently from a POSIX connection/shell plugin,
# we need to set various properties to ensure SSH on Windows continues
@@ -583,6 +676,13 @@ class Connection(ConnectionBase):
self.module_implementation_preferences = ('.ps1', '.exe', '')
self.allow_executable = False
+ # parser to discover 'passed options', used later on for pipelining resolution
+ self._tty_parser = argparse.ArgumentParser()
+ self._tty_parser.add_argument('-t', action='count')
+ self._tty_parser.add_argument('-o', action='append')
+
+ self._populated_agent: pathlib.Path | None = None
+
# The connection is created by running ssh/scp/sftp from the exec_command,
# put_file, and fetch_file methods, so we don't need to do any connection
# management here.
@@ -614,17 +714,10 @@ class Connection(ConnectionBase):
def _sshpass_available() -> bool:
global SSHPASS_AVAILABLE
- # We test once if sshpass is available, and remember the result. It
- # would be nice to use distutils.spawn.find_executable for this, but
- # distutils isn't always available; shutils.which() is Python3-only.
+ # We test once if sshpass is available, and remember the result.
if SSHPASS_AVAILABLE is None:
- try:
- p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- p.communicate()
- SSHPASS_AVAILABLE = True
- except OSError:
- SSHPASS_AVAILABLE = False
+ SSHPASS_AVAILABLE = shutil.which('sshpass') is not None
return SSHPASS_AVAILABLE
@@ -664,6 +757,52 @@ class Connection(ConnectionBase):
display.vvvvv(u'SSH: %s: (%s)' % (explanation, ')('.join(to_text(a) for a in b_args)), host=self.host)
b_command += b_args
+ def _populate_agent(self) -> pathlib.Path:
+ """Adds configured private key identity to the SSH agent. Returns a path to a file containing the public key."""
+ if self._populated_agent:
+ return self._populated_agent
+
+ if (auth_sock := C.config.get_config_value('SSH_AGENT')) == 'none':
+ raise AnsibleError('Cannot utilize private_key with SSH_AGENT disabled')
+
+ key_data = self.get_option('private_key')
+ passphrase = self.get_option('private_key_passphrase')
+
+ private_key, public_key, fingerprint = _ssh_agent.key_data_into_crypto_objects(
+ to_bytes(key_data),
+ to_bytes(passphrase) if passphrase else None,
+ )
+
+ with _ssh_agent.SshAgentClient(auth_sock) as client:
+ if public_key not in client:
+ display.vvv(f'SSH: SSH_AGENT adding {fingerprint} to agent', host=self.host)
+ client.add(
+ private_key,
+ f'[added by ansible: PID={os.getpid()}, UID={os.getuid()}, EUID={os.geteuid()}, TIME={time.time()}]',
+ C.config.get_config_value('SSH_AGENT_KEY_LIFETIME'),
+ )
+ else:
+ display.vvv(f'SSH: SSH_AGENT {fingerprint} exists in agent', host=self.host)
+ # Write the public key to disk, to be provided as IdentityFile.
+ # This allows ssh to pick an explicit key in the agent to use,
+ # preventing ssh from attempting all keys in the agent.
+ pubkey_path = self._populated_agent = pathlib.Path(C.DEFAULT_LOCAL_TMP).joinpath(
+ fingerprint.replace('/', '-') + '.pub'
+ )
+ if os.path.exists(pubkey_path):
+ return pubkey_path
+
+ with tempfile.NamedTemporaryFile(dir=C.DEFAULT_LOCAL_TMP, delete=False) as f:
+ f.write(public_key.public_bytes(
+ encoding=serialization.Encoding.OpenSSH,
+ format=serialization.PublicFormat.OpenSSH
+ ))
+ # move atomically to prevent race conditions; silently succeeds if the target exists
+ os.rename(f.name, pubkey_path)
+ os.chmod(pubkey_path, mode=0o400)
+
+ return self._populated_agent
+
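The write-then-rename at the end of `_populate_agent` is a general atomic-publish idiom; a minimal sketch under the same assumptions (temp file and target on the same POSIX filesystem; function name is illustrative):

import os
import tempfile

def publish_atomically(directory: str, name: str, data: bytes) -> str:
    """Write data to directory/name without readers ever seeing a partial file."""
    target = os.path.join(directory, name)
    if os.path.exists(target):
        return target  # another worker already published it

    with tempfile.NamedTemporaryFile(dir=directory, delete=False) as f:
        f.write(data)

    os.rename(f.name, target)  # atomic on POSIX; silently replaces an existing target
    os.chmod(target, mode=0o400)
    return target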
def _build_command(self, binary: str, subsystem: str, *other_args: bytes | str) -> list[bytes]:
"""
Takes an executable (ssh, scp, sftp or wrapper) and optional extra arguments and returns the remote command
@@ -677,17 +816,18 @@ class Connection(ConnectionBase):
b_command = []
conn_password = self.get_option('password') or self._play_context.password
+ pkcs11_provider = self.get_option("pkcs11_provider")
+ password_mechanism = self.get_option('password_mechanism')
#
# First, the command to invoke
#
- # If we want to use password authentication, we have to set up a pipe to
+ # If we want to use sshpass for password authentication, we have to set up a pipe to
# write the password to sshpass.
- pkcs11_provider = self.get_option("pkcs11_provider")
- if conn_password or pkcs11_provider:
+ if password_mechanism == 'sshpass' and (conn_password or pkcs11_provider):
if not self._sshpass_available():
- raise AnsibleError("to use the 'ssh' connection type with passwords or pkcs11_provider, you must install the sshpass program")
+ raise AnsibleError("to use the password_mechanism=sshpass, you must install the sshpass program")
if not conn_password and pkcs11_provider:
raise AnsibleError("to use pkcs11_provider you must specify a password/pin")
@@ -697,7 +837,7 @@ class Connection(ConnectionBase):
password_prompt = self.get_option('sshpass_prompt')
if not password_prompt and pkcs11_provider:
# Set default password prompt for pkcs11_provider to make it clear it's a PIN
- password_prompt = 'Enter PIN for '
+ password_prompt = PKCS11_DEFAULT_PROMPT
if password_prompt:
b_command += [b'-P', to_bytes(password_prompt, errors='surrogate_or_strict')]
@@ -720,16 +860,16 @@ class Connection(ConnectionBase):
# sftp batch mode allows us to correctly catch failed transfers, but can
# be disabled if the client side doesn't support the option. However,
# sftp batch mode does not prompt for passwords so it must be disabled
- # if not using controlpersist and using sshpass
+ # if not using controlpersist and using password auth
b_args: t.Iterable[bytes]
if subsystem == 'sftp' and self.get_option('sftp_batch_mode'):
if conn_password:
b_args = [b'-o', b'BatchMode=no']
- self._add_args(b_command, b_args, u'disable batch mode for sshpass')
+ self._add_args(b_command, b_args, u'disable batch mode for password auth')
b_command += [b'-b', b'-']
- if display.verbosity:
- b_command.append(b'-' + (b'v' * display.verbosity))
+ if (verbosity := self.get_option('verbosity')) > 0:
+ b_command.append(b'-' + (b'v' * verbosity))
# Next, we add ssh_args
ssh_args = self.get_option('ssh_args')
@@ -748,8 +888,14 @@ class Connection(ConnectionBase):
b_args = (b"-o", b"Port=" + to_bytes(self.port, nonstring='simplerepr', errors='surrogate_or_strict'))
self._add_args(b_command, b_args, u"ANSIBLE_REMOTE_PORT/remote_port/ansible_port set")
- key = self.get_option('private_key_file')
- if key:
+ if self.get_option('private_key'):
+ try:
+ key = self._populate_agent()
+ except Exception as e:
+ raise AnsibleAuthenticationFailure('Failed to add configured private key into ssh-agent.') from e
+ b_args = (b'-o', b'IdentitiesOnly=yes', b'-o', to_bytes(f'IdentityFile="{key}"', errors='surrogate_or_strict'))
+ self._add_args(b_command, b_args, "ANSIBLE_PRIVATE_KEY/private_key set")
+ elif key := self.get_option('private_key_file'):
b_args = (b"-o", b'IdentityFile="' + to_bytes(os.path.expanduser(key), errors='surrogate_or_strict') + b'"')
self._add_args(b_command, b_args, u"ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set")
@@ -815,6 +961,13 @@ class Connection(ConnectionBase):
b_args = (b"-o", b'ControlPath="%s"' % to_bytes(self.control_path % dict(directory=cpdir), errors='surrogate_or_strict'))
self._add_args(b_command, b_args, u"found only ControlPersist; added ControlPath")
+ if password_mechanism == "ssh_askpass":
+ self._add_args(
+ b_command,
+ (b"-o", b"NumberOfPasswordPrompts=1"),
+ "Restrict number of password prompts in case incorrect password is provided.",
+ )
+
# Finally, we add any caller-supplied extras.
if other_args:
b_command += [to_bytes(a) for a in other_args]
@@ -833,16 +986,13 @@ class Connection(ConnectionBase):
try:
fh.write(to_bytes(in_data))
fh.close()
- except (OSError, IOError) as e:
+ except OSError as ex:
# The ssh connection may have already terminated at this point, with a more useful error
# Only raise AnsibleConnectionFailure if the ssh process is still alive
time.sleep(0.001)
ssh_process.poll()
if getattr(ssh_process, 'returncode', None) is None:
- raise AnsibleConnectionFailure(
- 'Data could not be sent to remote host "%s". Make sure this host can be reached '
- 'over ssh: %s' % (self.host, to_native(e)), orig_exc=e
- )
+ raise AnsibleConnectionFailure(f'Data could not be sent to remote host {self.host!r}. Make sure this host can be reached over SSH.') from ex
display.debug(u'Sent initial data (%d bytes)' % len(in_data))
@@ -852,7 +1002,7 @@ class Connection(ConnectionBase):
""" Terminate a process, ignoring errors """
try:
p.terminate()
- except (OSError, IOError):
+ except OSError:
pass
# This is separate from _run() because we need to do the same thing for stdout
@@ -906,6 +1056,57 @@ class Connection(ConnectionBase):
return b''.join(output), remainder
+ def _init_shm(self) -> dict[str, t.Any]:
+ env = os.environ.copy()
+ popen_kwargs: dict[str, t.Any] = {}
+
+ if self.get_option('password_mechanism') != 'ssh_askpass':
+ return popen_kwargs
+
+ conn_password = self.get_option('password') or self._play_context.password
+ pkcs11_provider = self.get_option("pkcs11_provider")
+ if not conn_password and pkcs11_provider:
+ raise AnsibleError("to use pkcs11_provider you must specify a password/pin")
+
+ if not conn_password:
+ return popen_kwargs
+
+ kwargs = {}
+ if _HAS_RESOURCE_TRACK:
+ # deprecated: description='track argument for SharedMemory always available' python_version='3.12'
+ kwargs['track'] = False
+ self.shm = shm = SharedMemory(create=True, size=16384, **kwargs) # type: ignore[arg-type]
+
+ sshpass_prompt = self.get_option('sshpass_prompt')
+ if not sshpass_prompt and pkcs11_provider:
+ sshpass_prompt = PKCS11_DEFAULT_PROMPT
+ elif not sshpass_prompt:
+ sshpass_prompt = SSH_ASKPASS_DEFAULT_PROMPT
+
+ data = json.dumps({
+ 'password': conn_password,
+ 'prompt': sshpass_prompt,
+ }).encode('utf-8')
+ shm.buf[:len(data)] = bytearray(data)
+ shm.close()
+
+ env['_ANSIBLE_SSH_ASKPASS_SHM'] = str(self.shm.name)
+ adhoc = pathlib.Path(sys.argv[0]).with_name('ansible')
+ env['SSH_ASKPASS'] = str(adhoc) if adhoc.is_file() else 'ansible'
+
+ # SSH_ASKPASS_REQUIRE was added in openssh 8.4, prior to 8.4 there must be no tty, and DISPLAY must be set
+ env['SSH_ASKPASS_REQUIRE'] = 'force'
+ if not env.get('DISPLAY'):
+ # If the user has DISPLAY set, assume it is there for a reason
+ env['DISPLAY'] = '-'
+
+ popen_kwargs['env'] = env
+ # start_new_session runs setsid which detaches the tty to support the use of ASKPASS prior to openssh 8.4
+ popen_kwargs['start_new_session'] = True
+
+ return popen_kwargs
+
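To make the `_init_shm` handoff concrete: the parent serializes the password and expected prompt as JSON into a SharedMemory segment whose name is exported via `_ANSIBLE_SSH_ASKPASS_SHM`, and the re-invoked `ansible` binary, acting as SSH_ASKPASS, reads it back. A hedged sketch of both halves (function names are illustrative; the real helper lives elsewhere in ansible):

import json
from multiprocessing.shared_memory import SharedMemory

def write_secret(password: str, prompt: str) -> SharedMemory:
    shm = SharedMemory(create=True, size=16384)
    data = json.dumps({'password': password, 'prompt': prompt}).encode('utf-8')
    shm.buf[:len(data)] = data
    return shm  # caller exports shm.name to the child's environment

def read_secret(name: str) -> dict:
    shm = SharedMemory(name=name)
    try:
        # the payload is zero-padded out to the segment size; strip it
        return json.loads(bytes(shm.buf).rstrip(b'\x00'))
    finally:
        shm.close()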
+ @_clean_shm
def _bare_run(self, cmd: list[bytes], in_data: bytes | None, sudoable: bool = True, checkrc: bool = True) -> tuple[int, bytes, bytes]:
"""
Starts the command and communicates with it until it ends.
@@ -915,6 +1116,9 @@ class Connection(ConnectionBase):
display_cmd = u' '.join(shlex.quote(to_text(c)) for c in cmd)
display.vvv(u'SSH: EXEC {0}'.format(display_cmd), host=self.host)
+ conn_password = self.get_option('password') or self._play_context.password
+ password_mechanism = self.get_option('password_mechanism')
+
# Start the given command. If we don't need to pipeline data, we can try
# to use a pseudo-tty (ssh will have been invoked with -tt). If we are
# pipelining data, or can't create a pty, we fall back to using plain
@@ -927,39 +1131,30 @@ class Connection(ConnectionBase):
else:
cmd = list(map(to_bytes, cmd))
- conn_password = self.get_option('password') or self._play_context.password
+ popen_kwargs = self._init_shm()
+
+ if self.sshpass_pipe:
+ popen_kwargs['pass_fds'] = self.sshpass_pipe
if not in_data:
try:
# Make sure stdin is a proper pty to avoid tcgetattr errors
master, slave = pty.openpty()
- if PY3 and conn_password:
- # pylint: disable=unexpected-keyword-arg
- p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
- else:
- p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **popen_kwargs)
stdin = os.fdopen(master, 'wb', 0)
os.close(slave)
- except (OSError, IOError):
+ except OSError:
p = None
if not p:
try:
- if PY3 and conn_password:
- # pylint: disable=unexpected-keyword-arg
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
- else:
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, **popen_kwargs)
stdin = p.stdin # type: ignore[assignment] # stdin will be set and not None due to the calls above
- except (OSError, IOError) as e:
- raise AnsibleError('Unable to execute ssh command line on a controller due to: %s' % to_native(e))
+ except OSError as ex:
+ raise AnsibleError('Unable to execute ssh command line on a controller.') from ex
- # If we are using SSH password authentication, write the password into
- # the pipe we opened in _build_command.
-
- if conn_password:
+ if password_mechanism == 'sshpass' and conn_password:
os.close(self.sshpass_pipe[0])
try:
os.write(self.sshpass_pipe[1], to_bytes(conn_password) + b'\n')
@@ -983,7 +1178,7 @@ class Connection(ConnectionBase):
# Are we requesting privilege escalation? Right now, we may be invoked
# to execute sftp/scp with sudoable=True, but we can request escalation
- # only when using ssh. Otherwise we can send initial data straightaway.
+ # only when using ssh. Otherwise, we can send initial data straight away.
state = states.index('ready_to_send')
if to_bytes(self.get_option('ssh_executable')) in cmd and sudoable:
@@ -1047,7 +1242,7 @@ class Connection(ConnectionBase):
if poll is not None:
break
self._terminate_process(p)
- raise AnsibleError('Timeout (%ds) waiting for privilege escalation prompt: %s' % (timeout, to_native(b_stdout)))
+ raise AnsibleConnectionFailure('Timeout (%ds) waiting for privilege escalation prompt: %s' % (timeout, to_native(b_stdout)))
display.vvvvv(f'SSH: Timeout ({timeout}s) waiting for the output', host=self.host)
@@ -1178,10 +1373,15 @@ class Connection(ConnectionBase):
p.stdout.close()
p.stderr.close()
- if self.get_option('host_key_checking'):
- if cmd[0] == b"sshpass" and p.returncode == 6:
- raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support '
- 'this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
+ conn_password = self.get_option('password') or self._play_context.password
+ hostkey_fail = any((
+ (cmd[0] == b"sshpass" and p.returncode == 6),
+ b"read_passphrase: can't open /dev/tty" in b_stderr,
+ b"Host key verification failed" in b_stderr,
+ ))
+ if password_mechanism and self.get_option('host_key_checking') and conn_password and hostkey_fail:
+ raise AnsibleError('Using an SSH password instead of a key is not possible because Host Key checking is enabled. '
+ 'Please add this host\'s fingerprint to your known_hosts file to manage this host.')
controlpersisterror = b'Bad configuration option: ControlPersist' in b_stderr or b'unknown configuration option: ControlPersist' in b_stderr
if p.returncode != 0 and controlpersisterror:
@@ -1329,8 +1529,8 @@ class Connection(ConnectionBase):
(returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable)
# When running on Windows, stderr may contain CLIXML encoded output
- if getattr(self._shell, "_IS_WINDOWS", False) and stderr.startswith(b"#< CLIXML"):
- stderr = _parse_clixml(stderr)
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ stderr = _replace_stderr_clixml(stderr)
return (returncode, stdout, stderr)
@@ -1396,3 +1596,41 @@ class Connection(ConnectionBase):
def close(self) -> None:
self._connected = False
+
+ @property
+ def has_tty(self):
+ return self._is_tty_requested()
+
+ def _is_tty_requested(self):
+
+ # check if we require a tty (only from our args; we cannot see options set in configuration files)
+ opts = []
+ for opt in ('ssh_args', 'ssh_common_args', 'ssh_extra_args'):
+ attr = self.get_option(opt)
+ if attr is not None:
+ opts.extend(self._split_ssh_args(attr))
+
+ args, dummy = self._tty_parser.parse_known_args(opts)
+
+ if args.t:
+ return True
+
+ for arg in args.o or []:
+ if '=' in arg:
+ val = arg.split('=', 1)
+ else:
+ val = arg.split(maxsplit=1)
+
+ if val[0].lower().strip() == 'requesttty':
+ if val[1].lower().strip() in ('yes', 'force'):
+ return True
+
+ return False
+
+ def is_pipelining_enabled(self, wrap_async=False):
+ """ override parent method and ensure we don't request a tty """
+
+ if self._is_tty_requested():
+ return False
+ else:
+ return super(Connection, self).is_pipelining_enabled(wrap_async)
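And a quick demonstration of the `parse_known_args` approach `_is_tty_requested` uses, applied to a sample ssh_args string (this sketch handles only the `-o key=value` form; the plugin also accepts the space-separated form):

import argparse
import shlex

parser = argparse.ArgumentParser()
parser.add_argument('-t', action='count')
parser.add_argument('-o', action='append')

args, _unknown = parser.parse_known_args(
    shlex.split('-o RequestTTY=force -o StrictHostKeyChecking=no'))

wants_tty = bool(args.t)
for opt in args.o or []:
    key, _, value = opt.partition('=')
    if key.lower().strip() == 'requesttty' and value.lower().strip() in ('yes', 'force'):
        wants_tty = True

print(wants_tty)  # True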
diff --git a/lib/ansible/plugins/connection/winrm.py b/lib/ansible/plugins/connection/winrm.py
index 354acce7fad..179d848fe51 100644
--- a/lib/ansible/plugins/connection/winrm.py
+++ b/lib/ansible/plugins/connection/winrm.py
@@ -117,10 +117,6 @@ DOCUMENTATION = """
- kerberos usage mode.
- The managed option means Ansible will obtain a kerberos ticket.
- The manual one means a ticket must already have been obtained by the user.
- - If having issues with Ansible freezing when trying to obtain the
- Kerberos ticket, you can either set this to V(manual) and obtain
- it outside Ansible or install C(pexpect) through pip and try
- again.
choices: [managed, manual]
vars:
- name: ansible_winrm_kinit_mode
@@ -186,6 +182,7 @@ except ImportError:
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.errors import AnsibleFileNotFound
+from ansible.executor.powershell.module_manifest import _bootstrap_powershell_script
from ansible.module_utils.json_utils import _filter_non_json_lines
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
@@ -223,19 +220,6 @@ except ImportError as e:
HAS_XMLTODICT = False
XMLTODICT_IMPORT_ERR = e
-HAS_PEXPECT = False
-try:
- import pexpect
- # echo was added in pexpect 3.3+ which is newer than the RHEL package
- # we can only use pexpect for kerb auth if echo is a valid kwarg
- # https://github.com/ansible/ansible/issues/43462
- if hasattr(pexpect, 'spawn'):
- argspec = getfullargspec(pexpect.spawn.__init__)
- if 'echo' in argspec.args:
- HAS_PEXPECT = True
-except ImportError as e:
- pass
-
# used to try and parse the hostname and detect if IPv6 is being used
try:
import ipaddress
@@ -350,6 +334,7 @@ class Connection(ConnectionBase):
def _kerb_auth(self, principal: str, password: str) -> None:
if password is None:
password = ""
+ b_password = to_bytes(password, encoding='utf-8', errors='surrogate_or_strict')
self._kerb_ccache = tempfile.NamedTemporaryFile()
display.vvvvv("creating Kerberos CC at %s" % self._kerb_ccache.name)
@@ -376,60 +361,28 @@ class Connection(ConnectionBase):
kinit_cmdline.append(principal)
- # pexpect runs the process in its own pty so it can correctly send
- # the password as input even on MacOS which blocks subprocess from
- # doing so. Unfortunately it is not available on the built in Python
- # so we can only use it if someone has installed it
- if HAS_PEXPECT:
- proc_mechanism = "pexpect"
- command = kinit_cmdline.pop(0)
- password = to_text(password, encoding='utf-8',
- errors='surrogate_or_strict')
-
- display.vvvv("calling kinit with pexpect for principal %s"
- % principal)
- try:
- child = pexpect.spawn(command, kinit_cmdline, timeout=60,
- env=krb5env, echo=False)
- except pexpect.ExceptionPexpect as err:
- err_msg = "Kerberos auth failure when calling kinit cmd " \
- "'%s': %s" % (command, to_native(err))
- raise AnsibleConnectionFailure(err_msg)
+ display.vvvv(f"calling kinit for principal {principal}")
- try:
- child.expect(".*:")
- child.sendline(password)
- except OSError as err:
- # child exited before the pass was sent, Ansible will raise
- # error based on the rc below, just display the error here
- display.vvvv("kinit with pexpect raised OSError: %s"
- % to_native(err))
-
- # technically this is the stdout + stderr but to match the
- # subprocess error checking behaviour, we will call it stderr
- stderr = child.read()
- child.wait()
- rc = child.exitstatus
- else:
- proc_mechanism = "subprocess"
- b_password = to_bytes(password, encoding='utf-8',
- errors='surrogate_or_strict')
-
- display.vvvv("calling kinit with subprocess for principal %s"
- % principal)
- try:
- p = subprocess.Popen(kinit_cmdline, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env=krb5env)
+ # It is important to use start_new_session which spawns the process
+ # with setsid() to avoid it inheriting the current tty. On macOS it
+ # will force it to read from stdin rather than the tty.
+ try:
+ p = subprocess.Popen(
+ kinit_cmdline,
+ start_new_session=True,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=krb5env,
+ )
- except OSError as err:
- err_msg = "Kerberos auth failure when calling kinit cmd " \
- "'%s': %s" % (self._kinit_cmd, to_native(err))
- raise AnsibleConnectionFailure(err_msg)
+ except OSError as err:
+ err_msg = "Kerberos auth failure when calling kinit cmd " \
+ "'%s': %s" % (self._kinit_cmd, to_native(err))
+ raise AnsibleConnectionFailure(err_msg)
- stdout, stderr = p.communicate(b_password + b'\n')
- rc = p.returncode != 0
+ stdout, stderr = p.communicate(b_password + b'\n')
+ rc = p.returncode
if rc != 0:
# one last attempt at making sure the password does not exist
@@ -437,8 +390,7 @@ class Connection(ConnectionBase):
exp_msg = to_native(stderr.strip())
exp_msg = exp_msg.replace(to_native(password), "")
- err_msg = "Kerberos auth failure for principal %s with %s: %s" \
- % (principal, proc_mechanism, exp_msg)
+ err_msg = f"Kerberos auth failure for principal {principal}: {exp_msg}"
raise AnsibleConnectionFailure(err_msg)
display.vvvvv("kinit succeeded for principal %s" % principal)
@@ -627,7 +579,7 @@ class Connection(ConnectionBase):
def _winrm_exec(
self,
command: str,
- args: t.Iterable[bytes] = (),
+ args: t.Iterable[bytes | str] = (),
from_exec: bool = False,
stdin_iterator: t.Iterable[tuple[bytes, bool]] = None,
) -> tuple[int, bytes, bytes]:
@@ -652,9 +604,7 @@ class Connection(ConnectionBase):
self._winrm_write_stdin(command_id, stdin_iterator)
except Exception as ex:
- display.warning("ERROR DURING WINRM SEND INPUT - attempting to recover: %s %s"
- % (type(ex).__name__, to_text(ex)))
- display.debug(traceback.format_exc())
+ display.error_as_warning("ERROR DURING WINRM SEND INPUT. Attempting to recover.", ex)
stdin_push_failed = True
# Even on a failure above we try at least once to get the output
@@ -771,7 +721,16 @@ class Connection(ConnectionBase):
def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = True) -> tuple[int, bytes, bytes]:
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
- cmd_parts = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False)
+
+ encoded_prefix = self._shell._encode_script('', as_list=False, strict_mode=False, preserve_rc=False)
+ if cmd.startswith(encoded_prefix) or cmd.startswith("type "):
+ # Avoid double encoding the script: the former means we are already
+ # running the standard PowerShell command, while the latter is the
+ # no-pipeline case, which uses type to pipe the script into
+ # powershell and is known to work without re-encoding.
+ cmd_parts = cmd.split(" ")
+ else:
+ cmd_parts = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False)
# TODO: display something meaningful here
display.vvv("EXEC (via pipeline wrapper)")
@@ -784,7 +743,15 @@ class Connection(ConnectionBase):
return self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True, stdin_iterator=stdin_iterator)
# FUTURE: determine buffer size at runtime via remote winrm config?
- def _put_file_stdin_iterator(self, in_path: str, out_path: str, buffer_size: int = 250000) -> t.Iterable[tuple[bytes, bool]]:
+ def _put_file_stdin_iterator(
+ self,
+ initial_stdin: bytes,
+ in_path: str,
+ out_path: str,
+ buffer_size: int = 250000,
+ ) -> t.Iterable[tuple[bytes, bool]]:
+ yield initial_stdin, False
+
in_size = os.path.getsize(to_bytes(in_path, errors='surrogate_or_strict'))
offset = 0
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
@@ -806,40 +773,16 @@ class Connection(ConnectionBase):
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound('file or module does not exist: "%s"' % to_native(in_path))
- script_template = u"""
- begin {{
- $path = '{0}'
-
- $DebugPreference = "Continue"
- $ErrorActionPreference = "Stop"
- Set-StrictMode -Version 2
-
- $fd = [System.IO.File]::Create($path)
-
- $sha1 = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()
-
- $bytes = @() #initialize for empty file case
- }}
- process {{
- $bytes = [System.Convert]::FromBase64String($input)
- $sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null
- $fd.Write($bytes, 0, $bytes.Length)
- }}
- end {{
- $sha1.TransformFinalBlock($bytes, 0, 0) | Out-Null
-
- $hash = [System.BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant()
-
- $fd.Close()
-
- Write-Output "{{""sha1"":""$hash""}}"
- }}
- """
-
- script = script_template.format(self._shell._escape(out_path))
- cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False, preserve_rc=False)
+ copy_script, copy_script_stdin = _bootstrap_powershell_script('winrm_put_file.ps1', {
+ 'Path': out_path,
+ }, has_input=True)
+ cmd_parts = self._shell._encode_script(copy_script, as_list=True, strict_mode=False, preserve_rc=False)
- status_code, b_stdout, b_stderr = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._put_file_stdin_iterator(in_path, out_path))
+ status_code, b_stdout, b_stderr = self._winrm_exec(
+ cmd_parts[0],
+ cmd_parts[1:],
+ stdin_iterator=self._put_file_stdin_iterator(copy_script_stdin, in_path, out_path),
+ )
stdout = to_text(b_stdout)
stderr = to_text(b_stderr)
@@ -873,41 +816,19 @@ class Connection(ConnectionBase):
offset = 0
while True:
try:
- script = """
- $path = '%(path)s'
- If (Test-Path -LiteralPath $path -PathType Leaf)
- {
- $buffer_size = %(buffer_size)d
- $offset = %(offset)d
-
- $stream = New-Object -TypeName IO.FileStream($path, [IO.FileMode]::Open, [IO.FileAccess]::Read, [IO.FileShare]::ReadWrite)
- $stream.Seek($offset, [System.IO.SeekOrigin]::Begin) > $null
- $buffer = New-Object -TypeName byte[] $buffer_size
- $bytes_read = $stream.Read($buffer, 0, $buffer_size)
- if ($bytes_read -gt 0) {
- $bytes = $buffer[0..($bytes_read - 1)]
- [System.Convert]::ToBase64String($bytes)
- }
- $stream.Close() > $null
- }
- ElseIf (Test-Path -LiteralPath $path -PathType Container)
- {
- Write-Host "[DIR]";
- }
- Else
- {
- Write-Error "$path does not exist";
- Exit 1;
- }
- """ % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset)
+ script, in_data = _bootstrap_powershell_script('winrm_fetch_file.ps1', {
+ 'Path': in_path,
+ 'BufferSize': buffer_size,
+ 'Offset': offset,
+ })
display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset), host=self._winrm_host)
cmd_parts = self._shell._encode_script(script, as_list=True, preserve_rc=False)
- status_code, b_stdout, b_stderr = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
+ status_code, b_stdout, b_stderr = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._wrapper_payload_stream(in_data))
stdout = to_text(b_stdout)
stderr = to_text(b_stderr)
if status_code != 0:
- raise IOError(stderr)
+ raise OSError(stderr)
if stdout.strip() == '[DIR]':
data = None
else:
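
The refactored put-file path above chains the bootstrap payload and the file content
through one stdin stream. A minimal standalone sketch of that chunking pattern, assuming
the same (bytes, is_last) tuple shape the plugin's _winrm_exec consumes (names here are
illustrative, not the plugin's API):

import base64
import os
import typing as t

def chunked_stdin(initial_stdin: bytes, in_path: str, buffer_size: int = 250000) -> t.Iterable[tuple[bytes, bool]]:
    # the bootstrap script payload is sent first, before any file data
    yield initial_stdin, False
    in_size = os.path.getsize(in_path)
    offset = 0
    with open(in_path, 'rb') as in_file:
        for chunk in iter(lambda: in_file.read(buffer_size), b''):
            offset += len(chunk)
            # base64-encode each chunk and flag the last one so the caller can
            # close stdin (empty-file handling omitted for brevity)
            yield base64.b64encode(chunk) + b'\r\n', offset == in_size
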
diff --git a/lib/ansible/plugins/doc_fragments/action_core.py b/lib/ansible/plugins/doc_fragments/action_core.py
index 9be51d70062..e30061eeda6 100644
--- a/lib/ansible/plugins/doc_fragments/action_core.py
+++ b/lib/ansible/plugins/doc_fragments/action_core.py
@@ -29,7 +29,7 @@ attributes:
platforms: all
until:
description: Denotes if this action obeys until/retry/poll keywords
- support: full
+ support: none
tags:
description: Allows for the 'tags' keyword to control the selection of this action for execution
support: full
diff --git a/lib/ansible/plugins/doc_fragments/constructed.py b/lib/ansible/plugins/doc_fragments/constructed.py
index 00f8bae414b..4e10afce100 100644
--- a/lib/ansible/plugins/doc_fragments/constructed.py
+++ b/lib/ansible/plugins/doc_fragments/constructed.py
@@ -47,13 +47,13 @@ options:
- The key from input dictionary used to generate groups.
default_value:
description:
- - The default value when the host variable's value is an empty string.
+ - The default value when the host variable's value is V(None) or an empty string.
- This option is mutually exclusive with O(keyed_groups[].trailing_separator).
type: str
version_added: '2.12'
trailing_separator:
description:
- - Set this option to V(false) to omit the O(keyed_groups[].separator) after the host variable when the value is an empty string.
+ - Set this option to V(false) to omit the O(keyed_groups[].separator) after the host variable when the value is V(None) or an empty string.
- This option is mutually exclusive with O(keyed_groups[].default_value).
type: bool
default: true
@@ -79,4 +79,8 @@ options:
type: boolean
default: True
version_added: '2.11'
+notes:
+  - Inventories are not finalized at this stage, so the auto-populated C(all) and C(ungrouped) groups will
+ only reflect what previous inventory sources explicitly added to them.
+ - Runtime 'magic variables' are not available during inventory construction. For example, C(groups) and C(hostvars) do not exist yet.
"""
diff --git a/lib/ansible/plugins/doc_fragments/template_common.py b/lib/ansible/plugins/doc_fragments/template_common.py
index 19fcccdae9c..eb904f63f51 100644
--- a/lib/ansible/plugins/doc_fragments/template_common.py
+++ b/lib/ansible/plugins/doc_fragments/template_common.py
@@ -11,9 +11,9 @@ class ModuleDocFragment(object):
# Standard template documentation fragment, use by template and win_template.
DOCUMENTATION = r"""
description:
-- Templates are processed by the L(Jinja2 templating language,http://jinja.pocoo.org/docs/).
+- Templates are processed by the L(Jinja2 templating language,https://jinja.palletsprojects.com/en/stable/).
- Documentation on the template formatting can be found in the
- L(Template Designer Documentation,http://jinja.pocoo.org/docs/templates/).
+ L(Template Designer Documentation,https://jinja.palletsprojects.com/en/stable/templates/).
- Additional variables listed below can be used in templates.
- C(ansible_managed) (configurable via the C(defaults) section of C(ansible.cfg)) contains a string which can be used to
describe the template name, host, modification time of the template file and the owner uid.
diff --git a/lib/ansible/plugins/doc_fragments/url.py b/lib/ansible/plugins/doc_fragments/url.py
index bddc33db988..942558f166d 100644
--- a/lib/ansible/plugins/doc_fragments/url.py
+++ b/lib/ansible/plugins/doc_fragments/url.py
@@ -72,3 +72,19 @@ options:
default: no
version_added: '2.11'
"""
+
+ URL_REDIRECT = r'''
+options:
+ follow_redirects:
+ description:
+ - Whether or not the URI module should follow redirects.
+ type: str
+ default: safe
+ choices:
+ all: Will follow all redirects.
+ none: Will not follow any redirects.
+ safe: Only redirects doing GET or HEAD requests will be followed.
+        urllib2: Defer to urllib2 behavior (as of this writing, this follows HTTP redirects).
+ 'no': (DEPRECATED, removed in 2.22) alias of V(none).
+ 'yes': (DEPRECATED, removed in 2.22) alias of V(all).
+'''
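
The deprecated V(yes)/V(no) choices above are plain aliases of V(all)/V(none). A
hypothetical normalization helper, not part of this fragment, just to make the
aliasing concrete:

# hypothetical sketch; the real handling lives in the code consuming this fragment
_REDIRECT_ALIASES = {'yes': 'all', 'no': 'none'}  # deprecated aliases, removed in 2.22

def normalize_follow_redirects(value: str) -> str:
    value = _REDIRECT_ALIASES.get(value, value)
    if value not in ('all', 'none', 'safe', 'urllib2'):
        raise ValueError(f'invalid follow_redirects value: {value!r}')
    return value
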
diff --git a/lib/ansible/plugins/filter/__init__.py b/lib/ansible/plugins/filter/__init__.py
index 003711f8b58..c28f8056c9f 100644
--- a/lib/ansible/plugins/filter/__init__.py
+++ b/lib/ansible/plugins/filter/__init__.py
@@ -3,11 +3,15 @@
from __future__ import annotations
-from ansible import constants as C
+import typing as t
+
from ansible.plugins import AnsibleJinja2Plugin
class AnsibleJinja2Filter(AnsibleJinja2Plugin):
+ @property
+ def plugin_type(self) -> str:
+ return "filter"
- def _no_options(self, *args, **kwargs):
+ def _no_options(self, *args, **kwargs) -> t.NoReturn:
raise NotImplementedError("Jinja2 filter plugins do not support option functions, they use direct arguments instead.")
diff --git a/lib/ansible/plugins/filter/b64decode.yml b/lib/ansible/plugins/filter/b64decode.yml
index 339de3a724d..08ff396b309 100644
--- a/lib/ansible/plugins/filter/b64decode.yml
+++ b/lib/ansible/plugins/filter/b64decode.yml
@@ -15,6 +15,20 @@ DOCUMENTATION:
description: A Base64 string to decode.
type: string
required: true
+ encoding:
+ description:
+ - The encoding to use to transform from a text string to a byte string.
+ - Defaults to using 'utf-8'.
+ type: string
+ required: false
+ urlsafe:
+ description:
+ - Decode string using URL- and filesystem-safe alphabet,
+        which substitutes I(-) for I(+) and I(_) for I(/) in the Base64 alphabet.
+ type: bool
+ default: false
+ required: false
+ version_added: 2.19
EXAMPLES: |
# Base64 decode a string
@@ -23,6 +37,14 @@ EXAMPLES: |
# Base64 decode the content of 'b64stuff' variable
stuff: "{{ b64stuff | b64decode }}"
+ # Base64 decode the content with different encoding
+ stuff: "{{ 'QQBuAHMAaQBiAGwAZQAgAC0AIABPMIkwaDB/MAoA' | b64decode(encoding='utf-16-le') }}"
+ # => 'Ansible - くらとみ\n'
+
+ # URL-Safe Base64 decoding
+ stuff: "{{ 'aHR0cHM6Ly93d3cucHl0aG9uLm9yZy9leGFtcGxlLTE=' | b64decode(urlsafe=True) }}"
+ # => 'https://www.python.org/example-1'
+
RETURN:
_value:
description: The contents of the Base64 encoded string.
diff --git a/lib/ansible/plugins/filter/b64encode.yml b/lib/ansible/plugins/filter/b64encode.yml
index ed32bfb8066..6e1d5d0cf89 100644
--- a/lib/ansible/plugins/filter/b64encode.yml
+++ b/lib/ansible/plugins/filter/b64encode.yml
@@ -11,6 +11,20 @@ DOCUMENTATION:
description: A string to encode.
type: string
required: true
+ encoding:
+ description:
+ - The encoding to use to transform from a text string to a byte string.
+ - Defaults to using 'utf-8'.
+ type: string
+ required: false
+ urlsafe:
+ description:
+ - Encode string using URL- and filesystem-safe alphabet,
+        which substitutes I(-) for I(+) and I(_) for I(/) in the Base64 alphabet.
+ type: bool
+ default: false
+ required: false
+ version_added: 2.19
EXAMPLES: |
# Base64 encode a string
@@ -19,6 +33,14 @@ EXAMPLES: |
# Base64 encode the content of 'stuff' variable
b64stuff: "{{ stuff | b64encode }}"
+ # Base64 encode the content with different encoding
+ b64stuff: "{{ 'Ansible - くらとみ\n' | b64encode(encoding='utf-16-le') }}"
+ # => 'QQBuAHMAaQBiAGwAZQAgAC0AIABPMIkwaDB/MAoA'
+
+ # URL-safe Base64 encoding
+ b64stuff: "{{ 'https://www.python.org/example-1' | b64encode(urlsafe=True) }}"
+ # => 'aHR0cHM6Ly93d3cucHl0aG9uLm9yZy9leGFtcGxlLTE='
+
RETURN:
_value:
description: A Base64 encoded string.
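
Both filters delegate to the standard library, so the documented examples can be
verified with plain Python:

import base64

text = 'https://www.python.org/example-1'
# the urlsafe alphabet substitutes '-' for '+' and '_' for '/'
encoded = base64.urlsafe_b64encode(text.encode('utf-8')).decode()
assert encoded == 'aHR0cHM6Ly93d3cucHl0aG9uLm9yZy9leGFtcGxlLTE='
assert base64.urlsafe_b64decode(encoded).decode('utf-8') == text

# a non-default text encoding round-trips the same way
payload = base64.b64encode('Ansible - くらとみ\n'.encode('utf-16-le')).decode()
assert base64.b64decode(payload).decode('utf-16-le') == 'Ansible - くらとみ\n'
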
diff --git a/lib/ansible/plugins/filter/bool.yml b/lib/ansible/plugins/filter/bool.yml
index beb8b8ddb1f..dcf21077af5 100644
--- a/lib/ansible/plugins/filter/bool.yml
+++ b/lib/ansible/plugins/filter/bool.yml
@@ -1,13 +1,20 @@
DOCUMENTATION:
name: bool
version_added: "historical"
- short_description: cast into a boolean
+ short_description: coerce some well-known truthy/falsy values to a boolean
description:
- - Attempt to cast the input into a boolean (V(True) or V(False)) value.
+ - Attempt to convert the input value into a boolean (V(True) or V(False)) from a common set of well-known values.
+ - Valid true values are (V(True), 'yes', 'on', '1', 'true', 1).
+ - Valid false values are (V(False), 'no', 'off', '0', 'false', 0).
+ #- An error will result if an invalid value is supplied.
+ - A deprecation warning will result if an invalid value is supplied.
+ - For more permissive boolean conversion, consider the P(ansible.builtin.truthy#test) or P(ansible.builtin.falsy#test) tests.
+ - String comparisons are case-insensitive.
+
positional: _input
options:
_input:
- description: Data to cast.
+ description: Data to convert.
type: raw
required: true
@@ -24,5 +31,5 @@ EXAMPLES: |
RETURN:
_value:
- description: The boolean resulting of casting the input expression into a V(True) or V(False) value.
+ description: The boolean result of coercing the input expression to a V(True) or V(False) value.
type: bool
diff --git a/lib/ansible/plugins/filter/core.py b/lib/ansible/plugins/filter/core.py
index e0deea7e800..f9f9da73a00 100644
--- a/lib/ansible/plugins/filter/core.py
+++ b/lib/ansible/plugins/filter/core.py
@@ -4,6 +4,7 @@
from __future__ import annotations
import base64
+import functools
import glob
import hashlib
import json
@@ -11,26 +12,30 @@ import ntpath
import os.path
import re
import shlex
-import sys
import time
import uuid
import yaml
import datetime
+import typing as t
from collections.abc import Mapping
from functools import partial
from random import Random, SystemRandom, shuffle
-from jinja2.filters import pass_environment
+from jinja2.filters import do_map, do_select, do_selectattr, do_reject, do_rejectattr, pass_environment, sync_do_groupby
+from jinja2.environment import Environment
-from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleFilterTypeError
-from ansible.module_utils.six import string_types, integer_types, reraise, text_type
+from ansible._internal._templating import _lazy_containers
+from ansible.errors import AnsibleFilterError, AnsibleTypeError, AnsibleTemplatePluginError
+from ansible.module_utils.datatag import native_type_name
+from ansible.module_utils.common.json import get_encoder, get_decoder
+from ansible.module_utils.six import string_types, integer_types, text_type
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.common.collections import is_sequence
-from ansible.module_utils.common.yaml import yaml_load, yaml_load_all
-from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.parsing.yaml.dumper import AnsibleDumper
-from ansible.template import recursive_check_defined
+from ansible.template import accept_args_markers, accept_lazy_markers
+from ansible._internal._templating._jinja_common import MarkerError, UndefinedMarker, validate_arg_type
+from ansible._internal._yaml import _loader as _yaml_loader
from ansible.utils.display import Display
from ansible.utils.encrypt import do_encrypt, PASSLIB_AVAILABLE
from ansible.utils.hashing import md5s, checksum_s
@@ -42,53 +47,84 @@ display = Display()
UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
-def to_yaml(a, *args, **kw):
- """Make verbose, human-readable yaml"""
- default_flow_style = kw.pop('default_flow_style', None)
- try:
- transformed = yaml.dump(a, Dumper=AnsibleDumper, allow_unicode=True, default_flow_style=default_flow_style, **kw)
- except Exception as e:
- raise AnsibleFilterError("to_yaml - %s" % to_native(e), orig_exc=e)
- return to_text(transformed)
+@accept_lazy_markers
+def to_yaml(a, *_args, default_flow_style: bool | None = None, **kwargs) -> str:
+ """Serialize input as terse flow-style YAML."""
+ return yaml.dump(a, Dumper=AnsibleDumper, allow_unicode=True, default_flow_style=default_flow_style, **kwargs)
-def to_nice_yaml(a, indent=4, *args, **kw):
- """Make verbose, human-readable yaml"""
- try:
- transformed = yaml.dump(a, Dumper=AnsibleDumper, indent=indent, allow_unicode=True, default_flow_style=False, **kw)
- except Exception as e:
- raise AnsibleFilterError("to_nice_yaml - %s" % to_native(e), orig_exc=e)
- return to_text(transformed)
+@accept_lazy_markers
+def to_nice_yaml(a, indent=4, *_args, default_flow_style=False, **kwargs) -> str:
+ """Serialize input as verbose multi-line YAML."""
+ return to_yaml(a, indent=indent, default_flow_style=default_flow_style, **kwargs)
+
+
+def from_json(a, profile: str | None = None, **kwargs) -> t.Any:
+ """Deserialize JSON with an optional decoder profile."""
+ cls = get_decoder(profile or "tagless")
+
+ return json.loads(a, cls=cls, **kwargs)
+
+
+def to_json(a, profile: str | None = None, vault_to_text: t.Any = ..., preprocess_unsafe: t.Any = ..., **kwargs) -> str:
+ """Serialize as JSON with an optional encoder profile."""
+ if profile and vault_to_text is not ...:
+ raise ValueError("Only one of `vault_to_text` or `profile` can be specified.")
-def to_json(a, *args, **kw):
- """ Convert the value to JSON """
+ if profile and preprocess_unsafe is not ...:
+ raise ValueError("Only one of `preprocess_unsafe` or `profile` can be specified.")
- # defaults for filters
- if 'vault_to_text' not in kw:
- kw['vault_to_text'] = True
- if 'preprocess_unsafe' not in kw:
- kw['preprocess_unsafe'] = False
+ # deprecated: description='deprecate vault_to_text' core_version='2.23'
+ # deprecated: description='deprecate preprocess_unsafe' core_version='2.23'
- return json.dumps(a, cls=AnsibleJSONEncoder, *args, **kw)
+ cls = get_encoder(profile or "tagless")
+ return json.dumps(a, cls=cls, **kwargs)
-def to_nice_json(a, indent=4, sort_keys=True, *args, **kw):
- """Make verbose, human-readable JSON"""
+
+def to_nice_json(a, indent=4, sort_keys=True, **kwargs):
+ """Make verbose, human-readable JSON."""
# TODO separators can be potentially exposed to the user as well
- kw.pop('separators', None)
- return to_json(a, indent=indent, sort_keys=sort_keys, separators=(',', ': '), *args, **kw)
+ kwargs.pop('separators', None)
+ return to_json(a, indent=indent, sort_keys=sort_keys, separators=(',', ': '), **kwargs)
+
+
+# CAUTION: Do not put non-string values here since they can have unwanted logical equality, such as 1.0 (equal to 1 and True) or 0.0 (equal to 0 and False).
+_valid_bool_true = {'yes', 'on', 'true', '1'}
+_valid_bool_false = {'no', 'off', 'false', '0'}
+
+
+def to_bool(value: object) -> bool:
+ """Convert well-known input values to a boolean value."""
+ value_to_check: object
+
+ if isinstance(value, str):
+ value_to_check = value.lower() # accept mixed case variants
+ elif isinstance(value, int): # bool is also an int
+ value_to_check = str(value).lower() # accept int (0, 1) and bool (True, False) -- not just string versions
+ else:
+ value_to_check = value
+
+ try:
+ if value_to_check in _valid_bool_true:
+ return True
+
+ if value_to_check in _valid_bool_false:
+ return False
+ # if we're still here, the value is unsupported- always fire a deprecation warning
+ result = value_to_check == 1 # backwards compatibility with the old code which checked: value in ('yes', 'on', '1', 'true', 1)
+ except TypeError:
+ result = False
-def to_bool(a):
- """ return a bool for the arg """
- if a is None or isinstance(a, bool):
- return a
- if isinstance(a, string_types):
- a = a.lower()
- if a in ('yes', 'on', '1', 'true', 1):
- return True
- return False
+ # NB: update the doc string to reflect reality once this fallback is removed
+ display.deprecated(
+ msg=f'The `bool` filter coerced invalid value {value!r} ({native_type_name(value)}) to {result!r}.',
+ version='2.23',
+ )
+
+ return result
def to_datetime(string, format="%Y-%m-%d %H:%M:%S"):
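
A few concrete outcomes of the to_bool coercion above; anything outside the two value
sets falls back to a `value == 1` comparison and emits a deprecation warning until the
fallback is removed in 2.23:

assert to_bool('Yes') is True    # mixed case is accepted
assert to_bool(0) is False       # ints are matched via their string form
assert to_bool(True) is True
to_bool('enabled')               # returns False, plus a deprecation warning
to_bool(1.0)                     # returns True (1.0 == 1), plus a deprecation warning
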
@@ -215,20 +251,24 @@ def regex_escape(string, re_type='python'):
def from_yaml(data):
- if isinstance(data, string_types):
- # The ``text_type`` call here strips any custom
- # string wrapper class, so that CSafeLoader can
- # read the data
- return yaml_load(text_type(to_text(data, errors='surrogate_or_strict')))
+ if data is None:
+ return None
+
+ if isinstance(data, str):
+ return yaml.load(data, Loader=_yaml_loader.AnsibleInstrumentedLoader) # type: ignore[arg-type]
+
+ display.deprecated(f"The from_yaml filter ignored non-string input of type {native_type_name(data)!r}.", version='2.23', obj=data)
return data
def from_yaml_all(data):
- if isinstance(data, string_types):
- # The ``text_type`` call here strips any custom
- # string wrapper class, so that CSafeLoader can
- # read the data
- return yaml_load_all(text_type(to_text(data, errors='surrogate_or_strict')))
+ if data is None:
+ return [] # backward compatibility; ensure consistent result between classic/native Jinja for None/empty string input
+
+ if isinstance(data, str):
+ return yaml.load_all(data, Loader=_yaml_loader.AnsibleInstrumentedLoader) # type: ignore[arg-type]
+
+ display.deprecated(f"The from_yaml_all filter ignored non-string input of type {native_type_name(data)!r}.", version='2.23', obj=data)
return data
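
Equivalent behavior with stock PyYAML, using safe_load as a stand-in for the internal
AnsibleInstrumentedLoader:

import yaml

def from_yaml_sketch(data):
    if data is None:
        return None
    if isinstance(data, str):
        return yaml.safe_load(data)  # stand-in for the instrumented loader
    return data  # the real filter also emits a deprecation warning here

assert from_yaml_sketch('a: 1') == {'a': 1}
assert from_yaml_sketch(None) is None
assert from_yaml_sketch({'already': 'parsed'}) == {'already': 'parsed'}
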
@@ -286,26 +326,10 @@ def get_encrypted_password(password, hashtype='sha512', salt=None, salt_size=Non
hashtype = passlib_mapping.get(hashtype, hashtype)
- unknown_passlib_hashtype = False
if PASSLIB_AVAILABLE and hashtype not in passlib_mapping and hashtype not in passlib_mapping.values():
- unknown_passlib_hashtype = True
- display.deprecated(
- f"Checking for unsupported password_hash passlib hashtype '{hashtype}'. "
- "This will be an error in the future as all supported hashtypes must be documented.",
- version='2.19'
- )
+ raise AnsibleFilterError(f"{hashtype} is not in the list of supported passlib algorithms: {', '.join(passlib_mapping)}")
- try:
- return do_encrypt(password, hashtype, salt=salt, salt_size=salt_size, rounds=rounds, ident=ident)
- except AnsibleError as e:
- reraise(AnsibleFilterError, AnsibleFilterError(to_native(e), orig_exc=e), sys.exc_info()[2])
- except Exception as e:
- if unknown_passlib_hashtype:
- # This can occur if passlib.hash has the hashtype attribute, but it has a different signature than the valid choices.
- # In 2.19 this will replace the deprecation warning above and the extra exception handling can be deleted.
- choices = ', '.join(passlib_mapping)
- raise AnsibleFilterError(f"{hashtype} is not in the list of supported passlib algorithms: {choices}") from e
- raise
+ return do_encrypt(password, hashtype, salt=salt, salt_size=salt_size, rounds=rounds, ident=ident)
def to_uuid(string, namespace=UUID_NAMESPACE_ANSIBLE):
@@ -319,19 +343,21 @@ def to_uuid(string, namespace=UUID_NAMESPACE_ANSIBLE):
return to_text(uuid.uuid5(uuid_namespace, to_native(string, errors='surrogate_or_strict')))
-def mandatory(a, msg=None):
+@accept_args_markers
+def mandatory(a: object, msg: str | None = None) -> object:
"""Make a variable mandatory."""
- from jinja2.runtime import Undefined
+ # DTFIX-FUTURE: deprecate this filter; there are much better ways via undef, etc...
+ # also remember to remove unit test checking for _undefined_name
+ if isinstance(a, UndefinedMarker):
+ if msg is not None:
+ raise AnsibleFilterError(to_text(msg))
- if isinstance(a, Undefined):
if a._undefined_name is not None:
- name = "'%s' " % to_text(a._undefined_name)
+ name = f'{to_text(a._undefined_name)!r} '
else:
name = ''
- if msg is not None:
- raise AnsibleFilterError(to_native(msg))
- raise AnsibleFilterError("Mandatory variable %s not defined." % name)
+ raise AnsibleFilterError(f"Mandatory variable {name}not defined.")
return a
@@ -345,9 +371,6 @@ def combine(*terms, **kwargs):
# allow the user to do `[dict1, dict2, ...] | combine`
dictionaries = flatten(terms, levels=1)
- # recursively check that every elements are defined (for jinja2)
- recursive_check_defined(dictionaries)
-
if not dictionaries:
return {}
@@ -393,6 +416,13 @@ def comment(text, style='plain', **kw):
}
}
+ if style not in comment_styles:
+ raise AnsibleTemplatePluginError(
+ message=f"Invalid style {style!r}.",
+ help_text=f"Available styles: {', '.join(comment_styles)}",
+ obj=style,
+ )
+
# Pointer to the right comment type
style_params = comment_styles[style]
@@ -453,7 +483,7 @@ def comment(text, style='plain', **kw):
@pass_environment
-def extract(environment, item, container, morekeys=None):
+def extract(environment: Environment, item, container, morekeys=None):
if morekeys is None:
keys = [item]
elif isinstance(morekeys, list):
@@ -462,18 +492,28 @@ def extract(environment, item, container, morekeys=None):
keys = [item, morekeys]
value = container
+
for key in keys:
- value = environment.getitem(value, key)
+ try:
+ value = environment.getitem(value, key)
+ except MarkerError as ex:
+ value = ex.source
return value
-def b64encode(string, encoding='utf-8'):
- return to_text(base64.b64encode(to_bytes(string, encoding=encoding, errors='surrogate_or_strict')))
+def b64encode(string, encoding='utf-8', urlsafe=False):
+ func = base64.b64encode
+ if urlsafe:
+ func = base64.urlsafe_b64encode
+ return to_text(func(to_bytes(string, encoding=encoding, errors='surrogate_or_strict')))
-def b64decode(string, encoding='utf-8'):
- return to_text(base64.b64decode(to_bytes(string, errors='surrogate_or_strict')), encoding=encoding)
+def b64decode(string, encoding='utf-8', urlsafe=False):
+ func = base64.b64decode
+ if urlsafe:
+ func = base64.urlsafe_b64decode
+ return to_text(func(to_bytes(string, errors='surrogate_or_strict')), encoding=encoding)
def flatten(mylist, levels=None, skip_nulls=True):
@@ -518,7 +558,7 @@ def subelements(obj, subelements, skip_missing=False):
elif isinstance(subelements, string_types):
subelement_list = subelements.split('.')
else:
- raise AnsibleFilterTypeError('subelements must be a list or a string')
+ raise AnsibleTypeError('subelements must be a list or a string')
results = []
@@ -532,10 +572,10 @@ def subelements(obj, subelements, skip_missing=False):
values = []
break
raise AnsibleFilterError("could not find %r key in iterated item %r" % (subelement, values))
- except TypeError:
- raise AnsibleFilterTypeError("the key %s should point to a dictionary, got '%s'" % (subelement, values))
+ except TypeError as ex:
+ raise AnsibleTypeError("the key %s should point to a dictionary, got '%s'" % (subelement, values)) from ex
if not isinstance(values, list):
- raise AnsibleFilterTypeError("the key %r should point to a list, got %r" % (subelement, values))
+ raise AnsibleTypeError("the key %r should point to a list, got %r" % (subelement, values))
for value in values:
results.append((element, value))
@@ -548,7 +588,7 @@ def dict_to_list_of_dict_key_value_elements(mydict, key_name='key', value_name='
with each having a 'key' and 'value' keys that correspond to the keys and values of the original """
if not isinstance(mydict, Mapping):
- raise AnsibleFilterTypeError("dict2items requires a dictionary, got %s instead." % type(mydict))
+ raise AnsibleTypeError("dict2items requires a dictionary, got %s instead." % type(mydict))
ret = []
for key in mydict:
@@ -561,17 +601,17 @@ def list_of_dict_key_value_elements_to_dict(mylist, key_name='key', value_name='
effectively as the reverse of dict2items """
if not is_sequence(mylist):
- raise AnsibleFilterTypeError("items2dict requires a list, got %s instead." % type(mylist))
+ raise AnsibleTypeError("items2dict requires a list, got %s instead." % type(mylist))
try:
return dict((item[key_name], item[value_name]) for item in mylist)
except KeyError:
- raise AnsibleFilterTypeError(
+ raise AnsibleTypeError(
"items2dict requires each dictionary in the list to contain the keys '%s' and '%s', got %s instead."
% (key_name, value_name, mylist)
)
except TypeError:
- raise AnsibleFilterTypeError("items2dict requires a list of dictionaries, got %s instead." % mylist)
+ raise AnsibleTypeError("items2dict requires a list of dictionaries, got %s instead." % mylist)
def path_join(paths):
@@ -581,7 +621,7 @@ def path_join(paths):
return os.path.join(paths)
if is_sequence(paths):
return os.path.join(*paths)
- raise AnsibleFilterTypeError("|path_join expects string or sequence, got %s instead." % type(paths))
+ raise AnsibleTypeError("|path_join expects string or sequence, got %s instead." % type(paths))
def commonpath(paths):
@@ -594,11 +634,90 @@ def commonpath(paths):
:rtype: str
"""
if not is_sequence(paths):
- raise AnsibleFilterTypeError("|commonpath expects sequence, got %s instead." % type(paths))
+ raise AnsibleTypeError("|commonpath expects sequence, got %s instead." % type(paths))
return os.path.commonpath(paths)
+class GroupTuple(t.NamedTuple):
+ """
+ Custom named tuple for the groupby filter with a public interface; silently ignored by unknown type checks.
+ This matches the internal implementation of the _GroupTuple returned by Jinja's built-in groupby filter.
+ """
+
+ grouper: t.Any
+ list: list[t.Any]
+
+ def __repr__(self) -> str:
+ return tuple.__repr__(self)
+
+
+_lazy_containers.register_known_types(GroupTuple)
+
+
+@pass_environment
+def _cleansed_groupby(*args, **kwargs):
+ res = sync_do_groupby(*args, **kwargs)
+ res = [GroupTuple(grouper=g.grouper, list=g.list) for g in res]
+
+ return res
+
+# DTFIX-FUTURE: make these dumb wrappers more dynamic
+
+
+@accept_args_markers
+def ansible_default(
+ value: t.Any,
+ default_value: t.Any = '',
+ boolean: bool = False,
+) -> t.Any:
+    """Updated `default` filter that only coalesces classic undefined objects; other Undefined-derived types (e.g., ErrorMarker) pass through."""
+ validate_arg_type('boolean', boolean, bool)
+
+ if isinstance(value, UndefinedMarker):
+ return default_value
+
+ if boolean and not value:
+ return default_value
+
+ return value
+
+
+@accept_lazy_markers
+@functools.wraps(do_map)
+def wrapped_map(*args, **kwargs) -> t.Any:
+ return do_map(*args, **kwargs)
+
+
+@accept_lazy_markers
+@functools.wraps(do_select)
+def wrapped_select(*args, **kwargs) -> t.Any:
+ return do_select(*args, **kwargs)
+
+
+@accept_lazy_markers
+@functools.wraps(do_selectattr)
+def wrapped_selectattr(*args, **kwargs) -> t.Any:
+ return do_selectattr(*args, **kwargs)
+
+
+@accept_lazy_markers
+@functools.wraps(do_reject)
+def wrapped_reject(*args, **kwargs) -> t.Any:
+ return do_reject(*args, **kwargs)
+
+
+@accept_lazy_markers
+@functools.wraps(do_rejectattr)
+def wrapped_rejectattr(*args, **kwargs) -> t.Any:
+ return do_rejectattr(*args, **kwargs)
+
+
+@accept_args_markers
+def type_debug(obj: object) -> str:
+ return native_type_name(obj)
+
+
class FilterModule(object):
""" Ansible core jinja2 filters """
@@ -614,7 +733,7 @@ class FilterModule(object):
# json
'to_json': to_json,
'to_nice_json': to_nice_json,
- 'from_json': json.loads,
+ 'from_json': from_json,
# yaml
'to_yaml': to_yaml,
@@ -681,7 +800,7 @@ class FilterModule(object):
'comment': comment,
# debug
- 'type_debug': lambda o: o.__class__.__name__,
+ 'type_debug': type_debug,
# Data structures
'combine': combine,
@@ -691,4 +810,15 @@ class FilterModule(object):
'items2dict': list_of_dict_key_value_elements_to_dict,
'subelements': subelements,
'split': partial(unicode_wrap, text_type.split),
+ # FDI038 - replace this with a standard type compat shim
+ 'groupby': _cleansed_groupby,
+
+ # Jinja builtins that need special arg handling
+ 'd': ansible_default, # replaces the implementation instead of wrapping it
+ 'default': ansible_default, # replaces the implementation instead of wrapping it
+ 'map': wrapped_map,
+ 'select': wrapped_select,
+ 'selectattr': wrapped_selectattr,
+ 'reject': wrapped_reject,
+ 'rejectattr': wrapped_rejectattr,
}
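
In isolation, the replacement `default` implementation above behaves as follows
(UndefinedMarker is internal, so only defined inputs are shown):

assert ansible_default('', 'fallback') == ''                     # empty but defined
assert ansible_default('', 'fallback', boolean=True) == 'fallback'
assert ansible_default(0, 'fallback', boolean=True) == 'fallback'
assert ansible_default('value', 'fallback') == 'value'
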
diff --git a/lib/ansible/plugins/filter/encryption.py b/lib/ansible/plugins/filter/encryption.py
index 580e07bea20..78c50422c1c 100644
--- a/lib/ansible/plugins/filter/encryption.py
+++ b/lib/ansible/plugins/filter/encryption.py
@@ -2,80 +2,88 @@
from __future__ import annotations
-from jinja2.runtime import Undefined
-from jinja2.exceptions import UndefinedError
-
-from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
+from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native, to_bytes
-from ansible.module_utils.six import string_types, binary_type
-from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
-from ansible.parsing.vault import is_encrypted, VaultSecret, VaultLib
+from ansible._internal._templating._jinja_common import VaultExceptionMarker
+from ansible._internal._datatag._tags import VaultedValue
+from ansible.parsing.vault import is_encrypted, VaultSecret, VaultLib, VaultHelper
+from ansible import template as _template
from ansible.utils.display import Display
display = Display()
def do_vault(data, secret, salt=None, vault_id='filter_default', wrap_object=False, vaultid=None):
+ if not isinstance(secret, (str, bytes)):
+ raise TypeError(f"Secret passed is required to be a string, instead we got {type(secret)}.")
- if not isinstance(secret, (string_types, binary_type, Undefined)):
- raise AnsibleFilterTypeError("Secret passed is required to be a string, instead we got: %s" % type(secret))
-
- if not isinstance(data, (string_types, binary_type, Undefined)):
- raise AnsibleFilterTypeError("Can only vault strings, instead we got: %s" % type(data))
+ if not isinstance(data, (str, bytes)):
+ raise TypeError(f"Can only vault strings, instead we got {type(data)}.")
if vaultid is not None:
- display.deprecated("Use of undocumented 'vaultid', use 'vault_id' instead", version='2.20')
+ display.deprecated(
+ msg="Use of undocumented `vaultid`.",
+ version="2.20",
+ help_text="Use `vault_id` instead.",
+ )
+
if vault_id == 'filter_default':
vault_id = vaultid
else:
display.warning("Ignoring vaultid as vault_id is already set.")
- vault = ''
vs = VaultSecret(to_bytes(secret))
vl = VaultLib()
try:
vault = vl.encrypt(to_bytes(data), vs, vault_id, salt)
- except UndefinedError:
- raise
- except Exception as e:
- raise AnsibleFilterError("Unable to encrypt: %s" % to_native(e), orig_exc=e)
+ except Exception as ex:
+ raise AnsibleError("Unable to encrypt.") from ex
if wrap_object:
- vault = AnsibleVaultEncryptedUnicode(vault)
+ vault = VaultedValue(ciphertext=str(vault)).tag(secret)
else:
vault = to_native(vault)
return vault
+@_template.accept_args_markers
def do_unvault(vault, secret, vault_id='filter_default', vaultid=None):
+ if isinstance(vault, VaultExceptionMarker):
+ vault = vault._disarm()
+
+ if (first_marker := _template.get_first_marker_arg((vault, secret, vault_id, vaultid), {})) is not None:
+ return first_marker
- if not isinstance(secret, (string_types, binary_type, Undefined)):
- raise AnsibleFilterTypeError("Secret passed is required to be as string, instead we got: %s" % type(secret))
+ if not isinstance(secret, (str, bytes)):
+        raise TypeError(f"Secret passed is required to be a string, instead we got {type(secret)}.")
- if not isinstance(vault, (string_types, binary_type, AnsibleVaultEncryptedUnicode, Undefined)):
- raise AnsibleFilterTypeError("Vault should be in the form of a string, instead we got: %s" % type(vault))
+ if not isinstance(vault, (str, bytes)):
+ raise TypeError(f"Vault should be in the form of a string, instead we got {type(vault)}.")
if vaultid is not None:
- display.deprecated("Use of undocumented 'vaultid', use 'vault_id' instead", version='2.20')
+ display.deprecated(
+ msg="Use of undocumented `vaultid`.",
+ version="2.20",
+ help_text="Use `vault_id` instead.",
+ )
+
if vault_id == 'filter_default':
vault_id = vaultid
else:
display.warning("Ignoring vaultid as vault_id is already set.")
- data = ''
vs = VaultSecret(to_bytes(secret))
vl = VaultLib([(vault_id, vs)])
- if isinstance(vault, AnsibleVaultEncryptedUnicode):
- vault.vault = vl
- data = vault.data
- elif is_encrypted(vault):
+
+ if ciphertext := VaultHelper.get_ciphertext(vault, with_tags=True):
+ vault = ciphertext
+
+ if is_encrypted(vault):
try:
data = vl.decrypt(vault)
- except UndefinedError:
- raise
- except Exception as e:
- raise AnsibleFilterError("Unable to decrypt: %s" % to_native(e), orig_exc=e)
+ except Exception as ex:
+ raise AnsibleError("Unable to decrypt.") from ex
else:
data = vault
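
Both filter bodies reduce to the public vault API. A standalone round-trip sketch under
the same vault_id, without the filter plumbing (exact return types may vary by core
version):

from ansible.parsing.vault import VaultLib, VaultSecret, is_encrypted

secret = VaultSecret(b'my_vault_password')
vl = VaultLib([('filter_default', secret)])

ciphertext = vl.encrypt(b'my_secret_key', secret, 'filter_default')
assert is_encrypted(ciphertext)
assert vl.decrypt(ciphertext) == b'my_secret_key'
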
diff --git a/lib/ansible/plugins/filter/flatten.yml b/lib/ansible/plugins/filter/flatten.yml
index ae2d5eab9bf..540ca4a075d 100644
--- a/lib/ansible/plugins/filter/flatten.yml
+++ b/lib/ansible/plugins/filter/flatten.yml
@@ -7,8 +7,9 @@ DOCUMENTATION:
positional: _input, levels, skip_nulls
options:
_input:
- description: First dictionary to combine.
- type: dict
+ description: List to flatten.
+ type: list
+ elements: any
required: true
levels:
description: Number of recursive list depths to flatten.
diff --git a/lib/ansible/plugins/filter/human_to_bytes.yml b/lib/ansible/plugins/filter/human_to_bytes.yml
index 8932aaef9d6..f643cda54c1 100644
--- a/lib/ansible/plugins/filter/human_to_bytes.yml
+++ b/lib/ansible/plugins/filter/human_to_bytes.yml
@@ -8,7 +8,7 @@ DOCUMENTATION:
options:
_input:
description: human-readable description of a number of bytes.
- type: int
+ type: string
required: true
default_unit:
description: Unit to assume when input does not specify it.
@@ -31,7 +31,7 @@ EXAMPLES: |
# size => 2684354560
size: '{{ "2.5 gigabyte" | human_to_bytes }}'
- # size => 1234803098
+ # size => 1073741824
size: '{{ "1 Gigabyte" | human_to_bytes }}'
# this is an error, because gigggabyte is not a valid unit
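
The corrected expected value follows from the underlying formatter treating units as
binary multiples (1 G == 2**30):

from ansible.module_utils.common.text.formatters import human_to_bytes

assert human_to_bytes('1 Gigabyte') == 1073741824         # 2**30, per the fixed example
assert human_to_bytes('2.5 gigabyte') == 2684354560       # 2.5 * 2**30
assert human_to_bytes(1024, default_unit='K') == 1048576  # bare numbers use default_unit
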
diff --git a/lib/ansible/plugins/filter/mathstuff.py b/lib/ansible/plugins/filter/mathstuff.py
index d80eb3347c1..a9247a2c984 100644
--- a/lib/ansible/plugins/filter/mathstuff.py
+++ b/lib/ansible/plugins/filter/mathstuff.py
@@ -27,10 +27,9 @@ from collections.abc import Mapping, Iterable
from jinja2.filters import pass_environment
-from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
+from ansible.errors import AnsibleError
from ansible.module_utils.common.text import formatters
from ansible.module_utils.six import binary_type, text_type
-from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.utils.display import Display
try:
@@ -48,10 +47,11 @@ display = Display()
# explicitly set and cannot be handle (by Jinja2 w/o 'unique' or fallback version)
def unique(environment, a, case_sensitive=None, attribute=None):
- def _do_fail(e):
+ def _do_fail(ex):
if case_sensitive is False or attribute:
- raise AnsibleFilterError("Jinja2's unique filter failed and we cannot fall back to Ansible's version "
- "as it does not support the parameters supplied", orig_exc=e)
+ raise AnsibleError(
+ "Jinja2's unique filter failed and we cannot fall back to Ansible's version as it does not support the parameters supplied."
+ ) from ex
error = e = None
try:
@@ -63,14 +63,14 @@ def unique(environment, a, case_sensitive=None, attribute=None):
except Exception as e:
error = e
_do_fail(e)
- display.warning('Falling back to Ansible unique filter as Jinja2 one failed: %s' % to_text(e))
+ display.error_as_warning('Falling back to Ansible unique filter as Jinja2 one failed.', e)
if not HAS_UNIQUE or error:
# handle Jinja2 specific attributes when using Ansible's version
if case_sensitive is False or attribute:
- raise AnsibleFilterError("Ansible's unique filter does not support case_sensitive=False nor attribute parameters, "
- "you need a newer version of Jinja2 that provides their version of the filter.")
+ raise AnsibleError("Ansible's unique filter does not support case_sensitive=False nor attribute parameters, "
+ "you need a newer version of Jinja2 that provides their version of the filter.")
c = []
for x in a:
@@ -123,15 +123,15 @@ def logarithm(x, base=math.e):
return math.log10(x)
else:
return math.log(x, base)
- except TypeError as e:
- raise AnsibleFilterTypeError('log() can only be used on numbers: %s' % to_native(e))
+ except TypeError as ex:
+ raise AnsibleError('log() can only be used on numbers') from ex
def power(x, y):
try:
return math.pow(x, y)
- except TypeError as e:
- raise AnsibleFilterTypeError('pow() can only be used on numbers: %s' % to_native(e))
+ except TypeError as ex:
+ raise AnsibleError('pow() can only be used on numbers') from ex
def inversepower(x, base=2):
@@ -140,28 +140,28 @@ def inversepower(x, base=2):
return math.sqrt(x)
else:
return math.pow(x, 1.0 / float(base))
- except (ValueError, TypeError) as e:
- raise AnsibleFilterTypeError('root() can only be used on numbers: %s' % to_native(e))
+ except (ValueError, TypeError) as ex:
+ raise AnsibleError('root() can only be used on numbers') from ex
def human_readable(size, isbits=False, unit=None):
""" Return a human-readable string """
try:
return formatters.bytes_to_human(size, isbits, unit)
- except TypeError as e:
- raise AnsibleFilterTypeError("human_readable() failed on bad input: %s" % to_native(e))
- except Exception:
- raise AnsibleFilterError("human_readable() can't interpret following string: %s" % size)
+ except TypeError as ex:
+ raise AnsibleError("human_readable() failed on bad input") from ex
+ except Exception as ex:
+ raise AnsibleError("human_readable() can't interpret the input") from ex
def human_to_bytes(size, default_unit=None, isbits=False):
""" Return bytes count from a human-readable string """
try:
return formatters.human_to_bytes(size, default_unit, isbits)
- except TypeError as e:
- raise AnsibleFilterTypeError("human_to_bytes() failed on bad input: %s" % to_native(e))
- except Exception:
- raise AnsibleFilterError("human_to_bytes() can't interpret following string: %s" % size)
+ except TypeError as ex:
+ raise AnsibleError("human_to_bytes() failed on bad input") from ex
+ except Exception as ex:
+ raise AnsibleError("human_to_bytes() can't interpret the input") from ex
def rekey_on_member(data, key, duplicates='error'):
@@ -174,38 +174,31 @@ def rekey_on_member(data, key, duplicates='error'):
value would be duplicated or to overwrite previous entries if that's the case.
"""
if duplicates not in ('error', 'overwrite'):
- raise AnsibleFilterError("duplicates parameter to rekey_on_member has unknown value: {0}".format(duplicates))
+ raise AnsibleError(f"duplicates parameter to rekey_on_member has unknown value {duplicates!r}")
new_obj = {}
- # Ensure the positional args are defined - raise jinja2.exceptions.UndefinedError if not
- bool(data) and bool(key)
-
if isinstance(data, Mapping):
iterate_over = data.values()
elif isinstance(data, Iterable) and not isinstance(data, (text_type, binary_type)):
iterate_over = data
else:
- raise AnsibleFilterTypeError("Type is not a valid list, set, or dict")
+ raise AnsibleError("Type is not a valid list, set, or dict")
for item in iterate_over:
if not isinstance(item, Mapping):
- raise AnsibleFilterTypeError("List item is not a valid dict")
+ raise AnsibleError("List item is not a valid dict")
try:
key_elem = item[key]
except KeyError:
- raise AnsibleFilterError("Key {0} was not found".format(key))
- except TypeError as e:
- raise AnsibleFilterTypeError(to_native(e))
- except Exception as e:
- raise AnsibleFilterError(to_native(e))
+ raise AnsibleError(f"Key {key!r} was not found.", obj=item) from None
# Note: if new_obj[key_elem] exists it will always be a non-empty dict (it will at
# minimum contain {key: key_elem}
if new_obj.get(key_elem, None):
if duplicates == 'error':
- raise AnsibleFilterError("Key {0} is not unique, cannot correctly turn into dict".format(key_elem))
+ raise AnsibleError(f"Key {key_elem!r} is not unique, cannot convert to dict.")
elif duplicates == 'overwrite':
new_obj[key_elem] = item
else:
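
rekey_on_member in use, including the duplicates switch handled above:

users = [{'name': 'alice', 'uid': 1001}, {'name': 'bob', 'uid': 1002}]

assert rekey_on_member(users, 'name') == {
    'alice': {'name': 'alice', 'uid': 1001},
    'bob': {'name': 'bob', 'uid': 1002},
}

# a repeated key raises AnsibleError by default; duplicates='overwrite' keeps the last item
rekey_on_member(users + [{'name': 'bob', 'uid': 1003}], 'name', duplicates='overwrite')
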
diff --git a/lib/ansible/plugins/filter/password_hash.yml b/lib/ansible/plugins/filter/password_hash.yml
index 5776cebfc5d..290f8bd9def 100644
--- a/lib/ansible/plugins/filter/password_hash.yml
+++ b/lib/ansible/plugins/filter/password_hash.yml
@@ -32,6 +32,14 @@ EXAMPLES: |
# pwdhash => "$6$/bQCntzQ7VrgVcFa$VaMkmevkY1dqrx8neaenUDlVU.6L/.ojRbrnI4ID.yBHU6XON1cB422scCiXfUL5wRucMdLgJU0Fn38uoeBni/"
pwdhash: "{{ 'testing' | password_hash }}"
+ # Using hash type
+ # wireguard_admin_password_hash => "$2b$12$ujYVRD9v9z87lpvLqeWNuOFDI4QzSSYHoRyYydW6XK4.kgqfwOXzO"
+ wireguard_admin_password_hash: "{{ 'vagrant-libvirt' | password_hash(hashtype='bcrypt') }}"
+
+ # Using salt value for idempotency
+ # wireguard_admin_password_hash => "$2b$12$abcdefghijklmnopqrstuuTEw8POU2MwwuYEM7WaKcjqZ948Hm7.W"
+ wireguard_admin_password_hash: "{{ 'vagrant-libvirt' | password_hash(hashtype='bcrypt', salt='abcdefghijklmnopqrstuv') }}"
+
RETURN:
_value:
description: The resulting password hash.
diff --git a/lib/ansible/plugins/filter/pow.yml b/lib/ansible/plugins/filter/pow.yml
index da2fa42739c..a46891c3029 100644
--- a/lib/ansible/plugins/filter/pow.yml
+++ b/lib/ansible/plugins/filter/pow.yml
@@ -3,7 +3,7 @@ DOCUMENTATION:
version_added: "1.9"
short_description: power of (math operation)
description:
- - Math operation that returns the Nth power of inputed number, C(X ^ N).
+    - Math operation that returns the Nth power of the inputted number, C(X ^ N).
notes:
- This is a passthrough to Python's C(math.pow).
positional: _input, _power
diff --git a/lib/ansible/plugins/filter/regex_search.yml b/lib/ansible/plugins/filter/regex_search.yml
index e9ac11d9496..16a06b8076f 100644
--- a/lib/ansible/plugins/filter/regex_search.yml
+++ b/lib/ansible/plugins/filter/regex_search.yml
@@ -52,5 +52,5 @@ EXAMPLES: |
RETURN:
_value:
- description: Matched string or empty string if no match.
+ description: Matched string or C(None) if no match.
type: str
diff --git a/lib/ansible/plugins/filter/root.yml b/lib/ansible/plugins/filter/root.yml
index 263586b436d..f3ec74bead2 100644
--- a/lib/ansible/plugins/filter/root.yml
+++ b/lib/ansible/plugins/filter/root.yml
@@ -3,7 +3,7 @@ DOCUMENTATION:
version_added: "1.9"
short_description: root of (math operation)
description:
- - Math operation that returns the Nth root of inputed number C(X ^^ N).
+    - Math operation that returns the Nth root of the inputted number, C(X ^^ N).
positional: _input, base
options:
_input:
diff --git a/lib/ansible/plugins/filter/strftime.yml b/lib/ansible/plugins/filter/strftime.yml
index fffa6d447d1..d51e5817dc9 100644
--- a/lib/ansible/plugins/filter/strftime.yml
+++ b/lib/ansible/plugins/filter/strftime.yml
@@ -1,16 +1,16 @@
DOCUMENTATION:
name: strftime
version_added: "2.4"
- short_description: date formating
+ short_description: date formatting
description:
- - Using Python's C(strftime) function, take a data formating string and a date/time to create a formatted date.
+    - Using Python's C(strftime) function, take a date formatting string and a date/time to create a formatted date.
notes:
    - This is a passthrough to Python's C(strftime); for a complete set of formatting options, go to https://strftime.org/.
positional: _input, second, utc
options:
_input:
description:
- - A formating string following C(stftime) conventions.
+      - A formatting string following C(strftime) conventions.
- See L(the Python documentation, https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior) for a reference.
type: str
required: true
diff --git a/lib/ansible/plugins/filter/to_json.yml b/lib/ansible/plugins/filter/to_json.yml
index 003e5a19eb9..55b7607cab6 100644
--- a/lib/ansible/plugins/filter/to_json.yml
+++ b/lib/ansible/plugins/filter/to_json.yml
@@ -23,8 +23,9 @@ DOCUMENTATION:
default: True
version_added: '2.9'
allow_nan:
- description: When V(False), strict adherence to float value limits of the JSON specifications, so C(nan), C(inf) and C(-inf) values will produce errors.
- When V(True), JavaScript equivalents will be used (C(NaN), C(Infinity), C(-Infinity)).
+ description:
+ - When V(False), out-of-range float values C(nan), C(inf) and C(-inf) will result in an error.
+ - When V(True), out-of-range float values will be represented using their JavaScript equivalents, C(NaN), C(Infinity) and C(-Infinity).
default: True
type: bool
check_circular:
@@ -42,8 +43,11 @@ DOCUMENTATION:
separators:
description: The C(item) and C(key) separator to be used in the serialized output,
default may change depending on O(indent) and Python version.
- default: "(', ', ': ')"
- type: tuple
+ default:
+ - ', '
+ - ': '
+ type: list
+ elements: str
skipkeys:
description: If V(True), keys that are not basic Python types will be skipped.
default: False
diff --git a/lib/ansible/plugins/filter/to_nice_json.yml b/lib/ansible/plugins/filter/to_nice_json.yml
index abaeee0c071..2c87f3e6d67 100644
--- a/lib/ansible/plugins/filter/to_nice_json.yml
+++ b/lib/ansible/plugins/filter/to_nice_json.yml
@@ -23,8 +23,9 @@ DOCUMENTATION:
default: True
version_added: '2.9'
allow_nan:
- description: When V(False), strict adherence to float value limits of the JSON specification, so C(nan), C(inf) and C(-inf) values will produce errors.
- When V(True), JavaScript equivalents will be used (C(NaN), C(Infinity), C(-Infinity)).
+ description:
+ - When V(False), out-of-range float values C(nan), C(inf) and C(-inf) will result in an error.
+ - When V(True), out-of-range float values will be represented using their JavaScript equivalents, C(NaN), C(Infinity) and C(-Infinity).
default: True
type: bool
check_circular:
diff --git a/lib/ansible/plugins/filter/to_nice_yaml.yml b/lib/ansible/plugins/filter/to_nice_yaml.yml
index faf4c837928..85e512198a6 100644
--- a/lib/ansible/plugins/filter/to_nice_yaml.yml
+++ b/lib/ansible/plugins/filter/to_nice_yaml.yml
@@ -1,5 +1,5 @@
DOCUMENTATION:
- name: to_yaml
+ name: to_nice_yaml
author: core team
version_added: 'historical'
short_description: Convert variable to YAML string
@@ -20,14 +20,38 @@ DOCUMENTATION:
description: Affects sorting of dictionary keys.
default: True
type: bool
- #allow_unicode:
- # description:
- # type: bool
- # default: true
- #default_style=None, canonical=None, width=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None
+ default_style:
+ description:
+ - Indicates the style of the scalar.
+ choices:
+ - ''
+ - "'"
+ - '"'
+ - '|'
+ - '>'
+ type: string
+ canonical:
+ description:
+ - If set to V(True), export tag type to the output.
+ type: bool
+ width:
+ description: Set the preferred line width.
+ type: int
+ line_break:
+ description: Specify the line break.
+ type: string
+ encoding:
+ description: Specify the output encoding.
+ type: string
+ explicit_start:
+ description: If set to V(True), adds an explicit start using "---".
+ type: bool
+ explicit_end:
+ description: If set to V(True), adds an explicit end using "...".
+ type: bool
notes:
- More options may be available, see L(PyYAML documentation, https://pyyaml.org/wiki/PyYAMLDocumentation) for details.
- - 'These parameters to C(yaml.dump) will be ignored, as they are overridden internally: I(default_flow_style)'
+ - 'These parameters to C(yaml.dump) will be ignored, as they are overridden internally: I(default_flow_style), I(allow_unicode).'
EXAMPLES: |
# dump variable in a template to create a YAML document
diff --git a/lib/ansible/plugins/filter/to_uuid.yml b/lib/ansible/plugins/filter/to_uuid.yml
index 50824779dad..a6523423a8b 100644
--- a/lib/ansible/plugins/filter/to_uuid.yml
+++ b/lib/ansible/plugins/filter/to_uuid.yml
@@ -7,7 +7,7 @@ DOCUMENTATION:
positional: _input, namespace
options:
_input:
- description: String to use as base fo the UUID.
+ description: String to use as base of the UUID.
type: str
required: true
namespace:
diff --git a/lib/ansible/plugins/filter/to_yaml.yml b/lib/ansible/plugins/filter/to_yaml.yml
index 224cf129f31..40086b1d8a5 100644
--- a/lib/ansible/plugins/filter/to_yaml.yml
+++ b/lib/ansible/plugins/filter/to_yaml.yml
@@ -20,25 +20,38 @@ DOCUMENTATION:
description: Affects sorting of dictionary keys.
default: True
type: bool
+ default_style:
+ description:
+ - Indicates the style of the scalar.
+ choices:
+ - ''
+ - "'"
+ - '"'
+ - '|'
+ - '>'
+ type: string
+ canonical:
+ description:
+ - If set to V(True), export tag type to the output.
+ type: bool
+ width:
+ description: Set the preferred line width.
+ type: integer
+ line_break:
+ description: Specify the line break.
+ type: string
+ encoding:
+ description: Specify the output encoding.
+ type: string
+ explicit_start:
+ description: If set to V(True), adds an explicit start using "---".
+ type: bool
+ explicit_end:
+ description: If set to V(True), adds an explicit end using "...".
+ type: bool
notes:
- More options may be available, see L(PyYAML documentation, https://pyyaml.org/wiki/PyYAMLDocumentation) for details.
- # TODO: find docs for these
- #allow_unicode:
- # description:
- # type: bool
- # default: true
- #default_flow_style
- #default_style
- #canonical=None,
- #width=None,
- #line_break=None,
- #encoding=None,
- #explicit_start=None,
- #explicit_end=None,
- #version=None,
- #tags=None
-
EXAMPLES: |
# dump variable in a template to create a YAML document
{{ github_workflow | to_yaml }}
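
The newly documented options are handed straight to C(yaml.dump), so the same knobs can
be exercised in plain Python:

import yaml

doc = {'name': 'demo', 'items': ['a', 'b']}

# explicit_start/explicit_end add the '---' and '...' markers documented above
print(yaml.dump(doc, explicit_start=True, explicit_end=True, width=120, default_style="'"))
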
diff --git a/lib/ansible/plugins/filter/unvault.yml b/lib/ansible/plugins/filter/unvault.yml
index 82747a6fce3..3512fb08692 100644
--- a/lib/ansible/plugins/filter/unvault.yml
+++ b/lib/ansible/plugins/filter/unvault.yml
@@ -8,7 +8,7 @@ DOCUMENTATION:
positional: secret
options:
_input:
- description: Vault string, or an C(AnsibleVaultEncryptedUnicode) string object.
+ description: Vault string.
type: string
required: true
secret:
diff --git a/lib/ansible/plugins/filter/urlsplit.py b/lib/ansible/plugins/filter/urlsplit.py
index 3b1d35f6b59..8f777953a63 100644
--- a/lib/ansible/plugins/filter/urlsplit.py
+++ b/lib/ansible/plugins/filter/urlsplit.py
@@ -58,7 +58,6 @@ RETURN = r"""
from urllib.parse import urlsplit
-from ansible.errors import AnsibleFilterError
from ansible.utils import helpers
@@ -70,7 +69,7 @@ def split_url(value, query='', alias='urlsplit'):
# If no option is supplied, return the entire dictionary.
if query:
if query not in results:
- raise AnsibleFilterError(alias + ': unknown URL component: %s' % query)
+ raise ValueError(alias + ': unknown URL component: %s' % query)
return results[query]
else:
return results
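
split_url derives every component from the standard library's urlsplit, so the valid
query names match its attributes:

from urllib.parse import urlsplit

parts = urlsplit('http://user:pass@www.example.com:8080/dir/index.html?query=term#fragment')
assert parts.hostname == 'www.example.com'
assert parts.port == 8080
assert parts.path == '/dir/index.html'
assert parts.query == 'query=term'
assert parts.fragment == 'fragment'
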
diff --git a/lib/ansible/plugins/filter/vault.yml b/lib/ansible/plugins/filter/vault.yml
index 8e3437183f4..43e2801cf70 100644
--- a/lib/ansible/plugins/filter/vault.yml
+++ b/lib/ansible/plugins/filter/vault.yml
@@ -26,23 +26,28 @@ DOCUMENTATION:
default: 'filter_default'
wrap_object:
description:
- - This toggle can force the return of an C(AnsibleVaultEncryptedUnicode) string object, when V(False), you get a simple string.
+      - When V(True), this toggle forces the return of a C(VaultedValue)-tagged string object; when V(False), you get a simple string.
- Mostly useful when combining with the C(to_yaml) filter to output the 'inline vault' format.
type: bool
default: False
EXAMPLES: |
- # simply encrypt my key in a vault
+ # Encrypt a value using the vault filter
vars:
- myvaultedkey: "{{ keyrawdata|vault(passphrase) }} "
+ myvaultedkey: "{{ 'my_secret_key' | vault('my_vault_password') }}"
- - name: save templated vaulted data
- template: src=dump_template_data.j2 dest=/some/key/vault.txt
- vars:
- mysalt: '{{2**256|random(seed=inventory_hostname)}}'
- template_data: '{{ secretdata|vault(vaultsecret, salt=mysalt) }}'
+ # Encrypt a value and save it to a file using the template module
+ vars:
+ template_data: "{{ 'my_sensitive_data' | vault('another_vault_password', salt=(2**256 | random(seed=inventory_hostname))) }}"
+
+ # The content of dump_template_data.j2 looks like
+ # Encrypted secret: {{ template_data }}
+ - name: Save vaulted data
+ template:
+ src: dump_template_data.j2
+ dest: /some/key/vault.txt
RETURN:
_value:
- description: The vault string that contains the secret data (or C(AnsibleVaultEncryptedUnicode) string object).
+ description: The vault string that contains the secret data (or C(VaultedValue)-tagged string object).
type: string
diff --git a/lib/ansible/plugins/filter/win_basename.yml b/lib/ansible/plugins/filter/win_basename.yml
index f89baa5a27d..3bf4c5621cf 100644
--- a/lib/ansible/plugins/filter/win_basename.yml
+++ b/lib/ansible/plugins/filter/win_basename.yml
@@ -5,6 +5,7 @@ DOCUMENTATION:
short_description: Get a Windows path's base name
description:
- Returns the last name component of a Windows path, what is left in the string that is not 'win_dirname'.
+    - When specifying a UNC (Universal Naming Convention) path, make sure the path conforms to UNC path syntax.
options:
_input:
description: A Windows path.
@@ -16,7 +17,11 @@ DOCUMENTATION:
EXAMPLES: |
# To get the last name of a file Windows path, like 'foo.txt' out of 'C:\Users\asdf\foo.txt'
- {{ mypath | win_basename }}
+ filename: "{{ mypath | win_basename }}"
+
+  # Get the base name from a UNC path (one beginning with '\\\\'),
+  # e.g. '\\server1\test\foo.txt' returns 'foo.txt'
+ filename: "{{ mypath | win_basename }}"
RETURN:
_value:
diff --git a/lib/ansible/plugins/filter/win_dirname.yml b/lib/ansible/plugins/filter/win_dirname.yml
index dbc85c7716c..5a2e3a72c3c 100644
--- a/lib/ansible/plugins/filter/win_dirname.yml
+++ b/lib/ansible/plugins/filter/win_dirname.yml
@@ -5,6 +5,7 @@ DOCUMENTATION:
short_description: Get a Windows path's directory
description:
- Returns the directory component of a Windows path, what is left in the string that is not 'win_basename'.
+    - When specifying a UNC (Universal Naming Convention) path, make sure the path conforms to UNC path syntax.
options:
_input:
description: A Windows path.
@@ -18,6 +19,10 @@ EXAMPLES: |
  # To get the directory of a Windows path, like 'C:\Users\asdf' out of 'C:\Users\asdf\foo.txt'
{{ mypath | win_dirname }}
+  # Get the dirname from a UNC path (one beginning with '\\\\'),
+  # e.g. '\\server1\test\foo.txt' returns '\\\\server1\\test\\'
+ filename: "{{ mypath | win_dirname }}"
+
RETURN:
_value:
description: The directory from the Windows path provided.
diff --git a/lib/ansible/plugins/inventory/__init__.py b/lib/ansible/plugins/inventory/__init__.py
index 324234cb7ec..348e8dc8834 100644
--- a/lib/ansible/plugins/inventory/__init__.py
+++ b/lib/ansible/plugins/inventory/__init__.py
@@ -17,24 +17,30 @@
from __future__ import annotations
+import functools
import hashlib
import os
import string
+import typing as t
from collections.abc import Mapping
-from ansible.errors import AnsibleError, AnsibleParserError
+from ansible import template as _template
+from ansible.errors import AnsibleError, AnsibleParserError, AnsibleValueOmittedError
from ansible.inventory.group import to_safe_group_name as original_safe
+from ansible.module_utils._internal import _plugin_info
from ansible.parsing.utils.addresses import parse_address
-from ansible.plugins import AnsiblePlugin
-from ansible.plugins.cache import CachePluginAdjudicator as CacheObject
+from ansible.parsing.dataloader import DataLoader
+from ansible.plugins import AnsiblePlugin, _ConfigurablePlugin
+from ansible.plugins.cache import CachePluginAdjudicator
from ansible.module_utils.common.text.converters import to_bytes, to_native
-from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import string_types
-from ansible.template import Templar
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars, load_extra_vars
+if t.TYPE_CHECKING:
+ from ansible.inventory.data import InventoryData
+
display = Display()
@@ -127,8 +133,11 @@ def expand_hostname_range(line=None):
def get_cache_plugin(plugin_name, **kwargs):
+ if not plugin_name:
+ raise AnsibleError("A cache plugin must be configured to use inventory caching.")
+
try:
- cache = CacheObject(plugin_name, **kwargs)
+ cache = CachePluginAdjudicator(plugin_name, **kwargs)
except AnsibleError as e:
if 'fact_caching_connection' in to_native(e):
raise AnsibleError("error, '%s' inventory cache plugin requires the one of the following to be set "
@@ -136,17 +145,22 @@ def get_cache_plugin(plugin_name, **kwargs):
"[inventory]: cache_connection;\nEnvironment:\nANSIBLE_INVENTORY_CACHE_CONNECTION,\n"
"ANSIBLE_CACHE_PLUGIN_CONNECTION." % plugin_name)
else:
- raise e
+ raise
- if plugin_name != 'memory' and kwargs and not getattr(cache._plugin, '_options', None):
+ if cache._plugin.ansible_name != 'ansible.builtin.memory' and kwargs and not getattr(cache._plugin, '_options', None):
raise AnsibleError('Unable to use cache plugin {0} for inventory. Cache options were provided but may not reconcile '
'correctly unless set via set_options. Refer to the porting guide if the plugin derives user settings '
'from ansible.constants.'.format(plugin_name))
return cache
-class BaseInventoryPlugin(AnsiblePlugin):
- """ Parses an Inventory Source"""
+class _BaseInventoryPlugin(AnsiblePlugin):
+ """
+ Internal base implementation for inventory plugins.
+
+ Do not inherit from this directly, use one of its public subclasses instead.
+ Used to introduce an extra layer in the class hierarchy to allow Constructed to subclass this while remaining a mixin for existing inventory plugins.
+ """
TYPE = 'generator'
@@ -156,16 +170,26 @@ class BaseInventoryPlugin(AnsiblePlugin):
# it by default.
_sanitize_group_name = staticmethod(to_safe_group_name)
- def __init__(self):
+ def __init__(self) -> None:
- super(BaseInventoryPlugin, self).__init__()
+ super().__init__()
self._options = {}
- self.inventory = None
self.display = display
- self._vars = {}
- def parse(self, inventory, loader, path, cache=True):
+ # These attributes are set by the parse() method on this (base) class.
+ self.loader: DataLoader | None = None
+ self.inventory: InventoryData | None = None
+ self._vars: dict[str, t.Any] | None = None
+
+ trusted_by_default: bool = False
+ """Inventory plugins that only source templates from trusted sources can set this True to have trust automatically applied to all templates."""
+
+ @functools.cached_property
+ def templar(self) -> _template.Templar:
+ return _template.Templar(loader=self.loader)
+
+ def parse(self, inventory: InventoryData, loader: DataLoader, path: str, cache: bool = True) -> None:
""" Populates inventory from the given data. Raises an error on any parse failure
:arg inventory: a copy of the previously accumulated inventory data,
to be updated with any new data this plugin provides.
@@ -178,10 +202,8 @@ class BaseInventoryPlugin(AnsiblePlugin):
:arg cache: a boolean that indicates if the plugin should use the cache or not
you can ignore if this plugin does not implement caching.
"""
-
self.loader = loader
self.inventory = inventory
- self.templar = Templar(loader=loader)
self._vars = load_extra_vars(loader)
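
A minimal sketch (hypothetical plugin name) of a subclass relying on this contract:

    from ansible.plugins.inventory import BaseInventoryPlugin

    class InventoryModule(BaseInventoryPlugin):
        NAME = 'example'

        def parse(self, inventory, loader, path, cache=True):
            super().parse(inventory, loader, path)  # sets loader, inventory and _vars
            # first access lazily builds Templar(loader=self.loader) via cached_property
            self.templar.available_variables = {'greeting': 'hello'}
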
def verify_file(self, path):
@@ -214,11 +236,10 @@ class BaseInventoryPlugin(AnsiblePlugin):
:arg path: path to common yaml format config file for this plugin
"""
- config = {}
try:
# avoid loader cache so meta: refresh_inventory can pick up config changes
# if we read more than once, fs cache should be good enough
- config = self.loader.load_from_file(path, cache='none')
+ config = self.loader.load_from_file(path, cache='none', trusted_as_template=True)
except Exception as e:
raise AnsibleParserError(to_native(e))
@@ -279,7 +300,11 @@ class BaseInventoryPlugin(AnsiblePlugin):
return (hostnames, port)
-class BaseFileInventoryPlugin(BaseInventoryPlugin):
+class BaseInventoryPlugin(_BaseInventoryPlugin):
+ """ Parses an Inventory Source """
+
+
+class BaseFileInventoryPlugin(_BaseInventoryPlugin):
""" Parses a File based Inventory Source"""
TYPE = 'storage'
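
Condensed, the class relationships defined in this file now look like this (Constructable's new base appears further below):

    class _BaseInventoryPlugin(AnsiblePlugin): ...            # internal base
    class BaseInventoryPlugin(_BaseInventoryPlugin): ...      # public plugin base
    class BaseFileInventoryPlugin(_BaseInventoryPlugin): ...  # file-backed sources
    class Constructable(_BaseInventoryPlugin): ...            # formerly a bare object mixin
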
@@ -289,51 +314,43 @@ class BaseFileInventoryPlugin(BaseInventoryPlugin):
super(BaseFileInventoryPlugin, self).__init__()
-class Cacheable(object):
+class Cacheable(_plugin_info.HasPluginInfo, _ConfigurablePlugin):
+ """Mixin for inventory plugins which support caching."""
- _cache = CacheObject()
+ _cache: CachePluginAdjudicator
@property
- def cache(self):
+ def cache(self) -> CachePluginAdjudicator:
return self._cache
- def load_cache_plugin(self):
+ def load_cache_plugin(self) -> None:
plugin_name = self.get_option('cache_plugin')
cache_option_keys = [('_uri', 'cache_connection'), ('_timeout', 'cache_timeout'), ('_prefix', 'cache_prefix')]
cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]) is not None)
self._cache = get_cache_plugin(plugin_name, **cache_options)
- def get_cache_key(self, path):
- return "{0}_{1}".format(self.NAME, self._get_cache_prefix(path))
-
- def _get_cache_prefix(self, path):
- """ create predictable unique prefix for plugin/inventory """
-
- m = hashlib.sha1()
- m.update(to_bytes(self.NAME, errors='surrogate_or_strict'))
- d1 = m.hexdigest()
+ def get_cache_key(self, path: str) -> str:
+ return f'{self.ansible_name}_{self._get_cache_prefix(path)}'
- n = hashlib.sha1()
- n.update(to_bytes(path, errors='surrogate_or_strict'))
- d2 = n.hexdigest()
+ def _get_cache_prefix(self, path: str) -> str:
+ """Return a predictable unique key based on the given path."""
+ return 'k' + hashlib.sha256(f'{self.ansible_name}{path}'.encode(), usedforsecurity=False).hexdigest()[:6]
- return 's_'.join([d1[:5], d2[:5]])
+ def clear_cache(self) -> None:
+ self._cache.clear()
- def clear_cache(self):
- self._cache.flush()
-
- def update_cache_if_changed(self):
+ def update_cache_if_changed(self) -> None:
self._cache.update_cache_if_changed()
- def set_cache_plugin(self):
+ def set_cache_plugin(self) -> None:
self._cache.set_cache()
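
How the new compact cache prefix is derived, as a standalone sketch (the plugin name and path are examples):

    import hashlib

    def cache_prefix(ansible_name: str, path: str) -> str:
        digest = hashlib.sha256(f'{ansible_name}{path}'.encode(), usedforsecurity=False)
        # the leading 'k' presumably keeps keys from starting with a digit
        return 'k' + digest.hexdigest()[:6]

    cache_prefix('ansible.builtin.constructed', '/etc/ansible/constructed.yml')
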
-class Constructable(object):
-
- def _compose(self, template, variables, disable_lookups=True):
+class Constructable(_BaseInventoryPlugin):
+ def _compose(self, template, variables, disable_lookups=...):
""" helper method for plugins to compose variables for Ansible based on jinja2 expression and inventory vars"""
- t = self.templar
+ if disable_lookups is not ...:
+ self.display.deprecated("The disable_lookups arg has no effect.", version="2.23")
try:
use_extra = self.get_option('use_extra_vars')
@@ -341,12 +358,11 @@ class Constructable(object):
use_extra = False
if use_extra:
- t.available_variables = combine_vars(variables, self._vars)
+ self.templar.available_variables = combine_vars(variables, self._vars)
else:
- t.available_variables = variables
+ self.templar.available_variables = variables
- return t.template('%s%s%s' % (t.environment.variable_start_string, template, t.environment.variable_end_string),
- disable_lookups=disable_lookups)
+ return self.templar.evaluate_expression(template)
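
What the new expression path does for a single compose entry, as a sketch with example values:

    from ansible.parsing.dataloader import DataLoader
    from ansible.template import Templar

    templar = Templar(loader=DataLoader())
    templar.available_variables = {'ansible_port': 22}
    templar.evaluate_expression('ansible_port + 1000')  # -> 1022
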
def _set_composite_vars(self, compose, variables, host, strict=False):
""" loops over compose entries to create vars for hosts """
@@ -368,10 +384,10 @@ class Constructable(object):
variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
self.templar.available_variables = variables
for group_name in groups:
- conditional = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % groups[group_name]
+ conditional = groups[group_name]
group_name = self._sanitize_group_name(group_name)
try:
- result = boolean(self.templar.template(conditional))
+ result = self.templar.evaluate_conditional(conditional)
except Exception as e:
if strict:
raise AnsibleParserError("Could not add host %s to group %s: %s" % (host, group_name, to_native(e)))
@@ -385,6 +401,8 @@ class Constructable(object):
def _add_host_to_keyed_groups(self, keys, variables, host, strict=False, fetch_hostvars=True):
""" helper to create groups for plugins based on variable values and add the corresponding hosts to it"""
+ should_default_value = (None, '')
+
if keys and isinstance(keys, list):
for keyed in keys:
if keyed and isinstance(keyed, dict):
@@ -401,36 +419,39 @@ class Constructable(object):
trailing_separator = keyed.get('trailing_separator')
if trailing_separator is not None and default_value_name is not None:
raise AnsibleParserError("parameters are mutually exclusive for keyed groups: default_value|trailing_separator")
- if key or (key == '' and default_value_name is not None):
+
+ use_default = key in should_default_value and default_value_name is not None
+ if key or use_default:
prefix = keyed.get('prefix', '')
sep = keyed.get('separator', '_')
raw_parent_name = keyed.get('parent_group', None)
- if raw_parent_name:
- try:
- raw_parent_name = self.templar.template(raw_parent_name)
- except AnsibleError as e:
- if strict:
- raise AnsibleParserError("Could not generate parent group %s for group %s: %s" % (raw_parent_name, key, to_native(e)))
- continue
+
+ try:
+ raw_parent_name = self.templar.template(raw_parent_name)
+ except AnsibleValueOmittedError:
+ raw_parent_name = None
+ except Exception as ex:
+ if strict:
+ raise AnsibleParserError(f'Could not generate parent group {raw_parent_name!r} for group {key!r}: {ex}') from ex
+
+ continue
new_raw_group_names = []
- if isinstance(key, string_types):
- # if key is empty, 'default_value' will be used as group name
- if key == '' and default_value_name is not None:
- new_raw_group_names.append(default_value_name)
- else:
- new_raw_group_names.append(key)
+ if use_default:
+ new_raw_group_names.append(default_value_name)
+ elif isinstance(key, string_types):
+ new_raw_group_names.append(key)
elif isinstance(key, list):
for name in key:
# if list item is empty, 'default_value' will be used as group name
- if name == '' and default_value_name is not None:
+ if name in should_default_value and default_value_name is not None:
new_raw_group_names.append(default_value_name)
else:
new_raw_group_names.append(name)
elif isinstance(key, Mapping):
for (gname, gval) in key.items():
bare_name = '%s%s%s' % (gname, sep, gval)
- if gval == '':
+ if gval in should_default_value:
# key's value is empty
if default_value_name is not None:
bare_name = '%s%s%s' % (gname, sep, default_value_name)
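
For illustration, keyed-group entries as the keys argument above receives them (values are examples); with should_default_value, a None key or value now falls back to default_value just like an empty string:

    keys = [
        {'key': 'ansible_distribution', 'prefix': 'os'},  # -> group 'os_Fedora'
        {'key': 'tags', 'default_value': 'untagged'},     # '' or None -> 'untagged'
    ]
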
diff --git a/lib/ansible/plugins/inventory/advanced_host_list.py b/lib/ansible/plugins/inventory/advanced_host_list.py
index 7a9646ef9ac..7f03558d573 100644
--- a/lib/ansible/plugins/inventory/advanced_host_list.py
+++ b/lib/ansible/plugins/inventory/advanced_host_list.py
@@ -31,6 +31,8 @@ class InventoryModule(BaseInventoryPlugin):
NAME = 'advanced_host_list'
+    # advanced_host_list does not set vars, so it needs no special trust assistance from the inventory API
+
def verify_file(self, host_list):
valid = False
diff --git a/lib/ansible/plugins/inventory/auto.py b/lib/ansible/plugins/inventory/auto.py
index 81f0352911a..9bfd10f7695 100644
--- a/lib/ansible/plugins/inventory/auto.py
+++ b/lib/ansible/plugins/inventory/auto.py
@@ -30,6 +30,8 @@ class InventoryModule(BaseInventoryPlugin):
NAME = 'auto'
+ # no need to set trusted_by_default, since the consumers of this value will always consult the real plugin substituted during our parse()
+
def verify_file(self, path):
if not path.endswith('.yml') and not path.endswith('.yaml'):
return False
@@ -55,6 +57,11 @@ class InventoryModule(BaseInventoryPlugin):
raise AnsibleParserError("inventory source '{0}' could not be verified by inventory plugin '{1}'".format(path, plugin_name))
self.display.v("Using inventory plugin '{0}' to process inventory source '{1}'".format(plugin._load_name, path))
+
+    # unfortunate magic: swap the real plugin we're proxying into the inventory data API wrapper, so the wrapper
+    # can make the right compat decisions based on the metadata the real plugin provides instead of our own
+ inventory._target_plugin = plugin
+
plugin.parse(inventory, loader, path, cache=cache)
try:
plugin.update_cache_if_changed()
diff --git a/lib/ansible/plugins/inventory/constructed.py b/lib/ansible/plugins/inventory/constructed.py
index ee2b9b4295c..6954e3aeab5 100644
--- a/lib/ansible/plugins/inventory/constructed.py
+++ b/lib/ansible/plugins/inventory/constructed.py
@@ -82,12 +82,11 @@ EXAMPLES = r"""
import os
from ansible import constants as C
-from ansible.errors import AnsibleParserError, AnsibleOptionsError
+from ansible.errors import AnsibleParserError
from ansible.inventory.helpers import get_group_vars
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
-from ansible.module_utils.common.text.converters import to_native
+from ansible.plugins.loader import cache_loader
from ansible.utils.vars import combine_vars
-from ansible.vars.fact_cache import FactCache
from ansible.vars.plugins import get_vars_from_inventory_sources
@@ -96,11 +95,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
NAME = 'constructed'
- def __init__(self):
-
- super(InventoryModule, self).__init__()
-
- self._cache = FactCache()
+ # implicit trust behavior is already added by the YAML parser invoked by the loader
def verify_file(self, path):
@@ -147,26 +142,28 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
sources = inventory.processed_sources
except AttributeError:
if self.get_option('use_vars_plugins'):
- raise AnsibleOptionsError("The option use_vars_plugins requires ansible >= 2.11.")
+ raise
strict = self.get_option('strict')
- fact_cache = FactCache()
+
+ cache = cache_loader.get(C.CACHE_PLUGIN)
+
try:
# Go over hosts (less var copies)
for host in inventory.hosts:
# get available variables to templar
hostvars = self.get_all_host_vars(inventory.hosts[host], loader, sources)
- if host in fact_cache: # adds facts if cache is active
- hostvars = combine_vars(hostvars, fact_cache[host])
+ if cache.contains(host): # adds facts if cache is active
+ hostvars = combine_vars(hostvars, cache.get(host))
# create composite vars
self._set_composite_vars(self.get_option('compose'), hostvars, host, strict=strict)
# refetch host vars in case new ones have been created above
hostvars = self.get_all_host_vars(inventory.hosts[host], loader, sources)
- if host in self._cache: # adds facts if cache is active
- hostvars = combine_vars(hostvars, self._cache[host])
+ if cache.contains(host): # adds facts if cache is active
+ hostvars = combine_vars(hostvars, cache.get(host))
# constructed groups based on conditionals
self._add_host_to_composed_groups(self.get_option('groups'), hostvars, host, strict=strict, fetch_hostvars=False)
@@ -174,5 +171,5 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
# constructed groups based variable values
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars, host, strict=strict, fetch_hostvars=False)
- except Exception as e:
- raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)), orig_exc=e)
+ except Exception as ex:
+ raise AnsibleParserError(f"Failed to parse {path!r}.") from ex
diff --git a/lib/ansible/plugins/inventory/generator.py b/lib/ansible/plugins/inventory/generator.py
index 49c8550403f..ba2570db7d8 100644
--- a/lib/ansible/plugins/inventory/generator.py
+++ b/lib/ansible/plugins/inventory/generator.py
@@ -84,6 +84,8 @@ class InventoryModule(BaseInventoryPlugin):
NAME = 'generator'
+ # implicit trust behavior is already added by the YAML parser invoked by the loader
+
def __init__(self):
super(InventoryModule, self).__init__()
@@ -100,15 +102,18 @@ class InventoryModule(BaseInventoryPlugin):
return valid
def template(self, pattern, variables):
- self.templar.available_variables = variables
- return self.templar.do_template(pattern)
+ # Allow pass-through of data structures for templating later (if applicable).
+        # This limitation was part of the original plugin implementation; it is preserved here for feature parity with the new templating API.
+ if not isinstance(pattern, str):
+ return pattern
+
+ return self.templar.copy_with_new_env(available_variables=variables).template(pattern)
def add_parents(self, inventory, child, parents, template_vars):
for parent in parents:
- try:
- groupname = self.template(parent['name'], template_vars)
- except (AttributeError, ValueError):
- raise AnsibleParserError("Element %s has a parent with no name element" % child['name'])
+ groupname = self.template(parent.get('name'), template_vars)
+ if not groupname:
+ raise AnsibleParserError(f"Element {child} has a parent with no name.")
if groupname not in inventory.groups:
inventory.add_group(groupname)
group = inventory.groups[groupname]
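
The effect of the pass-through rule above, assuming plugin is a parsed InventoryModule instance:

    plugin.template(['not', 'a', 'string'], {'name': 'x'})  # returned unchanged
    plugin.template('{{ name }}-host', {'name': 'x'})       # -> 'x-host'
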
diff --git a/lib/ansible/plugins/inventory/host_list.py b/lib/ansible/plugins/inventory/host_list.py
index 8cfe9e50aa8..9d4ae2f6fac 100644
--- a/lib/ansible/plugins/inventory/host_list.py
+++ b/lib/ansible/plugins/inventory/host_list.py
@@ -35,6 +35,8 @@ class InventoryModule(BaseInventoryPlugin):
NAME = 'host_list'
+    # host_list does not set vars, so it needs no special trust assistance from the inventory API
+
def verify_file(self, host_list):
valid = False
diff --git a/lib/ansible/plugins/inventory/ini.py b/lib/ansible/plugins/inventory/ini.py
index cd961bcdb06..0c90a1b1e81 100644
--- a/lib/ansible/plugins/inventory/ini.py
+++ b/lib/ansible/plugins/inventory/ini.py
@@ -73,7 +73,9 @@ host4 # same host as above, but member of 2 groups, will inherit vars from both
"""
import ast
+import os
import re
+import typing as t
import warnings
from ansible.inventory.group import to_safe_group_name
@@ -81,6 +83,7 @@ from ansible.plugins.inventory import BaseFileInventoryPlugin
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.common.text.converters import to_bytes, to_text
+from ansible._internal._datatag._tags import Origin, TrustedAsTemplate
from ansible.utils.shlex import shlex_split
@@ -93,18 +96,22 @@ class InventoryModule(BaseFileInventoryPlugin):
_COMMENT_MARKERS = frozenset((u';', u'#'))
b_COMMENT_MARKERS = frozenset((b';', b'#'))
- def __init__(self):
+ # template trust is applied internally to strings
+
+ def __init__(self) -> None:
super(InventoryModule, self).__init__()
- self.patterns = {}
- self._filename = None
+ self.patterns: dict[str, re.Pattern] = {}
+ self._origin: Origin | None = None
- def parse(self, inventory, loader, path, cache=True):
+ def verify_file(self, path):
+ # hardcode exclusion for TOML to prevent partial parsing of things we know we don't want
+ return super().verify_file(path) and os.path.splitext(path)[1] != '.toml'
- super(InventoryModule, self).parse(inventory, loader, path)
+ def parse(self, inventory, loader, path: str, cache=True):
- self._filename = path
+ super(InventoryModule, self).parse(inventory, loader, path)
try:
# Read in the hosts, groups, and variables defined in the inventory file.
@@ -132,14 +139,20 @@ class InventoryModule(BaseFileInventoryPlugin):
# Non-comment lines still have to be valid utf-8
data.append(to_text(line, errors='surrogate_or_strict'))
- self._parse(path, data)
- except Exception as e:
- raise AnsibleParserError(e)
+ self._origin = Origin(path=path, line_num=0)
+
+ try:
+ self._parse(data)
+ finally:
+ self._origin = self._origin.replace(line_num=None)
+
+ except Exception as ex:
+ raise AnsibleParserError('Failed to parse inventory.', obj=self._origin) from ex
def _raise_error(self, message):
- raise AnsibleError("%s:%d: " % (self._filename, self.lineno) + message)
+ raise AnsibleError(message)
- def _parse(self, path, lines):
+ def _parse(self, lines):
"""
Populates self.groups from the given array of lines. Raises an error on
any parse failure.
@@ -155,9 +168,8 @@ class InventoryModule(BaseFileInventoryPlugin):
pending_declarations = {}
groupname = 'ungrouped'
state = 'hosts'
- self.lineno = 0
for line in lines:
- self.lineno += 1
+ self._origin = self._origin.replace(line_num=self._origin.line_num + 1)
line = line.strip()
# Skip empty lines and comments
@@ -189,7 +201,7 @@ class InventoryModule(BaseFileInventoryPlugin):
# declarations will take the appropriate action for a pending child group instead of
# incorrectly handling it as a var state pending declaration
if state == 'vars' and groupname not in pending_declarations:
- pending_declarations[groupname] = dict(line=self.lineno, state=state, name=groupname)
+ pending_declarations[groupname] = dict(line=self._origin.line_num, state=state, name=groupname)
self.inventory.add_group(groupname)
@@ -229,7 +241,7 @@ class InventoryModule(BaseFileInventoryPlugin):
child = self._parse_group_name(line)
if child not in self.inventory.groups:
if child not in pending_declarations:
- pending_declarations[child] = dict(line=self.lineno, state=state, name=child, parents=[groupname])
+ pending_declarations[child] = dict(line=self._origin.line_num, state=state, name=child, parents=[groupname])
else:
pending_declarations[child]['parents'].append(groupname)
else:
@@ -242,10 +254,11 @@ class InventoryModule(BaseFileInventoryPlugin):
# We report only the first such error here.
for g in pending_declarations:
decl = pending_declarations[g]
+ self._origin = self._origin.replace(line_num=decl['line'])
if decl['state'] == 'vars':
- raise AnsibleError("%s:%d: Section [%s:vars] not valid for undefined group: %s" % (path, decl['line'], decl['name'], decl['name']))
+ raise ValueError(f"Section [{decl['name']}:vars] not valid for undefined group {decl['name']!r}.")
elif decl['state'] == 'children':
- raise AnsibleError("%s:%d: Section [%s:children] includes undefined group: %s" % (path, decl['line'], decl['parents'].pop(), decl['name']))
+ raise ValueError(f"Section [{decl['parents'][-1]}:children] includes undefined group {decl['name']!r}.")
def _add_pending_children(self, group, pending):
for parent in pending[group]['parents']:
@@ -279,7 +292,7 @@ class InventoryModule(BaseFileInventoryPlugin):
if '=' in line:
(k, v) = [e.strip() for e in line.split("=", 1)]
- return (k, self._parse_value(v))
+ return (self._origin.tag(k), self._parse_value(v))
self._raise_error("Expected key=value, got: %s" % (line))
@@ -312,7 +325,7 @@ class InventoryModule(BaseFileInventoryPlugin):
if '=' not in t:
self._raise_error("Expected key=value host variable assignment, got: %s" % (t))
(k, v) = t.split('=', 1)
- variables[k] = self._parse_value(v)
+ variables[self._origin.tag(k)] = self._parse_value(v)
return hostnames, port, variables
@@ -334,8 +347,27 @@ class InventoryModule(BaseFileInventoryPlugin):
return (hostnames, port)
- @staticmethod
- def _parse_value(v):
+ def _parse_recursive_coerce_types_and_tag(self, value: t.Any) -> t.Any:
+ if isinstance(value, str):
+ return TrustedAsTemplate().tag(self._origin.tag(value))
+ if isinstance(value, (list, tuple, set)):
+            # NB: tuple/set values are intentionally coerced to list
+ return self._origin.tag([self._parse_recursive_coerce_types_and_tag(v) for v in value])
+ if isinstance(value, dict):
+ # FIXME: enforce keys are strings
+ return self._origin.tag({self._origin.tag(k): self._parse_recursive_coerce_types_and_tag(v) for k, v in value.items()})
+
+ if value is ...: # literal_eval parses ellipsis, but it's not a supported variable type
+ value = TrustedAsTemplate().tag("...")
+
+ if isinstance(value, complex): # convert unsupported variable types recognized by literal_eval back to str
+ value = TrustedAsTemplate().tag(str(value))
+
+ value = to_text(value, nonstring='passthru', errors='surrogate_or_strict')
+
+ return self._origin.tag(value)
+
+ def _parse_value(self, v: str) -> t.Any:
"""
Attempt to transform the string value from an ini file into a basic python object
(int, dict, list, unicode string, etc).
@@ -352,7 +384,9 @@ class InventoryModule(BaseFileInventoryPlugin):
except SyntaxError:
# Is this a hash with an equals at the end?
pass
- return to_text(v, nonstring='passthru', errors='surrogate_or_strict')
+
+ # this is mostly unnecessary, but prevents the (possible) case of bytes literals showing up in inventory
+ return self._parse_recursive_coerce_types_and_tag(v)
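
What _parse_value feeds through ast.literal_eval for a few representative ini values (origin/trust tags elided from the results):

    import ast

    ast.literal_eval("{'a': 1}")  # dict: keys and values get tagged recursively
    ast.literal_eval('3+4j')      # complex: converted back to the trusted string '3+4j'
    ast.literal_eval('...')       # Ellipsis: converted back to the trusted string '...'
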
def _compile_patterns(self):
"""
diff --git a/lib/ansible/plugins/inventory/script.py b/lib/ansible/plugins/inventory/script.py
index 9c8ecf54541..6aac35022f5 100644
--- a/lib/ansible/plugins/inventory/script.py
+++ b/lib/ansible/plugins/inventory/script.py
@@ -98,7 +98,7 @@ EXAMPLES = r'''# fmt: code
def get_api_data(namespace: str, pretty=False) -> str:
"""
:param namespace: parameter for our custom api
- :param pretty: Human redable JSON vs machine readable
+ :param pretty: Human readable JSON vs machine readable
:return: JSON string
"""
found_data = list(MyInventoryAPI(namespace))
@@ -153,148 +153,136 @@ EXAMPLES = r'''# fmt: code
'''
+import json
import os
+import shlex
import subprocess
+import typing as t
-from collections.abc import Mapping
-
-from ansible.errors import AnsibleError, AnsibleParserError
-from ansible.module_utils.basic import json_dict_bytes_to_unicode
-from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.errors import AnsibleError, AnsibleJSONParserError
+from ansible.inventory.data import InventoryData
+from ansible.module_utils.datatag import native_type_name
+from ansible.module_utils.common.json import get_decoder
+from ansible.parsing.dataloader import DataLoader
from ansible.plugins.inventory import BaseInventoryPlugin
+from ansible._internal._datatag._tags import TrustedAsTemplate, Origin
from ansible.utils.display import Display
+from ansible._internal._json._profiles import _legacy, _inventory_legacy
display = Display()
class InventoryModule(BaseInventoryPlugin):
- """ Host inventory parser for ansible using external inventory scripts. """
+ """Host inventory parser for ansible using external inventory scripts."""
NAME = 'script'
- def __init__(self):
-
+ def __init__(self) -> None:
super(InventoryModule, self).__init__()
- self._hosts = set()
-
- def verify_file(self, path):
- """ Verify if file is usable by this plugin, base does minimal accessibility check """
-
- valid = super(InventoryModule, self).verify_file(path)
-
- if valid:
- # not only accessible, file must be executable and/or have shebang
- shebang_present = False
- try:
- with open(path, 'rb') as inv_file:
- initial_chars = inv_file.read(2)
- if initial_chars.startswith(b'#!'):
- shebang_present = True
- except Exception:
- pass
+ self._hosts: set[str] = set()
- if not os.access(path, os.X_OK) and not shebang_present:
- valid = False
-
- return valid
-
- def parse(self, inventory, loader, path, cache=None):
+    def verify_file(self, path: str) -> bool:
+        return super(InventoryModule, self).verify_file(path) and os.access(path, os.X_OK)
+
+    def parse(self, inventory: InventoryData, loader: DataLoader, path: str, cache: bool = False) -> None:
super(InventoryModule, self).parse(inventory, loader, path)
- self.set_options()
- # Support inventory scripts that are not prefixed with some
- # path information but happen to be in the current working
- # directory when '.' is not in PATH.
- cmd = [path, "--list"]
-
- try:
- try:
- sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- except OSError as e:
- raise AnsibleParserError("problem running %s (%s)" % (' '.join(cmd), to_native(e)))
- (stdout, stderr) = sp.communicate()
+ self.set_options()
- path = to_native(path)
- err = to_native(stderr or "")
+        origin = Origin(description=f'<inventory script output from {path}>')
- if err and not err.endswith('\n'):
- err += '\n'
+ data, stderr, stderr_help_text = run_command(path, ['--list'], origin)
- if sp.returncode != 0:
- raise AnsibleError("Inventory script (%s) had an execution error: %s " % (path, err))
+ try:
+ profile_name = detect_profile_name(data)
+ decoder = get_decoder(profile_name)
+ except Exception as ex:
+ raise AnsibleError(
+ message="Unable to get JSON decoder for inventory script result.",
+ help_text=stderr_help_text,
+ # obj will be added by inventory manager
+ ) from ex
- # make sure script output is unicode so that json loader will output unicode strings itself
+ try:
try:
- data = to_text(stdout, errors="strict")
- except Exception as e:
- raise AnsibleError("Inventory {0} contained characters that cannot be interpreted as UTF-8: {1}".format(path, to_native(e)))
+ processed = json.loads(data, cls=decoder)
+ except Exception as json_ex:
+ AnsibleJSONParserError.handle_exception(json_ex, origin)
+ except Exception as ex:
+ raise AnsibleError(
+ message="Inventory script result could not be parsed as JSON.",
+ help_text=stderr_help_text,
+ # obj will be added by inventory manager
+ ) from ex
+
+        # if no other errors happened, force displaying stderr now if configured to do so
+ if stderr and self.get_option('always_show_stderr'):
+ self.display.error(msg=stderr)
+
+ data_from_meta: dict | None = None
+
+ # A "_meta" subelement may contain a variable "hostvars" which contains a hash for each host
+ # if this "hostvars" exists at all then do not call --host for each # host.
+ # This is for efficiency and scripts should still return data
+ # if called with --host for backwards compat with 1.2 and earlier.
+ for (group, gdata) in processed.items():
+ if group == '_meta':
+ data_from_meta = gdata.get('hostvars')
+
+ if not isinstance(data_from_meta, dict):
+ raise TypeError(f"Value contains '_meta.hostvars' which is {native_type_name(data_from_meta)!r} instead of {native_type_name(dict)!r}.")
+ else:
+ self._parse_group(group, gdata, origin)
+
+ if data_from_meta is None:
+ display.deprecated(
+ msg="Inventory scripts should always provide 'meta.hostvars'. "
+ "Host variables will be collected by running the inventory script with the '--host' option for each host.",
+ version='2.23',
+ obj=origin,
+ )
+
+ for host in self._hosts:
+ if data_from_meta is None:
+ got = self.get_host_variables(path, host, origin)
+ else:
+ got = data_from_meta.get(host, {})
- try:
- processed = self.loader.load(data, json_only=True)
- except Exception as e:
- raise AnsibleError("failed to parse executable inventory script results from {0}: {1}\n{2}".format(path, to_native(e), err))
-
- # if no other errors happened and you want to force displaying stderr, do so now
- if stderr and self.get_option('always_show_stderr'):
- self.display.error(msg=to_text(err))
-
- if not isinstance(processed, Mapping):
- raise AnsibleError("failed to parse executable inventory script results from {0}: needs to be a json dict\n{1}".format(path, err))
-
- group = None
- data_from_meta = None
-
- # A "_meta" subelement may contain a variable "hostvars" which contains a hash for each host
- # if this "hostvars" exists at all then do not call --host for each # host.
- # This is for efficiency and scripts should still return data
- # if called with --host for backwards compat with 1.2 and earlier.
- for (group, gdata) in processed.items():
- if group == '_meta':
- if 'hostvars' in gdata:
- data_from_meta = gdata['hostvars']
- else:
- self._parse_group(group, gdata)
-
- for host in self._hosts:
- got = {}
- if data_from_meta is None:
- got = self.get_host_variables(path, host)
- else:
- try:
- got = data_from_meta.get(host, {})
- except AttributeError as e:
- raise AnsibleError("Improperly formatted host information for %s: %s" % (host, to_native(e)), orig_exc=e)
-
- self._populate_host_vars([host], got)
-
- except Exception as e:
- raise AnsibleParserError(to_native(e))
-
- def _parse_group(self, group, data):
+ self._populate_host_vars([host], got)
+ def _parse_group(self, group: str, data: t.Any, origin: Origin) -> None:
+ """Normalize and ingest host/var information for the named group."""
group = self.inventory.add_group(group)
if not isinstance(data, dict):
data = {'hosts': data}
- # is not those subkeys, then simplified syntax, host with vars
+ display.deprecated(
+ msg=f"Group {group!r} was converted to {native_type_name(dict)!r} from {native_type_name(data)!r}.",
+ version='2.23',
+ obj=origin,
+ )
elif not any(k in data for k in ('hosts', 'vars', 'children')):
data = {'hosts': [group], 'vars': data}
+ display.deprecated(
+ msg=f"Treating malformed group {group!r} as the sole host of that group. Variables provided in this manner cannot be templated.",
+ version='2.23',
+ obj=origin,
+ )
- if 'hosts' in data:
- if not isinstance(data['hosts'], list):
- raise AnsibleError("You defined a group '%s' with bad data for the host list:\n %s" % (group, data))
+ if (data_hosts := data.get('hosts', ...)) is not ...:
+ if not isinstance(data_hosts, list):
+ raise TypeError(f"Value contains '{group}.hosts' which is {native_type_name(data_hosts)!r} instead of {native_type_name(list)!r}.")
- for hostname in data['hosts']:
+ for hostname in data_hosts:
self._hosts.add(hostname)
self.inventory.add_host(hostname, group)
- if 'vars' in data:
- if not isinstance(data['vars'], dict):
- raise AnsibleError("You defined a group '%s' with bad data for variables:\n %s" % (group, data))
+ if (data_vars := data.get('vars', ...)) is not ...:
+ if not isinstance(data_vars, dict):
+ raise TypeError(f"Value contains '{group}.vars' which is {native_type_name(data_vars)!r} instead of {native_type_name(dict)!r}.")
- for k, v in data['vars'].items():
+ for k, v in data_vars.items():
self.inventory.set_variable(group, k, v)
if group != '_meta' and isinstance(data, dict) and 'children' in data:
@@ -302,22 +290,102 @@ class InventoryModule(BaseInventoryPlugin):
child_name = self.inventory.add_group(child_name)
self.inventory.add_child(group, child_name)
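
The script output shape that avoids the deprecated per-host '--host' calls, written as a Python literal of the expected JSON (names and values are examples):

    script_output = {
        '_meta': {'hostvars': {'web01': {'http_port': 8080}}},
        'webservers': {'hosts': ['web01'], 'vars': {'tier': 'frontend'}},
    }
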
- def get_host_variables(self, path, host):
- """ Runs