diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml
index 14ce6a240a9..d2078a43cf0 100644
--- a/.azure-pipelines/azure-pipelines.yml
+++ b/.azure-pipelines/azure-pipelines.yml
@@ -31,7 +31,7 @@ variables:
- name: fetchDepth
value: 500
- name: defaultContainer
- value: quay.io/ansible/azure-pipelines-test-container:4.0.1
+ value: quay.io/ansible/azure-pipelines-test-container:6.0.0
pool: Standard
@@ -59,6 +59,7 @@ stages:
- test: '3.10'
- test: 3.11
- test: 3.12
+ - test: 3.13
- stage: Windows
dependsOn: []
jobs:
@@ -67,9 +68,16 @@ stages:
nameFormat: Server {0}
testFormat: windows/{0}/1
targets:
- - test: 2016
- - test: 2019
- - test: 2022
+ - name: 2016 WinRM HTTP
+ test: 2016/winrm/http
+ - name: 2019 WinRM HTTPS
+ test: 2019/winrm/https
+ - name: 2022 WinRM HTTPS
+ test: 2022/winrm/https
+ - name: 2022 PSRP HTTP
+ test: 2022/psrp/http
+ - name: 2022 SSH Key
+ test: 2022/ssh/key
- stage: Remote
dependsOn: []
jobs:
@@ -78,14 +86,14 @@ stages:
targets:
- name: macOS 14.3
test: macos/14.3
- - name: RHEL 9.3 py39
- test: rhel/9.3@3.9
- - name: RHEL 9.3 py311
- test: rhel/9.3@3.11
+ - name: RHEL 9.4 py39
+ test: rhel/9.4@3.9
+ - name: RHEL 9.4 py312
+ test: rhel/9.4@3.12
- name: FreeBSD 13.3
test: freebsd/13.3
- - name: FreeBSD 14.0
- test: freebsd/14.0
+ - name: FreeBSD 14.1
+ test: freebsd/14.1
groups:
- 1
- 2
@@ -94,12 +102,12 @@ stages:
targets:
- name: macOS 14.3
test: macos/14.3
- - name: RHEL 9.3
- test: rhel/9.3
+ - name: RHEL 9.4
+ test: rhel/9.4
- name: FreeBSD 13.3
test: freebsd/13.3
- - name: FreeBSD 14.0
- test: freebsd/14.0
+ - name: FreeBSD 14.1
+ test: freebsd/14.1
groups:
- 3
- 4
@@ -107,44 +115,44 @@ stages:
- template: templates/matrix.yml # context/controller (ansible-test container management)
parameters:
targets:
- - name: Alpine 3.19
- test: alpine/3.19
- - name: Fedora 39
- test: fedora/39
- - name: RHEL 9.3
- test: rhel/9.3
- - name: Ubuntu 22.04
- test: ubuntu/22.04
+ - name: Alpine 3.20
+ test: alpine/3.20
+ - name: Fedora 40
+ test: fedora/40
+ - name: RHEL 9.4
+ test: rhel/9.4
+ - name: Ubuntu 24.04
+ test: ubuntu/24.04
groups:
- 6
- stage: Docker
dependsOn: []
jobs:
- - template: templates/matrix.yml
+ - template: templates/matrix.yml # context/target
parameters:
testFormat: linux/{0}
targets:
- - name: Alpine 3.19
- test: alpine319
- - name: Fedora 39
- test: fedora39
- - name: Ubuntu 20.04
- test: ubuntu2004
+ - name: Alpine 3.20
+ test: alpine320
+ - name: Fedora 40
+ test: fedora40
- name: Ubuntu 22.04
test: ubuntu2204
+ - name: Ubuntu 24.04
+ test: ubuntu2404
groups:
- 1
- 2
- - template: templates/matrix.yml
+ - template: templates/matrix.yml # context/controller
parameters:
testFormat: linux/{0}
targets:
- - name: Alpine 3.19
- test: alpine319
- - name: Fedora 39
- test: fedora39
- - name: Ubuntu 22.04
- test: ubuntu2204
+ - name: Alpine 3.20
+ test: alpine320
+ - name: Fedora 40
+ test: fedora40
+ - name: Ubuntu 24.04
+ test: ubuntu2404
groups:
- 3
- 4
@@ -157,9 +165,9 @@ stages:
nameFormat: Python {0}
testFormat: galaxy/{0}/1
targets:
- - test: '3.10'
- test: 3.11
- test: 3.12
+ - test: 3.13
- stage: Generic
dependsOn: []
jobs:
@@ -168,9 +176,9 @@ stages:
nameFormat: Python {0}
testFormat: generic/{0}/1
targets:
- - test: '3.10'
- test: 3.11
- test: 3.12
+ - test: 3.13
- stage: Incidental_Windows
displayName: Incidental Windows
dependsOn: []
@@ -180,9 +188,16 @@ stages:
nameFormat: Server {0}
testFormat: i/windows/{0}
targets:
- - test: 2016
- - test: 2019
- - test: 2022
+ - name: 2016 WinRM HTTP
+ test: 2016/winrm/http
+ - name: 2019 WinRM HTTPS
+ test: 2019/winrm/https
+ - name: 2022 WinRM HTTPS
+ test: 2022/winrm/https
+ - name: 2022 PSRP HTTP
+ test: 2022/psrp/http
+ - name: 2022 SSH Key
+ test: 2022/ssh/key
- stage: Incidental
dependsOn: []
jobs:
@@ -192,8 +207,6 @@ stages:
targets:
- name: IOS Python
test: ios/csr1000v/
- - name: VyOS Python
- test: vyos/1.1.8/
- stage: Summary
condition: succeededOrFailed()
dependsOn:
diff --git a/.azure-pipelines/commands/incidental/vyos.sh b/.azure-pipelines/commands/incidental/vyos.sh
deleted file mode 120000
index cad3e41b707..00000000000
--- a/.azure-pipelines/commands/incidental/vyos.sh
+++ /dev/null
@@ -1 +0,0 @@
-network.sh
\ No newline at end of file
diff --git a/.azure-pipelines/commands/incidental/windows.sh b/.azure-pipelines/commands/incidental/windows.sh
index 24272f62baf..f5a3070c457 100755
--- a/.azure-pipelines/commands/incidental/windows.sh
+++ b/.azure-pipelines/commands/incidental/windows.sh
@@ -6,6 +6,8 @@ declare -a args
IFS='/:' read -ra args <<< "$1"
version="${args[1]}"
+connection="${args[2]}"
+connection_setting="${args[3]}"
target="shippable/windows/incidental/"
@@ -26,11 +28,7 @@ if [ -s /tmp/windows.txt ] || [ "${CHANGED:+$CHANGED}" == "" ]; then
echo "Detected changes requiring integration tests specific to Windows:"
cat /tmp/windows.txt
- echo "Running Windows integration tests for multiple versions concurrently."
-
- platforms=(
- --windows "${version}"
- )
+ echo "Running Windows integration tests for the version ${version}."
else
echo "No changes requiring integration tests specific to Windows were detected."
echo "Running Windows integration tests for a single version only: ${single_version}"
@@ -39,14 +37,10 @@ else
echo "Skipping this job since it is for: ${version}"
exit 0
fi
-
- platforms=(
- --windows "${version}"
- )
fi
# shellcheck disable=SC2086
ansible-test windows-integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
- "${platforms[@]}" \
- --docker default --python "${python_default}" \
- --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
+ --controller "docker:default,python=${python_default}" \
+ --target "remote:windows/${version},connection=${connection}+${connection_setting},provider=${provider}" \
+ --remote-terminate always --remote-stage "${stage}"
diff --git a/.azure-pipelines/commands/windows.sh b/.azure-pipelines/commands/windows.sh
index 693d4f24bdc..622eb9e2d5e 100755
--- a/.azure-pipelines/commands/windows.sh
+++ b/.azure-pipelines/commands/windows.sh
@@ -6,7 +6,9 @@ declare -a args
IFS='/:' read -ra args <<< "$1"
version="${args[1]}"
-group="${args[2]}"
+connection="${args[2]}"
+connection_setting="${args[3]}"
+group="${args[4]}"
target="shippable/windows/group${group}/"
@@ -31,11 +33,7 @@ if [ -s /tmp/windows.txt ] || [ "${CHANGED:+$CHANGED}" == "" ]; then
echo "Detected changes requiring integration tests specific to Windows:"
cat /tmp/windows.txt
- echo "Running Windows integration tests for multiple versions concurrently."
-
- platforms=(
- --windows "${version}"
- )
+ echo "Running Windows integration tests for the version ${version}."
else
echo "No changes requiring integration tests specific to Windows were detected."
echo "Running Windows integration tests for a single version only: ${single_version}"
@@ -44,17 +42,13 @@ else
echo "Skipping this job since it is for: ${version}"
exit 0
fi
-
- platforms=(
- --windows "${version}"
- )
fi
-for version in "${python_versions[@]}"; do
+for py_version in "${python_versions[@]}"; do
changed_all_target="all"
changed_all_mode="default"
- if [ "${version}" == "${python_default}" ]; then
+ if [ "${py_version}" == "${python_default}" ]; then
# smoketest tests
if [ "${CHANGED}" ]; then
# with change detection enabled run tests for anything changed
@@ -80,7 +74,7 @@ for version in "${python_versions[@]}"; do
fi
# terminate remote instances on the final python version tested
- if [ "${version}" = "${python_versions[-1]}" ]; then
+ if [ "${py_version}" = "${python_versions[-1]}" ]; then
terminate="always"
else
terminate="never"
@@ -88,7 +82,8 @@ for version in "${python_versions[@]}"; do
# shellcheck disable=SC2086
ansible-test windows-integration --color -v --retry-on-error "${ci}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
- "${platforms[@]}" --changed-all-target "${changed_all_target}" --changed-all-mode "${changed_all_mode}" \
- --docker default --python "${version}" \
- --remote-terminate "${terminate}" --remote-stage "${stage}" --remote-provider "${provider}"
+ --changed-all-target "${changed_all_target}" --changed-all-mode "${changed_all_mode}" \
+ --controller "docker:default,python=${py_version}" \
+ --target "remote:windows/${version},connection=${connection}+${connection_setting},provider=${provider}" \
+ --remote-terminate "${terminate}" --remote-stage "${stage}"
done
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 35b4523e017..fc15ea5dfc2 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -4,9 +4,14 @@ Hi! Nice to see you here!
## QUESTIONS ?
-Please see the [Community Guide](https://docs.ansible.com/ansible/latest/community/index.html) for information on how to ask questions on the [mailing lists](https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information) and IRC.
+If you have questions about anything related to Ansible, get in touch with us!
+See [Communicating with the Ansible community](https://docs.ansible.com/ansible/devel/community/communication.html) to find out how.
-The GitHub issue tracker is not the best place for questions for various reasons, but both IRC and the mailing list are very helpful places for those things, as the community page explains best.
+The [Community Guide](https://docs.ansible.com/ansible/devel/community/index.html) also explains how to contribute
+and interact with the project, including how to submit bug reports and code to Ansible.
+
+Please note that the GitHub issue tracker is not the best place to ask questions for several reasons.
+You'll get more helpful, and quicker, responses in the forum.
## CONTRIBUTING ?
@@ -14,15 +19,18 @@ By contributing to this project you agree to the [Developer Certificate of Origi
The Ansible project is licensed under the [GPL-3.0](COPYING) or later. Some portions of the code fall under other licenses as noted in individual files.
-The Ansible project accepts contributions through GitHub pull requests. Please review the [Community Guide](https://docs.ansible.com/ansible/latest/community/index.html) for more information on contributing to Ansible.
+The Ansible project accepts contributions through GitHub pull requests.
+Please review the [Community Guide](https://docs.ansible.com/ansible/devel/community/index.html) for more information on contributing to Ansible.
## BUG TO REPORT ?
-First and foremost, also check the [Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).
+First and foremost, also check the [Community Guide](https://docs.ansible.com/ansible/devel/community/index.html).
-You can report bugs or make enhancement requests at the [Ansible GitHub issue page](http://github.com/ansible/ansible/issues/new/choose) by filling out the issue template that will be presented.
+You can report bugs or make enhancement requests at
+the [Ansible GitHub issue page](http://github.com/ansible/ansible/issues/new/choose) by filling out the issue template that will be presented.
-Also please make sure you are testing on the latest released version of Ansible or the development branch; see the [Installation Guide](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) for details.
+Also please make sure you are testing on the latest released version of Ansible or the development branch.
+See the [Installation Guide](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) for details.
Thanks!
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index 3159784d158..8f4944c43c0 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -19,13 +19,14 @@ body:
Also test if the latest release and devel branch are affected too.
- **Tip:** If you are seeking community support, please consider
- [starting a mailing list thread or chatting in IRC][ML||IRC].
+ **Tip:** If you are seeking community support, please see
+ [Communicating with the Ansible community][communication] to
+ get in touch and ask questions.
- [ML||IRC]:
- https://docs.ansible.com/ansible-core/devel/community/communication.html?utm_medium=github&utm_source=issue_form--bug_report.yml#mailing-list-information
+ [communication]:
+ https://docs.ansible.com/ansible/devel/community/communication.html
[issue search]: ../search?q=is%3Aissue&type=issues
@@ -54,7 +55,7 @@ body:
Why?
- We would do it by ourselves but unfortunatelly, the curent
+ We would do it by ourselves but unfortunately, the current
edition of GitHub Issue Forms Alpha does not support this yet 🤷
@@ -258,7 +259,7 @@ body:
description: |
Read the [Ansible Code of Conduct][CoC] first.
- [CoC]: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--bug_report.yml
+ [CoC]: https://docs.ansible.com/ansible/devel/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--bug_report.yml
options:
- label: I agree to follow the Ansible Code of Conduct
required: true
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index 74ec5696fdf..6aa4a2b7647 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -2,7 +2,7 @@
blank_issues_enabled: false # default: true
contact_links:
- name: 🔐 Security bug report 🔥
- url: https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser
+ url: https://docs.ansible.com/ansible/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser
about: |
Please learn how to report security vulnerabilities here.
@@ -11,12 +11,12 @@ contact_links:
a prompt response.
For more information, see
- https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html
+ https://docs.ansible.com/ansible/devel/community/reporting_bugs_and_features.html
- name: 📝 Ansible Code of Conduct
- url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser
+ url: https://docs.ansible.com/ansible/devel/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser
about: ❤ Be nice to other members of the community. ☮ Behave.
-- name: 💬 Talks to the community
- url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information
+- name: 💬 Talk to the community
+ url: https://docs.ansible.com/ansible/devel/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information
about: Please ask and answer usage questions here
- name: ⚡ Working groups
url: https://github.com/ansible/community/wiki
diff --git a/.github/ISSUE_TEMPLATE/documentation_report.yml b/.github/ISSUE_TEMPLATE/documentation_report.yml
index ca62bb55a77..efe8d1c2035 100644
--- a/.github/ISSUE_TEMPLATE/documentation_report.yml
+++ b/.github/ISSUE_TEMPLATE/documentation_report.yml
@@ -22,12 +22,14 @@ body:
Also test if the latest release and devel branch are affected too.
- **Tip:** If you are seeking community support, please consider
- [starting a mailing list thread or chatting in IRC][ML||IRC].
+ **Tip:** If you are seeking community support, please see
+ [Communicating with the Ansible community][communication] to
+ get in touch and ask questions.
- [ML||IRC]:
- https://docs.ansible.com/ansible-core/devel/community/communication.html?utm_medium=github&utm_source=issue_form--documentation_report.yml#mailing-list-information
+
+ [communication]:
+ https://docs.ansible.com/ansible/devel/community/communication.html
[issue search]: ../search?q=is%3Aissue&type=issues
@@ -205,7 +207,7 @@ body:
description: |
Read the [Ansible Code of Conduct][CoC] first.
- [CoC]: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--documentation_report.yml
+ [CoC]: https://docs.ansible.com/ansible/devel/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--documentation_report.yml
options:
- label: I agree to follow the Ansible Code of Conduct
required: true
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
index dd39c40de1c..2fce680fe64 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.yml
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -21,8 +21,7 @@ body:
If unsure, consider filing a [new proposal] instead outlining your
use-cases, the research and implementation considerations. Then,
- start a discussion on one of the public [IRC meetings] we have just
- for this.
+  start a discussion in the [Ansible forum][forum help].
@@ -44,21 +43,22 @@ body:
Also test if the devel branch does not already implement this.
- **Tip:** If you are seeking community support, please consider
- [starting a mailing list thread or chatting in IRC][ML||IRC].
+ **Tip:** If you are seeking community support, please see
+ [Communicating with the Ansible community][communication] to
+ get in touch and ask questions.
[contribute to collections]:
https://docs.ansible.com/ansible-core/devel/community/contributing_maintained_collections.html?utm_medium=github&utm_source=issue_form--feature_request.yml
- [IRC meetings]:
- https://docs.ansible.com/ansible-core/devel/community/communication.html?utm_medium=github&utm_source=issue_form--feature_request.yml#irc-meetings
+ [communication]:
+ https://docs.ansible.com/ansible/devel/community/communication.html
[issue search]: ../search?q=is%3Aissue&type=issues
- [ML||IRC]:
- https://docs.ansible.com/ansible-core/devel/community/communication.html?utm_medium=github&utm_source=issue_form--feature_request.yml#mailing-list-information
+ [forum help]:
+ https://forum.ansible.com/c/help/6
[new proposal]: ../../proposals/issues/new
@@ -109,7 +109,7 @@ body:
Why?
- We would do it by ourselves but unfortunatelly, the curent
+ We would do it by ourselves but unfortunately, the current
edition of GitHub Issue Forms Alpha does not support this yet 🤷
@@ -185,7 +185,7 @@ body:
description: |
Read the [Ansible Code of Conduct][CoC] first.
- [CoC]: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--feature_request.yml
+ [CoC]: https://docs.ansible.com/ansible/devel/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--feature_request.yml
options:
- label: I agree to follow the Ansible Code of Conduct
required: true
diff --git a/.gitignore b/.gitignore
index 8b244f60ee7..57019fd1ab6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -92,6 +92,8 @@ Vagrantfile
/lib/ansible_base.egg-info/
# First used in the `devel` branch during Ansible 2.11 development.
/lib/ansible_core.egg-info/
+# First used in the `devel` branch during Ansible 2.18 development.
+/ansible_core.egg-info/
# vendored lib dir
lib/ansible/_vendor/*
!lib/ansible/_vendor/__init__.py
diff --git a/MANIFEST.in b/MANIFEST.in
index bf7a6a047e2..fa609f52e9a 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,11 +1,10 @@
include COPYING
-include bin/*
include changelogs/CHANGELOG*.rst
include changelogs/changelog.yaml
include licenses/*.txt
include requirements.txt
recursive-include packaging *.py *.j2
recursive-include test/integration *
-recursive-include test/sanity *.in *.json *.py *.txt
+recursive-include test/sanity *.in *.json *.py *.txt *.ini
recursive-include test/support *.py *.ps1 *.psm1 *.cs *.md
recursive-include test/units *
diff --git a/README.md b/README.md
index 4db066f0901..9685e77748d 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,9 @@
[![PyPI version](https://img.shields.io/pypi/v/ansible-core.svg)](https://pypi.org/project/ansible-core)
[![Docs badge](https://img.shields.io/badge/docs-latest-brightgreen.svg)](https://docs.ansible.com/ansible/latest/)
-[![Chat badge](https://img.shields.io/badge/chat-IRC-brightgreen.svg)](https://docs.ansible.com/ansible/latest/community/communication.html)
+[![Chat badge](https://img.shields.io/badge/chat-IRC-brightgreen.svg)](https://docs.ansible.com/ansible/devel/community/communication.html)
[![Build Status](https://dev.azure.com/ansible/ansible/_apis/build/status/CI?branchName=devel)](https://dev.azure.com/ansible/ansible/_build/latest?definitionId=20&branchName=devel)
-[![Ansible Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-Ansible-silver.svg)](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html)
-[![Ansible mailing lists](https://img.shields.io/badge/mailing%20lists-Ansible-orange.svg)](https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information)
+[![Ansible Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-Ansible-silver.svg)](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html)
+[![Ansible mailing lists](https://img.shields.io/badge/mailing%20lists-Ansible-orange.svg)](https://docs.ansible.com/ansible/devel/community/communication.html#mailing-list-information)
[![Repository License](https://img.shields.io/badge/license-GPL%20v3.0-brightgreen.svg)](COPYING)
[![Ansible CII Best Practices certification](https://bestpractices.coreinfrastructure.org/projects/2372/badge)](https://bestpractices.coreinfrastructure.org/projects/2372)
@@ -40,21 +40,33 @@ features and fixes, directly. Although it is reasonably stable, you are more lik
breaking changes when running the `devel` branch. We recommend getting involved
in the Ansible community if you want to run the `devel` branch.
-## Get Involved
+## Communication
-* Read [Community Information](https://docs.ansible.com/ansible/latest/community) for all
+Join the Ansible forum to ask questions, get help, and interact with the
+community.
+
+* [Get Help](https://forum.ansible.com/c/help/6): Find help or share your Ansible knowledge to help others.
+ Use tags to filter and subscribe to posts, such as the following:
+ * Posts tagged with [ansible](https://forum.ansible.com/tag/ansible)
+ * Posts tagged with [ansible-core](https://forum.ansible.com/tag/ansible-core)
+ * Posts tagged with [playbook](https://forum.ansible.com/tag/playbook)
+* [Social Spaces](https://forum.ansible.com/c/chat/4): Meet and interact with fellow enthusiasts.
+* [News & Announcements](https://forum.ansible.com/c/news/5): Track project-wide announcements including social events.
+* [Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn): Get release announcements and important changes.
+
+For more ways to get in touch, see [Communicating with the Ansible community](https://docs.ansible.com/ansible/devel/community/communication.html).
+
+## Contribute to Ansible
+
+* Check out the [Contributor's Guide](./.github/CONTRIBUTING.md).
+* Read [Community Information](https://docs.ansible.com/ansible/devel/community) for all
kinds of ways to contribute to and interact with the project,
- including mailing list information and how to submit bug reports and
- code to Ansible.
-* Join a [Working Group](https://docs.ansible.com/ansible/devel/community/communication.html#working-groups),
- an organized community devoted to a specific technology domain or platform.
+ including how to submit bug reports and code to Ansible.
* Submit a proposed code update through a pull request to the `devel` branch.
* Talk to us before making larger changes
to avoid duplicate efforts. This not only helps everyone
know what is going on, but it also helps save time and effort if we decide
some changes are needed.
-* For a list of email lists, IRC channels and Working Groups, see the
- [Communication page](https://docs.ansible.com/ansible/devel/community/communication.html)
## Coding Guidelines
@@ -67,7 +79,7 @@ We document our Coding Guidelines in the [Developer Guide](https://docs.ansible.
* The `devel` branch corresponds to the release actively under development.
* The `stable-2.X` branches correspond to stable releases.
-* Create a branch based on `devel` and set up a [dev environment](https://docs.ansible.com/ansible/latest/dev_guide/developing_modules_general.html#common-environment-setup) if you want to open a PR.
+* Create a branch based on `devel` and set up a [dev environment](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_general.html#common-environment-setup) if you want to open a PR.
* See the [Ansible release and maintenance](https://docs.ansible.com/ansible/devel/reference_appendices/release_and_maintenance.html) page for information about active branches.
## Roadmap
diff --git a/bin/ansible-connection b/bin/ansible-connection
deleted file mode 120000
index a20affdbe6a..00000000000
--- a/bin/ansible-connection
+++ /dev/null
@@ -1 +0,0 @@
-../lib/ansible/cli/scripts/ansible_connection_cli_stub.py
\ No newline at end of file
diff --git a/changelogs/fragments/42960_vault_password.yml b/changelogs/fragments/42960_vault_password.yml
new file mode 100644
index 00000000000..db6b1b811d7
--- /dev/null
+++ b/changelogs/fragments/42960_vault_password.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+- vault - handle vault password file value when it is directory (https://github.com/ansible/ansible/issues/42960).
diff --git a/changelogs/fragments/46314.yml b/changelogs/fragments/46314.yml
new file mode 100644
index 00000000000..9958061e8f6
--- /dev/null
+++ b/changelogs/fragments/46314.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - apt - report changed=True when some packages are being removed (https://github.com/ansible/ansible/issues/46314).
diff --git a/changelogs/fragments/46742-atomic_move-fix-setgid.yml b/changelogs/fragments/46742-atomic_move-fix-setgid.yml
new file mode 100644
index 00000000000..4c408262b47
--- /dev/null
+++ b/changelogs/fragments/46742-atomic_move-fix-setgid.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - atomic_move - fix using the setgid bit on the parent directory when creating files (https://github.com/ansible/ansible/issues/46742, https://github.com/ansible/ansible/issues/67177).
diff --git a/changelogs/fragments/62151-loop_control-until.yml b/changelogs/fragments/62151-loop_control-until.yml
new file mode 100644
index 00000000000..70a17ee47ff
--- /dev/null
+++ b/changelogs/fragments/62151-loop_control-until.yml
@@ -0,0 +1,2 @@
+minor_changes:
+  - loop_control - add a break_when option to break out of a task loop early based on Jinja2 expressions (https://github.com/ansible/ansible/issues/83442).
diff --git a/changelogs/fragments/64092-get_url_verify_tmpsrc_checksum.yml b/changelogs/fragments/64092-get_url_verify_tmpsrc_checksum.yml
new file mode 100644
index 00000000000..8f650175411
--- /dev/null
+++ b/changelogs/fragments/64092-get_url_verify_tmpsrc_checksum.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - get_url - Verify checksum using tmpsrc, not dest (https://github.com/ansible/ansible/pull/64092)
diff --git a/changelogs/fragments/72321_git.yml b/changelogs/fragments/72321_git.yml
new file mode 100644
index 00000000000..8ba0ca7558c
--- /dev/null
+++ b/changelogs/fragments/72321_git.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+- git - check if git version is available or not before using it for comparison (https://github.com/ansible/ansible/issues/72321).
diff --git a/changelogs/fragments/81770-add-uid-guid-minmax-keys.yml b/changelogs/fragments/81770-add-uid-guid-minmax-keys.yml
new file mode 100644
index 00000000000..e617e6e22b4
--- /dev/null
+++ b/changelogs/fragments/81770-add-uid-guid-minmax-keys.yml
@@ -0,0 +1,3 @@
+minor_changes:
+ - Add ``uid_min``, ``uid_max`` to the user plugin to overwrite the defaults provided by the ``/etc/login.defs`` file (https://github.com/ansible/ansible/pull/81770).
+ - Add ``gid_min``, ``gid_max`` to the group plugin to overwrite the defaults provided by the ``/etc/login.defs`` file (https://github.com/ansible/ansible/pull/81770).
diff --git a/changelogs/fragments/82075.yml b/changelogs/fragments/82075.yml
new file mode 100644
index 00000000000..fccdd8eced8
--- /dev/null
+++ b/changelogs/fragments/82075.yml
@@ -0,0 +1,2 @@
+bugfixes:
+  - addressed the issue of trailing text being ignored, ensured non-ASCII characters are parsed, enhanced whitespace handling, and fixed the overly permissive behavior of the human_to_bytes filter (https://github.com/ansible/ansible/issues/82075)
diff --git a/changelogs/fragments/82307-handlers-lockstep-linear-fix.yml b/changelogs/fragments/82307-handlers-lockstep-linear-fix.yml
new file mode 100644
index 00000000000..da97a9753bb
--- /dev/null
+++ b/changelogs/fragments/82307-handlers-lockstep-linear-fix.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - Fix handlers not being executed in lockstep using the linear strategy in some cases (https://github.com/ansible/ansible/issues/82307)
diff --git a/changelogs/fragments/82490_creating_user_dir_using_tilde_always_reports_changed.yml b/changelogs/fragments/82490_creating_user_dir_using_tilde_always_reports_changed.yml
new file mode 100644
index 00000000000..f7abc1335c6
--- /dev/null
+++ b/changelogs/fragments/82490_creating_user_dir_using_tilde_always_reports_changed.yml
@@ -0,0 +1,2 @@
+bugfixes:
+  - fixed the issue where creating a user directory using a tilde (~) always reported "changed" (https://github.com/ansible/ansible/issues/82490).
diff --git a/changelogs/fragments/82535-properly-quote-shell.yml b/changelogs/fragments/82535-properly-quote-shell.yml
new file mode 100644
index 00000000000..be93f30c59d
--- /dev/null
+++ b/changelogs/fragments/82535-properly-quote-shell.yml
@@ -0,0 +1,2 @@
+bugfixes:
+- shell plugin - properly quote all needed components of shell commands (https://github.com/ansible/ansible/issues/82535)
diff --git a/changelogs/fragments/82671-ansible-doc-role-examples.yml b/changelogs/fragments/82671-ansible-doc-role-examples.yml
new file mode 100644
index 00000000000..7f041babc5b
--- /dev/null
+++ b/changelogs/fragments/82671-ansible-doc-role-examples.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - "ansible-doc - show examples in role entrypoint argument specs (https://github.com/ansible/ansible/pull/82671)."
diff --git a/changelogs/fragments/82831_countme_yum_repository.yml b/changelogs/fragments/82831_countme_yum_repository.yml
new file mode 100644
index 00000000000..7f6bec4c487
--- /dev/null
+++ b/changelogs/fragments/82831_countme_yum_repository.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - support the countme option when using yum_repository
diff --git a/changelogs/fragments/83019-linear-handlers-lockstep-fix.yml b/changelogs/fragments/83019-linear-handlers-lockstep-fix.yml
new file mode 100644
index 00000000000..5ee00904199
--- /dev/null
+++ b/changelogs/fragments/83019-linear-handlers-lockstep-fix.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "linear strategy: fix handlers included via ``include_tasks`` handler to be executed in lockstep (https://github.com/ansible/ansible/issues/83019)"
diff --git a/changelogs/fragments/83031.yml b/changelogs/fragments/83031.yml
new file mode 100644
index 00000000000..32d7c09529d
--- /dev/null
+++ b/changelogs/fragments/83031.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - lookup - Fixed examples of csv lookup plugin (https://github.com/ansible/ansible/issues/83031).
diff --git a/changelogs/fragments/83155-ansible-doc-paragraphs.yml b/changelogs/fragments/83155-ansible-doc-paragraphs.yml
new file mode 100644
index 00000000000..b92bd526b77
--- /dev/null
+++ b/changelogs/fragments/83155-ansible-doc-paragraphs.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "ansible-doc - format top-level descriptions with multiple paragraphs as multiple paragraphs, instead of concatenating them (https://github.com/ansible/ansible/pull/83155)."
diff --git a/changelogs/fragments/83235-copy-module-update-mtime.yml b/changelogs/fragments/83235-copy-module-update-mtime.yml
new file mode 100644
index 00000000000..7dd36aff642
--- /dev/null
+++ b/changelogs/fragments/83235-copy-module-update-mtime.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - copy - mtime/atime not updated. Fix now updates mtime/atime (https://github.com/ansible/ansible/issues/83013)
diff --git a/changelogs/fragments/83294-meta-host_pinned-affinity.yml b/changelogs/fragments/83294-meta-host_pinned-affinity.yml
new file mode 100644
index 00000000000..b85d3f84999
--- /dev/null
+++ b/changelogs/fragments/83294-meta-host_pinned-affinity.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - Fix for ``meta`` tasks breaking host/fork affinity with ``host_pinned`` strategy (https://github.com/ansible/ansible/issues/83294)
diff --git a/changelogs/fragments/83327.yml b/changelogs/fragments/83327.yml
new file mode 100644
index 00000000000..8cdd448aa5d
--- /dev/null
+++ b/changelogs/fragments/83327.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - fixed unit test test_borken_cowsay to address mock not being properly applied when an existing unix system already has cowsay installed.
\ No newline at end of file
diff --git a/changelogs/fragments/83331.yml b/changelogs/fragments/83331.yml
new file mode 100644
index 00000000000..9de98f282ba
--- /dev/null
+++ b/changelogs/fragments/83331.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - facts - add facts about x86_64 flags to detect microarchitecture (https://github.com/ansible/ansible/issues/83331).
diff --git a/changelogs/fragments/83373-dnf5-wildcard.yml b/changelogs/fragments/83373-dnf5-wildcard.yml
new file mode 100644
index 00000000000..3cb6e362aa6
--- /dev/null
+++ b/changelogs/fragments/83373-dnf5-wildcard.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - dnf, dnf5 - fix for installing a set of packages by specifying them using a wildcard character (https://github.com/ansible/ansible/issues/83373)
diff --git a/changelogs/fragments/83392-fix-memory-issues-handlers.yml b/changelogs/fragments/83392-fix-memory-issues-handlers.yml
new file mode 100644
index 00000000000..7e4c2e5599f
--- /dev/null
+++ b/changelogs/fragments/83392-fix-memory-issues-handlers.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - Fix rapid memory usage growth when notifying handlers using the ``listen`` keyword (https://github.com/ansible/ansible/issues/83392)
diff --git a/changelogs/fragments/83406-dnf-fix-arch-cmp.yml b/changelogs/fragments/83406-dnf-fix-arch-cmp.yml
new file mode 100644
index 00000000000..c890d662e44
--- /dev/null
+++ b/changelogs/fragments/83406-dnf-fix-arch-cmp.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - dnf - fix an issue where two packages of the same ``evr`` but different arch failed to install (https://github.com/ansible/ansible/issues/83406)
diff --git a/changelogs/fragments/83447-end_host-rescue-rc.yml b/changelogs/fragments/83447-end_host-rescue-rc.yml
new file mode 100644
index 00000000000..b4d82414971
--- /dev/null
+++ b/changelogs/fragments/83447-end_host-rescue-rc.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "``end_host`` - fix incorrect return code when executing ``end_host`` in the ``rescue`` section (https://github.com/ansible/ansible/issues/83447)"
diff --git a/changelogs/fragments/83457-service_facts-openbsd-dont-crash-in-equals.yml b/changelogs/fragments/83457-service_facts-openbsd-dont-crash-in-equals.yml
new file mode 100644
index 00000000000..fc711ce390b
--- /dev/null
+++ b/changelogs/fragments/83457-service_facts-openbsd-dont-crash-in-equals.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - service_facts - don't crash if OpenBSD rcctl variable contains '=' character (https://github.com/ansible/ansible/issues/83457)
diff --git a/changelogs/fragments/83469-http-test-container.yml b/changelogs/fragments/83469-http-test-container.yml
new file mode 100644
index 00000000000..d39bb4c4e3e
--- /dev/null
+++ b/changelogs/fragments/83469-http-test-container.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - "ansible-test - update HTTP test container to 3.2.0 (https://github.com/ansible/ansible/pull/83469)."
diff --git a/changelogs/fragments/83480-fix-support-discard.yml b/changelogs/fragments/83480-fix-support-discard.yml
new file mode 100644
index 00000000000..8cedf8206b6
--- /dev/null
+++ b/changelogs/fragments/83480-fix-support-discard.yml
@@ -0,0 +1,6 @@
+---
+bugfixes:
+ - facts - `support_discard` now returns `0` if either `discard_granularity`
+ or `discard_max_hw_bytes` is zero; otherwise it returns the value of
+ `discard_granularity`, as before
+ (https://github.com/ansible/ansible/pull/83480).
diff --git a/changelogs/fragments/83498-command-tb-env.yml b/changelogs/fragments/83498-command-tb-env.yml
new file mode 100644
index 00000000000..b28ad18114a
--- /dev/null
+++ b/changelogs/fragments/83498-command-tb-env.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - Fix a traceback when an environment variable contains certain special characters (https://github.com/ansible/ansible/issues/83498)
diff --git a/changelogs/fragments/83508_mount_facts.yml b/changelogs/fragments/83508_mount_facts.yml
new file mode 100644
index 00000000000..baa7e592b18
--- /dev/null
+++ b/changelogs/fragments/83508_mount_facts.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - Add a new mount_facts module to support gathering information about mounts that are excluded by default fact gathering.
diff --git a/changelogs/fragments/83530-validate-modules-casing.yml b/changelogs/fragments/83530-validate-modules-casing.yml
new file mode 100644
index 00000000000..d00a344d2fe
--- /dev/null
+++ b/changelogs/fragments/83530-validate-modules-casing.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - "validate-modules sanity test - reject option/aliases names that are identical up to casing but belong to different options (https://github.com/ansible/ansible/pull/83530)."
diff --git a/changelogs/fragments/83540-update_disto_version.yml b/changelogs/fragments/83540-update_disto_version.yml
new file mode 100644
index 00000000000..47141746bb6
--- /dev/null
+++ b/changelogs/fragments/83540-update_disto_version.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - distro package - update the distro package version from 1.8.0 to 1.9.0 (https://github.com/ansible/ansible/issues/82935)
diff --git a/changelogs/fragments/83541-add-sl-micro-suse-family.yaml b/changelogs/fragments/83541-add-sl-micro-suse-family.yaml
new file mode 100644
index 00000000000..ea259e12a8f
--- /dev/null
+++ b/changelogs/fragments/83541-add-sl-micro-suse-family.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+- distribution.py - Added SL-Micro in Suse OS Family. (https://github.com/ansible/ansible/pull/83541)
diff --git a/changelogs/fragments/83575-fix-sanity-ignore-for-find.yml b/changelogs/fragments/83575-fix-sanity-ignore-for-find.yml
new file mode 100644
index 00000000000..85929d35174
--- /dev/null
+++ b/changelogs/fragments/83575-fix-sanity-ignore-for-find.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - find - change the datatype of ``elements`` to ``path`` in option ``paths`` (https://github.com/ansible/ansible/pull/83575).
diff --git a/changelogs/fragments/83599-validate-modules-aliases.yml b/changelogs/fragments/83599-validate-modules-aliases.yml
new file mode 100644
index 00000000000..69a3514fc58
--- /dev/null
+++ b/changelogs/fragments/83599-validate-modules-aliases.yml
@@ -0,0 +1,3 @@
+minor_changes:
+ - "validate-modules sanity test - detect if names of an option (option name + aliases) do not match between argument spec and documentation
+ (https://github.com/ansible/ansible/issues/83598, https://github.com/ansible/ansible/pull/83599)."
diff --git a/changelogs/fragments/83601-debconf-normalize-bools.yml b/changelogs/fragments/83601-debconf-normalize-bools.yml
new file mode 100644
index 00000000000..e2eec66a8d3
--- /dev/null
+++ b/changelogs/fragments/83601-debconf-normalize-bools.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - debconf - fix normalization of value representation for boolean vtypes in new packages (https://github.com/ansible/ansible/issues/83594)
diff --git a/changelogs/fragments/83619-loop-label-register.yml b/changelogs/fragments/83619-loop-label-register.yml
new file mode 100644
index 00000000000..aab82f0dff9
--- /dev/null
+++ b/changelogs/fragments/83619-loop-label-register.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - Fix an issue where registered variable was not available for templating in ``loop_control.label`` on skipped looped tasks (https://github.com/ansible/ansible/issues/83619)
diff --git a/changelogs/fragments/83716-enable-subkey-fingerprint-validation-in-rpm-key-module.yml b/changelogs/fragments/83716-enable-subkey-fingerprint-validation-in-rpm-key-module.yml
new file mode 100644
index 00000000000..70306ff57f1
--- /dev/null
+++ b/changelogs/fragments/83716-enable-subkey-fingerprint-validation-in-rpm-key-module.yml
@@ -0,0 +1,4 @@
+---
+minor_changes:
+ - rpm_key - allow validation of gpg key with a subkey fingerprint
+ - rpm_key - enable gpg validation that requires presence of multiple fingerprints
diff --git a/changelogs/fragments/83755-ini-new-interpolation-option.yml b/changelogs/fragments/83755-ini-new-interpolation-option.yml
new file mode 100644
index 00000000000..03b7fe1c3dc
--- /dev/null
+++ b/changelogs/fragments/83755-ini-new-interpolation-option.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - ini lookup - add new ``interpolation`` option (https://github.com/ansible/ansible/issues/83755)
diff --git a/changelogs/fragments/83803-collection-import-poll-interval.yml b/changelogs/fragments/83803-collection-import-poll-interval.yml
new file mode 100644
index 00000000000..e984bf33007
--- /dev/null
+++ b/changelogs/fragments/83803-collection-import-poll-interval.yml
@@ -0,0 +1,4 @@
+minor_changes:
+- >-
+ ``ansible-galaxy collection publish`` - add configuration options for the initial poll interval
+ and the exponential when checking the import status of a collection, since the default is relatively slow.
diff --git a/changelogs/fragments/83831-runtime-metadata-fix.yml b/changelogs/fragments/83831-runtime-metadata-fix.yml
new file mode 100644
index 00000000000..89e0ec7df22
--- /dev/null
+++ b/changelogs/fragments/83831-runtime-metadata-fix.yml
@@ -0,0 +1,3 @@
+bugfixes:
+ - "runtime-metadata sanity test - do not crash on deprecations if ``galaxy.yml`` contains an empty ``version`` field (https://github.com/ansible/ansible/pull/83831)."
+ - "Fix ``SemanticVersion.parse()`` to store the version string so that ``__repr__`` reports it instead of ``None`` (https://github.com/ansible/ansible/pull/83831)."
diff --git a/changelogs/fragments/83874-include-parse-error-location.yml b/changelogs/fragments/83874-include-parse-error-location.yml
new file mode 100644
index 00000000000..3c9a3df5180
--- /dev/null
+++ b/changelogs/fragments/83874-include-parse-error-location.yml
@@ -0,0 +1,4 @@
+bugfixes:
+ - >-
+ include_tasks - Display location when attempting to load a task list where ``include_*`` did not specify any value -
+ https://github.com/ansible/ansible/issues/83874
diff --git a/changelogs/fragments/83960-dnf5-state-installed-fix.yml b/changelogs/fragments/83960-dnf5-state-installed-fix.yml
new file mode 100644
index 00000000000..a99d705d0a9
--- /dev/null
+++ b/changelogs/fragments/83960-dnf5-state-installed-fix.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "dnf5 - re-introduce the ``state: installed`` alias to ``state: present`` (https://github.com/ansible/ansible/issues/83960)"
diff --git a/changelogs/fragments/PowerShell-AddType-temp.yml b/changelogs/fragments/PowerShell-AddType-temp.yml
new file mode 100644
index 00000000000..6019f8058ed
--- /dev/null
+++ b/changelogs/fragments/PowerShell-AddType-temp.yml
@@ -0,0 +1,7 @@
+bugfixes:
+- >-
+ powershell - Implement more robust deletion mechanism for C# code compilation
+ temporary files. This should avoid scenarios where the underlying temporary
+ directory may be temporarily locked by antivirus tools or other IO problems.
+ A failure to delete one of these temporary directories will result in a
+ warning rather than an outright failure.
diff --git a/changelogs/fragments/action-plugin-docs-sidecar.yml b/changelogs/fragments/action-plugin-docs-sidecar.yml
new file mode 100644
index 00000000000..c9faa07140a
--- /dev/null
+++ b/changelogs/fragments/action-plugin-docs-sidecar.yml
@@ -0,0 +1,2 @@
+bugfixes:
+- ansible-test action-plugin-docs - Fix to check for sidecar documentation for action plugins
diff --git a/changelogs/fragments/add_systemd_facts.yml b/changelogs/fragments/add_systemd_facts.yml
new file mode 100644
index 00000000000..93af448a7f3
--- /dev/null
+++ b/changelogs/fragments/add_systemd_facts.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - facts - add systemd version and features
diff --git a/changelogs/fragments/ansible-config-validate.yml b/changelogs/fragments/ansible-config-validate.yml
new file mode 100644
index 00000000000..fab48db9026
--- /dev/null
+++ b/changelogs/fragments/ansible-config-validate.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - ansible-config has new 'validate' option to find misspelled/foreign configurations in ini file or environment variables.
diff --git a/changelogs/fragments/ansible-doc-color.yml b/changelogs/fragments/ansible-doc-color.yml
new file mode 100644
index 00000000000..045f212a58e
--- /dev/null
+++ b/changelogs/fragments/ansible-doc-color.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - ansible-doc - make colors configurable.
diff --git a/changelogs/fragments/ansible-doc-inicate.yml b/changelogs/fragments/ansible-doc-inicate.yml
new file mode 100644
index 00000000000..519730869fd
--- /dev/null
+++ b/changelogs/fragments/ansible-doc-inicate.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - ansible-doc - fixed "inicates" typo in output
diff --git a/changelogs/fragments/ansible-doc.yml b/changelogs/fragments/ansible-doc.yml
new file mode 100644
index 00000000000..4e52017ac97
--- /dev/null
+++ b/changelogs/fragments/ansible-doc.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - ansible-doc - handle no_fail condition for role.
diff --git a/changelogs/fragments/ansible-galaxy-install-archive-url-timeout.yml b/changelogs/fragments/ansible-galaxy-install-archive-url-timeout.yml
new file mode 100644
index 00000000000..bf87d435409
--- /dev/null
+++ b/changelogs/fragments/ansible-galaxy-install-archive-url-timeout.yml
@@ -0,0 +1,4 @@
+bugfixes:
+- >-
+ ``ansible-galaxy role install`` - update the default timeout to download
+ archive URLs from 20 seconds to 60 (https://github.com/ansible/ansible/issues/83521).
diff --git a/changelogs/fragments/ansible-galaxy-install-help.yml b/changelogs/fragments/ansible-galaxy-install-help.yml
new file mode 100644
index 00000000000..31b6de9a3ca
--- /dev/null
+++ b/changelogs/fragments/ansible-galaxy-install-help.yml
@@ -0,0 +1,5 @@
+bugfixes:
+- >-
+ Add descriptions for ``ansible-galaxy install --help`` and ``ansible-galaxy role|collection install --help``.
+- >-
+ ``ansible-galaxy install --help`` - Fix the usage text and document that the requirements file passed to ``-r`` can include collections and roles.
diff --git a/changelogs/fragments/ansible-galaxy-role-install-symlink.yml b/changelogs/fragments/ansible-galaxy-role-install-symlink.yml
new file mode 100644
index 00000000000..c2003b15cd2
--- /dev/null
+++ b/changelogs/fragments/ansible-galaxy-role-install-symlink.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - ansible-galaxy role install - fix symlinks (https://github.com/ansible/ansible/issues/82702, https://github.com/ansible/ansible/issues/81965).
diff --git a/changelogs/fragments/ansible-test-coverage-update.yml b/changelogs/fragments/ansible-test-coverage-update.yml
new file mode 100644
index 00000000000..69b8e9b36f8
--- /dev/null
+++ b/changelogs/fragments/ansible-test-coverage-update.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - ansible-test - Update ``coverage`` to version 7.6.1.
diff --git a/changelogs/fragments/ansible-test-error-message-improvement.yml b/changelogs/fragments/ansible-test-error-message-improvement.yml
new file mode 100644
index 00000000000..a07a255ac23
--- /dev/null
+++ b/changelogs/fragments/ansible-test-error-message-improvement.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - ansible-test - Improve the error message shown when an unknown ``--remote`` or ``--docker`` option is given.
diff --git a/changelogs/fragments/ansible-test-http-test-container-update.yml b/changelogs/fragments/ansible-test-http-test-container-update.yml
new file mode 100644
index 00000000000..1b76ac52852
--- /dev/null
+++ b/changelogs/fragments/ansible-test-http-test-container-update.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - ansible-test - Update ``http-test-container`` to version 3.0.0.
diff --git a/changelogs/fragments/ansible-test-nios-container.yml b/changelogs/fragments/ansible-test-nios-container.yml
new file mode 100644
index 00000000000..eb719ee8af7
--- /dev/null
+++ b/changelogs/fragments/ansible-test-nios-container.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - ansible-test - Update ``nios-test-container`` to version 5.0.0.
diff --git a/changelogs/fragments/ansible-test-no-egg-info.yml b/changelogs/fragments/ansible-test-no-egg-info.yml
new file mode 100644
index 00000000000..4b36efa13b8
--- /dev/null
+++ b/changelogs/fragments/ansible-test-no-egg-info.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - ansible-test - An ``ansible_core.egg-info`` directory is no longer generated when running tests.
diff --git a/changelogs/fragments/ansible-test-platform-updates.yml b/changelogs/fragments/ansible-test-platform-updates.yml
new file mode 100644
index 00000000000..a42a833c7ee
--- /dev/null
+++ b/changelogs/fragments/ansible-test-platform-updates.yml
@@ -0,0 +1,12 @@
+minor_changes:
+ - ansible-test - Replace Fedora 39 container and remote with Fedora 40.
+ - ansible-test - Replace Alpine 3.19 container and remote with Alpine 3.20.
+ - ansible-test - Replace Ubuntu 20.04 container with Ubuntu 24.04 container.
+ - ansible-test - Add Ubuntu 24.04 remote.
+ - ansible-test - Replace RHEL 9.3 remote with RHEL 9.4.
+ - ansible-test - Replace FreeBSD 14.0 remote with FreeBSD 14.1.
+known_issues:
+ - ansible-test - When using the Fedora 40 container with Podman on a Ubuntu 24.04 host,
+ the ``unix-chkpwd`` AppArmor profile must be disabled on the host to allow SSH connections to the container.
+ - ansible-test - When using ansible-test containers with Podman on a Ubuntu 24.04 host,
+ ansible-test must be run as a non-root user to avoid permission issues caused by AppArmor.
diff --git a/changelogs/fragments/ansible-test-pypi-test-container-update.yml b/changelogs/fragments/ansible-test-pypi-test-container-update.yml
new file mode 100644
index 00000000000..91a1f5b1a57
--- /dev/null
+++ b/changelogs/fragments/ansible-test-pypi-test-container-update.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - ansible-test - Update ``pypi-test-container`` to version 3.2.0.
diff --git a/changelogs/fragments/ansible-test-sanity-empty-init.yml b/changelogs/fragments/ansible-test-sanity-empty-init.yml
new file mode 100644
index 00000000000..0ba349bc773
--- /dev/null
+++ b/changelogs/fragments/ansible-test-sanity-empty-init.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - ansible-test - The ``empty-init`` sanity test no longer applies to ``module_utils`` packages.
diff --git a/changelogs/fragments/ansible-test-sanity-test-requirements.yml b/changelogs/fragments/ansible-test-sanity-test-requirements.yml
new file mode 100644
index 00000000000..2bfd645e903
--- /dev/null
+++ b/changelogs/fragments/ansible-test-sanity-test-requirements.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - ansible-test - Updated the frozen requirements for all sanity tests.
diff --git a/changelogs/fragments/ansible-test-utility-container-update.yml b/changelogs/fragments/ansible-test-utility-container-update.yml
new file mode 100644
index 00000000000..86498e2bc47
--- /dev/null
+++ b/changelogs/fragments/ansible-test-utility-container-update.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - ansible-test - Update ``ansible-test-utility-container`` to version 3.1.0.
diff --git a/changelogs/fragments/ansible-test-venv-bootstrap.yml b/changelogs/fragments/ansible-test-venv-bootstrap.yml
new file mode 100644
index 00000000000..3f6d8aa2b8d
--- /dev/null
+++ b/changelogs/fragments/ansible-test-venv-bootstrap.yml
@@ -0,0 +1,3 @@
+minor_changes:
+ - ansible-test - Virtual environments created by ansible-test no longer include the ``wheel`` or ``setuptools`` packages.
+ - ansible-test - Upgrade ``pip`` used in ansible-test managed virtual environments from version 24.0 to 24.2.
diff --git a/changelogs/fragments/ansible-test-vyos.yml b/changelogs/fragments/ansible-test-vyos.yml
new file mode 100644
index 00000000000..4def23b9028
--- /dev/null
+++ b/changelogs/fragments/ansible-test-vyos.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - ansible-test - Removed the ``vyos/1.1.8`` network remote as it is no longer functional.
diff --git a/changelogs/fragments/ansible-test-windows-remote.yml b/changelogs/fragments/ansible-test-windows-remote.yml
new file mode 100644
index 00000000000..2ab8bd79266
--- /dev/null
+++ b/changelogs/fragments/ansible-test-windows-remote.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - ansible-test - Connection options can be set for ansible-test managed remote Windows instances.
diff --git a/changelogs/fragments/ansible_connection_path.yml b/changelogs/fragments/ansible_connection_path.yml
new file mode 100644
index 00000000000..d1eb1866fbe
--- /dev/null
+++ b/changelogs/fragments/ansible_connection_path.yml
@@ -0,0 +1,8 @@
+bugfixes:
+ - persistent connection plugins - The correct Ansible persistent connection helper is now always used.
+ Previously, the wrong script could be used, depending on the value of the ``PATH`` environment variable.
+ As a result, users were sometimes required to set ``ANSIBLE_CONNECTION_PATH`` to use the correct script.
+deprecated_features:
+ - persistent connection plugins - The ``ANSIBLE_CONNECTION_PATH`` config option no longer has any effect, and will be removed in a future release.
+breaking_changes:
+ - persistent connection plugins - The ``ANSIBLE_CONNECTION_PATH`` config option no longer has any effect.
diff --git a/changelogs/fragments/ansible_managed_restore.yml b/changelogs/fragments/ansible_managed_restore.yml
new file mode 100644
index 00000000000..63d15bf9dca
--- /dev/null
+++ b/changelogs/fragments/ansible_managed_restore.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - ansible_managed restored its 'templatability' by ensuring the possible injection routes are cut off earlier in the process.
diff --git a/changelogs/fragments/apk_package_facts.yml b/changelogs/fragments/apk_package_facts.yml
new file mode 100644
index 00000000000..6eb1a351625
--- /dev/null
+++ b/changelogs/fragments/apk_package_facts.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - package_facts - apk fix when cache is empty (https://github.com/ansible/ansible/issues/83126).
diff --git a/changelogs/fragments/apt_cache.yml b/changelogs/fragments/apt_cache.yml
new file mode 100644
index 00000000000..d407431e034
--- /dev/null
+++ b/changelogs/fragments/apt_cache.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - apt_* - add more info messages raised while updating apt cache (https://github.com/ansible/ansible/issues/77941).
diff --git a/changelogs/fragments/assemble.yml b/changelogs/fragments/assemble.yml
new file mode 100644
index 00000000000..27b66551492
--- /dev/null
+++ b/changelogs/fragments/assemble.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - assemble - update argument_spec with 'decrypt' option which is required by action plugin (https://github.com/ansible/ansible/issues/80840).
diff --git a/changelogs/fragments/atomic_update_perms_time.yml b/changelogs/fragments/atomic_update_perms_time.yml
new file mode 100644
index 00000000000..f776845e380
--- /dev/null
+++ b/changelogs/fragments/atomic_update_perms_time.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - module_utils atomic_move (used by most file based modules), now correctly handles permission copying and mtime setting across all paths
diff --git a/changelogs/fragments/become-runas-system.yml b/changelogs/fragments/become-runas-system.yml
new file mode 100644
index 00000000000..2ad7e465628
--- /dev/null
+++ b/changelogs/fragments/become-runas-system.yml
@@ -0,0 +1,4 @@
+bugfixes:
+ - >-
+ runas become - Generate new token for the SYSTEM token to use for become. This should result in the full SYSTEM
+ token being used and problems starting the process that fails with ``The process creation has been blocked``.
diff --git a/changelogs/fragments/cleanup-outdated-galaxy-install-info.yml b/changelogs/fragments/cleanup-outdated-galaxy-install-info.yml
new file mode 100644
index 00000000000..7fde75f13e7
--- /dev/null
+++ b/changelogs/fragments/cleanup-outdated-galaxy-install-info.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - ansible-galaxy collection install - remove old installation info when installing collections (https://github.com/ansible/ansible/issues/83182).
diff --git a/changelogs/fragments/colors_for_included_events.yml b/changelogs/fragments/colors_for_included_events.yml
new file mode 100644
index 00000000000..0ff029717cb
--- /dev/null
+++ b/changelogs/fragments/colors_for_included_events.yml
@@ -0,0 +1,4 @@
+bugfixes:
+ - COLOR_SKIP will not alter "included" events color display anymore.
+minor_changes:
+ - Introducing COLOR_INCLUDED parameter. This can set a specific color for "included" events.
diff --git a/changelogs/fragments/correct-callback-fqcn-old-style-action-invocation.yml b/changelogs/fragments/correct-callback-fqcn-old-style-action-invocation.yml
new file mode 100644
index 00000000000..675439604ea
--- /dev/null
+++ b/changelogs/fragments/correct-callback-fqcn-old-style-action-invocation.yml
@@ -0,0 +1,3 @@
+bugfixes:
+ - Fix the task attribute ``resolved_action`` to show the FQCN instead of ``None`` when ``action`` or ``local_action`` is used in the playbook.
+ - Fix using ``module_defaults`` with ``local_action``/``action`` (https://github.com/ansible/ansible/issues/81905).
diff --git a/changelogs/fragments/correct_connection_callback.yml b/changelogs/fragments/correct_connection_callback.yml
new file mode 100644
index 00000000000..1e59691a3ae
--- /dev/null
+++ b/changelogs/fragments/correct_connection_callback.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - Callbacks now correctly get the resolved connection plugin name as the connection used.
diff --git a/changelogs/fragments/darwin_facts.yml b/changelogs/fragments/darwin_facts.yml
new file mode 100644
index 00000000000..bad6d97a3c7
--- /dev/null
+++ b/changelogs/fragments/darwin_facts.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - Darwin - add unit tests for Darwin hardware fact gathering.
diff --git a/changelogs/fragments/delay_type.yml b/changelogs/fragments/delay_type.yml
new file mode 100644
index 00000000000..03b44dc0989
--- /dev/null
+++ b/changelogs/fragments/delay_type.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - delay keyword is now a float, matching the underlying 'time' API and user expectations.
diff --git a/changelogs/fragments/deprecate-safe-evals.yml b/changelogs/fragments/deprecate-safe-evals.yml
new file mode 100644
index 00000000000..9aea56f67b7
--- /dev/null
+++ b/changelogs/fragments/deprecate-safe-evals.yml
@@ -0,0 +1,2 @@
+deprecated_features:
+ - Deprecate ``ansible.module_utils.basic.AnsibleModule.safe_eval`` and ``ansible.module_utils.common.safe_eval`` as they are no longer used.
diff --git a/changelogs/fragments/dinit.yml b/changelogs/fragments/dinit.yml
new file mode 100644
index 00000000000..4122fa7b9a5
--- /dev/null
+++ b/changelogs/fragments/dinit.yml
@@ -0,0 +1,3 @@
+---
+minor_changes:
+ - service_mgr - add support for dinit service manager (https://github.com/ansible/ansible/pull/83489).
diff --git a/changelogs/fragments/display_fix_log_severity.yml b/changelogs/fragments/display_fix_log_severity.yml
new file mode 100644
index 00000000000..943896ca558
--- /dev/null
+++ b/changelogs/fragments/display_fix_log_severity.yml
@@ -0,0 +1,4 @@
+bugfixes:
+ - display now does a better job of mapping warnings/errors to the proper log severity when using ansible.log. We still use color as a fallback mapping (now prioritized by severity) but mostly rely on it being directly set by warning/error calls.
+minor_changes:
+ - ansible.log now also shows log severity field
diff --git a/changelogs/fragments/dnf-exceptions-vs-text.yml b/changelogs/fragments/dnf-exceptions-vs-text.yml
new file mode 100644
index 00000000000..59e4bf3aecf
--- /dev/null
+++ b/changelogs/fragments/dnf-exceptions-vs-text.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - dnf - minor internal changes in how the errors from the dnf API are handled; rely solely on the exceptions rather than inspecting text embedded in them
diff --git a/changelogs/fragments/dnf-remove-legacy-code.yml b/changelogs/fragments/dnf-remove-legacy-code.yml
new file mode 100644
index 00000000000..980f9824a79
--- /dev/null
+++ b/changelogs/fragments/dnf-remove-legacy-code.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - dnf - remove legacy code for unsupported dnf versions
diff --git a/changelogs/fragments/dnf5-enable-disable-plugins.yml b/changelogs/fragments/dnf5-enable-disable-plugins.yml
new file mode 100644
index 00000000000..5d4eacfbac0
--- /dev/null
+++ b/changelogs/fragments/dnf5-enable-disable-plugins.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - dnf5 - implement ``enable_plugin`` and ``disable_plugin`` options
diff --git a/changelogs/fragments/dnf_handle_downloaderror.yml b/changelogs/fragments/dnf_handle_downloaderror.yml
new file mode 100644
index 00000000000..ec4e0e147f4
--- /dev/null
+++ b/changelogs/fragments/dnf_handle_downloaderror.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - dnf - Ensure that we are handling DownloadError properly in the dnf module
diff --git a/changelogs/fragments/dwim_is_role_fix_task_relative.yml b/changelogs/fragments/dwim_is_role_fix_task_relative.yml
new file mode 100644
index 00000000000..bb4c6b39c09
--- /dev/null
+++ b/changelogs/fragments/dwim_is_role_fix_task_relative.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - Fix using the current task's directory for looking up relative paths within roles (https://github.com/ansible/ansible/issues/82695).
diff --git a/changelogs/fragments/empty_log_path.yml b/changelogs/fragments/empty_log_path.yml
new file mode 100644
index 00000000000..c8e5022bbb5
--- /dev/null
+++ b/changelogs/fragments/empty_log_path.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - display - warn user about empty log filepath (https://github.com/ansible/ansible/issues/79959).
diff --git a/changelogs/fragments/end_role.yml b/changelogs/fragments/end_role.yml
new file mode 100644
index 00000000000..702199207de
--- /dev/null
+++ b/changelogs/fragments/end_role.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - Add a new meta task ``end_role`` (https://github.com/ansible/ansible/issues/22286)
diff --git a/changelogs/fragments/extras_fix.yml b/changelogs/fragments/extras_fix.yml
new file mode 100644
index 00000000000..9d8e24594f4
--- /dev/null
+++ b/changelogs/fragments/extras_fix.yml
@@ -0,0 +1,4 @@
+bugfixes:
+ - connection plugins using the 'extras' option feature would need variables to match the plugin's loaded name,
+ sometimes requiring fqcn, which is not the same as the documented/declared/expected variables.
+ Now we fall back to the 'basename' of the fqcn, but plugin authors can still set the expected value directly.
diff --git a/changelogs/fragments/fetch-filename.yml b/changelogs/fragments/fetch-filename.yml
new file mode 100644
index 00000000000..f921f346a59
--- /dev/null
+++ b/changelogs/fragments/fetch-filename.yml
@@ -0,0 +1,3 @@
+bugfixes:
+ - psrp - Fix bug when attempting to fetch a file path that contains special glob characters like ``[]``
+ - ssh - Fix bug when attempting to fetch a file path with characters that should be quoted when using the ``piped`` transfer method
diff --git a/changelogs/fragments/file_hardlink.yml b/changelogs/fragments/file_hardlink.yml
new file mode 100644
index 00000000000..26c5eeaf7c6
--- /dev/null
+++ b/changelogs/fragments/file_hardlink.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - file - retrieve the link's full path when hard linking a soft link with follow (https://github.com/ansible/ansible/issues/33911).
diff --git a/changelogs/fragments/fix-inconsistent-csvfile-missing-search-error.yml b/changelogs/fragments/fix-inconsistent-csvfile-missing-search-error.yml
new file mode 100644
index 00000000000..9d0dcf935c6
--- /dev/null
+++ b/changelogs/fragments/fix-inconsistent-csvfile-missing-search-error.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - csvfile lookup - give an error when no search term is provided using modern config syntax (https://github.com/ansible/ansible/issues/83689).
diff --git a/changelogs/fragments/fix-module-defaults-deprecations.yml b/changelogs/fragments/fix-module-defaults-deprecations.yml
new file mode 100644
index 00000000000..e0242aae3ca
--- /dev/null
+++ b/changelogs/fragments/fix-module-defaults-deprecations.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - module_defaults - do not display action/module deprecation warnings when using an action_group that contains a deprecated plugin (https://github.com/ansible/ansible/issues/83490).
diff --git a/changelogs/fragments/fix_floating_ints.yml b/changelogs/fragments/fix_floating_ints.yml
new file mode 100644
index 00000000000..7a8df434a07
--- /dev/null
+++ b/changelogs/fragments/fix_floating_ints.yml
@@ -0,0 +1,2 @@
+bugfixes:
+  - Avoid rounding floats when casting into int, as it can lead to unexpected results. 0.99999 will be 0, not 1.
diff --git a/changelogs/fragments/fix_log_verbosity.yml b/changelogs/fragments/fix_log_verbosity.yml
new file mode 100644
index 00000000000..72ccb3a5528
--- /dev/null
+++ b/changelogs/fragments/fix_log_verbosity.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - ensure we have logger before we log when we have increased verbosity.
diff --git a/changelogs/fragments/freebsd_disk_regex.yml b/changelogs/fragments/freebsd_disk_regex.yml
new file mode 100644
index 00000000000..74930f212c9
--- /dev/null
+++ b/changelogs/fragments/freebsd_disk_regex.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - freebsd - update disk and slices regex for fact gathering (https://github.com/ansible/ansible/pull/82081).
diff --git a/changelogs/fragments/freebsd_facts_refactor.yml b/changelogs/fragments/freebsd_facts_refactor.yml
new file mode 100644
index 00000000000..f0f01354f76
--- /dev/null
+++ b/changelogs/fragments/freebsd_facts_refactor.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - freebsd - refactor dmidecode fact gathering code for simplicity.
diff --git a/changelogs/fragments/freebsd_service.yml b/changelogs/fragments/freebsd_service.yml
new file mode 100644
index 00000000000..dcf5f8cc005
--- /dev/null
+++ b/changelogs/fragments/freebsd_service.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - service - fix order of CLI arguments on FreeBSD (https://github.com/ansible/ansible/pull/81377).
diff --git a/changelogs/fragments/galaxy-reauth-error-handling.yml b/changelogs/fragments/galaxy-reauth-error-handling.yml
new file mode 100644
index 00000000000..35c169b8e0b
--- /dev/null
+++ b/changelogs/fragments/galaxy-reauth-error-handling.yml
@@ -0,0 +1,2 @@
+minor_changes:
+- ansible-galaxy - Handle authentication errors and token expiration
diff --git a/changelogs/fragments/gather-s390-sysinfo.yml b/changelogs/fragments/gather-s390-sysinfo.yml
new file mode 100644
index 00000000000..7a9a60d0ff8
--- /dev/null
+++ b/changelogs/fragments/gather-s390-sysinfo.yml
@@ -0,0 +1,2 @@
+minor_changes:
+- fact gathering - Gather /proc/sysinfo facts on s390 Linux on Z
diff --git a/changelogs/fragments/gather_facts_single.yml b/changelogs/fragments/gather_facts_single.yml
new file mode 100644
index 00000000000..65e4f57193d
--- /dev/null
+++ b/changelogs/fragments/gather_facts_single.yml
@@ -0,0 +1,2 @@
+bugfixes:
+  - setup module (fact gathering), added fallback code path to handle mount fact gathering in linux when threading is not available
diff --git a/changelogs/fragments/getoffmylawn.yml b/changelogs/fragments/getoffmylawn.yml
new file mode 100644
index 00000000000..1cc805c1798
--- /dev/null
+++ b/changelogs/fragments/getoffmylawn.yml
@@ -0,0 +1,2 @@
+minor_changes:
+  - plugins, deprecations and warnings concerning configuration are now displayed to the user, technical issues that prevented 'de-duplication' have been resolved.
diff --git a/changelogs/fragments/hostvars_fix.yml b/changelogs/fragments/hostvars_fix.yml
new file mode 100644
index 00000000000..b9b3c33f5f8
--- /dev/null
+++ b/changelogs/fragments/hostvars_fix.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - templating hostvars under native jinja will not cause serialization errors anymore.
diff --git a/changelogs/fragments/known_hosts_module_args.yml b/changelogs/fragments/known_hosts_module_args.yml
new file mode 100644
index 00000000000..24998856293
--- /dev/null
+++ b/changelogs/fragments/known_hosts_module_args.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - known_hosts - the returned module invocation now accurately reflects the module arguments.
diff --git a/changelogs/fragments/language.yml b/changelogs/fragments/language.yml
new file mode 100644
index 00000000000..8e9569f491a
--- /dev/null
+++ b/changelogs/fragments/language.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+  - Fix an issue when the LANGUAGE environment variable is set to a non-English locale (https://github.com/ansible/ansible/issues/83608).
diff --git a/changelogs/fragments/linear_started_name.yml b/changelogs/fragments/linear_started_name.yml
new file mode 100644
index 00000000000..e620680bb6d
--- /dev/null
+++ b/changelogs/fragments/linear_started_name.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - linear strategy now provides a properly templated task name to the v2_runner_on_started callback event.
diff --git a/changelogs/fragments/linux_network_get.yml b/changelogs/fragments/linux_network_get.yml
new file mode 100644
index 00000000000..01af77621d3
--- /dev/null
+++ b/changelogs/fragments/linux_network_get.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - linux - remove extraneous get_bin_path API call.
diff --git a/changelogs/fragments/local_facts_d.yml b/changelogs/fragments/local_facts_d.yml
new file mode 100644
index 00000000000..884abc74ba3
--- /dev/null
+++ b/changelogs/fragments/local_facts_d.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - local - handle error while parsing values in ini files (https://github.com/ansible/ansible/issues/82717).
diff --git a/changelogs/fragments/mask_me.yml b/changelogs/fragments/mask_me.yml
new file mode 100644
index 00000000000..57aac99aa9a
--- /dev/null
+++ b/changelogs/fragments/mask_me.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - systemd_service - handle mask operation failure (https://github.com/ansible/ansible/issues/81649).
diff --git a/changelogs/fragments/mc_fix.yml b/changelogs/fragments/mc_fix.yml
new file mode 100644
index 00000000000..efe7642600e
--- /dev/null
+++ b/changelogs/fragments/mc_fix.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - config, restored the ability to set module compression via a variable
diff --git a/changelogs/fragments/package_facts_aliases.yml b/changelogs/fragments/package_facts_aliases.yml
new file mode 100644
index 00000000000..9e408ff6eba
--- /dev/null
+++ b/changelogs/fragments/package_facts_aliases.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - package_facts module now supports using aliases for supported package managers, for example managers=yum or managers=dnf will resolve to using the underlying rpm.
diff --git a/changelogs/fragments/package_facts_warnings.yml b/changelogs/fragments/package_facts_warnings.yml
new file mode 100644
index 00000000000..0edb03f052e
--- /dev/null
+++ b/changelogs/fragments/package_facts_warnings.yml
@@ -0,0 +1,3 @@
+bugfixes:
+ - package_facts - returns the correct warning when package listing fails.
+ - package_facts - no longer fails silently when the selected package manager is unable to list packages.
diff --git a/changelogs/fragments/powershell-clixml.yml b/changelogs/fragments/powershell-clixml.yml
new file mode 100644
index 00000000000..3da3222d754
--- /dev/null
+++ b/changelogs/fragments/powershell-clixml.yml
@@ -0,0 +1,3 @@
+bugfixes:
+ - powershell - Improve CLIXML decoding to decode all control characters and
+ unicode characters that are encoded as surrogate pairs.
diff --git a/changelogs/fragments/psrp-extras.yml b/changelogs/fragments/psrp-extras.yml
new file mode 100644
index 00000000000..ec114c35bff
--- /dev/null
+++ b/changelogs/fragments/psrp-extras.yml
@@ -0,0 +1,3 @@
+minor_changes:
+  - psrp - Remove connection plugin extras vars lookup. This should have no effect on existing users as all options
+ have been documented.
diff --git a/changelogs/fragments/psrp-version-req.yml b/changelogs/fragments/psrp-version-req.yml
new file mode 100644
index 00000000000..1a1ccf0661d
--- /dev/null
+++ b/changelogs/fragments/psrp-version-req.yml
@@ -0,0 +1,2 @@
+bugfixes:
+- Add a version ceiling constraint for pypsrp to avoid potential breaking changes in the 1.0.0 release.
diff --git a/changelogs/fragments/python-3.13.yml b/changelogs/fragments/python-3.13.yml
new file mode 100644
index 00000000000..43a8021a921
--- /dev/null
+++ b/changelogs/fragments/python-3.13.yml
@@ -0,0 +1,3 @@
+minor_changes:
+ - ansible-test - Add support for Python 3.13.
+ - Add ``python3.13`` to the default ``INTERPRETER_PYTHON_FALLBACK`` list.
diff --git a/changelogs/fragments/raw_clean_msg.yml b/changelogs/fragments/raw_clean_msg.yml
new file mode 100644
index 00000000000..ebae8dd4523
--- /dev/null
+++ b/changelogs/fragments/raw_clean_msg.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - the raw arguments error now just displays the short names of modules instead of every possible variation
diff --git a/changelogs/fragments/remove-deprecated-get_delegated_vars.yml b/changelogs/fragments/remove-deprecated-get_delegated_vars.yml
new file mode 100644
index 00000000000..8cc0659ee57
--- /dev/null
+++ b/changelogs/fragments/remove-deprecated-get_delegated_vars.yml
@@ -0,0 +1,2 @@
+removed_features:
+ - Remove deprecated `VariableManager._get_delegated_vars` method (https://github.com/ansible/ansible/issues/82950)
diff --git a/changelogs/fragments/remove-python3.10-controller-support.yml b/changelogs/fragments/remove-python3.10-controller-support.yml
new file mode 100644
index 00000000000..2196392201d
--- /dev/null
+++ b/changelogs/fragments/remove-python3.10-controller-support.yml
@@ -0,0 +1,2 @@
+removed_features:
+ - Removed Python 3.10 as a supported version on the controller. Python 3.11 or newer is required.
diff --git a/changelogs/fragments/replace_regex.yml b/changelogs/fragments/replace_regex.yml
new file mode 100644
index 00000000000..dccad8e3e9b
--- /dev/null
+++ b/changelogs/fragments/replace_regex.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - replace - Updated before/after example (https://github.com/ansible/ansible/issues/83390).
diff --git a/changelogs/fragments/secrets.yml b/changelogs/fragments/secrets.yml
new file mode 100644
index 00000000000..a07b0bb7346
--- /dev/null
+++ b/changelogs/fragments/secrets.yml
@@ -0,0 +1,3 @@
+---
+minor_changes:
+ - replace random with secrets library.
diff --git a/changelogs/fragments/selinux_import.yml b/changelogs/fragments/selinux_import.yml
new file mode 100644
index 00000000000..881e41959ef
--- /dev/null
+++ b/changelogs/fragments/selinux_import.yml
@@ -0,0 +1,3 @@
+---
+minor_changes:
+ - remove extraneous selinux import (https://github.com/ansible/ansible/issues/83657).
diff --git a/changelogs/fragments/service_facts_systemd_fix.yml b/changelogs/fragments/service_facts_systemd_fix.yml
new file mode 100644
index 00000000000..36064424389
--- /dev/null
+++ b/changelogs/fragments/service_facts_systemd_fix.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - service_facts will now detect failed services more accurately across systemd implementations.
diff --git a/changelogs/fragments/shell-environment.yml b/changelogs/fragments/shell-environment.yml
new file mode 100644
index 00000000000..5c49a5f85f5
--- /dev/null
+++ b/changelogs/fragments/shell-environment.yml
@@ -0,0 +1,4 @@
+minor_changes:
+ - Removed the shell ``environment`` config entry as this is already covered by the play/task directives documentation
+ and the value itself is not used in the shell plugins. This should remove any confusion around how people set the
+ environment for a task.
diff --git a/changelogs/fragments/ssh-windows.yml b/changelogs/fragments/ssh-windows.yml
new file mode 100644
index 00000000000..678f4b4603f
--- /dev/null
+++ b/changelogs/fragments/ssh-windows.yml
@@ -0,0 +1,13 @@
+breaking_changes:
+- >-
+ Stopped wrapping all commands sent over SSH on a Windows target with a
+ ``powershell.exe`` executable. This results in one less process being started
+ on each command for Windows to improve efficiency, simplify the code, and
+ make ``raw`` an actual raw command run with the default shell configured on
+  the Windows sshd settings. This should have no effect on most tasks except
+ for ``raw`` which now is not guaranteed to always be running in a PowerShell
+ shell and from having the console output codepage set to UTF-8. To avoid this
+ issue either swap to using ``ansible.windows.win_command``,
+ ``ansible.windows.win_shell``, ``ansible.windows.win_powershell`` or manually
+ wrap the raw command with the shell commands needed to set the output console
+ encoding.
diff --git a/changelogs/fragments/suppress-paramiko-warnings.yml b/changelogs/fragments/suppress-paramiko-warnings.yml
new file mode 100644
index 00000000000..15c726cb366
--- /dev/null
+++ b/changelogs/fragments/suppress-paramiko-warnings.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - Suppress cryptography deprecation warnings for Blowfish and TripleDES when the ``paramiko`` Python module is installed.
diff --git a/changelogs/fragments/sysctl_fact_fix.yml b/changelogs/fragments/sysctl_fact_fix.yml
new file mode 100644
index 00000000000..55f51b91428
--- /dev/null
+++ b/changelogs/fragments/sysctl_fact_fix.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - setup/gather_facts will skip missing ``sysctl`` instead of being a fatal error (https://github.com/ansible/ansible/pull/81297).
diff --git a/changelogs/fragments/systemd_facts.yml b/changelogs/fragments/systemd_facts.yml
new file mode 100644
index 00000000000..2015e64babc
--- /dev/null
+++ b/changelogs/fragments/systemd_facts.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - systemd facts - handle AttributeError raised while gathering facts on non-systemd hosts.
diff --git a/changelogs/fragments/timedout_test.yml b/changelogs/fragments/timedout_test.yml
new file mode 100644
index 00000000000..7784b691da5
--- /dev/null
+++ b/changelogs/fragments/timedout_test.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - timedout test for checking if a task result represents a 'timed out' task.
diff --git a/changelogs/fragments/timeout_show_frame.yml b/changelogs/fragments/timeout_show_frame.yml
new file mode 100644
index 00000000000..fcdb61beef2
--- /dev/null
+++ b/changelogs/fragments/timeout_show_frame.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - task timeout now returns timedout key with frame/code that was in execution when the timeout is triggered.
diff --git a/changelogs/fragments/unarchive_differs.yml b/changelogs/fragments/unarchive_differs.yml
new file mode 100644
index 00000000000..c95af7e2573
--- /dev/null
+++ b/changelogs/fragments/unarchive_differs.yml
@@ -0,0 +1,4 @@
+---
+bugfixes:
+ - unarchive - trigger change when size and content differ when other
+ properties are unchanged (https://github.com/ansible/ansible/pull/83454).
diff --git a/changelogs/fragments/unarchive_timestamp.yml b/changelogs/fragments/unarchive_timestamp.yml
new file mode 100644
index 00000000000..a945b9c41d6
--- /dev/null
+++ b/changelogs/fragments/unarchive_timestamp.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - unarchive - Better handling of files with an invalid timestamp in zip file (https://github.com/ansible/ansible/issues/81092).
diff --git a/changelogs/fragments/uri_follow_redirect.yml b/changelogs/fragments/uri_follow_redirect.yml
new file mode 100644
index 00000000000..1df21a486cb
--- /dev/null
+++ b/changelogs/fragments/uri_follow_redirect.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+  - uri - deprecate 'yes' and 'no' values for the 'follow_redirects' parameter.
diff --git a/changelogs/fragments/vaulted_file_and_fixes.yml b/changelogs/fragments/vaulted_file_and_fixes.yml
new file mode 100644
index 00000000000..fbb85ec5471
--- /dev/null
+++ b/changelogs/fragments/vaulted_file_and_fixes.yml
@@ -0,0 +1,5 @@
+bugfixes:
+  - vault.is_encrypted_file is now optimized for being called at runtime and not just from tests
+ - vault_encrypted test documentation, name and examples have been fixed, other parts were clarified
+minor_changes:
+ - vaulted_file test filter added, to test if the provided path is an 'Ansible vaulted' file
diff --git a/changelogs/fragments/winrm-quota.yml b/changelogs/fragments/winrm-quota.yml
new file mode 100644
index 00000000000..2a84f3315dc
--- /dev/null
+++ b/changelogs/fragments/winrm-quota.yml
@@ -0,0 +1,3 @@
+bugfixes:
+ - winrm - Add retry after exceeding commands per user quota that can occur in loops and action plugins running
+ multiple commands.
diff --git a/changelogs/fragments/yum_repository.yml b/changelogs/fragments/yum_repository.yml
new file mode 100644
index 00000000000..508760614dc
--- /dev/null
+++ b/changelogs/fragments/yum_repository.yml
@@ -0,0 +1,9 @@
+deprecated_features:
+ - yum_repository - deprecate ``async`` option as it has been removed in RHEL 8 and will be removed in ansible-core 2.22.
+ - >-
+ yum_repository - the following options are deprecated: ``deltarpm_metadata_percentage``, ``gpgcakey``, ``http_caching``,
+ ``keepalive``, ``metadata_expire_filter``, ``mirrorlist_expire``, ``protect``, ``ssl_check_cert_permissions``,
+ ``ui_repoid_vars`` as they have no effect for dnf as an underlying package manager.
+ The options will be removed in ansible-core 2.22.
+minor_changes:
+ - yum_repository - add ``excludepkgs`` alias to the ``exclude`` option.
diff --git a/hacking/README.md b/hacking/README.md
index 51f17202ed5..534a7e4db0e 100644
--- a/hacking/README.md
+++ b/hacking/README.md
@@ -5,7 +5,7 @@ env-setup
---------
The 'env-setup' script modifies your environment to allow you to run
-ansible from a git checkout using python >= 3.10.
+ansible from a git checkout using python >= 3.11.
First, set up your environment to run from the checkout:
@@ -18,7 +18,7 @@ and do not wish to install them from your operating system package manager, you
can install them from pip
```shell
-easy_install pip # if pip is not already available
+python -Im ensurepip # if pip is not already available
pip install -r requirements.txt
```
diff --git a/hacking/backport/README.md b/hacking/backport/README.md
index 3fb212b33e0..ce7112b22f9 100644
--- a/hacking/backport/README.md
+++ b/hacking/backport/README.md
@@ -4,7 +4,7 @@ This directory contains scripts useful for dealing with and maintaining
backports. Scripts in it depend on pygithub, and expect a valid environment
variable called `GITHUB_TOKEN`.
-To generate a Github token, go to https://github.com/settings/tokens/new
+To generate a Github token, go to
## `backport_of_line_adder.py`
diff --git a/hacking/env-setup b/hacking/env-setup
index 0a86e0fe4fb..df1ea4020f2 100644
--- a/hacking/env-setup
+++ b/hacking/env-setup
@@ -57,22 +57,6 @@ expr "$PYTHONPATH" : "${ANSIBLE_TEST_PREFIX_PYTHONPATH}.*" > /dev/null || prepen
expr "$PATH" : "${PREFIX_PATH}.*" > /dev/null || prepend_path PATH "$PREFIX_PATH"
expr "$MANPATH" : "${PREFIX_MANPATH}.*" > /dev/null || prepend_path MANPATH "$PREFIX_MANPATH"
-#
-# Generate egg_info so that pkg_resources works
-#
-
-# Do the work in a function so we don't repeat ourselves later
-gen_egg_info()
-{
- # check for current and past egg-info directory names
- if ls "$PREFIX_PYTHONPATH"/ansible*.egg-info >/dev/null 2>&1; then
- # bypass shell aliases with leading backslash
- # see https://github.com/ansible/ansible/pull/11967
- \rm -rf "$PREFIX_PYTHONPATH"/ansible*.egg-info
- fi
- "$PYTHON_BIN" setup.py egg_info
-}
-
if [ "$ANSIBLE_DEV_HOME" != "$PWD" ] ; then
current_dir="$PWD"
else
@@ -81,10 +65,8 @@ fi
(
cd "$ANSIBLE_DEV_HOME"
if [ "$verbosity" = silent ] ; then
- gen_egg_info > /dev/null 2>&1 &
find . -type f -name "*.pyc" -exec rm -f {} \; > /dev/null 2>&1
else
- gen_egg_info
find . -type f -name "*.pyc" -exec rm -f {} \;
fi
cd "$current_dir"
diff --git a/hacking/env-setup.fish b/hacking/env-setup.fish
index 529b57333ef..ee945ec1452 100644
--- a/hacking/env-setup.fish
+++ b/hacking/env-setup.fish
@@ -64,25 +64,11 @@ if not set -q PYTHON_BIN
end
end
-# Generate egg_info so that pkg_resources works
-function gen_egg_info
- # Check if ansible*.egg-info directory exists and remove if found
- if test -d $PREFIX_PYTHONPATH/ansible*.egg-info
- rm -rf $PREFIX_PYTHONPATH/ansible*.egg-info
- end
- # Execute setup.py egg_info using the chosen Python interpreter
- eval $PYTHON_BIN setup.py egg_info
-end
-
pushd $ANSIBLE_HOME
if test -n "$QUIET"
- # Run gen_egg_info in the background and redirect output to /dev/null
- gen_egg_info &> /dev/null
# Remove any .pyc files found
find . -type f -name "*.pyc" -exec rm -f '{}' ';' &> /dev/null
else
- # Run gen_egg_info
- gen_egg_info
# Remove any .pyc files found
find . -type f -name "*.pyc" -exec rm -f '{}' ';'
# Display setup details
diff --git a/hacking/ticket_stubs/bug_internal_api.md b/hacking/ticket_stubs/bug_internal_api.md
index 76a3bb085ca..89162558ca0 100644
--- a/hacking/ticket_stubs/bug_internal_api.md
+++ b/hacking/ticket_stubs/bug_internal_api.md
@@ -13,11 +13,11 @@ but this does not seem to match that case.
If you really need a stable API target to use Ansible, consider using ansible-runner:
-* https://github.com/ansible/ansible-runner
+*
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
If you or anyone else has any further questions, please let us know by using any of the communication methods listed in the page below:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+*
Thank you once again for this and your interest in Ansible!
diff --git a/hacking/ticket_stubs/bug_wrong_repo.md b/hacking/ticket_stubs/bug_wrong_repo.md
index b711e85f3ae..ed115232a20 100644
--- a/hacking/ticket_stubs/bug_wrong_repo.md
+++ b/hacking/ticket_stubs/bug_wrong_repo.md
@@ -8,29 +8,28 @@ This appears to be something that should be filed against another project or bug
<< CHOOSE AS APPROPRIATE >>
-* https://github.com/ansible-community/ansible-lint
-* https://github.com/ansible/ansible-runner
-* https://github.com/ansible/ansible-navigator
-* https://github.com/ansible-community/antsibull
-* https://github.com/ansible-community/ara
-* https://github.com/ansible/awx
-* https://github.com/ansible-collections/community.general
-* https://github.com/ansible-community/molecule
-* For AAP or Tower licensees report issues via your Red Hat representative or https://issues.redhat.com
+*
+*
+*
+*
+*
+*
+*
+*
+* For AAP Customer issues please see
If you can stop by the tracker or forum for one of those projects, we'd appreciate it.
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
Should you still wish to discuss things further, or if you disagree with our thought process, please stop by one of our two mailing lists:
-* https://groups.google.com/forum/#!forum/ansible-devel
+* [ansible-core on the Ansible Forum](https://forum.ansible.com/tag/ansible-core)
* Matrix: [#devel:ansible.im](https://matrix.to/#/#devel:ansible.im)
-* IRC: #ansible-devel on [irc.libera.chat](https://libera.chat/)
We'd be happy to discuss things.
See this page for a complete list of communication channels and their purposes:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+*
Thank you once again!
diff --git a/hacking/ticket_stubs/collections.md b/hacking/ticket_stubs/collections.md
index eecd8151f15..3698ea14bd9 100644
--- a/hacking/ticket_stubs/collections.md
+++ b/hacking/ticket_stubs/collections.md
@@ -2,15 +2,16 @@ Hi!
Thank you very much for your submission to Ansible. It means a lot to us that you've taken the time to contribute.
-Since Ansible 2.10 we are no longer accepting new modules/plugins into Ansible core. However, we recommend looking into providing this functionality through Ansible Galaxy via Ansible Collections. You can find more information about collections at:
+Since Ansible 2.10 we are no longer accepting new modules/plugins into Ansible core.
+However, we recommend looking into providing this functionality through Ansible Galaxy via Ansible Collections. You can find more information about collections at:
-* https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html.
+* .
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
The mailing list and irc are great ways to ask questions, or post if you don't think this particular issue is resolved.
See this page for a complete and up to date list of communication channels and their purposes:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+*
Thank you once again for this and your interest in Ansible!
diff --git a/hacking/ticket_stubs/guide_newbie_about_gh_and_contributing_to_ansible.md b/hacking/ticket_stubs/guide_newbie_about_gh_and_contributing_to_ansible.md
index 3f4de70d1ff..708eedc53d1 100644
--- a/hacking/ticket_stubs/guide_newbie_about_gh_and_contributing_to_ansible.md
+++ b/hacking/ticket_stubs/guide_newbie_about_gh_and_contributing_to_ansible.md
@@ -9,13 +9,13 @@ Assuming that you wanted to create actual contribution, I think that
you may want to learn and read through the following articles I've
gathered for you:
-• https://opensource.guide/how-to-contribute/
-• https://docs.ansible.com/ansible/devel/community/
+•
+•
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
If you or anyone else has any further questions, please let us know by using any of the communication methods listed in the page below:
- https://docs.ansible.com/ansible/latest/community/communication.html
+
Have a nice day!
diff --git a/hacking/ticket_stubs/no_thanks.md b/hacking/ticket_stubs/no_thanks.md
index 2e2143fe619..8c32b6bc4f9 100644
--- a/hacking/ticket_stubs/no_thanks.md
+++ b/hacking/ticket_stubs/no_thanks.md
@@ -11,8 +11,9 @@ However, we're absolutely always up for discussion.
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
If you or anyone else has any further questions, please let us know by using any of the communication methods listed in the page below:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+*
-In the future, sometimes starting a discussion on the development list prior to implementing a feature can make getting things included a little easier, but it's not always necessary.
+In the future, sometimes starting a discussion on the development list prior to implementing
+a feature can make getting things included a little easier, but it's not always necessary.
Thank you once again for this and your interest in Ansible!
diff --git a/hacking/ticket_stubs/pr_duplicate.md b/hacking/ticket_stubs/pr_duplicate.md
index 01a2a72809f..080e4e4abf1 100644
--- a/hacking/ticket_stubs/pr_duplicate.md
+++ b/hacking/ticket_stubs/pr_duplicate.md
@@ -15,6 +15,6 @@ In the future, sometimes starting a discussion on the development list prior to
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
If you or anyone else has any further questions, please let us know by using any of the communication methods listed in the page below:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+*
Thank you once again for this and your interest in Ansible!
diff --git a/hacking/ticket_stubs/pr_merged.md b/hacking/ticket_stubs/pr_merged.md
index 0183ee90630..5d354e3586f 100644
--- a/hacking/ticket_stubs/pr_merged.md
+++ b/hacking/ticket_stubs/pr_merged.md
@@ -1,7 +1,7 @@
Hi!
This has been merged in, and will also be included in the next major release.
-For more info on our process see https://docs.ansible.com/ansible/devel/reference_appendices/release_and_maintenance.html#ansible-core-workflow
+For more info on our process see
If you or anyone else has any further questions, please let us know by stopping by one of the mailing lists or chat channels, as appropriate.
@@ -10,6 +10,6 @@ The mailing list and irc are great ways to ask questions, or post if you don't t
See this page for a complete and up to date list of communication channels and their purposes:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+*
Thank you!
diff --git a/hacking/ticket_stubs/proposal.md b/hacking/ticket_stubs/proposal.md
index 25d4cb403fe..2d8182f12be 100644
--- a/hacking/ticket_stubs/proposal.md
+++ b/hacking/ticket_stubs/proposal.md
@@ -3,16 +3,15 @@ Hi!
Ansible has a Proposal process for large feature ideas or changes in current design and functionality, such as this.
If you are still interested in seeing this new feature get into Ansible, please submit a proposal for it using this process.
-https://github.com/ansible/proposals/blob/master/proposals_process_proposal.md
+
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
-The mailing list and irc are great ways to ask questions, or post if you don't think this particular issue is resolved.
+The Forum is the best way to ask questions, or post if you don't think this particular issue is resolved.
-* #ansible-devel on [irc.libera.chat](https://libera.chat/)
-* https://groups.google.com/forum/#!forum/ansible-devel
+*
Or check this page for a more complete list of communication channels and their purposes:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+*
Thank you!
diff --git a/hacking/ticket_stubs/question_not_bug.md b/hacking/ticket_stubs/question_not_bug.md
index f4b143fbb60..dab0d2edba1 100644
--- a/hacking/ticket_stubs/question_not_bug.md
+++ b/hacking/ticket_stubs/question_not_bug.md
@@ -2,14 +2,13 @@ Hi!
Thanks very much for your interest in Ansible. It means a lot to us.
-This appears to be a user question, and we'd like to direct these kinds of things to either the mailing list or the IRC channel.
+This appears to be a user question, and we'd like to direct these topics to the Ansible Forum.
-* IRC: #ansible on [irc.libera.chat](https://libera.chat/)
-* mailing list: https://groups.google.com/forum/#!forum/ansible-project
+* [Ansible Forum](https://forum.ansible.com)
-See this page for a complete and up to date list of communication channels and their purposes:
+See this page for a complete and up to date list of communication channels and their purposes:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+*
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
If don't you think this particular issue is resolved, you should still stop by there first, we'd appreciate it.
diff --git a/hacking/ticket_stubs/resolved.md b/hacking/ticket_stubs/resolved.md
index 8eedbcfc156..f040d6d05a4 100644
--- a/hacking/ticket_stubs/resolved.md
+++ b/hacking/ticket_stubs/resolved.md
@@ -11,6 +11,6 @@ The mailing list and irc are great ways to ask questions, or post if you don't t
See this page for a complete list of communication channels and their purposes:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+*
Thank you!
diff --git a/hacking/ticket_stubs/wider_discussion.md b/hacking/ticket_stubs/wider_discussion.md
index 74585816fc7..3ab9073f443 100644
--- a/hacking/ticket_stubs/wider_discussion.md
+++ b/hacking/ticket_stubs/wider_discussion.md
@@ -8,14 +8,13 @@ Reasons for this include:
* INSERT REASONS!
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
-Can you please post on ansible-development list so we can talk about this idea with the wider group?
+Can you please post on the Ansible Forum so we can talk about this idea with the wider group?
-* https://groups.google.com/forum/#!forum/ansible-devel
+* [Ansible Core on the Ansible Forum](https://forum.ansible.com/tag/ansible-core)
* Matrix: [#devel:ansible.im](https://matrix.to/#/#devel:ansible.im)
-* #ansible-devel on [irc.libera.chat](https://libera.chat/)
For other alternatives, check this page for a more complete list of communication channels and their purposes:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+* https://forum.ansible.com
Thank you once again for this and your interest in Ansible!
diff --git a/hacking/update-sanity-requirements.py b/hacking/update-sanity-requirements.py
index 997d6dbf87a..aaaa803cde8 100755
--- a/hacking/update-sanity-requirements.py
+++ b/hacking/update-sanity-requirements.py
@@ -52,7 +52,6 @@ class SanityTest:
if pip_freeze.stdout:
raise Exception(f'Initial virtual environment is not empty:\n{pip_freeze.stdout}')
- subprocess.run(pip + ['install', 'wheel'], env=env, check=True) # make bdist_wheel available during pip install
subprocess.run(pip + ['install', '-r', self.source_path], env=env, check=True)
freeze_options = ['--all']
diff --git a/lib/ansible/__main__.py b/lib/ansible/__main__.py
index cb7006285b4..afdd2849739 100644
--- a/lib/ansible/__main__.py
+++ b/lib/ansible/__main__.py
@@ -3,9 +3,6 @@
from __future__ import annotations
import argparse
-import importlib
-import os
-import sys
from importlib.metadata import distribution
@@ -19,22 +16,10 @@ def main():
ep_map = {_short_name(ep.name): ep for ep in dist.entry_points if ep.group == 'console_scripts'}
parser = argparse.ArgumentParser(prog='python -m ansible', add_help=False)
- parser.add_argument('entry_point', choices=list(ep_map) + ['test'])
+ parser.add_argument('entry_point', choices=list(ep_map))
args, extra = parser.parse_known_args()
- if args.entry_point == 'test':
- ansible_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
- source_root = os.path.join(ansible_root, 'test', 'lib')
-
- if os.path.exists(os.path.join(source_root, 'ansible_test', '_internal', '__init__.py')):
- # running from source, use that version of ansible-test instead of any version that may already be installed
- sys.path.insert(0, source_root)
-
- module = importlib.import_module('ansible_test._util.target.cli.ansible_test_cli_stub')
- main = module.main
- else:
- main = ep_map[args.entry_point].load()
-
+ main = ep_map[args.entry_point].load()
main([args.entry_point] + extra)
diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py
index b8da2dbd50f..8b12aec17f4 100644
--- a/lib/ansible/cli/__init__.py
+++ b/lib/ansible/cli/__init__.py
@@ -11,9 +11,9 @@ import sys
# Used for determining if the system is running a new enough python version
# and should only restrict on our documented minimum versions
-if sys.version_info < (3, 10):
+if sys.version_info < (3, 11):
raise SystemExit(
- 'ERROR: Ansible requires Python 3.10 or newer on the controller. '
+ 'ERROR: Ansible requires Python 3.11 or newer on the controller. '
'Current version: %s' % ''.join(sys.version.splitlines())
)
@@ -167,19 +167,7 @@ class CLI(ABC):
else:
display.v(u"No config file found; using defaults")
- # warn about deprecated config options
- for deprecated in C.config.DEPRECATED:
- name = deprecated[0]
- why = deprecated[1]['why']
- if 'alternatives' in deprecated[1]:
- alt = ', use %s instead' % deprecated[1]['alternatives']
- else:
- alt = ''
- ver = deprecated[1].get('version')
- date = deprecated[1].get('date')
- collection_name = deprecated[1].get('collection_name')
- display.deprecated("%s option, %s%s" % (name, why, alt),
- version=ver, date=date, collection_name=collection_name)
+ C.handle_config_noise(display)
@staticmethod
def split_vault_id(vault_id):
diff --git a/lib/ansible/cli/config.py b/lib/ansible/cli/config.py
index e7f240c80d4..da9231af74b 100755
--- a/lib/ansible/cli/config.py
+++ b/lib/ansible/cli/config.py
@@ -9,9 +9,10 @@ from __future__ import annotations
from ansible.cli import CLI
import os
-import yaml
import shlex
import subprocess
+import sys
+import yaml
from collections.abc import Mapping
@@ -21,7 +22,7 @@ import ansible.plugins.loader as plugin_loader
from ansible import constants as C
from ansible.cli.arguments import option_helpers as opt_help
from ansible.config.manager import ConfigManager, Setting
-from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleRequiredOptionError
from ansible.module_utils.common.text.converters import to_native, to_text, to_bytes
from ansible.module_utils.common.json import json_dump
from ansible.module_utils.six import string_types
@@ -34,6 +35,9 @@ from ansible.utils.path import unfrackpath
display = Display()
+_IGNORE_CHANGED = frozenset({'_terms', '_input'})
+
+
def yaml_dump(data, default_flow_style=False, default_style=None):
return yaml.dump(data, Dumper=AnsibleDumper, default_flow_style=default_flow_style, default_style=default_style)
@@ -49,6 +53,37 @@ def get_constants():
return get_constants.cvars
+def _ansible_env_vars(varname):
+ ''' Return True or False depending on whether the variable name is possibly a 'configurable' Ansible env variable '''
+ return all(
+ [
+ varname.startswith("ANSIBLE_"),
+ not varname.startswith(("ANSIBLE_TEST_", "ANSIBLE_LINT_")),
+ varname not in ("ANSIBLE_CONFIG", "ANSIBLE_DEV_HOME"),
+ ]
+ )
+
+
+def _get_evar_list(settings):
+ data = []
+ for setting in settings:
+ if 'env' in settings[setting] and settings[setting]['env']:
+ for varname in settings[setting]['env']:
+ data.append(varname.get('name'))
+ return data
+
+
+def _get_ini_entries(settings):
+ data = {}
+ for setting in settings:
+ if 'ini' in settings[setting] and settings[setting]['ini']:
+ for kv in settings[setting]['ini']:
+ if not kv['section'] in data:
+ data[kv['section']] = set()
+ data[kv['section']].add(kv['key'])
+ return data
+
+
class ConfigCLI(CLI):
""" Config command line class """
@@ -99,9 +134,13 @@ class ConfigCLI(CLI):
init_parser.add_argument('--disabled', dest='commented', action='store_true', default=False,
help='Prefixes all entries with a comment character to disable them')
- # search_parser = subparsers.add_parser('find', help='Search configuration')
- # search_parser.set_defaults(func=self.execute_search)
- # search_parser.add_argument('args', help='Search term', metavar='')
+ validate_parser = subparsers.add_parser('validate',
+ help='Validate the configuration file and environment variables. '
+ 'By default it only checks the base settings without accounting for plugins (see -t).',
+ parents=[common])
+ validate_parser.set_defaults(func=self.execute_validate)
+ validate_parser.add_argument('--format', '-f', dest='format', action='store', choices=['ini', 'env'], default='ini',
+ help='Configuration source to validate (an ini file or ANSIBLE_ environment variables)')
def post_process_args(self, options):
options = super(ConfigCLI, self).post_process_args(options)
@@ -113,6 +152,10 @@ class ConfigCLI(CLI):
super(ConfigCLI, self).run()
+ # initialize each galaxy server's options from known listed servers
+ self._galaxy_servers = [s for s in C.GALAXY_SERVER_LIST or [] if s] # clean list, reused later here
+ C.config.load_galaxy_server_defs(self._galaxy_servers)
+
if context.CLIARGS['config_file']:
self.config_file = unfrackpath(context.CLIARGS['config_file'], follow=False)
b_config = to_bytes(self.config_file)
@@ -226,11 +269,17 @@ class ConfigCLI(CLI):
'''
build a dict with the list requested configs
'''
+
config_entries = {}
if context.CLIARGS['type'] in ('base', 'all'):
# this dumps main/common configs
config_entries = self.config.get_configuration_definitions(ignore_private=True)
+ # for base and all, we include galaxy servers
+ config_entries['GALAXY_SERVERS'] = {}
+ for server in self._galaxy_servers:
+ config_entries['GALAXY_SERVERS'][server] = self.config.get_configuration_definitions('galaxy_server', server)
+
if context.CLIARGS['type'] != 'base':
config_entries['PLUGINS'] = {}
@@ -239,6 +288,7 @@ class ConfigCLI(CLI):
for ptype in C.CONFIGURABLE_PLUGINS:
config_entries['PLUGINS'][ptype.upper()] = self._list_plugin_settings(ptype)
elif context.CLIARGS['type'] != 'base':
+ # only for requested types
config_entries['PLUGINS'][context.CLIARGS['type']] = self._list_plugin_settings(context.CLIARGS['type'], context.CLIARGS['args'])
return config_entries
@@ -358,7 +408,7 @@ class ConfigCLI(CLI):
elif default is None:
default = ''
- if context.CLIARGS['commented']:
+ if context.CLIARGS.get('commented', False):
entry['key'] = ';%s' % entry['key']
key = desc + '\n%s=%s' % (entry['key'], default)
@@ -408,13 +458,13 @@ class ConfigCLI(CLI):
entries = []
for setting in sorted(config):
- changed = (config[setting].origin not in ('default', 'REQUIRED'))
+ changed = (config[setting].origin not in ('default', 'REQUIRED') and setting not in _IGNORE_CHANGED)
if context.CLIARGS['format'] == 'display':
if isinstance(config[setting], Setting):
# proceed normally
value = config[setting].value
- if config[setting].origin == 'default':
+ if config[setting].origin == 'default' or setting in _IGNORE_CHANGED:
color = 'green'
value = self.config.template_default(value, get_constants())
elif config[setting].origin == 'REQUIRED':
@@ -431,6 +481,8 @@ class ConfigCLI(CLI):
else:
entry = {}
for key in config[setting]._fields:
+ if key == 'type':
+ continue
entry[key] = getattr(config[setting], key)
if not context.CLIARGS['only_changed'] or changed:
@@ -439,7 +491,10 @@ class ConfigCLI(CLI):
return entries
def _get_global_configs(self):
- config = self.config.get_configuration_definitions(ignore_private=True).copy()
+
+ # Add base
+ config = self.config.get_configuration_definitions(ignore_private=True)
+ # convert to settings
for setting in config.keys():
v, o = C.config.get_config_value_and_origin(setting, cfile=self.config_file, variables=get_constants())
config[setting] = Setting(setting, v, o, None)
@@ -451,7 +506,7 @@ class ConfigCLI(CLI):
# prep loading
loader = getattr(plugin_loader, '%s_loader' % ptype)
- # acumulators
+ # accumulators
output = []
config_entries = {}
@@ -468,7 +523,7 @@ class ConfigCLI(CLI):
plugin_cs = loader.all(class_only=True)
for plugin in plugin_cs:
- # in case of deprecastion they diverge
+ # in case of deprecation they diverge
finalname = name = plugin._load_name
if name.startswith('_'):
if os.path.islink(plugin._original_path):
@@ -491,12 +546,9 @@ class ConfigCLI(CLI):
for setting in config_entries[finalname].keys():
try:
v, o = C.config.get_config_value_and_origin(setting, cfile=self.config_file, plugin_type=ptype, plugin_name=name, variables=get_constants())
- except AnsibleError as e:
- if to_text(e).startswith('No setting was provided for required configuration'):
- v = None
- o = 'REQUIRED'
- else:
- raise e
+ except AnsibleRequiredOptionError:
+ v = None
+ o = 'REQUIRED'
if v is None and o is None:
# not all cases will be error
@@ -516,17 +568,60 @@ class ConfigCLI(CLI):
return output
+ def _get_galaxy_server_configs(self):
+
+ output = []
+ # add galaxy servers
+ for server in self._galaxy_servers:
+ server_config = {}
+ s_config = self.config.get_configuration_definitions('galaxy_server', server)
+ for setting in s_config.keys():
+ try:
+ v, o = C.config.get_config_value_and_origin(setting, plugin_type='galaxy_server', plugin_name=server, cfile=self.config_file)
+ except AnsibleError as e:
+ if s_config[setting].get('required', False):
+ v = None
+ o = 'REQUIRED'
+ else:
+ raise e
+ if v is None and o is None:
+ # not all cases will be error
+ o = 'REQUIRED'
+ server_config[setting] = Setting(setting, v, o, None)
+ if context.CLIARGS['format'] == 'display':
+ if not context.CLIARGS['only_changed'] or server_config:
+ equals = '=' * len(server)
+ output.append(f'\n{server}\n{equals}')
+ output.extend(self._render_settings(server_config))
+ else:
+ output.append({server: server_config})
+
+ return output
+
def execute_dump(self):
'''
Shows the current settings, merges ansible.cfg if specified
'''
- if context.CLIARGS['type'] == 'base':
- # deal with base
- output = self._get_global_configs()
- elif context.CLIARGS['type'] == 'all':
+ output = []
+ if context.CLIARGS['type'] in ('base', 'all'):
# deal with base
output = self._get_global_configs()
- # deal with plugins
+
+ # add galaxy servers
+ server_config_list = self._get_galaxy_server_configs()
+ if context.CLIARGS['format'] == 'display':
+ output.append('\nGALAXY_SERVERS:\n')
+ output.extend(server_config_list)
+ else:
+ configs = {}
+ for server_config in server_config_list:
+ server = list(server_config.keys())[0]
+ server_reduced_config = server_config.pop(server)
+ configs[server] = server_reduced_config
+ output.append({'GALAXY_SERVERS': configs})
+
+ if context.CLIARGS['type'] == 'all':
+ # add all plugins
for ptype in C.CONFIGURABLE_PLUGINS:
plugin_list = self._get_plugin_configs(ptype, context.CLIARGS['args'])
if context.CLIARGS['format'] == 'display':
@@ -539,8 +634,9 @@ class ConfigCLI(CLI):
else:
pname = '%s_PLUGINS' % ptype.upper()
output.append({pname: plugin_list})
- else:
- # deal with plugins
+
+ elif context.CLIARGS['type'] != 'base':
+ # deal with specific plugin
output = self._get_plugin_configs(context.CLIARGS['type'], context.CLIARGS['args'])
if context.CLIARGS['format'] == 'display':
@@ -552,6 +648,73 @@ class ConfigCLI(CLI):
self.pager(to_text(text, errors='surrogate_or_strict'))
+ def execute_validate(self):
+
+ found = False
+ config_entries = self._list_entries_from_args()
+ plugin_types = config_entries.pop('PLUGINS', None)
+ galaxy_servers = config_entries.pop('GALAXY_SERVERS', None)
+
+ if context.CLIARGS['format'] == 'ini':
+ if C.CONFIG_FILE is not None:
+ # validate ini config since it is found
+
+ sections = _get_ini_entries(config_entries)
+ # Also from plugins
+ if plugin_types:
+ for ptype in plugin_types:
+ for plugin in plugin_types[ptype].keys():
+ plugin_sections = _get_ini_entries(plugin_types[ptype][plugin])
+ for s in plugin_sections:
+ if s in sections:
+ sections[s].update(plugin_sections[s])
+ else:
+ sections[s] = plugin_sections[s]
+ if galaxy_servers:
+ for server in galaxy_servers:
+ server_sections = _get_ini_entries(galaxy_servers[server])
+ for s in server_sections:
+ if s in sections:
+ sections[s].update(server_sections[s])
+ else:
+ sections[s] = server_sections[s]
+ if sections:
+ p = C.config._parsers[C.CONFIG_FILE]
+ for s in p.sections():
+ # check for valid sections
+ if s not in sections:
+ display.error(f"Found unknown section '{s}' in '{C.CONFIG_FILE}'.")
+ found = True
+ continue
+
+ # check keys in valid sections
+ for k in p.options(s):
+ if k not in sections[s]:
+ display.error(f"Found unknown key '{k}' in section '{s}' in '{C.CONFIG_FILE}'.")
+ found = True
+
+ elif context.CLIARGS['format'] == 'env':
+ # validate any 'ANSIBLE_' env vars found
+ evars = [varname for varname in os.environ.keys() if _ansible_env_vars(varname)]
+ if evars:
+ data = _get_evar_list(config_entries)
+ if plugin_types:
+ for ptype in plugin_types:
+ for plugin in plugin_types[ptype].keys():
+ data.extend(_get_evar_list(plugin_types[ptype][plugin]))
+
+ for evar in evars:
+ if evar not in data:
+ display.error(f"Found unknown environment variable '{evar}'.")
+ found = True
+
+ # we found discrepancies!
+ if found:
+ sys.exit(1)
+
+ # all good
+ display.display("All configurations seem valid!")
+
def main(args=None):
ConfigCLI.cli_executor(args)
diff --git a/lib/ansible/cli/console.py b/lib/ansible/cli/console.py
index 5805b97fce8..60f9cdd84a7 100755
--- a/lib/ansible/cli/console.py
+++ b/lib/ansible/cli/console.py
@@ -545,7 +545,7 @@ class ConsoleCLI(CLI, cmd.Cmd):
if path:
module_loader.add_directory(path)
- # dynamically add 'cannonical' modules as commands, aliases coudld be used and dynamically loaded
+ # dynamically add 'canonical' modules as commands, aliases could be used and dynamically loaded
self.modules = self.list_modules()
for module in self.modules:
setattr(self, 'do_' + module, lambda arg, module=module: self.default(module + ' ' + arg))
diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py
index 16033f8e112..a6a73b50b7b 100755
--- a/lib/ansible/cli/doc.py
+++ b/lib/ansible/cli/doc.py
@@ -50,7 +50,7 @@ PB_OBJECTS = ['Play', 'Role', 'Block', 'Task', 'Handler']
PB_LOADED = {}
SNIPPETS = ['inventory', 'lookup', 'module']
-# harcoded from ascii values
+# hardcoded from ascii values
STYLE = {
'BLINK': '\033[5m',
'BOLD': '\033[1m',
@@ -71,8 +71,14 @@ NOCOLOR = {
'PLUGIN': r'[%s]',
}
-# TODO: make configurable
-ref_style = {'MODULE': 'yellow', 'REF': 'magenta', 'LINK': 'cyan', 'DEP': 'magenta', 'CONSTANT': 'dark gray', 'PLUGIN': 'yellow'}
+ref_style = {
+ 'MODULE': C.COLOR_DOC_MODULE,
+ 'REF': C.COLOR_DOC_REFERENCE,
+ 'LINK': C.COLOR_DOC_LINK,
+ 'DEP': C.COLOR_DOC_DEPRECATED,
+ 'CONSTANT': C.COLOR_DOC_CONSTANT,
+ 'PLUGIN': C.COLOR_DOC_PLUGIN,
+}
def jdump(text):
@@ -381,6 +387,12 @@ class RoleMixin(object):
for role, collection, role_path in (roles | collroles):
argspec = self._load_argspec(role, role_path, collection)
+ if 'error' in argspec:
+ if fail_on_errors:
+ raise argspec['exception']
+ else:
+ display.warning('Skipping role (%s) due to: %s' % (role, argspec['error']), True)
+ continue
fqcn, doc = self._build_doc(role, role_path, collection, argspec, entry_point)
if doc:
result[fqcn] = doc
@@ -881,6 +893,7 @@ class DocCLI(CLI, RoleMixin):
plugin_type = context.CLIARGS['type'].lower()
do_json = context.CLIARGS['json_format'] or context.CLIARGS['dump']
listing = context.CLIARGS['list_files'] or context.CLIARGS['list_dir']
+ no_fail = bool(not context.CLIARGS['no_fail_on_errors'])
if context.CLIARGS['list_files']:
content = 'files'
@@ -903,7 +916,6 @@ class DocCLI(CLI, RoleMixin):
docs['all'] = {}
for ptype in ptypes:
- no_fail = bool(not context.CLIARGS['no_fail_on_errors'])
if ptype == 'role':
roles = self._create_role_list(fail_on_errors=no_fail)
docs['all'][ptype] = self._create_role_doc(roles.keys(), context.CLIARGS['entry_point'], fail_on_errors=no_fail)
@@ -929,7 +941,7 @@ class DocCLI(CLI, RoleMixin):
if plugin_type == 'keyword':
docs = DocCLI._get_keywords_docs(context.CLIARGS['args'])
elif plugin_type == 'role':
- docs = self._create_role_doc(context.CLIARGS['args'], context.CLIARGS['entry_point'])
+ docs = self._create_role_doc(context.CLIARGS['args'], context.CLIARGS['entry_point'], fail_on_errors=no_fail)
else:
# display specific plugin docs
docs = self._get_plugins_docs(plugin_type, context.CLIARGS['args'])
@@ -1083,7 +1095,7 @@ class DocCLI(CLI, RoleMixin):
text = DocCLI.get_man_text(doc, collection_name, plugin_type)
except Exception as e:
display.vvv(traceback.format_exc())
- raise AnsibleError("Unable to retrieve documentation from '%s' due to: %s" % (plugin, to_native(e)), orig_exc=e)
+ raise AnsibleError("Unable to retrieve documentation from '%s'" % (plugin), orig_exc=e)
return text
@@ -1189,7 +1201,7 @@ class DocCLI(CLI, RoleMixin):
opt_leadin = "-"
key = "%s%s %s" % (base_indent, opt_leadin, _format(o, 'yellow'))
- # description is specifically formated and can either be string or list of strings
+ # description is specifically formatted and can either be string or list of strings
if 'description' not in opt:
raise AnsibleError("All (sub-)options and return values must have a 'description' field")
text.append('')
@@ -1296,14 +1308,15 @@ class DocCLI(CLI, RoleMixin):
if doc.get('description'):
if isinstance(doc['description'], list):
- desc = " ".join(doc['description'])
+ descs = doc['description']
else:
- desc = doc['description']
- text.append("%s" % DocCLI.warp_fill(DocCLI.tty_ify(desc), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
+ descs = [doc['description']]
+ for desc in descs:
+ text.append("%s" % DocCLI.warp_fill(DocCLI.tty_ify(desc), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
text.append('')
if doc.get('options'):
- text.append(_format("Options", 'bold') + " (%s inicates it is required):" % ("=" if C.ANSIBLE_NOCOLOR else 'red'))
+ text.append(_format("Options", 'bold') + " (%s indicates it is required):" % ("=" if C.ANSIBLE_NOCOLOR else 'red'))
DocCLI.add_fields(text, doc.pop('options'), limit, opt_indent)
if doc.get('attributes', False):
@@ -1338,6 +1351,17 @@ class DocCLI(CLI, RoleMixin):
# use empty indent since this affects the start of the yaml doc, not it's keys
text.append(DocCLI._indent_lines(DocCLI._dump_yaml({k.upper(): doc[k]}), ''))
+ if doc.get('examples', False):
+ text.append('')
+ text.append(_format("EXAMPLES:", 'bold'))
+ if isinstance(doc['examples'], string_types):
+ text.append(doc.pop('examples').strip())
+ else:
+ try:
+ text.append(yaml_dump(doc.pop('examples'), indent=2, default_flow_style=False))
+ except Exception as e:
+ raise AnsibleParserError("Unable to parse examples section", orig_exc=e)
+
return text
@staticmethod
@@ -1355,12 +1379,13 @@ class DocCLI(CLI, RoleMixin):
text.append("> %s %s (%s)" % (plugin_type.upper(), _format(doc.pop('plugin_name'), 'bold'), doc.pop('filename')))
if isinstance(doc['description'], list):
- desc = " ".join(doc.pop('description'))
+ descs = doc.pop('description')
else:
- desc = doc.pop('description')
+ descs = [doc.pop('description')]
text.append('')
- text.append(DocCLI.warp_fill(DocCLI.tty_ify(desc), limit, initial_indent=base_indent, subsequent_indent=base_indent))
+ for desc in descs:
+ text.append(DocCLI.warp_fill(DocCLI.tty_ify(desc), limit, initial_indent=base_indent, subsequent_indent=base_indent))
if display.verbosity > 0:
doc['added_in'] = DocCLI._format_version_added(doc.pop('version_added', 'historical'), doc.pop('version_added_collection', 'ansible-core'))
@@ -1368,16 +1393,15 @@ class DocCLI(CLI, RoleMixin):
if doc.get('deprecated', False):
text.append(_format("DEPRECATED: ", 'bold', 'DEP'))
if isinstance(doc['deprecated'], dict):
- if 'removed_at_date' in doc['deprecated']:
- text.append(
- "\tReason: %(why)s\n\tWill be removed in a release after %(removed_at_date)s\n\tAlternatives: %(alternative)s" % doc.pop('deprecated')
- )
- else:
- if 'version' in doc['deprecated'] and 'removed_in' not in doc['deprecated']:
- doc['deprecated']['removed_in'] = doc['deprecated']['version']
- text.append("\tReason: %(why)s\n\tWill be removed in: Ansible %(removed_in)s\n\tAlternatives: %(alternative)s" % doc.pop('deprecated'))
+ if 'removed_at_date' not in doc['deprecated'] and 'version' in doc['deprecated'] and 'removed_in' not in doc['deprecated']:
+ doc['deprecated']['removed_in'] = doc['deprecated']['version']
+ try:
+ text.append('\t' + C.config.get_deprecated_msg_from_config(doc['deprecated'], True))
+ except KeyError as e:
+ raise AnsibleError("Invalid deprecation documentation structure", orig_exc=e)
else:
- text.append("%s" % doc.pop('deprecated'))
+ text.append("%s" % doc['deprecated'])
+ del doc['deprecated']
if doc.pop('has_action', False):
text.append("")
@@ -1385,7 +1409,7 @@ class DocCLI(CLI, RoleMixin):
if doc.get('options', False):
text.append("")
- text.append(_format("OPTIONS", 'bold') + " (%s inicates it is required):" % ("=" if C.ANSIBLE_NOCOLOR else 'red'))
+ text.append(_format("OPTIONS", 'bold') + " (%s indicates it is required):" % ("=" if C.ANSIBLE_NOCOLOR else 'red'))
DocCLI.add_fields(text, doc.pop('options'), limit, opt_indent, man=(display.verbosity == 0))
if doc.get('attributes', False):
diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py
index 805bd650372..19ccd400445 100755
--- a/lib/ansible/cli/galaxy.py
+++ b/lib/ansible/cli/galaxy.py
@@ -55,7 +55,6 @@ from ansible.module_utils.common.yaml import yaml_dump, yaml_load
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils import six
from ansible.parsing.dataloader import DataLoader
-from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.playbook.role.requirement import RoleRequirement
from ansible.template import Templar
from ansible.utils.collection_loader import AnsibleCollectionConfig
@@ -66,27 +65,6 @@ from ansible.utils.vars import load_extra_vars
display = Display()
urlparse = six.moves.urllib.parse.urlparse
-# config definition by position: name, required, type
-SERVER_DEF = [
- ('url', True, 'str'),
- ('username', False, 'str'),
- ('password', False, 'str'),
- ('token', False, 'str'),
- ('auth_url', False, 'str'),
- ('api_version', False, 'int'),
- ('validate_certs', False, 'bool'),
- ('client_id', False, 'str'),
- ('timeout', False, 'int'),
-]
-
-# config definition fields
-SERVER_ADDITIONAL = {
- 'api_version': {'default': None, 'choices': [2, 3]},
- 'validate_certs': {'cli': [{'name': 'validate_certs'}]},
- 'timeout': {'default': C.GALAXY_SERVER_TIMEOUT, 'cli': [{'name': 'timeout'}]},
- 'token': {'default': None},
-}
-
def with_collection_artifacts_manager(wrapped_method):
"""Inject an artifacts manager if not passed explicitly.
@@ -490,12 +468,31 @@ class GalaxyCLI(CLI):
ignore_errors_help = 'Ignore errors during installation and continue with the next specified ' \
'collection. This will not ignore dependency conflict errors.'
else:
- args_kwargs['help'] = 'Role name, URL or tar file'
+ args_kwargs['help'] = 'Role name, URL or tar file. This is mutually exclusive with -r.'
ignore_errors_help = 'Ignore errors and continue with the next specified role.'
+ if self._implicit_role:
+ # might install both roles and collections
+ description_text = (
+ 'Install roles and collections from file(s), URL(s) or Ansible '
+ 'Galaxy to the first entry in the config COLLECTIONS_PATH for collections '
+ 'and first entry in the config ROLES_PATH for roles. '
+ 'The first entry in the config ROLES_PATH can be overridden by --roles-path '
+ 'or -p, but this will result in only roles being installed.'
+ )
+ prog = 'ansible-galaxy install'
+ else:
+ prog = parser._prog_prefix
+ description_text = (
+ 'Install {0}(s) from file(s), URL(s) or Ansible '
+ 'Galaxy to the first entry in the config {1}S_PATH '
+ 'unless overridden by --{0}s-path.'.format(galaxy_type, galaxy_type.upper())
+ )
install_parser = parser.add_parser('install', parents=parents,
help='Install {0}(s) from file(s), URL(s) or Ansible '
- 'Galaxy'.format(galaxy_type))
+ 'Galaxy'.format(galaxy_type),
+ description=description_text,
+ prog=prog,)
install_parser.set_defaults(func=self.execute_install)
install_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', **args_kwargs)
@@ -548,8 +545,12 @@ class GalaxyCLI(CLI):
'This does not apply to collections in remote Git repositories or URLs to remote tarballs.'
)
else:
- install_parser.add_argument('-r', '--role-file', dest='requirements',
- help='A file containing a list of roles to be installed.')
+ if self._implicit_role:
+ install_parser.add_argument('-r', '--role-file', dest='requirements',
+ help='A file containing a list of collections and roles to be installed.')
+ else:
+ install_parser.add_argument('-r', '--role-file', dest='requirements',
+ help='A file containing a list of roles to be installed.')
r_re = re.compile(r'^(?] for the values url, username, password, and token.
- config_dict = dict((k, server_config_def(server_key, k, req, ensure_type)) for k, req, ensure_type in SERVER_DEF)
- defs = AnsibleLoader(yaml_dump(config_dict)).get_single_data()
- C.config.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs)
# resolve the config created options above with existing config and user options
- server_options = C.config.get_plugin_options('galaxy_server', server_key)
+ server_options = C.config.get_plugin_options(plugin_type='galaxy_server', name=server_key)
# auth_url is used to create the token, but not directly by GalaxyAPI, so
# it doesn't need to be passed as kwarg to GalaxyApi, same for others we pop here
diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py
index fb3321efa9a..eb8436636e2 100755
--- a/lib/ansible/cli/pull.py
+++ b/lib/ansible/cli/pull.py
@@ -12,7 +12,7 @@ from ansible.cli import CLI
import datetime
import os
import platform
-import random
+import secrets
import shlex
import shutil
import socket
@@ -140,7 +140,7 @@ class PullCLI(CLI):
if options.sleep:
try:
- secs = random.randint(0, int(options.sleep))
+ secs = secrets.randbelow(int(options.sleep))
options.sleep = secs
except ValueError:
raise AnsibleOptionsError("%s is not a number." % options.sleep)
diff --git a/lib/ansible/cli/scripts/ansible_connection_cli_stub.py b/lib/ansible/cli/scripts/ansible_connection_cli_stub.py
old mode 100755
new mode 100644
index 9455b9851a9..701dcdaa198
--- a/lib/ansible/cli/scripts/ansible_connection_cli_stub.py
+++ b/lib/ansible/cli/scripts/ansible_connection_cli_stub.py
@@ -1,10 +1,8 @@
-#!/usr/bin/env python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import fcntl
-import hashlib
import io
import os
import pickle
@@ -40,13 +38,6 @@ def read_stream(byte_stream):
if len(data) < size:
raise Exception("EOF found before data was complete")
- data_hash = to_text(byte_stream.readline().strip())
- if data_hash != hashlib.sha1(data).hexdigest():
- raise Exception("Read {0} bytes, but data did not match checksum".format(size))
-
- # restore escaped loose \r characters
- data = data.replace(br'\r', b'\r')
-
return data
@@ -221,7 +212,7 @@ def main(args=None):
""" Called to initiate the connect to the remote device
"""
- parser = opt_help.create_base_parser(prog='ansible-connection')
+ parser = opt_help.create_base_parser(prog=None)
opt_help.add_verbosity_options(parser)
parser.add_argument('playbook_pid')
parser.add_argument('task_uuid')
diff --git a/lib/ansible/config/base.yml b/lib/ansible/config/base.yml
index 7fed922f053..445fd4540a1 100644
--- a/lib/ansible/config/base.yml
+++ b/lib/ansible/config/base.yml
@@ -1,6 +1,14 @@
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
---
+_ANSIBLE_CONNECTION_PATH:
+ env:
+ - name: _ANSIBLE_CONNECTION_PATH
+ name: Overrides the location of the Ansible persistent connection helper script.
+ description:
+ - For internal use only.
+ type: path
+ version_added: "2.18"
ANSIBLE_HOME:
name: The Ansible home path
description:
@@ -25,6 +33,9 @@ ANSIBLE_CONNECTION_PATH:
- {key: ansible_connection_path, section: persistent_connection}
yaml: {key: persistent_connection.ansible_connection_path}
version_added: "2.8"
+ deprecated:
+ why: This setting has no effect.
+ version: "2.22"
ANSIBLE_COW_SELECTION:
name: Cowsay filter selection
default: default
@@ -293,6 +304,14 @@ COLOR_HIGHLIGHT:
env: [{name: ANSIBLE_COLOR_HIGHLIGHT}]
ini:
- {key: highlight, section: colors}
+COLOR_INCLUDED:
+ name: Color for 'included' task status
+ default: cyan
+ description: Defines the color to use when showing 'Included' task status.
+ env: [{name: ANSIBLE_COLOR_INCLUDED}]
+ ini:
+ - {key: included, section: colors}
+ version_added: '2.18'
COLOR_OK:
name: Color for 'ok' task status
default: green
@@ -328,6 +347,54 @@ COLOR_WARN:
env: [{name: ANSIBLE_COLOR_WARN}]
ini:
- {key: warn, section: colors}
+COLOR_DOC_MODULE:
+ name: Color for module name in the ansible-doc output
+ default: yellow
+ description: Defines the color to use when emitting a module name in the ansible-doc output.
+ env: [{name: ANSIBLE_COLOR_DOC_MODULE}]
+ ini:
+ - {key: doc_module, section: colors}
+ version_added: '2.18'
+COLOR_DOC_REFERENCE:
+ name: Color for cross-reference in the ansible-doc output
+ default: magenta
+ description: Defines the color to use when emitting cross-reference in the ansible-doc output.
+ env: [{name: ANSIBLE_COLOR_DOC_REFERENCE}]
+ ini:
+ - {key: doc_reference, section: colors}
+ version_added: '2.18'
+COLOR_DOC_LINK:
+ name: Color for Link in ansible-doc output
+ default: cyan
+ description: Defines the color to use when emitting a link in the ansible-doc output.
+ env: [{name: ANSIBLE_COLOR_DOC_LINK}]
+ ini:
+ - {key: doc_link, section: colors}
+ version_added: '2.18'
+COLOR_DOC_DEPRECATED:
+ name: Color for deprecated value in ansible-doc output
+ default: magenta
+ description: Defines the color to use when emitting a deprecated value in the ansible-doc output.
+ env: [{name: ANSIBLE_COLOR_DOC_DEPRECATED}]
+ ini:
+ - {key: doc_deprecated, section: colors}
+ version_added: '2.18'
+COLOR_DOC_CONSTANT:
+ name: Color for constant in ansible-doc output
+ default: dark gray
+ description: Defines the color to use when emitting a constant in the ansible-doc output.
+ env: [{name: ANSIBLE_COLOR_DOC_CONSTANT}]
+ ini:
+ - {key: doc_constant, section: colors}
+ version_added: '2.18'
+COLOR_DOC_PLUGIN:
+ name: Color for the plugin in ansible-doc output
+ default: yellow
+ description: Defines the color to use when emitting a plugin name in the ansible-doc output.
+ env: [{name: ANSIBLE_COLOR_DOC_PLUGIN}]
+ ini:
+ - {key: doc_plugin, section: colors}
+ version_added: '2.18'
CONNECTION_PASSWORD_FILE:
name: Connection password file
default: ~
@@ -759,7 +826,9 @@ DEFAULT_LOCAL_TMP:
DEFAULT_LOG_PATH:
name: Ansible log file path
default: ~
- description: File to which Ansible will log on the controller. When empty logging is disabled.
+ description:
+ - File to which Ansible will log on the controller.
+ - When not set, logging is disabled.
env: [{name: ANSIBLE_LOG_PATH}]
ini:
- {key: log_path, section: defaults}
@@ -804,8 +873,8 @@ DEFAULT_MODULE_COMPRESSION:
env: []
ini:
- {key: module_compression, section: defaults}
-# vars:
-# - name: ansible_module_compression
+ vars:
+ - name: ansible_module_compression
DEFAULT_MODULE_NAME:
name: Default adhoc module
default: command
@@ -897,7 +966,7 @@ DEFAULT_PRIVATE_ROLE_VARS:
- This was introduced as a way to reset role variables to default values if a role is used more than once
in a playbook.
- Starting in version '2.17' M(ansible.builtin.include_roles) and M(ansible.builtin.import_roles) can
- indivudually override this via the C(public) parameter.
+ individually override this via the C(public) parameter.
- Included roles only make their variables public at execution, unlike imported roles which happen at playbook compile time.
env: [{name: ANSIBLE_PRIVATE_ROLE_VARS}]
ini:
@@ -957,7 +1026,7 @@ DEFAULT_STDOUT_CALLBACK:
EDITOR:
name: editor application to use
default: vi
- descrioption:
+ description:
- for the cases in which Ansible needs to return a file within an editor, this chooses the application to use.
ini:
- section: defaults
@@ -1459,6 +1528,23 @@ GALAXY_REQUIRED_VALID_SIGNATURE_COUNT:
- The number of signatures that must be successful during GPG signature verification while installing or verifying collections.
- This should be a positive integer or all to indicate all signatures must successfully validate the collection.
- Prepend + to the value to fail if no valid signatures are found for the collection.
+GALAXY_COLLECTION_IMPORT_POLL_INTERVAL:
+ description:
+ - The initial interval in seconds for polling the import status of a collection.
+ - This interval increases exponentially based on the :ref:`galaxy_collection_import_poll_factor`, with a maximum delay of 30 seconds.
+ type: float
+ default: 2.0
+ env:
+ - name: ANSIBLE_GALAXY_COLLECTION_IMPORT_POLL_INTERVAL
+ version_added: '2.18'
+GALAXY_COLLECTION_IMPORT_POLL_FACTOR:
+ description:
+ - The multiplier used to increase the :ref:`galaxy_collection_import_poll_interval` when checking the collection import status.
+ type: float
+ default: 1.5
+ env:
+ - name: ANSIBLE_GALAXY_COLLECTION_IMPORT_POLL_FACTOR
+ version_added: "2.18"
HOST_KEY_CHECKING:
# NOTE: constant not in use by ssh/paramiko plugins anymore, but they do support the same configuration sources
# TODO: check non ssh connection plugins for use/migration
@@ -1511,6 +1597,7 @@ _INTERPRETER_PYTHON_DISTRO_MAP:
INTERPRETER_PYTHON_FALLBACK:
name: Ordered list of Python interpreters to check for in discovery
default:
+ - python3.13
- python3.12
- python3.11
- python3.10
@@ -1681,7 +1768,7 @@ INJECT_FACTS_AS_VARS:
default: True
description:
- Facts are available inside the `ansible_facts` variable, this setting also pushes them as their own vars in the main namespace.
- - Unlike inside the `ansible_facts` dictionary, these will have an `ansible_` prefix.
+ - Unlike inside the `ansible_facts` dictionary where the prefix `ansible_` is removed from fact names, these will have the exact names that are returned by the module.
env: [{name: ANSIBLE_INJECT_FACT_VARS}]
ini:
- {key: inject_facts_as_vars, section: defaults}
@@ -1719,7 +1806,7 @@ OLD_PLUGIN_CACHE_CLEARING:
PAGER:
name: pager application to use
default: less
- descrioption:
+ description:
- for the cases in which Ansible needs to return output in a pageable fashion, this chooses the application to use.
ini:
- section: defaults
@@ -1921,7 +2008,11 @@ TASK_TIMEOUT:
name: Task Timeout
default: 0
description:
- - Set the maximum time (in seconds) that a task can run for.
+ - Set the maximum time (in seconds) for a task action to execute in.
+ - The timeout runs independently of templating and looping.
+ It applies to each attempt of executing the task's action and is unaffected by the total time spent on a task.
+ - When the action execution exceeds the timeout, Ansible interrupts the process.
+ This is registered as a failure due to outside circumstances, not a task failure, so that an appropriate response and recovery process can take place.
- If set to 0 (the default) there is no timeout.
env: [{name: ANSIBLE_TASK_TIMEOUT}]
ini:
@@ -2052,4 +2143,35 @@ VERBOSE_TO_STDERR:
- section: defaults
key: verbose_to_stderr
type: bool
-...
+_Z_TEST_ENTRY:
+ name: testentry
+ description: for tests
+ env:
+ - name: ANSIBLE_TEST_ENTRY
+ - name: ANSIBLE_TEST_ENTRY_D
+ deprecated:
+ why: for testing
+ version: '3.30'
+ alternatives: nothing
+ ini:
+ - section: testing
+ key: valid
+ - section: testing
+ key: deprecated
+ deprecated:
+ why: for testing
+ version: '3.30'
+ alternatives: nothing
+_Z_TEST_ENTRY_2:
+ version_added: '2.18'
+ name: testentry
+ description: for tests
+ deprecated:
+ why: for testing
+ version: '3.30'
+ alternatives: nothing
+ env:
+ - name: ANSIBLE_TEST_ENTRY2
+ ini:
+ - section: testing
+ key: valid2
diff --git a/lib/ansible/config/manager.py b/lib/ansible/config/manager.py
index b8dada4ba4a..e4e7b6a8e95 100644
--- a/lib/ansible/config/manager.py
+++ b/lib/ansible/config/manager.py
@@ -4,6 +4,7 @@
from __future__ import annotations
import atexit
+import decimal
import configparser
import os
import os.path
@@ -15,7 +16,7 @@ from collections import namedtuple
from collections.abc import Mapping, Sequence
from jinja2.nativetypes import NativeEnvironment
-from ansible.errors import AnsibleOptionsError, AnsibleError
+from ansible.errors import AnsibleOptionsError, AnsibleError, AnsibleRequiredOptionError
from ansible.module_utils.common.text.converters import to_text, to_bytes, to_native
from ansible.module_utils.common.yaml import yaml_load
from ansible.module_utils.six import string_types
@@ -29,6 +30,26 @@ Setting = namedtuple('Setting', 'name value origin type')
INTERNAL_DEFS = {'lookup': ('_terms',)}
+GALAXY_SERVER_DEF = [
+ ('url', True, 'str'),
+ ('username', False, 'str'),
+ ('password', False, 'str'),
+ ('token', False, 'str'),
+ ('auth_url', False, 'str'),
+ ('api_version', False, 'int'),
+ ('validate_certs', False, 'bool'),
+ ('client_id', False, 'str'),
+ ('timeout', False, 'int'),
+]
+
+# config definition fields
+GALAXY_SERVER_ADDITIONAL = {
+ 'api_version': {'default': None, 'choices': [2, 3]},
+ 'validate_certs': {'cli': [{'name': 'validate_certs'}]},
+ 'timeout': {'cli': [{'name': 'timeout'}]},
+ 'token': {'default': None},
+}
+
def _get_entry(plugin_type, plugin_name, config):
''' construct entry for requested config '''
@@ -81,10 +102,18 @@ def ensure_type(value, value_type, origin=None, origin_ftype=None):
value = boolean(value, strict=False)
elif value_type in ('integer', 'int'):
- value = int(value)
+ if not isinstance(value, int):
+ try:
+ if (decimal_value := decimal.Decimal(value)) == (int_part := int(decimal_value)):
+ value = int_part
+ else:
+ errmsg = 'int'
+ except decimal.DecimalException as e:
+ raise ValueError from e
elif value_type == 'float':
- value = float(value)
+ if not isinstance(value, float):
+ value = float(value)
elif value_type == 'list':
if isinstance(value, string_types):
@@ -153,7 +182,7 @@ def ensure_type(value, value_type, origin=None, origin_ftype=None):
value = unquote(value)
if errmsg:
- raise ValueError('Invalid type provided for "%s": %s' % (errmsg, to_native(value)))
+ raise ValueError(f'Invalid type provided for "{errmsg}": {value!r}')
return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
@@ -302,6 +331,42 @@ class ConfigManager(object):
# ensure we always have config def entry
self._base_defs['CONFIG_FILE'] = {'default': None, 'type': 'path'}
+ def load_galaxy_server_defs(self, server_list):
+
+ def server_config_def(section, key, required, option_type):
+ config_def = {
+ 'description': 'The %s of the %s Galaxy server' % (key, section),
+ 'ini': [
+ {
+ 'section': 'galaxy_server.%s' % section,
+ 'key': key,
+ }
+ ],
+ 'env': [
+ {'name': 'ANSIBLE_GALAXY_SERVER_%s_%s' % (section.upper(), key.upper())},
+ ],
+ 'required': required,
+ 'type': option_type,
+ }
+ if key in GALAXY_SERVER_ADDITIONAL:
+ config_def.update(GALAXY_SERVER_ADDITIONAL[key])
+ # ensure we always have a default timeout
+ if key == 'timeout' and 'default' not in config_def:
+ config_def['default'] = self.get_config_value('GALAXY_SERVER_TIMEOUT')
+
+ return config_def
+
+ if server_list:
+ for server_key in server_list:
+ if not server_key:
+ # Skip empty strings and other non-truthy values, since an empty server list env var is equal to [''].
+ continue
+
+ # Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the
+ # section [galaxy_server.<server_key>] for the values url, username, password, and token.
+ defs = dict((k, server_config_def(server_key, k, req, value_type)) for k, req, value_type in GALAXY_SERVER_DEF)
+ self.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs)
+
def template_default(self, value, variables):
if isinstance(value, string_types) and (value.startswith('{{') and value.endswith('}}')) and variables is not None:
# template default values if possible
@@ -357,7 +422,7 @@ class ConfigManager(object):
def get_plugin_options(self, plugin_type, name, keys=None, variables=None, direct=None):
options = {}
- defs = self.get_configuration_definitions(plugin_type, name)
+ defs = self.get_configuration_definitions(plugin_type=plugin_type, name=name)
for option in defs:
options[option] = self.get_config_value(option, plugin_type=plugin_type, plugin_name=name, keys=keys, variables=variables, direct=direct)
@@ -366,7 +431,7 @@ class ConfigManager(object):
def get_plugin_vars(self, plugin_type, name):
pvars = []
- for pdef in self.get_configuration_definitions(plugin_type, name).values():
+ for pdef in self.get_configuration_definitions(plugin_type=plugin_type, name=name).values():
if 'vars' in pdef and pdef['vars']:
for var_entry in pdef['vars']:
pvars.append(var_entry['name'])
@@ -375,7 +440,7 @@ class ConfigManager(object):
def get_plugin_options_from_var(self, plugin_type, name, variable):
options = []
- for option_name, pdef in self.get_configuration_definitions(plugin_type, name).items():
+ for option_name, pdef in self.get_configuration_definitions(plugin_type=plugin_type, name=name).items():
if 'vars' in pdef and pdef['vars']:
for var_entry in pdef['vars']:
if variable == var_entry['name']:
@@ -417,7 +482,6 @@ class ConfigManager(object):
for cdef in list(ret.keys()):
if cdef.startswith('_'):
del ret[cdef]
-
return ret
def _loop_entries(self, container, entry_list):
@@ -472,7 +536,7 @@ class ConfigManager(object):
origin = None
origin_ftype = None
- defs = self.get_configuration_definitions(plugin_type, plugin_name)
+ defs = self.get_configuration_definitions(plugin_type=plugin_type, name=plugin_name)
if config in defs:
aliases = defs[config].get('aliases', [])
@@ -562,8 +626,8 @@ class ConfigManager(object):
if value is None:
if defs[config].get('required', False):
if not plugin_type or config not in INTERNAL_DEFS.get(plugin_type, {}):
- raise AnsibleError("No setting was provided for required configuration %s" %
- to_native(_get_entry(plugin_type, plugin_name, config)))
+ raise AnsibleRequiredOptionError("No setting was provided for required configuration %s" %
+ to_native(_get_entry(plugin_type, plugin_name, config)))
else:
origin = 'default'
value = self.template_default(defs[config].get('default'), variables)
@@ -617,3 +681,17 @@ class ConfigManager(object):
self._plugins[plugin_type] = {}
self._plugins[plugin_type][name] = defs
+
+ @staticmethod
+ def get_deprecated_msg_from_config(dep_docs, include_removal=False):
+
+ removal = ''
+ if include_removal:
+ if 'removed_at_date' in dep_docs:
+ removal = f"Will be removed in a release after {dep_docs['removed_at_date']}\n\t"
+ else:
+ removal = f"Will be removed in: Ansible {dep_docs['removed_in']}\n\t"
+
+ # TODO: choose to deprecate either singular or plural
+ alt = dep_docs.get('alternatives', dep_docs.get('alternative', 'none'))
+ return f"Reason: {dep_docs['why']}\n\t{removal}Alternatives: {alt}"
diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py
index 42b1b1c7bd7..34f91db54ea 100644
--- a/lib/ansible/constants.py
+++ b/lib/ansible/constants.py
@@ -15,6 +15,10 @@ from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
from ansible.release import __version__
from ansible.utils.fqcn import add_internal_fqcns
+# initialize config manager/config data to read/store global settings
+# and generate 'pseudo constants' for app consumption.
+config = ConfigManager()
+
def _warning(msg):
''' display is not guaranteed here, nor it being the full class, but try anyways, fallback to sys.stderr.write '''
@@ -36,6 +40,28 @@ def _deprecated(msg, version):
sys.stderr.write(' [DEPRECATED] %s, to be removed in %s\n' % (msg, version))
+def handle_config_noise(display=None):
+
+ if display is not None:
+ w = display.warning
+ d = display.deprecated
+ else:
+ w = _warning
+ d = _deprecated
+
+ while config.WARNINGS:
+ warn = config.WARNINGS.pop()
+ w(warn)
+
+ while config.DEPRECATED:
+ # tuple with name and options
+ dep = config.DEPRECATED.pop(0)
+ msg = config.get_deprecated_msg_from_config(dep[1])
+ # use tabs only for ansible-doc?
+ msg = msg.replace("\t", "")
+ d(f"{dep[0]} option. {msg}", version=dep[1]['version'])
+
+
def set_constant(name, value, export=vars()):
''' sets constants and returns resolved options dict '''
export[name] = value
@@ -152,10 +178,10 @@ INTERNAL_STATIC_VARS = frozenset(
]
)
LOCALHOST = ('127.0.0.1', 'localhost', '::1')
-MODULE_REQUIRE_ARGS = tuple(add_internal_fqcns(('command', 'win_command', 'ansible.windows.win_command', 'shell', 'win_shell',
- 'ansible.windows.win_shell', 'raw', 'script')))
-MODULE_NO_JSON = tuple(add_internal_fqcns(('command', 'win_command', 'ansible.windows.win_command', 'shell', 'win_shell',
- 'ansible.windows.win_shell', 'raw')))
+WIN_MOVED = ['ansible.windows.win_command', 'ansible.windows.win_shell']
+MODULE_REQUIRE_ARGS_SIMPLE = ['command', 'raw', 'script', 'shell', 'win_command', 'win_shell']
+MODULE_REQUIRE_ARGS = tuple(add_internal_fqcns(MODULE_REQUIRE_ARGS_SIMPLE) + WIN_MOVED)
+MODULE_NO_JSON = tuple(add_internal_fqcns(('command', 'win_command', 'shell', 'win_shell', 'raw')) + WIN_MOVED)
RESTRICTED_RESULT_KEYS = ('ansible_rsync_path', 'ansible_playbook_python', 'ansible_facts')
SYNTHETIC_COLLECTIONS = ('ansible.builtin', 'ansible.legacy')
TREE_DIR = None
@@ -218,11 +244,8 @@ MAGIC_VARIABLE_MAPPING = dict(
)
# POPULATE SETTINGS FROM CONFIG ###
-config = ConfigManager()
-
-# Generate constants from config
for setting in config.get_configuration_definitions():
set_constant(setting, config.get_config_value(setting, variables=vars()))
-for warn in config.WARNINGS:
- _warning(warn)
+# emit any warnings or deprecations
+handle_config_noise()
diff --git a/lib/ansible/errors/__init__.py b/lib/ansible/errors/__init__.py
index 8e33bef120b..f003b589c8a 100644
--- a/lib/ansible/errors/__init__.py
+++ b/lib/ansible/errors/__init__.py
@@ -227,6 +227,11 @@ class AnsibleOptionsError(AnsibleError):
pass
+class AnsibleRequiredOptionError(AnsibleOptionsError):
+ ''' bad or incomplete options passed '''
+ pass
+
+
class AnsibleParserError(AnsibleError):
''' something was detected early that is wrong about a playbook or data file '''
pass
diff --git a/lib/ansible/executor/interpreter_discovery.py b/lib/ansible/executor/interpreter_discovery.py
index 6d105817b03..24b2174d3c8 100644
--- a/lib/ansible/executor/interpreter_discovery.py
+++ b/lib/ansible/executor/interpreter_discovery.py
@@ -41,7 +41,7 @@ class InterpreterDiscoveryRequiredError(Exception):
def discover_interpreter(action, interpreter_name, discovery_mode, task_vars):
# interpreter discovery is a 2-step process with the target. First, we use a simple shell-agnostic bootstrap to
# get the system type from uname, and find any random Python that can get us the info we need. For supported
- # target OS types, we'll dispatch a Python script that calls plaform.dist() (for older platforms, where available)
+ # target OS types, we'll dispatch a Python script that calls platform.dist() (for older platforms, where available)
# and brings back /etc/os-release (if present). The proper Python path is looked up in a table of known
# distros/versions with included Pythons; if nothing is found, depending on the discovery mode, either the
# default fallback of /usr/bin/python is used (if we know it's there), or discovery fails.
diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py
index 58eebfaea57..deae3ea04e4 100644
--- a/lib/ansible/executor/play_iterator.py
+++ b/lib/ansible/executor/play_iterator.py
@@ -427,13 +427,13 @@ class PlayIterator:
# might be there from previous flush
state.handlers = self.handlers[:]
state.update_handlers = False
- state.cur_handlers_task = 0
while True:
try:
task = state.handlers[state.cur_handlers_task]
except IndexError:
task = None
+ state.cur_handlers_task = 0
state.run_state = state.pre_flushing_run_state
state.update_handlers = True
break
@@ -635,3 +635,19 @@ class PlayIterator:
def clear_notification(self, hostname: str, notification: str) -> None:
self._host_states[hostname].handler_notifications.remove(notification)
+
+ def end_host(self, hostname: str) -> None:
+ """Used by ``end_host``, ``end_batch`` and ``end_play`` meta tasks to end executing given host."""
+ state = self.get_active_state(self.get_state_for_host(hostname))
+ if state.run_state == IteratingStates.RESCUE:
+ # This is a special case for when ending a host occurs in rescue.
+ # By definition the meta task responsible for ending the host
+ # is the last task, so we need to clear the fail state to mark
+ # the host as rescued.
+ # The reason we need to do that is because this operation is
+ # normally done when PlayIterator transitions from rescue to
+ # always when only then we can say that rescue didn't fail
+ # but with ending a host via meta task, we don't get to that transition.
+ self.set_fail_state_for_host(hostname, FailedStates.NONE)
+ self.set_run_state_for_host(hostname, IteratingStates.COMPLETE)
+ self._play._removed_hosts.append(hostname)
diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py
index 7c3ac4113bf..f439967838b 100644
--- a/lib/ansible/executor/playbook_executor.py
+++ b/lib/ansible/executor/playbook_executor.py
@@ -195,10 +195,7 @@ class PlaybookExecutor:
result = self._tqm.RUN_FAILED_HOSTS
break_play = True
- # check the number of failures here, to see if they're above the maximum
- # failure percentage allowed, or if any errors are fatal. If either of those
- # conditions are met, we break out, otherwise we only break out if the entire
- # batch failed
+ # check the number of failures here and break out if the entire batch failed
failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
(previously_failed + previously_unreachable)
diff --git a/lib/ansible/executor/powershell/become_wrapper.ps1 b/lib/ansible/executor/powershell/become_wrapper.ps1
index f40e2658f5f..cea42c128aa 100644
--- a/lib/ansible/executor/powershell/become_wrapper.ps1
+++ b/lib/ansible/executor/powershell/become_wrapper.ps1
@@ -116,12 +116,11 @@ Write-AnsibleLog "INFO - parsed become input, user: '$username', type: '$logon_t
# set to Stop and cannot be changed. Also need to split the payload from the wrapper to prevent potentially
# sensitive content from being logged by the scriptblock logger.
$bootstrap_wrapper = {
- &chcp.com 65001 > $null
- $exec_wrapper_str = [System.Console]::In.ReadToEnd()
- $split_parts = $exec_wrapper_str.Split(@("`0`0`0`0"), 2, [StringSplitOptions]::RemoveEmptyEntries)
+ [Console]::InputEncoding = [Console]::OutputEncoding = New-Object System.Text.UTF8Encoding
+ $ew = [System.Console]::In.ReadToEnd()
+ $split_parts = $ew.Split(@("`0`0`0`0"), 2, [StringSplitOptions]::RemoveEmptyEntries)
Set-Variable -Name json_raw -Value $split_parts[1]
- $exec_wrapper = [ScriptBlock]::Create($split_parts[0])
- &$exec_wrapper
+ &([ScriptBlock]::Create($split_parts[0]))
}
$exec_command = [System.Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($bootstrap_wrapper.ToString()))
$lp_command_line = "powershell.exe -NonInteractive -NoProfile -ExecutionPolicy Bypass -EncodedCommand $exec_command"
diff --git a/lib/ansible/executor/powershell/bootstrap_wrapper.ps1 b/lib/ansible/executor/powershell/bootstrap_wrapper.ps1
index cdba80cbb01..8e7141eb515 100644
--- a/lib/ansible/executor/powershell/bootstrap_wrapper.ps1
+++ b/lib/ansible/executor/powershell/bootstrap_wrapper.ps1
@@ -1,4 +1,4 @@
-&chcp.com 65001 > $null
+try { [Console]::InputEncoding = [Console]::OutputEncoding = New-Object System.Text.UTF8Encoding } catch { $null = $_ }
if ($PSVersionTable.PSVersion -lt [Version]"3.0") {
'{"failed":true,"msg":"Ansible requires PowerShell v3.0 or newer"}'
@@ -9,5 +9,4 @@ $exec_wrapper_str = $input | Out-String
$split_parts = $exec_wrapper_str.Split(@("`0`0`0`0"), 2, [StringSplitOptions]::RemoveEmptyEntries)
If (-not $split_parts.Length -eq 2) { throw "invalid payload" }
Set-Variable -Name json_raw -Value $split_parts[1]
-$exec_wrapper = [ScriptBlock]::Create($split_parts[0])
-&$exec_wrapper
+& ([ScriptBlock]::Create($split_parts[0]))
diff --git a/lib/ansible/executor/powershell/exec_wrapper.ps1 b/lib/ansible/executor/powershell/exec_wrapper.ps1
index 0f97bdfb8a5..4ecc1367c84 100644
--- a/lib/ansible/executor/powershell/exec_wrapper.ps1
+++ b/lib/ansible/executor/powershell/exec_wrapper.ps1
@@ -16,7 +16,7 @@ begin {
.SYNOPSIS
Converts a JSON string to a Hashtable/Array in the fastest way
possible. Unfortunately ConvertFrom-Json is still faster but outputs
- a PSCustomObject which is combersone for module consumption.
+ a PSCustomObject which is cumbersome for module consumption.
.PARAMETER InputObject
[String] The JSON string to deserialize.
@@ -178,6 +178,7 @@ $($ErrorRecord.InvocationInfo.PositionMessage)
Write-AnsibleLog "INFO - converting json raw to a payload" "exec_wrapper"
$payload = ConvertFrom-AnsibleJson -InputObject $json_raw
+ $payload.module_args._ansible_exec_wrapper_warnings = [System.Collections.Generic.List[string]]@()
# TODO: handle binary modules
# TODO: handle persistence
diff --git a/lib/ansible/executor/powershell/module_manifest.py b/lib/ansible/executor/powershell/module_manifest.py
index 99b18e5ff4b..93c5c8c643e 100644
--- a/lib/ansible/executor/powershell/module_manifest.py
+++ b/lib/ansible/executor/powershell/module_manifest.py
@@ -8,7 +8,7 @@ import errno
import json
import os
import pkgutil
-import random
+import secrets
import re
from importlib import import_module
@@ -318,7 +318,7 @@ def _create_powershell_wrapper(b_module_data, module_path, module_args,
exec_manifest["actions"].insert(0, 'async_watchdog')
exec_manifest["actions"].insert(0, 'async_wrapper')
- exec_manifest["async_jid"] = f'j{random.randint(0, 999999999999)}'
+ exec_manifest["async_jid"] = f'j{secrets.randbelow(999999999999)}'
exec_manifest["async_timeout_sec"] = async_timeout
exec_manifest["async_startup_timeout"] = C.config.get_config_value("WIN_ASYNC_STARTUP_TIMEOUT", variables=task_vars)
diff --git a/lib/ansible/executor/powershell/module_powershell_wrapper.ps1 b/lib/ansible/executor/powershell/module_powershell_wrapper.ps1
index c35c84cfc86..f79dd6fbc86 100644
--- a/lib/ansible/executor/powershell/module_powershell_wrapper.ps1
+++ b/lib/ansible/executor/powershell/module_powershell_wrapper.ps1
@@ -29,7 +29,18 @@ if ($csharp_utils.Count -gt 0) {
# add any C# references so the module does not have to do so
$new_tmp = [System.Environment]::ExpandEnvironmentVariables($Payload.module_args["_ansible_remote_tmp"])
- Add-CSharpType -References $csharp_utils -TempPath $new_tmp -IncludeDebugInfo
+
+ # We use a fake module object to capture warnings
+ $fake_module = [PSCustomObject]@{
+ Tmpdir = $new_tmp
+ Verbosity = 3
+ }
+ $warning_func = New-Object -TypeName System.Management.Automation.PSScriptMethod -ArgumentList Warn, {
+ param($message)
+ $Payload.module_args._ansible_exec_wrapper_warnings.Add($message)
+ }
+ $fake_module.PSObject.Members.Add($warning_func)
+ Add-CSharpType -References $csharp_utils -AnsibleModule $fake_module
}
if ($Payload.ContainsKey("coverage") -and $null -ne $host.Runspace -and $null -ne $host.Runspace.Debugger) {
diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py
index a8e24f5f36a..932a33cfec0 100644
--- a/lib/ansible/executor/task_executor.py
+++ b/lib/ansible/executor/task_executor.py
@@ -4,23 +4,24 @@
from __future__ import annotations
import os
-import pty
import time
import json
+import pathlib
import signal
import subprocess
import sys
-import termios
import traceback
from ansible import constants as C
+from ansible.cli import scripts
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure, AnsibleActionFail, AnsibleActionSkip
from ansible.executor.task_result import TaskResult
from ansible.executor.module_common import get_action_args_with_defaults
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import binary_type
from ansible.module_utils.common.text.converters import to_text, to_native
-from ansible.module_utils.connection import write_to_file_descriptor
+from ansible.module_utils.connection import write_to_stream
+from ansible.module_utils.six import string_types
from ansible.playbook.conditional import Conditional
from ansible.playbook.task import Task
from ansible.plugins import get_plugin_class
@@ -31,7 +32,7 @@ from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.unsafe_proxy import to_unsafe_text, wrap_var
from ansible.vars.clean import namespace_facts, clean_facts
from ansible.utils.display import Display
-from ansible.utils.vars import combine_vars, isidentifier
+from ansible.utils.vars import combine_vars
display = Display()
@@ -42,11 +43,21 @@ __all__ = ['TaskExecutor']
class TaskTimeoutError(BaseException):
- pass
+ def __init__(self, message="", frame=None):
+
+ if frame is not None:
+ orig = frame
+ root = pathlib.Path(__file__).parent
+ while not pathlib.Path(frame.f_code.co_filename).is_relative_to(root):
+ frame = frame.f_back
+
+ self.frame = 'Interrupted at %s called from %s' % (orig, frame)
+
+ super(TaskTimeoutError, self).__init__(message)
def task_timeout(signum, frame):
- raise TaskTimeoutError
+ raise TaskTimeoutError(frame=frame)
def remove_omit(task_args, omit_token):
@@ -332,6 +343,13 @@ class TaskExecutor:
(self._task, tmp_task) = (tmp_task, self._task)
(self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
res = self._execute(variables=task_vars)
+
+ if self._task.register:
+ # Ensure per loop iteration results are registered in case `_execute()`
+ # returns early (when conditional, failure, ...).
+ # This is needed in case the registered variable is used in the loop label template.
+ task_vars[self._task.register] = res
+
task_fields = self._task.dump_attrs()
(self._task, tmp_task) = (tmp_task, self._task)
(self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
@@ -362,12 +380,17 @@ class TaskExecutor:
'msg': 'Failed to template loop_control.label: %s' % to_text(e)
})
+ # if plugin is loaded, get resolved name, otherwise leave original task connection
+ if self._connection and not isinstance(self._connection, string_types):
+ task_fields['connection'] = getattr(self._connection, 'ansible_name')
+
tr = TaskResult(
self._host.name,
self._task._uuid,
res,
task_fields=task_fields,
)
+
if tr.is_failed() or tr.is_unreachable():
self._final_q.send_callback('v2_runner_item_on_failed', tr)
elif tr.is_skipped():
@@ -379,6 +402,19 @@ class TaskExecutor:
self._final_q.send_callback('v2_runner_item_on_ok', tr)
results.append(res)
+
+ # break loop if break_when conditions are met
+ if self._task.loop_control and self._task.loop_control.break_when:
+ cond = Conditional(loader=self._loader)
+ cond.when = self._task.loop_control.get_validated_value(
+ 'break_when', self._task.loop_control.fattributes.get('break_when'), self._task.loop_control.break_when, templar
+ )
+ if cond.evaluate_conditional(templar, task_vars):
+ # delete loop vars before exiting loop
+ del task_vars[loop_var]
+ break
+
+ # done with loop var, remove for next iteration
del task_vars[loop_var]
# clear 'connection related' plugin variables for next iteration
@@ -640,7 +676,7 @@ class TaskExecutor:
return dict(unreachable=True, msg=to_text(e))
except TaskTimeoutError as e:
msg = 'The %s action failed to execute in the expected time frame (%d) and was terminated' % (self._task.action, self._task.timeout)
- return dict(failed=True, msg=msg)
+ return dict(failed=True, msg=msg, timedout={'frame': e.frame, 'period': self._task.timeout})
finally:
if self._task.timeout:
signal.alarm(0)
@@ -657,9 +693,6 @@ class TaskExecutor:
# update the local copy of vars with the registered value, if specified,
# or any facts which may have been generated by the module execution
if self._task.register:
- if not isidentifier(self._task.register):
- raise AnsibleError("Invalid variable name in 'register' specified: '%s'" % self._task.register)
-
vars_copy[self._task.register] = result
if self._task.async_val > 0:
@@ -1048,7 +1081,7 @@ class TaskExecutor:
# add extras if plugin supports them
if getattr(self._connection, 'allow_extras', False):
for k in variables:
- if k.startswith('ansible_%s_' % self._connection._load_name) and k not in options:
+ if k.startswith('ansible_%s_' % self._connection.extras_prefix) and k not in options:
options['_extras'][k] = templar.template(variables[k])
task_keys = self._task.dump_attrs()
@@ -1179,26 +1212,19 @@ class TaskExecutor:
return handler, module
+CLI_STUB_NAME = 'ansible_connection_cli_stub.py'
+
+
def start_connection(play_context, options, task_uuid):
'''
Starts the persistent connection
'''
- candidate_paths = [C.ANSIBLE_CONNECTION_PATH or os.path.dirname(sys.argv[0])]
- candidate_paths.extend(os.environ.get('PATH', '').split(os.pathsep))
- for dirname in candidate_paths:
- ansible_connection = os.path.join(dirname, 'ansible-connection')
- if os.path.isfile(ansible_connection):
- display.vvvv("Found ansible-connection at path {0}".format(ansible_connection))
- break
- else:
- raise AnsibleError("Unable to find location of 'ansible-connection'. "
- "Please set or check the value of ANSIBLE_CONNECTION_PATH")
env = os.environ.copy()
env.update({
# HACK; most of these paths may change during the controller's lifetime
# (eg, due to late dynamic role includes, multi-playbook execution), without a way
- # to invalidate/update, ansible-connection won't always see the same plugins the controller
+ # to invalidate/update, the persistent connection helper won't always see the same plugins the controller
# can.
'ANSIBLE_BECOME_PLUGINS': become_loader.print_paths(),
'ANSIBLE_CLICONF_PLUGINS': cliconf_loader.print_paths(),
@@ -1211,30 +1237,19 @@ def start_connection(play_context, options, task_uuid):
verbosity = []
if display.verbosity:
verbosity.append('-%s' % ('v' * display.verbosity))
- python = sys.executable
- master, slave = pty.openpty()
+
+ if not (cli_stub_path := C.config.get_config_value('_ANSIBLE_CONNECTION_PATH')):
+ cli_stub_path = str(pathlib.Path(scripts.__file__).parent / CLI_STUB_NAME)
+
p = subprocess.Popen(
- [python, ansible_connection, *verbosity, to_text(os.getppid()), to_text(task_uuid)],
- stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
+ [sys.executable, cli_stub_path, *verbosity, to_text(os.getppid()), to_text(task_uuid)],
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env,
)
- os.close(slave)
-
- # We need to set the pty into noncanonical mode. This ensures that we
- # can receive lines longer than 4095 characters (plus newline) without
- # truncating.
- old = termios.tcgetattr(master)
- new = termios.tcgetattr(master)
- new[3] = new[3] & ~termios.ICANON
-
- try:
- termios.tcsetattr(master, termios.TCSANOW, new)
- write_to_file_descriptor(master, options)
- write_to_file_descriptor(master, play_context.serialize())
-
- (stdout, stderr) = p.communicate()
- finally:
- termios.tcsetattr(master, termios.TCSANOW, old)
- os.close(master)
+
+ write_to_stream(p.stdin, options)
+ write_to_stream(p.stdin, play_context.serialize())
+
+ (stdout, stderr) = p.communicate()
if p.returncode == 0:
result = json.loads(to_text(stdout, errors='surrogate_then_replace'))
diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py
index f6e8c8bf7e0..3b9e251da81 100644
--- a/lib/ansible/executor/task_queue_manager.py
+++ b/lib/ansible/executor/task_queue_manager.py
@@ -223,7 +223,7 @@ class TaskQueueManager:
callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', '')
callback_needs_enabled = getattr(callback_plugin, 'CALLBACK_NEEDS_ENABLED', getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False))
- # try to get colleciotn world name first
+ # try to get collection world name first
cnames = getattr(callback_plugin, '_redirected_names', [])
if cnames:
# store the name the plugin was loaded as, as that's what we'll need to compare to the configured callback list later
diff --git a/lib/ansible/executor/task_result.py b/lib/ansible/executor/task_result.py
index 2690f3a52bb..821189367d1 100644
--- a/lib/ansible/executor/task_result.py
+++ b/lib/ansible/executor/task_result.py
@@ -139,7 +139,7 @@ class TaskResult:
elif self._result:
result._result = module_response_deepcopy(self._result)
- # actualy remove
+ # actually remove
for remove_key in ignore:
if remove_key in result._result:
del result._result[remove_key]
diff --git a/lib/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py
index 156dd4cf700..6765b087b35 100644
--- a/lib/ansible/galaxy/api.py
+++ b/lib/ansible/galaxy/api.py
@@ -62,8 +62,7 @@ def should_retry_error(exception):
if isinstance(orig_exc, URLError):
orig_exc = orig_exc.reason
- # Handle common URL related errors such as TimeoutError, and BadStatusLine
- # Note: socket.timeout is only required for Py3.9
+ # Handle common URL related errors
if isinstance(orig_exc, (TimeoutError, BadStatusLine, IncompleteRead)):
return True
@@ -720,7 +719,7 @@ class GalaxyAPI:
display.display("Waiting until Galaxy import task %s has completed" % full_url)
start = time.time()
- wait = 2
+ wait = C.GALAXY_COLLECTION_IMPORT_POLL_INTERVAL
while timeout == 0 or (time.time() - start) < timeout:
try:
@@ -744,7 +743,7 @@ class GalaxyAPI:
time.sleep(wait)
# poor man's exponential backoff algo so we don't flood the Galaxy API, cap at 30 seconds.
- wait = min(30, wait * 1.5)
+ wait = min(30, wait * C.GALAXY_COLLECTION_IMPORT_POLL_FACTOR)
if state == 'waiting':
raise AnsibleError("Timeout while waiting for the Galaxy import process to finish, check progress at '%s'"
% to_native(full_url))
diff --git a/lib/ansible/galaxy/collection/__init__.py b/lib/ansible/galaxy/collection/__init__.py
index d27328cd0fe..b2c83ee8c30 100644
--- a/lib/ansible/galaxy/collection/__init__.py
+++ b/lib/ansible/galaxy/collection/__init__.py
@@ -8,6 +8,7 @@ from __future__ import annotations
import errno
import fnmatch
import functools
+import glob
import inspect
import json
import os
@@ -1525,6 +1526,7 @@ def install(collection, path, artifacts_manager): # FIXME: mv to dataclasses?
artifacts_manager.required_successful_signature_count,
artifacts_manager.ignore_signature_errors,
)
+ remove_source_metadata(collection, b_collection_path)
if (collection.is_online_index_pointer and isinstance(collection.src, GalaxyAPI)):
write_source_metadata(
collection,
@@ -1561,6 +1563,22 @@ def write_source_metadata(collection, b_collection_path, artifacts_manager):
raise
+def remove_source_metadata(collection, b_collection_path):
+ pattern = f"{collection.namespace}.{collection.name}-*.info"
+ info_path = os.path.join(
+ b_collection_path,
+ b'../../',
+ to_bytes(pattern, errors='surrogate_or_strict')
+ )
+ if (outdated_info := glob.glob(info_path)):
+ display.vvvv(f"Removing {pattern} metadata from previous installations")
+ for info_dir in outdated_info:
+ try:
+ shutil.rmtree(info_dir)
+ except Exception:
+ pass
+
+
def verify_artifact_manifest(manifest_file, signatures, keyring, required_signature_count, ignore_signature_errors):
# type: (str, list[str], str, str, list[str]) -> None
failed_verify = False
@@ -1584,13 +1602,6 @@ def install_artifact(b_coll_targz_path, b_collection_path, b_temp_path, signatur
"""
try:
with tarfile.open(b_coll_targz_path, mode='r') as collection_tar:
- # Remove this once py3.11 is our controller minimum
- # Workaround for https://bugs.python.org/issue47231
- # See _extract_tar_dir
- collection_tar._ansible_normalized_cache = {
- m.name.removesuffix(os.path.sep): m for m in collection_tar.getmembers()
- } # deprecated: description='TarFile member index' core_version='2.18' python_version='3.11'
-
# Verify the signature on the MANIFEST.json before extracting anything else
_extract_tar_file(collection_tar, MANIFEST_FILENAME, b_collection_path, b_temp_path)
@@ -1671,10 +1682,10 @@ def install_src(collection, b_collection_path, b_collection_output_path, artifac
def _extract_tar_dir(tar, dirname, b_dest):
""" Extracts a directory from a collection tar. """
- dirname = to_native(dirname, errors='surrogate_or_strict').removesuffix(os.path.sep)
+ dirname = to_native(dirname, errors='surrogate_or_strict')
try:
- tar_member = tar._ansible_normalized_cache[dirname]
+ tar_member = tar.getmember(dirname)
except KeyError:
raise AnsibleError("Unable to extract '%s' from collection" % dirname)
@@ -1896,7 +1907,7 @@ def _resolve_depenency_map(
for req in dep_exc.criterion.iter_requirement():
error_msg_lines.append(
- '* {req.fqcn!s}:{req.ver!s}'.format(req=req)
+ f'* {req.fqcn!s}:{req.ver!s}'
)
error_msg_lines.append(pre_release_hint)
diff --git a/lib/ansible/galaxy/collection/concrete_artifact_manager.py b/lib/ansible/galaxy/collection/concrete_artifact_manager.py
index 27ce287af3c..06c1cf6f93b 100644
--- a/lib/ansible/galaxy/collection/concrete_artifact_manager.py
+++ b/lib/ansible/galaxy/collection/concrete_artifact_manager.py
@@ -61,7 +61,7 @@ class ConcreteArtifactsManager:
"""
def __init__(self, b_working_directory, validate_certs=True, keyring=None, timeout=60, required_signature_count=None, ignore_signature_errors=None):
# type: (bytes, bool, str, int, str, list[str]) -> None
- """Initialize ConcreteArtifactsManager caches and costraints."""
+ """Initialize ConcreteArtifactsManager caches and constraints."""
self._validate_certs = validate_certs # type: bool
self._artifact_cache = {} # type: dict[bytes, bytes]
self._galaxy_artifact_cache = {} # type: dict[Candidate | Requirement, bytes]
@@ -413,7 +413,7 @@ def _extract_collection_from_git(repo_url, coll_ver, b_path):
b_checkout_path = mkdtemp(
dir=b_path,
prefix=to_bytes(name, errors='surrogate_or_strict'),
- ) # type: bytes
+ )
try:
git_executable = get_bin_path('git')
diff --git a/lib/ansible/galaxy/collection/galaxy_api_proxy.py b/lib/ansible/galaxy/collection/galaxy_api_proxy.py
index 0c1b7df0bec..046354a395d 100644
--- a/lib/ansible/galaxy/collection/galaxy_api_proxy.py
+++ b/lib/ansible/galaxy/collection/galaxy_api_proxy.py
@@ -27,8 +27,7 @@ display = Display()
class MultiGalaxyAPIProxy:
"""A proxy that abstracts talking to multiple Galaxy instances."""
- def __init__(self, apis, concrete_artifacts_manager, offline=False):
- # type: (t.Iterable[GalaxyAPI], ConcreteArtifactsManager, bool) -> None
+ def __init__(self, apis: t.Iterable[GalaxyAPI], concrete_artifacts_manager: ConcreteArtifactsManager, offline: bool = False) -> None:
"""Initialize the target APIs list."""
self._apis = apis
self._concrete_art_mgr = concrete_artifacts_manager
@@ -38,22 +37,21 @@ class MultiGalaxyAPIProxy:
def is_offline_mode_requested(self):
return self._offline
- def _assert_that_offline_mode_is_not_requested(self): # type: () -> None
+ def _assert_that_offline_mode_is_not_requested(self) -> None:
if self.is_offline_mode_requested:
raise NotImplementedError("The calling code is not supposed to be invoked in 'offline' mode.")
- def _get_collection_versions(self, requirement):
- # type: (Requirement) -> t.Iterator[tuple[GalaxyAPI, str]]
+ def _get_collection_versions(self, requirement: Requirement) -> t.Iterator[tuple[GalaxyAPI, str]]:
"""Helper for get_collection_versions.
Yield api, version pairs for all APIs,
and reraise the last error if no valid API was found.
"""
if self._offline:
- return []
+ return
found_api = False
- last_error = None # type: Exception | None
+ last_error: Exception | None = None
api_lookup_order = (
(requirement.src, )
@@ -86,8 +84,7 @@ class MultiGalaxyAPIProxy:
if not found_api and last_error is not None:
raise last_error
- def get_collection_versions(self, requirement):
- # type: (Requirement) -> t.Iterable[tuple[str, GalaxyAPI]]
+ def get_collection_versions(self, requirement: Requirement) -> t.Iterable[tuple[str, GalaxyAPI]]:
"""Get a set of unique versions for FQCN on Galaxy servers."""
if requirement.is_concrete_artifact:
return {
@@ -110,8 +107,7 @@ class MultiGalaxyAPIProxy:
)
)
- def get_collection_version_metadata(self, collection_candidate):
- # type: (Candidate) -> CollectionVersionMetadata
+ def get_collection_version_metadata(self, collection_candidate: Candidate) -> CollectionVersionMetadata:
"""Retrieve collection metadata of a given candidate."""
self._assert_that_offline_mode_is_not_requested()
@@ -160,8 +156,7 @@ class MultiGalaxyAPIProxy:
raise last_err
- def get_collection_dependencies(self, collection_candidate):
- # type: (Candidate) -> dict[str, str]
+ def get_collection_dependencies(self, collection_candidate: Candidate) -> dict[str, str]:
# FIXME: return Requirement instances instead?
"""Retrieve collection dependencies of a given candidate."""
if collection_candidate.is_concrete_artifact:
@@ -177,13 +172,12 @@ class MultiGalaxyAPIProxy:
dependencies
)
- def get_signatures(self, collection_candidate):
- # type: (Candidate) -> list[str]
+ def get_signatures(self, collection_candidate: Candidate) -> list[str]:
self._assert_that_offline_mode_is_not_requested()
namespace = collection_candidate.namespace
name = collection_candidate.name
version = collection_candidate.ver
- last_err = None # type: Exception | None
+ last_err: Exception | None = None
api_lookup_order = (
(collection_candidate.src, )
diff --git a/lib/ansible/galaxy/collection/gpg.py b/lib/ansible/galaxy/collection/gpg.py
index 38ec189ddd0..9d41cdcde8c 100644
--- a/lib/ansible/galaxy/collection/gpg.py
+++ b/lib/ansible/galaxy/collection/gpg.py
@@ -12,20 +12,14 @@ import contextlib
import inspect
import os
import subprocess
-import sys
import typing as t
from dataclasses import dataclass, fields as dc_fields
-from functools import partial
from urllib.error import HTTPError, URLError
if t.TYPE_CHECKING:
from ansible.utils.display import Display
-IS_PY310_PLUS = sys.version_info[:2] >= (3, 10)
-
-frozen_dataclass = partial(dataclass, frozen=True, **({'slots': True} if IS_PY310_PLUS else {}))
-
def get_signature_from_source(source, display=None): # type: (str, t.Optional[Display]) -> str
if display is not None:
@@ -128,7 +122,7 @@ def parse_gpg_errors(status_out): # type: (str) -> t.Iterator[GpgBaseError]
yield cls(*fields)
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgBaseError(Exception):
status: str
@@ -142,35 +136,35 @@ class GpgBaseError(Exception):
super(GpgBaseError, self).__setattr__(field_name, field_type(getattr(self, field_name)))
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgExpSig(GpgBaseError):
"""The signature with the keyid is good, but the signature is expired."""
keyid: str
username: str
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgExpKeySig(GpgBaseError):
"""The signature with the keyid is good, but the signature was made by an expired key."""
keyid: str
username: str
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgRevKeySig(GpgBaseError):
"""The signature with the keyid is good, but the signature was made by a revoked key."""
keyid: str
username: str
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgBadSig(GpgBaseError):
"""The signature with the keyid has not been verified okay."""
keyid: str
username: str
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgErrSig(GpgBaseError):
""""It was not possible to check the signature. This may be caused by
a missing public key or an unsupported algorithm. A RC of 4
@@ -186,24 +180,24 @@ class GpgErrSig(GpgBaseError):
fpr: str
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgNoPubkey(GpgBaseError):
"""The public key is not available."""
keyid: str
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgMissingPassPhrase(GpgBaseError):
"""No passphrase was supplied."""
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgBadPassphrase(GpgBaseError):
"""The supplied passphrase was wrong or not given."""
keyid: str
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgNoData(GpgBaseError):
"""No data has been found. Codes for WHAT are:
- 1 :: No armored data.
@@ -215,7 +209,7 @@ class GpgNoData(GpgBaseError):
what: str
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgUnexpected(GpgBaseError):
"""No data has been found. Codes for WHAT are:
- 1 :: No armored data.
@@ -227,7 +221,7 @@ class GpgUnexpected(GpgBaseError):
what: str
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgError(GpgBaseError):
"""This is a generic error status message, it might be followed by error location specific data."""
location: str
@@ -235,30 +229,30 @@ class GpgError(GpgBaseError):
more: str = ""
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgFailure(GpgBaseError):
"""This is the counterpart to SUCCESS and used to indicate a program failure."""
location: str
code: int
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgBadArmor(GpgBaseError):
"""The ASCII armor is corrupted."""
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgKeyExpired(GpgBaseError):
"""The key has expired."""
timestamp: int
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgKeyRevoked(GpgBaseError):
"""The used key has been revoked by its owner."""
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgNoSecKey(GpgBaseError):
"""The secret key is not available."""
keyid: str
diff --git a/lib/ansible/galaxy/data/COPYING b/lib/ansible/galaxy/data/COPYING
new file mode 100644
index 00000000000..87a9639c92f
--- /dev/null
+++ b/lib/ansible/galaxy/data/COPYING
@@ -0,0 +1,7 @@
+All templates, files and files generated from them in the subdirectories of this one
+are subject to the MIT license when applicable.
+
+MIT License:
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/lib/ansible/galaxy/data/apb/Dockerfile.j2 b/lib/ansible/galaxy/data/apb/Dockerfile.j2
index 4d99a8b0c37..f39abc3bd89 100644
--- a/lib/ansible/galaxy/data/apb/Dockerfile.j2
+++ b/lib/ansible/galaxy/data/apb/Dockerfile.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
FROM ansibleplaybookbundle/apb-base
LABEL "com.redhat.apb.spec"=\
diff --git a/lib/ansible/galaxy/data/apb/Makefile.j2 b/lib/ansible/galaxy/data/apb/Makefile.j2
index ebeaa61f168..9278d246094 100644
--- a/lib/ansible/galaxy/data/apb/Makefile.j2
+++ b/lib/ansible/galaxy/data/apb/Makefile.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
DOCKERHOST = DOCKERHOST
DOCKERORG = DOCKERORG
IMAGENAME = {{ role_name }}
diff --git a/lib/ansible/galaxy/data/apb/README.md b/lib/ansible/galaxy/data/apb/README.md
index 2e350a03fde..0f51845fbd3 100644
--- a/lib/ansible/galaxy/data/apb/README.md
+++ b/lib/ansible/galaxy/data/apb/README.md
@@ -6,17 +6,21 @@ A brief description of the APB goes here.
Requirements
------------
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here.
+For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
APB Variables
--------------
-A description of the settable variables for this APB should go here, including any variables that are in defaults/main.yml, vars/main.yml, apb.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.
+A description of the settable variables for this APB should go here, including any variables that are in defaults/main.yml, vars/main.yml, apb.yml, and
+any variables that can/should be set via parameters to the role.
+Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
-A list of other APBs/roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+A list of other APBs/roles hosted on Galaxy should go here, plus any details in regards to
+parameters that may need to be set for other roles, or variables that are used from other roles.
Example Playbook
----------------
diff --git a/lib/ansible/galaxy/data/apb/apb.yml.j2 b/lib/ansible/galaxy/data/apb/apb.yml.j2
index f96880196fe..e9405dcc359 100644
--- a/lib/ansible/galaxy/data/apb/apb.yml.j2
+++ b/lib/ansible/galaxy/data/apb/apb.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
version: '1.0.0'
name: {{ role_name }}
description: {{ description }}
diff --git a/lib/ansible/galaxy/data/apb/defaults/main.yml.j2 b/lib/ansible/galaxy/data/apb/defaults/main.yml.j2
index 3818e64c335..8842d94e345 100644
--- a/lib/ansible/galaxy/data/apb/defaults/main.yml.j2
+++ b/lib/ansible/galaxy/data/apb/defaults/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# defaults file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/apb/handlers/main.yml.j2 b/lib/ansible/galaxy/data/apb/handlers/main.yml.j2
index 3f4c49674d4..89371a09bab 100644
--- a/lib/ansible/galaxy/data/apb/handlers/main.yml.j2
+++ b/lib/ansible/galaxy/data/apb/handlers/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# handlers file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/apb/meta/main.yml.j2 b/lib/ansible/galaxy/data/apb/meta/main.yml.j2
index 546b8310288..23f870c4c50 100644
--- a/lib/ansible/galaxy/data/apb/meta/main.yml.j2
+++ b/lib/ansible/galaxy/data/apb/meta/main.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
galaxy_info:
author: {{ author }}
description: {{ description }}
diff --git a/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2 b/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2
index 19527310a59..0a863784990 100644
--- a/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2
+++ b/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
- name: "{{ role_name }} playbook to deprovision the application"
hosts: localhost
gather_facts: false
diff --git a/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2 b/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2
index 7b08605ec58..f0691e2b875 100644
--- a/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2
+++ b/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
- name: "{{ role_name }} playbook to provision the application"
hosts: localhost
gather_facts: false
diff --git a/lib/ansible/galaxy/data/apb/tasks/main.yml.j2 b/lib/ansible/galaxy/data/apb/tasks/main.yml.j2
index a9880650590..1bba65a7566 100644
--- a/lib/ansible/galaxy/data/apb/tasks/main.yml.j2
+++ b/lib/ansible/galaxy/data/apb/tasks/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# tasks file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/apb/tests/ansible.cfg b/lib/ansible/galaxy/data/apb/tests/ansible.cfg
index 2f74f1b2722..e2b73526706 100644
--- a/lib/ansible/galaxy/data/apb/tests/ansible.cfg
+++ b/lib/ansible/galaxy/data/apb/tests/ansible.cfg
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
[defaults]
inventory=./inventory
diff --git a/lib/ansible/galaxy/data/apb/tests/inventory b/lib/ansible/galaxy/data/apb/tests/inventory
index ea69cbf1225..a24f8243f1f 100644
--- a/lib/ansible/galaxy/data/apb/tests/inventory
+++ b/lib/ansible/galaxy/data/apb/tests/inventory
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
localhost
diff --git a/lib/ansible/galaxy/data/apb/tests/test.yml.j2 b/lib/ansible/galaxy/data/apb/tests/test.yml.j2
index fb14f85c97b..1b03869978c 100644
--- a/lib/ansible/galaxy/data/apb/tests/test.yml.j2
+++ b/lib/ansible/galaxy/data/apb/tests/test.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
---
- hosts: localhost
gather_facts: no
diff --git a/lib/ansible/galaxy/data/apb/vars/main.yml.j2 b/lib/ansible/galaxy/data/apb/vars/main.yml.j2
index 092d511a1e6..8fc2f46c5e0 100644
--- a/lib/ansible/galaxy/data/apb/vars/main.yml.j2
+++ b/lib/ansible/galaxy/data/apb/vars/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# vars file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/collections_galaxy_meta.yml b/lib/ansible/galaxy/data/collections_galaxy_meta.yml
index 5c4472cda1a..f47f1a7efff 100644
--- a/lib/ansible/galaxy/data/collections_galaxy_meta.yml
+++ b/lib/ansible/galaxy/data/collections_galaxy_meta.yml
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
diff --git a/lib/ansible/galaxy/data/container/defaults/main.yml.j2 b/lib/ansible/galaxy/data/container/defaults/main.yml.j2
index 3818e64c335..8842d94e345 100644
--- a/lib/ansible/galaxy/data/container/defaults/main.yml.j2
+++ b/lib/ansible/galaxy/data/container/defaults/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# defaults file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/container/handlers/main.yml.j2 b/lib/ansible/galaxy/data/container/handlers/main.yml.j2
index 3f4c49674d4..89371a09bab 100644
--- a/lib/ansible/galaxy/data/container/handlers/main.yml.j2
+++ b/lib/ansible/galaxy/data/container/handlers/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# handlers file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/container/meta/container.yml.j2 b/lib/ansible/galaxy/data/container/meta/container.yml.j2
index f033d34110e..97b39617192 100644
--- a/lib/ansible/galaxy/data/container/meta/container.yml.j2
+++ b/lib/ansible/galaxy/data/container/meta/container.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
# Add your Ansible Container service definitions here.
# For example:
#
diff --git a/lib/ansible/galaxy/data/container/meta/main.yml.j2 b/lib/ansible/galaxy/data/container/meta/main.yml.j2
index 8a6a382f291..d3fe1495a25 100644
--- a/lib/ansible/galaxy/data/container/meta/main.yml.j2
+++ b/lib/ansible/galaxy/data/container/meta/main.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
galaxy_info:
author: {{ author }}
description: {{ description }}
diff --git a/lib/ansible/galaxy/data/container/tasks/main.yml.j2 b/lib/ansible/galaxy/data/container/tasks/main.yml.j2
index a9880650590..1bba65a7566 100644
--- a/lib/ansible/galaxy/data/container/tasks/main.yml.j2
+++ b/lib/ansible/galaxy/data/container/tasks/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# tasks file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/container/tests/ansible.cfg b/lib/ansible/galaxy/data/container/tests/ansible.cfg
index 2f74f1b2722..e2b73526706 100644
--- a/lib/ansible/galaxy/data/container/tests/ansible.cfg
+++ b/lib/ansible/galaxy/data/container/tests/ansible.cfg
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
[defaults]
inventory=./inventory
diff --git a/lib/ansible/galaxy/data/container/tests/inventory b/lib/ansible/galaxy/data/container/tests/inventory
index ea69cbf1225..a24f8243f1f 100644
--- a/lib/ansible/galaxy/data/container/tests/inventory
+++ b/lib/ansible/galaxy/data/container/tests/inventory
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
localhost
diff --git a/lib/ansible/galaxy/data/container/tests/test.yml.j2 b/lib/ansible/galaxy/data/container/tests/test.yml.j2
index fb14f85c97b..1b03869978c 100644
--- a/lib/ansible/galaxy/data/container/tests/test.yml.j2
+++ b/lib/ansible/galaxy/data/container/tests/test.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
---
- hosts: localhost
gather_facts: no
diff --git a/lib/ansible/galaxy/data/container/vars/main.yml.j2 b/lib/ansible/galaxy/data/container/vars/main.yml.j2
index 092d511a1e6..8fc2f46c5e0 100644
--- a/lib/ansible/galaxy/data/container/vars/main.yml.j2
+++ b/lib/ansible/galaxy/data/container/vars/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# vars file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/default/collection/README.md.j2 b/lib/ansible/galaxy/data/default/collection/README.md.j2
index 5e5162206ec..ff8d7a3e8b1 100644
--- a/lib/ansible/galaxy/data/default/collection/README.md.j2
+++ b/lib/ansible/galaxy/data/default/collection/README.md.j2
@@ -1,3 +1,4 @@
+{# SPDX-License-Identifier: MIT-0 #}
# Ansible Collection - {{ namespace }}.{{ collection_name }}
Documentation for the collection.
diff --git a/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2 b/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2
index 7821491b257..842bdb10ce1 100644
--- a/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2
+++ b/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
### REQUIRED
{% for option in required_config %}
{{ option.description | comment_ify }}
diff --git a/lib/ansible/galaxy/data/default/collection/meta/runtime.yml b/lib/ansible/galaxy/data/default/collection/meta/runtime.yml
index 20f709edff5..936cae9f714 100644
--- a/lib/ansible/galaxy/data/default/collection/meta/runtime.yml
+++ b/lib/ansible/galaxy/data/default/collection/meta/runtime.yml
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
---
# Collections must specify a minimum required ansible version to upload
# to galaxy
diff --git a/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2 b/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2
index 7c006cfa76f..795e371cd60 100644
--- a/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2
+++ b/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2
@@ -1,3 +1,4 @@
+{# SPDX-License-Identifier: MIT-0 #}
# Collections Plugins Directory
This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder that
diff --git a/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2 b/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2
index 3818e64c335..8842d94e345 100644
--- a/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2
+++ b/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# defaults file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2 b/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2
index 3f4c49674d4..89371a09bab 100644
--- a/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2
+++ b/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# handlers file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/default/role/meta/main.yml.j2 b/lib/ansible/galaxy/data/default/role/meta/main.yml.j2
index 47abff98bb7..b23f47cc5bc 100644
--- a/lib/ansible/galaxy/data/default/role/meta/main.yml.j2
+++ b/lib/ansible/galaxy/data/default/role/meta/main.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
galaxy_info:
author: {{ author }}
description: {{ description }}
diff --git a/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2 b/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2
index a9880650590..1bba65a7566 100644
--- a/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2
+++ b/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# tasks file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/default/role/tests/inventory b/lib/ansible/galaxy/data/default/role/tests/inventory
index 878877b0776..03ca42fd173 100644
--- a/lib/ansible/galaxy/data/default/role/tests/inventory
+++ b/lib/ansible/galaxy/data/default/role/tests/inventory
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
localhost
diff --git a/lib/ansible/galaxy/data/default/role/tests/test.yml.j2 b/lib/ansible/galaxy/data/default/role/tests/test.yml.j2
index 0c40f95a697..bf4f028593e 100644
--- a/lib/ansible/galaxy/data/default/role/tests/test.yml.j2
+++ b/lib/ansible/galaxy/data/default/role/tests/test.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
---
- hosts: localhost
remote_user: root
diff --git a/lib/ansible/galaxy/data/default/role/vars/main.yml.j2 b/lib/ansible/galaxy/data/default/role/vars/main.yml.j2
index 092d511a1e6..8fc2f46c5e0 100644
--- a/lib/ansible/galaxy/data/default/role/vars/main.yml.j2
+++ b/lib/ansible/galaxy/data/default/role/vars/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# vars file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2 b/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2
index cf434d750c3..51e41111117 100644
--- a/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2
+++ b/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
diff --git a/lib/ansible/galaxy/data/network/defaults/main.yml.j2 b/lib/ansible/galaxy/data/network/defaults/main.yml.j2
index 3818e64c335..8842d94e345 100644
--- a/lib/ansible/galaxy/data/network/defaults/main.yml.j2
+++ b/lib/ansible/galaxy/data/network/defaults/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# defaults file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/network/library/example_command.py.j2 b/lib/ansible/galaxy/data/network/library/example_command.py.j2
index dff5b153a64..9aa6ef62d4d 100644
--- a/lib/ansible/galaxy/data/network/library/example_command.py.j2
+++ b/lib/ansible/galaxy/data/network/library/example_command.py.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
diff --git a/lib/ansible/galaxy/data/network/library/example_config.py.j2 b/lib/ansible/galaxy/data/network/library/example_config.py.j2
index 0a8479b36cb..2913af08a0c 100644
--- a/lib/ansible/galaxy/data/network/library/example_config.py.j2
+++ b/lib/ansible/galaxy/data/network/library/example_config.py.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
diff --git a/lib/ansible/galaxy/data/network/library/example_facts.py.j2 b/lib/ansible/galaxy/data/network/library/example_facts.py.j2
index 2f0bafa1ccb..f90f456eab0 100644
--- a/lib/ansible/galaxy/data/network/library/example_facts.py.j2
+++ b/lib/ansible/galaxy/data/network/library/example_facts.py.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
diff --git a/lib/ansible/galaxy/data/network/meta/main.yml.j2 b/lib/ansible/galaxy/data/network/meta/main.yml.j2
index fe754a42ccd..0cd67263113 100644
--- a/lib/ansible/galaxy/data/network/meta/main.yml.j2
+++ b/lib/ansible/galaxy/data/network/meta/main.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
galaxy_info:
author: {{ author }}
description: {{ description }}
diff --git a/lib/ansible/galaxy/data/network/module_utils/example.py.j2 b/lib/ansible/galaxy/data/network/module_utils/example.py.j2
index 9422e747024..a3d9aeac236 100644
--- a/lib/ansible/galaxy/data/network/module_utils/example.py.j2
+++ b/lib/ansible/galaxy/data/network/module_utils/example.py.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
diff --git a/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2 b/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2
index 69c90c990af..fb9ddfad86c 100644
--- a/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2
+++ b/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
diff --git a/lib/ansible/galaxy/data/network/tasks/main.yml.j2 b/lib/ansible/galaxy/data/network/tasks/main.yml.j2
index a9880650590..1bba65a7566 100644
--- a/lib/ansible/galaxy/data/network/tasks/main.yml.j2
+++ b/lib/ansible/galaxy/data/network/tasks/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# tasks file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2 b/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2
index f44e79f3dcf..d3562d15136 100644
--- a/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2
+++ b/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
diff --git a/lib/ansible/galaxy/data/network/tests/inventory b/lib/ansible/galaxy/data/network/tests/inventory
index 878877b0776..03ca42fd173 100644
--- a/lib/ansible/galaxy/data/network/tests/inventory
+++ b/lib/ansible/galaxy/data/network/tests/inventory
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
localhost
diff --git a/lib/ansible/galaxy/data/network/tests/test.yml.j2 b/lib/ansible/galaxy/data/network/tests/test.yml.j2
index 11284eb5b8e..93263043bf1 100644
--- a/lib/ansible/galaxy/data/network/tests/test.yml.j2
+++ b/lib/ansible/galaxy/data/network/tests/test.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
---
- hosts: localhost
connection: network_cli
diff --git a/lib/ansible/galaxy/data/network/vars/main.yml.j2 b/lib/ansible/galaxy/data/network/vars/main.yml.j2
index 092d511a1e6..8fc2f46c5e0 100644
--- a/lib/ansible/galaxy/data/network/vars/main.yml.j2
+++ b/lib/ansible/galaxy/data/network/vars/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# vars file for {{ role_name }}
diff --git a/lib/ansible/galaxy/dependency_resolution/providers.py b/lib/ansible/galaxy/dependency_resolution/providers.py
index 716f5423b37..7578cae785c 100644
--- a/lib/ansible/galaxy/dependency_resolution/providers.py
+++ b/lib/ansible/galaxy/dependency_resolution/providers.py
@@ -126,7 +126,7 @@ class CollectionDependencyProviderBase(AbstractProvider):
the current candidate list
* ``parent`` specifies the candidate that provides
- (dependend on) the requirement, or `None`
+ (depended on) the requirement, or `None`
to indicate a root requirement.
resolvelib >=0.7.0, < 0.8.0
@@ -202,7 +202,7 @@ class CollectionDependencyProviderBase(AbstractProvider):
remote archives), the one-and-only match is returned
For a "named" requirement, Galaxy-compatible APIs are consulted
- to find concrete candidates for this requirement. Of theres a
+ to find concrete candidates for this requirement. If there's a
pre-installed candidate, it's prepended in front of others.
resolvelib >=0.5.3, <0.6.0
@@ -437,7 +437,7 @@ class CollectionDependencyProviderBase(AbstractProvider):
# FIXME: differs. So how do we resolve this case? Priority?
# FIXME: Taking into account a pinned hash? Exploding on
# FIXME: any differences?
- # NOTE: The underlying implmentation currently uses first found
+ # NOTE: The underlying implementation currently uses first found
req_map = self._api_proxy.get_collection_dependencies(candidate)
# NOTE: This guard expression MUST perform an early exit only
diff --git a/lib/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py
index d6d8454b809..806a9996ad4 100644
--- a/lib/ansible/galaxy/role.py
+++ b/lib/ansible/galaxy/role.py
@@ -256,7 +256,7 @@ class GalaxyRole(object):
display.display("- downloading role from %s" % archive_url)
try:
- url_file = open_url(archive_url, validate_certs=self._validate_certs, http_agent=user_agent())
+ url_file = open_url(archive_url, validate_certs=self._validate_certs, http_agent=user_agent(), timeout=60)
temp_file = tempfile.NamedTemporaryFile(delete=False)
data = url_file.read()
while data:
@@ -386,6 +386,8 @@ class GalaxyRole(object):
else:
os.makedirs(self.path)
+ resolved_archive = unfrackpath(archive_parent_dir, follow=False)
+
# We strip off any higher-level directories for all of the files
# contained within the tar file here. The default is 'github_repo-target'.
# Gerrit instances, on the other hand, does not have a parent directory at all.
@@ -400,33 +402,29 @@ class GalaxyRole(object):
if not (attr_value := getattr(member, attr, None)):
continue
- if attr_value.startswith(os.sep) and not is_subpath(attr_value, archive_parent_dir):
- err = f"Invalid {attr} for tarfile member: path {attr_value} is not a subpath of the role {archive_parent_dir}"
- raise AnsibleError(err)
-
if attr == 'linkname':
# Symlinks are relative to the link
- relative_to_archive_dir = os.path.dirname(getattr(member, 'name', ''))
- archive_dir_path = os.path.join(archive_parent_dir, relative_to_archive_dir, attr_value)
+ relative_to = os.path.dirname(getattr(member, 'name', ''))
else:
# Normalize paths that start with the archive dir
attr_value = attr_value.replace(archive_parent_dir, "", 1)
attr_value = os.path.join(*attr_value.split(os.sep)) # remove leading os.sep
- archive_dir_path = os.path.join(archive_parent_dir, attr_value)
+ relative_to = ''
- resolved_archive = unfrackpath(archive_parent_dir)
- resolved_path = unfrackpath(archive_dir_path)
- if not is_subpath(resolved_path, resolved_archive):
- err = f"Invalid {attr} for tarfile member: path {resolved_path} is not a subpath of the role {resolved_archive}"
+ full_path = os.path.join(resolved_archive, relative_to, attr_value)
+ if not is_subpath(full_path, resolved_archive, real=True):
+ err = f"Invalid {attr} for tarfile member: path {full_path} is not a subpath of the role {resolved_archive}"
raise AnsibleError(err)
- relative_path = os.path.join(*resolved_path.replace(resolved_archive, "", 1).split(os.sep)) or '.'
+ relative_path_dir = os.path.join(resolved_archive, relative_to)
+ relative_path = os.path.join(*full_path.replace(relative_path_dir, "", 1).split(os.sep))
setattr(member, attr, relative_path)
if _check_working_data_filter():
# deprecated: description='extract fallback without filter' python_version='3.11'
role_tar_file.extract(member, to_native(self.path), filter='data') # type: ignore[call-arg]
else:
+ # Remove along with manual path filter once Python 3.12 is minimum supported version
role_tar_file.extract(member, to_native(self.path))
# write out the install info file for later use
diff --git a/lib/ansible/galaxy/token.py b/lib/ansible/galaxy/token.py
index 183e2af109e..573d1b3a56c 100644
--- a/lib/ansible/galaxy/token.py
+++ b/lib/ansible/galaxy/token.py
@@ -21,11 +21,14 @@
from __future__ import annotations
import base64
-import os
import json
+import os
+import time
from stat import S_IRUSR, S_IWUSR
+from urllib.error import HTTPError
from ansible import constants as C
+from ansible.galaxy.api import GalaxyError
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.common.yaml import yaml_dump, yaml_load
@@ -57,12 +60,16 @@ class KeycloakToken(object):
self.client_id = client_id
if self.client_id is None:
self.client_id = 'cloud-services'
+ self._expiration = None
def _form_payload(self):
return 'grant_type=refresh_token&client_id=%s&refresh_token=%s' % (self.client_id,
self.access_token)
def get(self):
+ if self._expiration and time.time() >= self._expiration:
+ self._token = None
+
if self._token:
return self._token
@@ -76,15 +83,20 @@ class KeycloakToken(object):
# or 'azp' (Authorized party - the party to which the ID Token was issued)
payload = self._form_payload()
- resp = open_url(to_native(self.auth_url),
- data=payload,
- validate_certs=self.validate_certs,
- method='POST',
- http_agent=user_agent())
+ try:
+ resp = open_url(to_native(self.auth_url),
+ data=payload,
+ validate_certs=self.validate_certs,
+ method='POST',
+ http_agent=user_agent())
+ except HTTPError as e:
+ raise GalaxyError(e, 'Unable to get access token')
- # TODO: handle auth errors
+ data = json.load(resp)
- data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
+ # So that we have a buffer, expire the token in ~2/3 the given value
+ expires_in = data['expires_in'] // 3 * 2
+ self._expiration = time.time() + expires_in
# - extract 'access_token'
self._token = data.get('access_token')
diff --git a/lib/ansible/keyword_desc.yml b/lib/ansible/keyword_desc.yml
index 22a612ccc1c..4aea8234b61 100644
--- a/lib/ansible/keyword_desc.yml
+++ b/lib/ansible/keyword_desc.yml
@@ -61,7 +61,7 @@ serial: Explicitly define how Ansible batches the execution of the current play
strategy: Allows you to choose the strategy plugin to use for the play. See :ref:`strategy_plugins`.
tags: Tags applied to the task or included tasks, this allows selecting subsets of tasks from the command line.
tasks: Main list of tasks to execute in the play, they run after :term:`roles` and before :term:`post_tasks`.
-timeout: Time limit for the task to execute in, if exceeded Ansible will interrupt and fail the task.
+timeout: Time limit for the task action to execute in, if exceeded, Ansible will interrupt the process. Timeout does not include templating or looping.
throttle: Limit the number of concurrent task runs on task, block and playbook level. This is independent of the forks and serial settings, but cannot be set higher than those limits. For example, if forks is set to 10 and the throttle is set to 15, at most 10 hosts will be operated on in parallel.
until: "This keyword implies a ':term:`retries` loop' that will go on until the condition supplied here is met or we hit the :term:`retries` limit."
vars: Dictionary/map of variables
diff --git a/lib/ansible/module_utils/_internal/__init__.py b/lib/ansible/module_utils/_internal/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/lib/ansible/module_utils/_internal/_concurrent/__init__.py b/lib/ansible/module_utils/_internal/_concurrent/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/lib/ansible/module_utils/_internal/_concurrent/_daemon_threading.py b/lib/ansible/module_utils/_internal/_concurrent/_daemon_threading.py
new file mode 100644
index 00000000000..0b32a062fed
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_concurrent/_daemon_threading.py
@@ -0,0 +1,28 @@
+"""Proxy stdlib threading module that only supports non-joinable daemon threads."""
+# NB: all new local module attrs are _ prefixed to ensure an identical public attribute surface area to the module we're proxying
+
+from __future__ import annotations as _annotations
+
+import threading as _threading
+import typing as _t
+
+
+class _DaemonThread(_threading.Thread):
+ """
+ Daemon-only Thread subclass; prevents running threads of this type from blocking interpreter shutdown and process exit.
+ The join() method is a no-op.
+ """
+
+ def __init__(self, *args, daemon: bool | None = None, **kwargs) -> None:
+ super().__init__(*args, daemon=daemon or True, **kwargs)
+
+ def join(self, timeout=None) -> None:
+ """ThreadPoolExecutor's atexit handler joins all queue threads before allowing shutdown; prevent them from blocking."""
+
+
+Thread = _DaemonThread # shadow the real Thread attr with our _DaemonThread
+
+
+def __getattr__(name: str) -> _t.Any:
+ """Delegate anything not defined locally to the real `threading` module."""
+ return getattr(_threading, name)
diff --git a/lib/ansible/module_utils/_internal/_concurrent/_futures.py b/lib/ansible/module_utils/_internal/_concurrent/_futures.py
new file mode 100644
index 00000000000..2ca493f6873
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_concurrent/_futures.py
@@ -0,0 +1,21 @@
+"""Utilities for concurrent code execution using futures."""
+
+from __future__ import annotations
+
+import concurrent.futures
+import types
+
+from . import _daemon_threading
+
+
+class DaemonThreadPoolExecutor(concurrent.futures.ThreadPoolExecutor):
+ """ThreadPoolExecutor subclass that creates non-joinable daemon threads for non-blocking pool and process shutdown with abandoned threads."""
+
+ atc = concurrent.futures.ThreadPoolExecutor._adjust_thread_count
+
+ # clone the base class `_adjust_thread_count` method with a copy of its globals dict
+ _adjust_thread_count = types.FunctionType(atc.__code__, atc.__globals__.copy(), name=atc.__name__, argdefs=atc.__defaults__, closure=atc.__closure__)
+ # patch the method closure's `threading` module import to use our daemon-only thread factory instead
+ _adjust_thread_count.__globals__.update(threading=_daemon_threading)
+
+ del atc # don't expose this as a class attribute
diff --git a/lib/ansible/module_utils/api.py b/lib/ansible/module_utils/api.py
index 8f08772278e..2415c38a839 100644
--- a/lib/ansible/module_utils/api.py
+++ b/lib/ansible/module_utils/api.py
@@ -28,7 +28,7 @@ from __future__ import annotations
import copy
import functools
import itertools
-import random
+import secrets
import sys
import time
@@ -131,7 +131,7 @@ def generate_jittered_backoff(retries=10, delay_base=3, delay_threshold=60):
:param delay_threshold: The maximum time in seconds for any delay.
"""
for retry in range(0, retries):
- yield random.randint(0, min(delay_threshold, delay_base * 2 ** retry))
+ yield secrets.randbelow(min(delay_threshold, delay_base * 2 ** retry))
def retry_never(exception_or_result):
diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
index 51638a07d50..19dbb1d1541 100644
--- a/lib/ansible/module_utils/basic.py
+++ b/lib/ansible/module_utils/basic.py
@@ -458,7 +458,7 @@ class AnsibleModule(object):
self._selinux_mls_enabled = None
self._selinux_initial_context = None
- # finally, make sure we're in a sane working dir
+ # finally, make sure we're in a logical working dir
self._set_cwd()
@property
@@ -1202,6 +1202,7 @@ class AnsibleModule(object):
setattr(self, PASS_VARS[k][0], PASS_VARS[k][1])
def safe_eval(self, value, locals=None, include_exceptions=False):
+ # deprecated: description='no longer used in the codebase' core_version='2.21'
return safe_eval(value, locals, include_exceptions)
def _load_params(self):
@@ -1353,9 +1354,10 @@ class AnsibleModule(object):
Find system executable in PATH.
:param arg: The executable to find.
- :param required: if executable is not found and required is ``True``, fail_json
+ :param required: if the executable is not found and required is ``True``, fail_json
:param opt_dirs: optional list of directories to search in addition to ``PATH``
- :returns: if found return full path; otherwise return None
+ :returns: if found return full path; otherwise return original arg, unless 'warning' then return None
+ :raises: Sysexit: if arg is not found and required=True (via fail_json)
'''
bin_path = None
@@ -1364,8 +1366,6 @@ class AnsibleModule(object):
except ValueError as e:
if required:
self.fail_json(msg=to_text(e))
- else:
- return bin_path
return bin_path
@@ -1432,7 +1432,7 @@ class AnsibleModule(object):
kwargs['deprecations'] = deprecations
# preserve bools/none from no_log
- # TODO: once python version on target high enough, dict comprh
+ # TODO: once python version on target high enough, dict comprehensions
preserved = {}
for k, v in kwargs.items():
if v is None or isinstance(v, bool):
@@ -1557,7 +1557,7 @@ class AnsibleModule(object):
# Similar to shutil.copy(), but metadata is copied as well - in fact,
# this is just shutil.copy() followed by copystat(). This is similar
# to the Unix command cp -p.
- #
+
# shutil.copystat(src, dst)
# Copy the permission bits, last access time, last modification time,
# and flags from src to dst. The file contents, owner, and group are
@@ -1598,6 +1598,7 @@ class AnsibleModule(object):
dest_stat = os.stat(b_dest)
os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
shutil.copystat(b_dest, b_src)
+ os.utime(b_src, times=(time.time(), time.time()))
except OSError as e:
if e.errno != errno.EPERM:
raise
@@ -1659,8 +1660,10 @@ class AnsibleModule(object):
b_tmp_dest_name, context, False)
try:
tmp_stat = os.stat(b_tmp_dest_name)
- if keep_dest_attrs and dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
- os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
+ if keep_dest_attrs:
+ if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
+ os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
+ os.utime(b_tmp_dest_name, times=(time.time(), time.time()))
except OSError as e:
if e.errno != errno.EPERM:
raise
@@ -1686,8 +1689,12 @@ class AnsibleModule(object):
umask = os.umask(0)
os.umask(umask)
os.chmod(b_dest, S_IRWU_RWG_RWO & ~umask)
+ dest_dir_stat = os.stat(os.path.dirname(b_dest))
try:
- os.chown(b_dest, os.geteuid(), os.getegid())
+ if dest_dir_stat.st_mode & stat.S_ISGID:
+ os.chown(b_dest, os.geteuid(), dest_dir_stat.st_gid)
+ else:
+ os.chown(b_dest, os.geteuid(), os.getegid())
except OSError:
# We're okay with trying our best here. If the user is not
# root (or old Unices) they won't be able to chown.
@@ -2054,7 +2061,7 @@ class AnsibleModule(object):
# not as exact as above, but should be good enough for most platforms that fail the previous call
buffer_size = select.PIPE_BUF
except Exception:
- buffer_size = 9000 # use sane default JIC
+ buffer_size = 9000 # use logical default JIC
return buffer_size
diff --git a/lib/ansible/module_utils/common/collections.py b/lib/ansible/module_utils/common/collections.py
index e4cb9ec1d70..28c53e14e2c 100644
--- a/lib/ansible/module_utils/common/collections.py
+++ b/lib/ansible/module_utils/common/collections.py
@@ -65,7 +65,7 @@ class ImmutableDict(Hashable, Mapping):
def is_string(seq):
- """Identify whether the input has a string-like type (inclding bytes)."""
+ """Identify whether the input has a string-like type (including bytes)."""
# AnsibleVaultEncryptedUnicode inherits from Sequence, but is expected to be a string like object
return isinstance(seq, (text_type, binary_type)) or getattr(seq, '__ENCRYPTED__', False)
diff --git a/lib/ansible/module_utils/common/file.py b/lib/ansible/module_utils/common/file.py
index b62e4c64f50..1b976fd9329 100644
--- a/lib/ansible/module_utils/common/file.py
+++ b/lib/ansible/module_utils/common/file.py
@@ -7,12 +7,6 @@ import os
import stat
import re
-try:
- import selinux # pylint: disable=unused-import
- HAVE_SELINUX = True
-except ImportError:
- HAVE_SELINUX = False
-
FILE_ATTRIBUTES = {
'A': 'noatime',
diff --git a/lib/ansible/module_utils/common/process.py b/lib/ansible/module_utils/common/process.py
index 8e62c5f5d6e..85ffd2195e7 100644
--- a/lib/ansible/module_utils/common/process.py
+++ b/lib/ansible/module_utils/common/process.py
@@ -12,13 +12,18 @@ from ansible.module_utils.common.warnings import deprecate
def get_bin_path(arg, opt_dirs=None, required=None):
'''
Find system executable in PATH. Raises ValueError if the executable is not found.
- Optional arguments:
- - required: [Deprecated] Before 2.10, if executable is not found and required is true it raises an Exception.
- In 2.10 and later, an Exception is always raised. This parameter will be removed in 2.21.
- - opt_dirs: optional list of directories to search in addition to PATH
+
+ :param arg: the executable to find
+ :type arg: string
+ :param opt_dirs: optional list of directories to search in addition to PATH
+ :type opt_dirs: list of strings
+ :param required: DEPRECATED. This parameter will be removed in 2.21
+ :type required: boolean
+ :returns: path to arg (should be abs path unless PATH or opt_dirs are relative paths)
+ :raises: ValueError: if arg is not found
+
In addition to PATH and opt_dirs, this function also looks through /sbin, /usr/sbin and /usr/local/sbin. A lot of
modules, especially for gathering facts, depend on this behaviour.
- If found return full path, otherwise raise ValueError.
'''
if required is not None:
deprecate(
@@ -27,26 +32,34 @@ def get_bin_path(arg, opt_dirs=None, required=None):
collection_name="ansible.builtin",
)
+ paths = []
+ sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
opt_dirs = [] if opt_dirs is None else opt_dirs
- sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
- paths = []
+ # Construct possible paths with precedence
+ # passed in paths
for d in opt_dirs:
if d is not None and os.path.exists(d):
paths.append(d)
+ # system configured paths
paths += os.environ.get('PATH', '').split(os.pathsep)
- bin_path = None
- # mangle PATH to include /sbin dirs
+
+ # existing /sbin dirs, if not there already
for p in sbin_paths:
if p not in paths and os.path.exists(p):
paths.append(p)
+
+ # Search for binary
+ bin_path = None
for d in paths:
if not d:
continue
path = os.path.join(d, arg)
if os.path.exists(path) and not os.path.isdir(path) and is_executable(path):
+            # first found wins
bin_path = path
break
+
if bin_path is None:
raise ValueError('Failed to find required executable "%s" in paths: %s' % (arg, os.pathsep.join(paths)))
diff --git a/lib/ansible/module_utils/common/text/formatters.py b/lib/ansible/module_utils/common/text/formatters.py
index 3096abec7c7..d548085c57f 100644
--- a/lib/ansible/module_utils/common/text/formatters.py
+++ b/lib/ansible/module_utils/common/text/formatters.py
@@ -20,6 +20,18 @@ SIZE_RANGES = {
'B': 1,
}
+VALID_UNITS = {
+ 'B': (('byte', 'B'), ('bit', 'b')),
+ 'K': (('kilobyte', 'KB'), ('kilobit', 'Kb')),
+ 'M': (('megabyte', 'MB'), ('megabit', 'Mb')),
+ 'G': (('gigabyte', 'GB'), ('gigabit', 'Gb')),
+ 'T': (('terabyte', 'TB'), ('terabit', 'Tb')),
+ 'P': (('petabyte', 'PB'), ('petabit', 'Pb')),
+ 'E': (('exabyte', 'EB'), ('exabit', 'Eb')),
+ 'Z': (('zetabyte', 'ZB'), ('zetabit', 'Zb')),
+ 'Y': (('yottabyte', 'YB'), ('yottabit', 'Yb')),
+}
+
def lenient_lowercase(lst):
"""Lowercase elements of a list.
@@ -53,7 +65,8 @@ def human_to_bytes(number, default_unit=None, isbits=False):
The function expects 'b' (lowercase) as a bit identifier, e.g. 'Mb'/'Kb'/etc.
if 'MB'/'KB'/... is passed, the ValueError will be rased.
"""
- m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE)
+ m = re.search(r'^([0-9]*\.?[0-9]+)(?:\s*([A-Za-z]+))?\s*$', str(number))
+
if m is None:
raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
try:
@@ -86,10 +99,13 @@ def human_to_bytes(number, default_unit=None, isbits=False):
expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
if range_key == 'B':
expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
-
- if unit_class_name in unit.lower():
+ unit_group = VALID_UNITS.get(range_key, None)
+ if unit_group is None:
+ raise ValueError(f"human_to_bytes() can't interpret a valid unit for {range_key}")
+ isbits_flag = 1 if isbits else 0
+ if unit.lower() == unit_group[isbits_flag][0]:
pass
- elif unit[1] != unit_class:
+ elif unit != unit_group[isbits_flag][1]:
raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))
return int(round(num * limit))
diff --git a/lib/ansible/module_utils/common/validation.py b/lib/ansible/module_utils/common/validation.py
index 69721e47f18..399767e775d 100644
--- a/lib/ansible/module_utils/common/validation.py
+++ b/lib/ansible/module_utils/common/validation.py
@@ -4,6 +4,7 @@
from __future__ import annotations
+import decimal
import json
import os
import re
@@ -13,10 +14,10 @@ from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.collections import is_iterable
from ansible.module_utils.common.text.converters import jsonify
from ansible.module_utils.common.text.formatters import human_to_bytes
+from ansible.module_utils.common.warnings import deprecate
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import (
binary_type,
- integer_types,
string_types,
text_type,
)
@@ -39,6 +40,10 @@ def count_terms(terms, parameters):
def safe_eval(value, locals=None, include_exceptions=False):
+ deprecate(
+ "The safe_eval function should not be used.",
+ version="2.21",
+ )
# do not allow method calls to modules
if not isinstance(value, string_types):
# already templated to a datavaluestructure, perhaps?
@@ -415,7 +420,7 @@ def check_type_dict(value):
Raises :class:`TypeError` if unable to convert to a dict
- :arg value: Dict or string to convert to a dict. Accepts ``k1=v2, k2=v2``.
+ :arg value: Dict or string to convert to a dict. Accepts ``k1=v2, k2=v2`` or ``k1=v2 k2=v2``.
:returns: value converted to a dictionary
"""
@@ -427,10 +432,14 @@ def check_type_dict(value):
try:
return json.loads(value)
except Exception:
- (result, exc) = safe_eval(value, dict(), include_exceptions=True)
- if exc is not None:
- raise TypeError('unable to evaluate string as dictionary')
- return result
+ try:
+ result = literal_eval(value)
+ except Exception:
+ pass
+ else:
+ if isinstance(result, dict):
+ return result
+ raise TypeError('unable to evaluate string as dictionary')
elif '=' in value:
fields = []
field_buffer = []
@@ -457,7 +466,11 @@ def check_type_dict(value):
field = ''.join(field_buffer)
if field:
fields.append(field)
- return dict(x.split("=", 1) for x in fields)
+ try:
+ return dict(x.split("=", 1) for x in fields)
+ except ValueError:
+ # no "=" to split on: "k1=v1, k2"
+ raise TypeError('unable to evaluate string in the "key=value" format as dictionary')
else:
raise TypeError("dictionary requested, could not parse JSON or key=value")
@@ -493,16 +506,15 @@ def check_type_int(value):
:return: int of given value
"""
- if isinstance(value, integer_types):
- return value
-
- if isinstance(value, string_types):
+ if not isinstance(value, int):
try:
- return int(value)
- except ValueError:
- pass
-
- raise TypeError('%s cannot be converted to an int' % type(value))
+ if (decimal_value := decimal.Decimal(value)) != (int_value := int(decimal_value)):
+ raise ValueError("Significant decimal part found")
+ else:
+ value = int_value
+ except (decimal.DecimalException, TypeError, ValueError) as e:
+ raise TypeError(f'"{value!r}" cannot be converted to an int') from e
+ return value
def check_type_float(value):
@@ -514,16 +526,12 @@ def check_type_float(value):
:returns: float of given value.
"""
- if isinstance(value, float):
- return value
-
- if isinstance(value, (binary_type, text_type, int)):
+ if not isinstance(value, float):
try:
- return float(value)
- except ValueError:
- pass
-
- raise TypeError('%s cannot be converted to a float' % type(value))
+ value = float(value)
+ except (TypeError, ValueError) as e:
+ raise TypeError(f'{type(value)} cannot be converted to a float')
+ return value
def check_type_path(value,):
diff --git a/lib/ansible/module_utils/compat/paramiko.py b/lib/ansible/module_utils/compat/paramiko.py
index 8c84261cef8..302309cdaa8 100644
--- a/lib/ansible/module_utils/compat/paramiko.py
+++ b/lib/ansible/module_utils/compat/paramiko.py
@@ -11,7 +11,12 @@ PARAMIKO_IMPORT_ERR = None
try:
with warnings.catch_warnings():
- warnings.filterwarnings('ignore', message='Blowfish has been deprecated', category=UserWarning)
+ # Blowfish has been moved, but the deprecated import is used by paramiko versions older than 2.9.5.
+ # See: https://github.com/paramiko/paramiko/pull/2039
+ warnings.filterwarnings('ignore', message='Blowfish has been ', category=UserWarning)
+ # TripleDES has been moved, but the deprecated import is used by paramiko versions older than 3.3.2 and 3.4.1.
+ # See: https://github.com/paramiko/paramiko/pull/2421
+ warnings.filterwarnings('ignore', message='TripleDES has been ', category=UserWarning)
import paramiko # pylint: disable=unused-import
# paramiko and gssapi are incompatible and raise AttributeError not ImportError
# When running in FIPS mode, cryptography raises InternalError
diff --git a/lib/ansible/module_utils/compat/selinux.py b/lib/ansible/module_utils/compat/selinux.py
index 0900388b761..a7a19cfd63f 100644
--- a/lib/ansible/module_utils/compat/selinux.py
+++ b/lib/ansible/module_utils/compat/selinux.py
@@ -11,8 +11,8 @@ from ctypes import CDLL, c_char_p, c_int, byref, POINTER, get_errno
try:
_selinux_lib = CDLL('libselinux.so.1', use_errno=True)
-except OSError:
- raise ImportError('unable to load libselinux.so')
+except OSError as ex:
+ raise ImportError('unable to load libselinux.so') from ex
def _module_setup():
diff --git a/lib/ansible/module_utils/connection.py b/lib/ansible/module_utils/connection.py
index cc889696e70..b6720125855 100644
--- a/lib/ansible/module_utils/connection.py
+++ b/lib/ansible/module_utils/connection.py
@@ -29,8 +29,8 @@
from __future__ import annotations
import os
-import hashlib
import json
+import pickle
import socket
import struct
import traceback
@@ -40,30 +40,14 @@ from functools import partial
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.module_utils.common.json import AnsibleJSONEncoder
from ansible.module_utils.six import iteritems
-from ansible.module_utils.six.moves import cPickle
-def write_to_file_descriptor(fd, obj):
- """Handles making sure all data is properly written to file descriptor fd.
+def write_to_stream(stream, obj):
+ """Write a length+newline-prefixed pickled object to a stream."""
+ src = pickle.dumps(obj)
- In particular, that data is encoded in a character stream-friendly way and
- that all data gets written before returning.
- """
- # Need to force a protocol that is compatible with both py2 and py3.
- # That would be protocol=2 or less.
- # Also need to force a protocol that excludes certain control chars as
- # stdin in this case is a pty and control chars will cause problems.
- # that means only protocol=0 will work.
- src = cPickle.dumps(obj, protocol=0)
-
- # raw \r characters will not survive pty round-trip
- # They should be rehydrated on the receiving end
- src = src.replace(b'\r', br'\r')
- data_hash = to_bytes(hashlib.sha1(src).hexdigest())
-
- os.write(fd, b'%d\n' % len(src))
- os.write(fd, src)
- os.write(fd, b'%s\n' % data_hash)
+ stream.write(b'%d\n' % len(src))
+ stream.write(src)
def send_data(s, data):
@@ -146,7 +130,7 @@ class Connection(object):
data = json.dumps(req, cls=AnsibleJSONEncoder, vault_to_text=True)
except TypeError as exc:
raise ConnectionError(
- "Failed to encode some variables as JSON for communication with ansible-connection. "
+ "Failed to encode some variables as JSON for communication with the persistent connection helper. "
"The original exception was: %s" % to_text(exc)
)
@@ -176,7 +160,7 @@ class Connection(object):
if response['id'] != reqid:
raise ConnectionError('invalid json-rpc id received')
if "result_type" in response:
- response["result"] = cPickle.loads(to_bytes(response["result"]))
+ response["result"] = pickle.loads(to_bytes(response["result"], errors="surrogateescape"))
return response
diff --git a/lib/ansible/module_utils/csharp/Ansible.Basic.cs b/lib/ansible/module_utils/csharp/Ansible.Basic.cs
index a042af8cecc..085958270d7 100644
--- a/lib/ansible/module_utils/csharp/Ansible.Basic.cs
+++ b/lib/ansible/module_utils/csharp/Ansible.Basic.cs
@@ -1025,7 +1025,16 @@ namespace Ansible.Basic
foreach (DictionaryEntry entry in param)
{
string paramKey = (string)entry.Key;
- if (!legalInputs.Contains(paramKey, StringComparer.OrdinalIgnoreCase))
+ if (paramKey == "_ansible_exec_wrapper_warnings")
+ {
+ // Special key used in module_powershell_wrapper to pass
+ // along any warnings that should be returned back to
+ // Ansible.
+ removedParameters.Add(paramKey);
+ foreach (string warning in (IList)entry.Value)
+ Warn(warning);
+ }
+ else if (!legalInputs.Contains(paramKey, StringComparer.OrdinalIgnoreCase))
unsupportedParameters.Add(paramKey);
else if (!legalInputs.Contains(paramKey))
// For backwards compatibility we do not care about the case but we need to warn the users as this will
diff --git a/lib/ansible/module_utils/csharp/Ansible.Become.cs b/lib/ansible/module_utils/csharp/Ansible.Become.cs
index d3bb1564fa6..68d4d11d7a5 100644
--- a/lib/ansible/module_utils/csharp/Ansible.Become.cs
+++ b/lib/ansible/module_utils/csharp/Ansible.Become.cs
@@ -333,13 +333,12 @@ namespace Ansible.Become
// Grant access to the current Windows Station and Desktop to the become user
GrantAccessToWindowStationAndDesktop(account);
- // Try and impersonate a SYSTEM token, we need a SYSTEM token to either become a well known service
- // account or have administrative rights on the become access token.
- // If we ultimately are becoming the SYSTEM account we want the token with the most privileges available.
- // https://github.com/ansible/ansible/issues/71453
- bool mostPrivileges = becomeSid == "S-1-5-18";
+ // Try and impersonate a SYSTEM token. We need the SeTcbPrivilege for
+ // - LogonUser for a service SID
+ // - S4U logon
+ // - Token elevation
systemToken = GetPrimaryTokenForUser(new SecurityIdentifier("S-1-5-18"),
- new List() { "SeTcbPrivilege" }, mostPrivileges);
+ new List() { "SeTcbPrivilege" });
if (systemToken != null)
{
try
@@ -357,11 +356,9 @@ namespace Ansible.Become
try
{
- if (becomeSid == "S-1-5-18")
- userTokens.Add(systemToken);
// Cannot use String.IsEmptyOrNull() as an empty string is an account that doesn't have a pass.
// We only use S4U if no password was defined or it was null
- else if (!SERVICE_SIDS.Contains(becomeSid) && password == null && logonType != LogonType.NewCredentials)
+ if (!SERVICE_SIDS.Contains(becomeSid) && password == null && logonType != LogonType.NewCredentials)
{
// If no password was specified, try and duplicate an existing token for that user or use S4U to
// generate one without network credentials
@@ -384,6 +381,11 @@ namespace Ansible.Become
string domain = null;
switch (becomeSid)
{
+ case "S-1-5-18":
+ logonType = LogonType.Service;
+ domain = "NT AUTHORITY";
+ username = "SYSTEM";
+ break;
case "S-1-5-19":
logonType = LogonType.Service;
domain = "NT AUTHORITY";
@@ -426,7 +428,7 @@ namespace Ansible.Become
}
private static SafeNativeHandle GetPrimaryTokenForUser(SecurityIdentifier sid,
- List requiredPrivileges = null, bool mostPrivileges = false)
+ List requiredPrivileges = null)
{
// According to CreateProcessWithTokenW we require a token with
// TOKEN_QUERY, TOKEN_DUPLICATE and TOKEN_ASSIGN_PRIMARY
@@ -436,9 +438,6 @@ namespace Ansible.Become
TokenAccessLevels.AssignPrimary |
TokenAccessLevels.Impersonate;
- SafeNativeHandle userToken = null;
- int privilegeCount = 0;
-
foreach (SafeNativeHandle hToken in TokenUtil.EnumerateUserTokens(sid, dwAccess))
{
// Filter out any Network logon tokens, using become with that is useless when S4U
@@ -449,10 +448,6 @@ namespace Ansible.Become
List actualPrivileges = TokenUtil.GetTokenPrivileges(hToken).Select(x => x.Name).ToList();
- // If the token has less or the same number of privileges than the current token, skip it.
- if (mostPrivileges && privilegeCount >= actualPrivileges.Count)
- continue;
-
// Check that the required privileges are on the token
if (requiredPrivileges != null)
{
@@ -464,22 +459,16 @@ namespace Ansible.Become
// Duplicate the token to convert it to a primary token with the access level required.
try
{
- userToken = TokenUtil.DuplicateToken(hToken, TokenAccessLevels.MaximumAllowed,
+ return TokenUtil.DuplicateToken(hToken, TokenAccessLevels.MaximumAllowed,
SecurityImpersonationLevel.Anonymous, TokenType.Primary);
- privilegeCount = actualPrivileges.Count;
}
catch (Process.Win32Exception)
{
continue;
}
-
- // If we don't care about getting the token with the most privileges, escape the loop as we already
- // have a token.
- if (!mostPrivileges)
- break;
}
- return userToken;
+ return null;
}
private static SafeNativeHandle GetS4UTokenForUser(SecurityIdentifier sid, LogonType logonType)
diff --git a/lib/ansible/module_utils/csharp/Ansible.Process.cs b/lib/ansible/module_utils/csharp/Ansible.Process.cs
index fc156b7a20e..a351dcd0493 100644
--- a/lib/ansible/module_utils/csharp/Ansible.Process.cs
+++ b/lib/ansible/module_utils/csharp/Ansible.Process.cs
@@ -397,7 +397,7 @@ namespace Ansible.Process
internal static Result WaitProcess(SafeFileHandle stdoutRead, SafeFileHandle stdoutWrite, SafeFileHandle stderrRead,
SafeFileHandle stderrWrite, FileStream stdinStream, byte[] stdin, IntPtr hProcess, string outputEncoding = null)
{
- // Default to using UTF-8 as the output encoding, this should be a sane default for most scenarios.
+ // Default to using UTF-8 as the output encoding, this should be a logical default for most scenarios.
outputEncoding = String.IsNullOrEmpty(outputEncoding) ? "utf-8" : outputEncoding;
Encoding encodingInstance = Encoding.GetEncoding(outputEncoding);
diff --git a/lib/ansible/module_utils/distro/__init__.py b/lib/ansible/module_utils/distro/__init__.py
index a8c29a6ce0a..8af439005fc 100644
--- a/lib/ansible/module_utils/distro/__init__.py
+++ b/lib/ansible/module_utils/distro/__init__.py
@@ -22,7 +22,7 @@ Compat distro library.
from __future__ import annotations
# The following makes it easier for us to script updates of the bundled code
-_BUNDLED_METADATA = {"pypi_name": "distro", "version": "1.6.0"}
+_BUNDLED_METADATA = {"pypi_name": "distro", "version": "1.9.0"}
# The following additional changes have been made:
# * Remove optparse since it is not needed for our use.
diff --git a/lib/ansible/module_utils/distro/_distro.py b/lib/ansible/module_utils/distro/_distro.py
index e57d6b68545..a67edae735c 100644
--- a/lib/ansible/module_utils/distro/_distro.py
+++ b/lib/ansible/module_utils/distro/_distro.py
@@ -1,4 +1,4 @@
-# Copyright 2015,2016,2017 Nir Cohen
+# Copyright 2015-2021 Nir Cohen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -59,7 +59,7 @@ except ImportError:
# Python 3.7
TypedDict = dict
-__version__ = "1.8.0"
+__version__ = "1.9.0"
class VersionDict(TypedDict):
@@ -129,6 +129,7 @@ _DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$")
# Base file names to be looked up for if _UNIXCONFDIR is not readable.
_DISTRO_RELEASE_BASENAMES = [
"SuSE-release",
+ "altlinux-release",
"arch-release",
"base-release",
"centos-release",
@@ -155,6 +156,8 @@ _DISTRO_RELEASE_IGNORE_BASENAMES = (
"system-release",
"plesk-release",
"iredmail-release",
+ "board-release",
+ "ec2_version",
)
@@ -247,6 +250,7 @@ def id() -> str:
"rocky" Rocky Linux
"aix" AIX
"guix" Guix System
+ "altlinux" ALT Linux
============== =========================================
If you have a need to get distros for reliable IDs added into this set,
@@ -995,10 +999,10 @@ class LinuxDistribution:
For details, see :func:`distro.info`.
"""
- return dict(
+ return InfoDict(
id=self.id(),
version=self.version(pretty, best),
- version_parts=dict(
+ version_parts=VersionDict(
major=self.major_version(best),
minor=self.minor_version(best),
build_number=self.build_number(best),
diff --git a/lib/ansible/module_utils/facts/collector.py b/lib/ansible/module_utils/facts/collector.py
index 616188b3db9..0983df7aad4 100644
--- a/lib/ansible/module_utils/facts/collector.py
+++ b/lib/ansible/module_utils/facts/collector.py
@@ -90,6 +90,8 @@ class BaseFactCollector:
def _transform_dict_keys(self, fact_dict):
'''update a dicts keys to use new names as transformed by self._transform_name'''
+ if fact_dict is None:
+ return {}
for old_key in list(fact_dict.keys()):
new_key = self._transform_name(old_key)
# pop the item by old_key and replace it using new_key
diff --git a/lib/ansible/module_utils/facts/default_collectors.py b/lib/ansible/module_utils/facts/default_collectors.py
index 1dcbd7c52a1..af4391576c0 100644
--- a/lib/ansible/module_utils/facts/default_collectors.py
+++ b/lib/ansible/module_utils/facts/default_collectors.py
@@ -53,6 +53,7 @@ from ansible.module_utils.facts.system.python import PythonFactCollector
from ansible.module_utils.facts.system.selinux import SelinuxFactCollector
from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
from ansible.module_utils.facts.system.ssh_pub_keys import SshPubKeyFactCollector
+from ansible.module_utils.facts.system.systemd import SystemdFactCollector
from ansible.module_utils.facts.system.user import UserFactCollector
from ansible.module_utils.facts.hardware.base import HardwareCollector
@@ -118,7 +119,8 @@ _general = [
EnvFactCollector,
LoadAvgFactCollector,
SshPubKeyFactCollector,
- UserFactCollector
+ UserFactCollector,
+ SystemdFactCollector
] # type: t.List[t.Type[BaseFactCollector]]
# virtual, this might also limit hardware/networking
diff --git a/lib/ansible/module_utils/facts/hardware/aix.py b/lib/ansible/module_utils/facts/hardware/aix.py
index db34fe147a6..c2a074bf8ea 100644
--- a/lib/ansible/module_utils/facts/hardware/aix.py
+++ b/lib/ansible/module_utils/facts/hardware/aix.py
@@ -195,34 +195,35 @@ class AIXHardware(Hardware):
# AIX does not have mtab but mount command is only source of info (or to use
# api calls to get same info)
mount_path = self.module.get_bin_path('mount')
- rc, mount_out, err = self.module.run_command(mount_path)
- if mount_out:
- for line in mount_out.split('\n'):
- fields = line.split()
- if len(fields) != 0 and fields[0] != 'node' and fields[0][0] != '-' and re.match('^/.*|^[a-zA-Z].*|^[0-9].*', fields[0]):
- if re.match('^/', fields[0]):
- # normal mount
- mount = fields[1]
- mount_info = {'mount': mount,
- 'device': fields[0],
- 'fstype': fields[2],
- 'options': fields[6],
- 'time': '%s %s %s' % (fields[3], fields[4], fields[5])}
- mount_info.update(get_mount_size(mount))
- else:
- # nfs or cifs based mount
- # in case of nfs if no mount options are provided on command line
- # add into fields empty string...
- if len(fields) < 8:
- fields.append("")
-
- mount_info = {'mount': fields[2],
- 'device': '%s:%s' % (fields[0], fields[1]),
- 'fstype': fields[3],
- 'options': fields[7],
- 'time': '%s %s %s' % (fields[4], fields[5], fields[6])}
-
- mounts.append(mount_info)
+ if mount_path:
+ rc, mount_out, err = self.module.run_command(mount_path)
+ if mount_out:
+ for line in mount_out.split('\n'):
+ fields = line.split()
+ if len(fields) != 0 and fields[0] != 'node' and fields[0][0] != '-' and re.match('^/.*|^[a-zA-Z].*|^[0-9].*', fields[0]):
+ if re.match('^/', fields[0]):
+ # normal mount
+ mount = fields[1]
+ mount_info = {'mount': mount,
+ 'device': fields[0],
+ 'fstype': fields[2],
+ 'options': fields[6],
+ 'time': '%s %s %s' % (fields[3], fields[4], fields[5])}
+ mount_info.update(get_mount_size(mount))
+ else:
+ # nfs or cifs based mount
+ # in case of nfs if no mount options are provided on command line
+ # add into fields empty string...
+ if len(fields) < 8:
+ fields.append("")
+
+ mount_info = {'mount': fields[2],
+ 'device': '%s:%s' % (fields[0], fields[1]),
+ 'fstype': fields[3],
+ 'options': fields[7],
+ 'time': '%s %s %s' % (fields[4], fields[5], fields[6])}
+
+ mounts.append(mount_info)
mount_facts['mounts'] = mounts
@@ -232,30 +233,31 @@ class AIXHardware(Hardware):
device_facts = {}
device_facts['devices'] = {}
- lsdev_cmd = self.module.get_bin_path('lsdev', True)
- lsattr_cmd = self.module.get_bin_path('lsattr', True)
- rc, out_lsdev, err = self.module.run_command(lsdev_cmd)
-
- for line in out_lsdev.splitlines():
- field = line.split()
-
- device_attrs = {}
- device_name = field[0]
- device_state = field[1]
- device_type = field[2:]
- lsattr_cmd_args = [lsattr_cmd, '-E', '-l', device_name]
- rc, out_lsattr, err = self.module.run_command(lsattr_cmd_args)
- for attr in out_lsattr.splitlines():
- attr_fields = attr.split()
- attr_name = attr_fields[0]
- attr_parameter = attr_fields[1]
- device_attrs[attr_name] = attr_parameter
-
- device_facts['devices'][device_name] = {
- 'state': device_state,
- 'type': ' '.join(device_type),
- 'attributes': device_attrs
- }
+ lsdev_cmd = self.module.get_bin_path('lsdev')
+ lsattr_cmd = self.module.get_bin_path('lsattr')
+ if lsdev_cmd and lsattr_cmd:
+ rc, out_lsdev, err = self.module.run_command(lsdev_cmd)
+
+ for line in out_lsdev.splitlines():
+ field = line.split()
+
+ device_attrs = {}
+ device_name = field[0]
+ device_state = field[1]
+ device_type = field[2:]
+ lsattr_cmd_args = [lsattr_cmd, '-E', '-l', device_name]
+ rc, out_lsattr, err = self.module.run_command(lsattr_cmd_args)
+ for attr in out_lsattr.splitlines():
+ attr_fields = attr.split()
+ attr_name = attr_fields[0]
+ attr_parameter = attr_fields[1]
+ device_attrs[attr_name] = attr_parameter
+
+ device_facts['devices'][device_name] = {
+ 'state': device_state,
+ 'type': ' '.join(device_type),
+ 'attributes': device_attrs
+ }
return device_facts
diff --git a/lib/ansible/module_utils/facts/hardware/darwin.py b/lib/ansible/module_utils/facts/hardware/darwin.py
index 74e4ce4e2af..ac159d5fd2b 100644
--- a/lib/ansible/module_utils/facts/hardware/darwin.py
+++ b/lib/ansible/module_utils/facts/hardware/darwin.py
@@ -19,7 +19,6 @@ from __future__ import annotations
import struct
import time
-from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts.sysctl import get_sysctl
@@ -41,7 +40,7 @@ class DarwinHardware(Hardware):
def populate(self, collected_facts=None):
hardware_facts = {}
- self.sysctl = get_sysctl(self.module, ['hw', 'machdep', 'kern'])
+ self.sysctl = get_sysctl(self.module, ['hw', 'machdep', 'kern', 'hw.model'])
mac_facts = self.get_mac_facts()
cpu_facts = self.get_cpu_facts()
memory_facts = self.get_memory_facts()
@@ -67,9 +66,8 @@ class DarwinHardware(Hardware):
def get_mac_facts(self):
mac_facts = {}
- rc, out, err = self.module.run_command("sysctl hw.model")
- if rc == 0:
- mac_facts['model'] = mac_facts['product_name'] = out.splitlines()[-1].split()[1]
+ if 'hw.model' in self.sysctl:
+ mac_facts['model'] = mac_facts['product_name'] = self.sysctl['hw.model']
mac_facts['osversion'] = self.sysctl['kern.osversion']
mac_facts['osrevision'] = self.sysctl['kern.osrevision']
@@ -96,44 +94,49 @@ class DarwinHardware(Hardware):
total_used = 0
page_size = 4096
- try:
- vm_stat_command = get_bin_path('vm_stat')
- except ValueError:
+
+ vm_stat_command = self.module.get_bin_path('vm_stat')
+ if vm_stat_command is None:
return memory_facts
- rc, out, err = self.module.run_command(vm_stat_command)
- if rc == 0:
- # Free = Total - (Wired + active + inactive)
- # Get a generator of tuples from the command output so we can later
- # turn it into a dictionary
- memory_stats = (line.rstrip('.').split(':', 1) for line in out.splitlines())
-
- # Strip extra left spaces from the value
- memory_stats = dict((k, v.lstrip()) for k, v in memory_stats)
-
- for k, v in memory_stats.items():
- try:
- memory_stats[k] = int(v)
- except ValueError:
- # Most values convert cleanly to integer values but if the field does
- # not convert to an integer, just leave it alone.
- pass
-
- if memory_stats.get('Pages wired down'):
- total_used += memory_stats['Pages wired down'] * page_size
- if memory_stats.get('Pages active'):
- total_used += memory_stats['Pages active'] * page_size
- if memory_stats.get('Pages inactive'):
- total_used += memory_stats['Pages inactive'] * page_size
-
- memory_facts['memfree_mb'] = memory_facts['memtotal_mb'] - (total_used // 1024 // 1024)
+ if vm_stat_command:
+ rc, out, err = self.module.run_command(vm_stat_command)
+ if rc == 0:
+ # Free = Total - (Wired + active + inactive)
+ # Get a generator of tuples from the command output so we can later
+ # turn it into a dictionary
+ memory_stats = (line.rstrip('.').split(':', 1) for line in out.splitlines())
+
+ # Strip extra left spaces from the value
+ memory_stats = dict((k, v.lstrip()) for k, v in memory_stats)
+
+ for k, v in memory_stats.items():
+ try:
+ memory_stats[k] = int(v)
+ except ValueError:
+ # Most values convert cleanly to integer values but if the field does
+ # not convert to an integer, just leave it alone.
+ pass
+
+ if memory_stats.get('Pages wired down'):
+ total_used += memory_stats['Pages wired down'] * page_size
+ if memory_stats.get('Pages active'):
+ total_used += memory_stats['Pages active'] * page_size
+ if memory_stats.get('Pages inactive'):
+ total_used += memory_stats['Pages inactive'] * page_size
+
+ memory_facts['memfree_mb'] = memory_facts['memtotal_mb'] - (total_used // 1024 // 1024)
return memory_facts
def get_uptime_facts(self):
+
# On Darwin, the default format is annoying to parse.
# Use -b to get the raw value and decode it.
sysctl_cmd = self.module.get_bin_path('sysctl')
+ if not sysctl_cmd:
+ return {}
+
cmd = [sysctl_cmd, '-b', 'kern.boottime']
# We need to get raw bytes, not UTF-8.
diff --git a/lib/ansible/module_utils/facts/hardware/freebsd.py b/lib/ansible/module_utils/facts/hardware/freebsd.py
index e44da3aaacc..c7f6c6c48b6 100644
--- a/lib/ansible/module_utils/facts/hardware/freebsd.py
+++ b/lib/ansible/module_utils/facts/hardware/freebsd.py
@@ -23,7 +23,6 @@ import time
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts.timeout import TimeoutError, timeout
-
from ansible.module_utils.facts.utils import get_file_content, get_mount_size
@@ -173,13 +172,50 @@ class FreeBSDHardware(Hardware):
sysdir = '/dev'
device_facts['devices'] = {}
- drives = re.compile(r'(ada?\d+|da\d+|a?cd\d+)') # TODO: rc, disks, err = self.module.run_command("/sbin/sysctl kern.disks")
- slices = re.compile(r'(ada?\d+s\d+\w*|da\d+s\d+\w*)')
+ # TODO: rc, disks, err = self.module.run_command("/sbin/sysctl kern.disks")
+ drives = re.compile(
+ r"""(?x)(
+ (?:
+ ada? # ATA/SATA disk device
+ |da # SCSI disk device
+ |a?cd # SCSI CDROM drive
+ |amrd # AMI MegaRAID drive
+ |idad # Compaq RAID array
+ |ipsd # IBM ServeRAID RAID array
+ |md # md(4) disk device
+ |mfid # LSI MegaRAID SAS array
+ |mlxd # Mylex RAID disk
+ |twed # 3ware ATA RAID array
+ |vtbd # VirtIO Block Device
+ )\d+
+ )
+ """
+ )
+
+ slices = re.compile(
+ r"""(?x)(
+ (?:
+ ada? # ATA/SATA disk device
+ |a?cd # SCSI CDROM drive
+ |amrd # AMI MegaRAID drive
+ |da # SCSI disk device
+ |idad # Compaq RAID array
+ |ipsd # IBM ServeRAID RAID array
+ |md # md(4) disk device
+ |mfid # LSI MegaRAID SAS array
+ |mlxd # Mylex RAID disk
+ |twed # 3ware ATA RAID array
+ |vtbd # VirtIO Block Device
+ )\d+[ps]\d+\w*
+ )
+ """
+ )
+
if os.path.isdir(sysdir):
dirlist = sorted(os.listdir(sysdir))
for device in dirlist:
d = drives.match(device)
- if d:
+ if d and d.group(1) not in device_facts['devices']:
device_facts['devices'][d.group(1)] = []
s = slices.match(device)
if s:
@@ -216,18 +252,22 @@ class FreeBSDHardware(Hardware):
'product_version': 'system-version',
'system_vendor': 'system-manufacturer',
}
+ if dmi_bin is None:
+ dmi_facts = dict.fromkeys(
+ DMI_DICT.keys(),
+ 'NA'
+ )
+ return dmi_facts
+
for (k, v) in DMI_DICT.items():
- if dmi_bin is not None:
- (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
- if rc == 0:
- # Strip out commented lines (specific dmidecode output)
- # FIXME: why add the fact and then test if it is json?
- dmi_facts[k] = ''.join([line for line in out.splitlines() if not line.startswith('#')])
- try:
- json.dumps(dmi_facts[k])
- except UnicodeDecodeError:
- dmi_facts[k] = 'NA'
- else:
+ (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
+ if rc == 0:
+ # Strip out commented lines (specific dmidecode output)
+ # FIXME: why add the fact and then test if it is json?
+ dmi_facts[k] = ''.join([line for line in out.splitlines() if not line.startswith('#')])
+ try:
+ json.dumps(dmi_facts[k])
+ except UnicodeDecodeError:
dmi_facts[k] = 'NA'
else:
dmi_facts[k] = 'NA'
diff --git a/lib/ansible/module_utils/facts/hardware/hpux.py b/lib/ansible/module_utils/facts/hardware/hpux.py
index abb9dada663..efb63a98c2e 100644
--- a/lib/ansible/module_utils/facts/hardware/hpux.py
+++ b/lib/ansible/module_utils/facts/hardware/hpux.py
@@ -40,6 +40,9 @@ class HPUXHardware(Hardware):
def populate(self, collected_facts=None):
hardware_facts = {}
+ # TODO: very inefficient calls to machinfo,
+ # should just make one and then deal with finding the data (see facts/sysctl)
+ # but not going to change unless there is hp/ux for testing
cpu_facts = self.get_cpu_facts(collected_facts=collected_facts)
memory_facts = self.get_memory_facts()
hw_facts = self.get_hw_facts()
diff --git a/lib/ansible/module_utils/facts/hardware/linux.py b/lib/ansible/module_utils/facts/hardware/linux.py
index 605dbe6add7..293c75a2509 100644
--- a/lib/ansible/module_utils/facts/hardware/linux.py
+++ b/lib/ansible/module_utils/facts/hardware/linux.py
@@ -24,12 +24,9 @@ import re
import sys
import time
-from multiprocessing import cpu_count
-from multiprocessing.pool import ThreadPool
-
-from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils._internal._concurrent import _futures
from ansible.module_utils.common.locale import get_best_parsable_locale
-from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.common.text.formatters import bytes_to_human
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts.utils import get_file_content, get_file_lines, get_mount_size
@@ -91,6 +88,7 @@ class LinuxHardware(Hardware):
cpu_facts = self.get_cpu_facts(collected_facts=collected_facts)
memory_facts = self.get_memory_facts()
dmi_facts = self.get_dmi_facts()
+ sysinfo_facts = self.get_sysinfo_facts()
device_facts = self.get_device_facts()
uptime_facts = self.get_uptime_facts()
lvm_facts = self.get_lvm_facts()
@@ -104,6 +102,7 @@ class LinuxHardware(Hardware):
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
hardware_facts.update(dmi_facts)
+ hardware_facts.update(sysinfo_facts)
hardware_facts.update(device_facts)
hardware_facts.update(uptime_facts)
hardware_facts.update(lvm_facts)
@@ -208,6 +207,9 @@ class LinuxHardware(Hardware):
if 'vme' not in val:
xen_paravirt = True
+ if key == "flags":
+ cpu_facts['flags'] = val.split()
+
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
if key in ['model name', 'Processor', 'vendor_id', 'cpu', 'Vendor', 'processor']:
@@ -300,12 +302,9 @@ class LinuxHardware(Hardware):
)
except AttributeError:
# In Python < 3.3, os.sched_getaffinity() is not available
- try:
- cmd = get_bin_path('nproc')
- except ValueError:
- pass
- else:
- rc, out, _err = self.module.run_command(cmd)
+ nproc_cmd = self.module.get_bin_path('nproc')
+ if nproc_cmd is not None:
+ rc, out, _err = self.module.run_command(nproc_cmd)
if rc == 0:
cpu_facts['processor_nproc'] = int(out)
@@ -370,7 +369,6 @@ class LinuxHardware(Hardware):
else:
# Fall back to using dmidecode, if available
- dmi_bin = self.module.get_bin_path('dmidecode')
DMI_DICT = {
'bios_date': 'bios-release-date',
'bios_vendor': 'bios-vendor',
@@ -391,25 +389,54 @@ class LinuxHardware(Hardware):
'product_version': 'system-version',
'system_vendor': 'system-manufacturer',
}
+ dmi_bin = self.module.get_bin_path('dmidecode')
+ if dmi_bin is None:
+ dmi_facts = dict.fromkeys(
+ DMI_DICT.keys(),
+ 'NA'
+ )
+ return dmi_facts
+
for (k, v) in DMI_DICT.items():
- if dmi_bin is not None:
- (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
- if rc == 0:
- # Strip out commented lines (specific dmidecode output)
- thisvalue = ''.join([line for line in out.splitlines() if not line.startswith('#')])
- try:
- json.dumps(thisvalue)
- except UnicodeDecodeError:
- thisvalue = "NA"
+ (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
+ if rc == 0:
+ # Strip out commented lines (specific dmidecode output)
+ thisvalue = ''.join([line for line in out.splitlines() if not line.startswith('#')])
+ try:
+ json.dumps(thisvalue)
+ except UnicodeDecodeError:
+ thisvalue = "NA"
- dmi_facts[k] = thisvalue
- else:
- dmi_facts[k] = 'NA'
+ dmi_facts[k] = thisvalue
else:
dmi_facts[k] = 'NA'
return dmi_facts
+ def get_sysinfo_facts(self):
+ """Fetch /proc/sysinfo facts from s390 Linux on IBM Z"""
+ if not os.path.exists('/proc/sysinfo'):
+ return {}
+
+ sysinfo_facts = dict.fromkeys(
+ ('system_vendor', 'product_version', 'product_serial', 'product_name', 'product_uuid'),
+ 'NA'
+ )
+ sysinfo_re = re.compile(
+ r'''
+ ^
+ (?:Manufacturer:\s+(?P.+))|
+ (?:Type:\s+(?P.+))|
+ (?:Sequence\ Code:\s+0+(?P.+))
+ $
+ ''',
+ re.VERBOSE | re.MULTILINE
+ )
+ data = get_file_content('/proc/sysinfo')
+ for match in sysinfo_re.finditer(data):
+ sysinfo_facts.update({k: v for k, v in match.groupdict().items() if v is not None})
+ return sysinfo_facts
+
def _run_lsblk(self, lsblk_path):
# call lsblk and collect all uuids
# --exclude 2 makes lsblk ignore floppy disks, which are slower to answer than typical timeouts
@@ -548,7 +575,7 @@ class LinuxHardware(Hardware):
# start threads to query each mount
results = {}
- pool = ThreadPool(processes=min(len(mtab_entries), cpu_count()))
+ executor = _futures.DaemonThreadPoolExecutor()
maxtime = timeout.GATHER_TIMEOUT or timeout.DEFAULT_GATHER_TIMEOUT
for fields in mtab_entries:
# Transform octal escape sequences
@@ -572,30 +599,29 @@ class LinuxHardware(Hardware):
if not self.MTAB_BIND_MOUNT_RE.match(options):
mount_info['options'] += ",bind"
- results[mount] = {'info': mount_info,
- 'extra': pool.apply_async(self.get_mount_info, (mount, device, uuids)),
- 'timelimit': time.time() + maxtime}
+ results[mount] = {'info': mount_info, 'timelimit': time.monotonic() + maxtime}
+ results[mount]['extra'] = executor.submit(self.get_mount_info, mount, device, uuids)
- pool.close() # done with new workers, start gc
+ # done with spawning new workers, start gc
+ executor.shutdown()
- # wait for workers and get results
- while results:
+ while results: # wait for workers and get results
for mount in list(results):
done = False
res = results[mount]['extra']
try:
- if res.ready():
+ if res.done():
done = True
- if res.successful():
- mount_size, uuid = res.get()
+ if res.exception() is None:
+ mount_size, uuid = res.result()
if mount_size:
results[mount]['info'].update(mount_size)
results[mount]['info']['uuid'] = uuid or 'N/A'
else:
# failed, try to find out why, if 'res.successful' we know there are no exceptions
- results[mount]['info']['note'] = 'Could not get extra information: %s.' % (to_text(res.get()))
+ results[mount]['info']['note'] = f'Could not get extra information: {res.exception()}'
- elif time.time() > results[mount]['timelimit']:
+ elif time.monotonic() > results[mount]['timelimit']:
done = True
self.module.warn("Timeout exceeded when getting mount info for %s" % mount)
results[mount]['info']['note'] = 'Could not get extra information due to timeout'
@@ -744,10 +770,24 @@ class LinuxHardware(Hardware):
if serial:
d['serial'] = serial
- for key, test in [('removable', '/removable'),
- ('support_discard', '/queue/discard_granularity'),
- ]:
- d[key] = get_file_content(sysdir + test)
+ d['removable'] = get_file_content(sysdir + '/removable')
+
+ # Historically, `support_discard` simply returned the value of
+ # `/sys/block/{device}/queue/discard_granularity`. When its value
+ # is `0`, then the block device doesn't support discards;
+ # _however_, it being greater than zero doesn't necessarily mean
+ # that the block device _does_ support discards.
+ #
+ # Another indication that a block device doesn't support discards
+ # is `/sys/block/{device}/queue/discard_max_hw_bytes` being equal
+ # to `0` (with the same caveat as above). So if either of those are
+ # `0`, set `support_discard` to zero, otherwise set it to the value
+ # of `discard_granularity` for backwards compatibility.
+ d['support_discard'] = (
+ '0'
+ if get_file_content(sysdir + '/queue/discard_max_hw_bytes') == '0'
+ else get_file_content(sysdir + '/queue/discard_granularity')
+ )
if diskname in devs_wwn:
d['wwn'] = devs_wwn[diskname]
@@ -765,12 +805,12 @@ class LinuxHardware(Hardware):
part['links'][link_type] = link_values.get(partname, [])
part['start'] = get_file_content(part_sysdir + "/start", 0)
- part['sectors'] = get_file_content(part_sysdir + "/size", 0)
-
part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size")
if not part['sectorsize']:
part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size", 512)
- part['size'] = bytes_to_human((float(part['sectors']) * 512.0))
+ # sysfs sectorcount assumes 512 blocksize. Convert using the correct sectorsize
+ part['sectors'] = int(get_file_content(part_sysdir + "/size", 0)) * 512 // int(part['sectorsize'])
+ part['size'] = bytes_to_human(float(part['sectors']) * float(part['sectorsize']))
part['uuid'] = get_partition_uuid(partname)
self.get_holders(part, part_sysdir)
@@ -784,13 +824,14 @@ class LinuxHardware(Hardware):
if m:
d['scheduler_mode'] = m.group(2)
- d['sectors'] = get_file_content(sysdir + "/size")
- if not d['sectors']:
- d['sectors'] = 0
d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size")
if not d['sectorsize']:
d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size", 512)
- d['size'] = bytes_to_human(float(d['sectors']) * 512.0)
+ # sysfs sectorcount assumes 512 blocksize. Convert using the correct sectorsize
+ d['sectors'] = int(get_file_content(sysdir + "/size", 0)) * 512 // int(d['sectorsize'])
+ if not d['sectors']:
+ d['sectors'] = 0
+ d['size'] = bytes_to_human(float(d['sectors']) * float(d['sectorsize']))
d['host'] = ""
@@ -833,21 +874,24 @@ class LinuxHardware(Hardware):
""" Get LVM Facts if running as root and lvm utils are available """
lvm_facts = {'lvm': 'N/A'}
+ vgs_cmd = self.module.get_bin_path('vgs')
+ if vgs_cmd is None:
+ return lvm_facts
- if os.getuid() == 0 and self.module.get_bin_path('vgs'):
+ if os.getuid() == 0:
lvm_util_options = '--noheadings --nosuffix --units g --separator ,'
- vgs_path = self.module.get_bin_path('vgs')
# vgs fields: VG #PV #LV #SN Attr VSize VFree
vgs = {}
- if vgs_path:
- rc, vg_lines, err = self.module.run_command('%s %s' % (vgs_path, lvm_util_options))
- for vg_line in vg_lines.splitlines():
- items = vg_line.strip().split(',')
- vgs[items[0]] = {'size_g': items[-2],
- 'free_g': items[-1],
- 'num_lvs': items[2],
- 'num_pvs': items[1]}
+ rc, vg_lines, err = self.module.run_command('%s %s' % (vgs_cmd, lvm_util_options))
+ for vg_line in vg_lines.splitlines():
+ items = vg_line.strip().split(',')
+ vgs[items[0]] = {
+ 'size_g': items[-2],
+ 'free_g': items[-1],
+ 'num_lvs': items[2],
+ 'num_pvs': items[1]
+ }
lvs_path = self.module.get_bin_path('lvs')
# lvs fields:
diff --git a/lib/ansible/module_utils/facts/hardware/netbsd.py b/lib/ansible/module_utils/facts/hardware/netbsd.py
index 7d024198392..69ac583df64 100644
--- a/lib/ansible/module_utils/facts/hardware/netbsd.py
+++ b/lib/ansible/module_utils/facts/hardware/netbsd.py
@@ -162,6 +162,9 @@ class NetBSDHardware(Hardware):
def get_uptime_facts(self):
# On NetBSD, we need to call sysctl with -n to get this value as an int.
sysctl_cmd = self.module.get_bin_path('sysctl')
+ if sysctl_cmd is None:
+ return {}
+
cmd = [sysctl_cmd, '-n', 'kern.boottime']
rc, out, err = self.module.run_command(cmd)
diff --git a/lib/ansible/module_utils/facts/hardware/openbsd.py b/lib/ansible/module_utils/facts/hardware/openbsd.py
index 751ee6165dd..b5f08c0092b 100644
--- a/lib/ansible/module_utils/facts/hardware/openbsd.py
+++ b/lib/ansible/module_utils/facts/hardware/openbsd.py
@@ -54,7 +54,7 @@ class OpenBSDHardware(Hardware):
hardware_facts.update(self.get_dmi_facts())
hardware_facts.update(self.get_uptime_facts())
- # storage devices notorioslly prone to hang/block so they are under a timeout
+ # storage devices notoriously prone to hang/block so they are under a timeout
try:
hardware_facts.update(self.get_mount_facts())
except timeout.TimeoutError:
@@ -113,6 +113,9 @@ class OpenBSDHardware(Hardware):
def get_uptime_facts(self):
# On openbsd, we need to call it with -n to get this value as an int.
sysctl_cmd = self.module.get_bin_path('sysctl')
+ if sysctl_cmd is None:
+ return {}
+
cmd = [sysctl_cmd, '-n', 'kern.boottime']
rc, out, err = self.module.run_command(cmd)
diff --git a/lib/ansible/module_utils/facts/hardware/sunos.py b/lib/ansible/module_utils/facts/hardware/sunos.py
index 62eeafc3c98..134e59a8c2c 100644
--- a/lib/ansible/module_utils/facts/hardware/sunos.py
+++ b/lib/ansible/module_utils/facts/hardware/sunos.py
@@ -172,7 +172,13 @@ class SunOSHardware(Hardware):
rc, platform, err = self.module.run_command('/usr/bin/uname -i')
platform_sbin = '/usr/platform/' + platform.rstrip() + '/sbin'
- prtdiag_path = self.module.get_bin_path("prtdiag", opt_dirs=[platform_sbin])
+ prtdiag_path = self.module.get_bin_path(
+ "prtdiag",
+ opt_dirs=[platform_sbin]
+ )
+ if prtdiag_path is None:
+ return dmi_facts
+
rc, out, err = self.module.run_command(prtdiag_path)
# rc returns 1
if out:
diff --git a/lib/ansible/module_utils/facts/network/aix.py b/lib/ansible/module_utils/facts/network/aix.py
index 29a679d84b1..17516d927d8 100644
--- a/lib/ansible/module_utils/facts/network/aix.py
+++ b/lib/ansible/module_utils/facts/network/aix.py
@@ -32,20 +32,21 @@ class AIXNetwork(GenericBsdIfconfigNetwork):
interface = dict(v4={}, v6={})
netstat_path = self.module.get_bin_path('netstat')
-
- if netstat_path:
- rc, out, err = self.module.run_command([netstat_path, '-nr'])
-
- lines = out.splitlines()
- for line in lines:
- words = line.split()
- if len(words) > 1 and words[0] == 'default':
- if '.' in words[1]:
- interface['v4']['gateway'] = words[1]
- interface['v4']['interface'] = words[5]
- elif ':' in words[1]:
- interface['v6']['gateway'] = words[1]
- interface['v6']['interface'] = words[5]
+ if netstat_path is None:
+ return interface['v4'], interface['v6']
+
+ rc, out, err = self.module.run_command([netstat_path, '-nr'])
+
+ lines = out.splitlines()
+ for line in lines:
+ words = line.split()
+ if len(words) > 1 and words[0] == 'default':
+ if '.' in words[1]:
+ interface['v4']['gateway'] = words[1]
+ interface['v4']['interface'] = words[5]
+ elif ':' in words[1]:
+ interface['v6']['gateway'] = words[1]
+ interface['v6']['interface'] = words[5]
return interface['v4'], interface['v6']
@@ -58,9 +59,7 @@ class AIXNetwork(GenericBsdIfconfigNetwork):
all_ipv6_addresses=[],
)
- uname_rc = None
- uname_out = None
- uname_err = None
+ uname_rc = uname_out = uname_err = None
uname_path = self.module.get_bin_path('uname')
if uname_path:
uname_rc, uname_out, uname_err = self.module.run_command([uname_path, '-W'])
diff --git a/lib/ansible/module_utils/facts/network/fc_wwn.py b/lib/ansible/module_utils/facts/network/fc_wwn.py
index f53cc53927d..fb846cc08a8 100644
--- a/lib/ansible/module_utils/facts/network/fc_wwn.py
+++ b/lib/ansible/module_utils/facts/network/fc_wwn.py
@@ -82,7 +82,10 @@ class FcWwnInitiatorFactCollector(BaseFactCollector):
fc_facts['fibre_channel_wwn'].append(data[-1].rstrip())
elif sys.platform.startswith('hp-ux'):
cmd = module.get_bin_path('ioscan')
- fcmsu_cmd = module.get_bin_path('fcmsutil', opt_dirs=['/opt/fcms/bin'])
+ fcmsu_cmd = module.get_bin_path(
+ 'fcmsutil',
+ opt_dirs=['/opt/fcms/bin'],
+ )
# go ahead if we have both commands available
if cmd and fcmsu_cmd:
# ioscan / get list of available fibre-channel devices (fcd)
diff --git a/lib/ansible/module_utils/facts/network/hpux.py b/lib/ansible/module_utils/facts/network/hpux.py
index 61e1bdc644f..2f01825bb24 100644
--- a/lib/ansible/module_utils/facts/network/hpux.py
+++ b/lib/ansible/module_utils/facts/network/hpux.py
@@ -20,7 +20,7 @@ from ansible.module_utils.facts.network.base import Network, NetworkCollector
class HPUXNetwork(Network):
"""
- HP-UX-specifig subclass of Network. Defines networking facts:
+ HP-UX-specific subclass of Network. Defines networking facts:
- default_interface
- interfaces (a list of interface names)
- interface_ dictionary of ipv4 address information.
@@ -29,7 +29,10 @@ class HPUXNetwork(Network):
def populate(self, collected_facts=None):
network_facts = {}
- netstat_path = self.module.get_bin_path('netstat')
+ netstat_path = self.module.get_bin_path(
+ 'netstat',
+ opt_dirs=['/usr/bin']
+ )
if netstat_path is None:
return network_facts
@@ -46,7 +49,14 @@ class HPUXNetwork(Network):
def get_default_interfaces(self):
default_interfaces = {}
- rc, out, err = self.module.run_command("/usr/bin/netstat -nr")
+ netstat_path = self.module.get_bin_path(
+ 'netstat',
+ opt_dirs=['/usr/bin']
+ )
+
+ if netstat_path is None:
+ return default_interfaces
+ rc, out, err = self.module.run_command("%s -nr" % netstat_path)
lines = out.splitlines()
for line in lines:
words = line.split()
@@ -59,7 +69,14 @@ class HPUXNetwork(Network):
def get_interfaces_info(self):
interfaces = {}
- rc, out, err = self.module.run_command("/usr/bin/netstat -niw")
+ netstat_path = self.module.get_bin_path(
+ 'netstat',
+ opt_dirs=['/usr/bin']
+ )
+
+ if netstat_path is None:
+ return interfaces
+ rc, out, err = self.module.run_command("%s -niw" % netstat_path)
lines = out.splitlines()
for line in lines:
words = line.split()
diff --git a/lib/ansible/module_utils/facts/network/iscsi.py b/lib/ansible/module_utils/facts/network/iscsi.py
index 8f7a61590fb..48f98a682bd 100644
--- a/lib/ansible/module_utils/facts/network/iscsi.py
+++ b/lib/ansible/module_utils/facts/network/iscsi.py
@@ -21,7 +21,6 @@ import sys
import ansible.module_utils.compat.typing as t
-from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.network.base import NetworkCollector
@@ -80,9 +79,8 @@ class IscsiInitiatorNetworkCollector(NetworkCollector):
iscsi_facts['iscsi_iqn'] = line.split('=', 1)[1]
break
elif sys.platform.startswith('aix'):
- try:
- cmd = get_bin_path('lsattr')
- except ValueError:
+ cmd = module.get_bin_path('lsattr')
+ if cmd is None:
return iscsi_facts
cmd += " -E -l iscsi0"
@@ -92,10 +90,11 @@ class IscsiInitiatorNetworkCollector(NetworkCollector):
iscsi_facts['iscsi_iqn'] = line.split()[1].rstrip()
elif sys.platform.startswith('hp-ux'):
- # try to find it in the default PATH and opt_dirs
- try:
- cmd = get_bin_path('iscsiutil', opt_dirs=['/opt/iscsi/bin'])
- except ValueError:
+ cmd = module.get_bin_path(
+ 'iscsiutil',
+ opt_dirs=['/opt/iscsi/bin']
+ )
+ if cmd is None:
return iscsi_facts
cmd += " -l"
diff --git a/lib/ansible/module_utils/facts/network/linux.py b/lib/ansible/module_utils/facts/network/linux.py
index 560cd255f37..d199d5a6ae3 100644
--- a/lib/ansible/module_utils/facts/network/linux.py
+++ b/lib/ansible/module_utils/facts/network/linux.py
@@ -295,8 +295,6 @@ class LinuxNetwork(Network):
if not address == '::1':
ips['all_ipv6_addresses'].append(address)
- ip_path = self.module.get_bin_path("ip")
-
args = [ip_path, 'addr', 'show', 'primary', 'dev', device]
rc, primary_data, stderr = self.module.run_command(args, errors='surrogate_then_replace')
if rc == 0:
diff --git a/lib/ansible/module_utils/facts/other/facter.py b/lib/ansible/module_utils/facts/other/facter.py
index ec1771ecfac..41b3cea7c92 100644
--- a/lib/ansible/module_utils/facts/other/facter.py
+++ b/lib/ansible/module_utils/facts/other/facter.py
@@ -22,8 +22,14 @@ class FacterFactCollector(BaseFactCollector):
namespace=namespace)
def find_facter(self, module):
- facter_path = module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
- cfacter_path = module.get_bin_path('cfacter', opt_dirs=['/opt/puppetlabs/bin'])
+ facter_path = module.get_bin_path(
+ 'facter',
+ opt_dirs=['/opt/puppetlabs/bin']
+ )
+ cfacter_path = module.get_bin_path(
+ 'cfacter',
+ opt_dirs=['/opt/puppetlabs/bin']
+ )
# Prefer to use cfacter if available
if cfacter_path is not None:
@@ -73,7 +79,6 @@ class FacterFactCollector(BaseFactCollector):
try:
facter_dict = json.loads(facter_output)
except Exception:
- # FIXME: maybe raise a FactCollectorError with some info attrs?
- pass
+ module.warn("Failed to parse facter facts")
return facter_dict
diff --git a/lib/ansible/module_utils/facts/other/ohai.py b/lib/ansible/module_utils/facts/other/ohai.py
index 75968ef29f1..8f0e4dcaecb 100644
--- a/lib/ansible/module_utils/facts/other/ohai.py
+++ b/lib/ansible/module_utils/facts/other/ohai.py
@@ -36,10 +36,11 @@ class OhaiFactCollector(BaseFactCollector):
namespace=namespace)
def find_ohai(self, module):
- ohai_path = module.get_bin_path('ohai')
- return ohai_path
+ return module.get_bin_path(
+ 'ohai'
+ )
- def run_ohai(self, module, ohai_path,):
+ def run_ohai(self, module, ohai_path):
rc, out, err = module.run_command(ohai_path)
return rc, out, err
@@ -67,7 +68,6 @@ class OhaiFactCollector(BaseFactCollector):
try:
ohai_facts = json.loads(ohai_output)
except Exception:
- # FIXME: useful error, logging, something...
- pass
+ module.warn("Failed to parse ohai facts")
return ohai_facts
diff --git a/lib/ansible/module_utils/facts/packages.py b/lib/ansible/module_utils/facts/packages.py
index 21be56fab26..b5b9bcb35ef 100644
--- a/lib/ansible/module_utils/facts/packages.py
+++ b/lib/ansible/module_utils/facts/packages.py
@@ -3,24 +3,29 @@
from __future__ import annotations
+import ansible.module_utils.compat.typing as t
+
from abc import ABCMeta, abstractmethod
from ansible.module_utils.six import with_metaclass
+from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
from ansible.module_utils.common._utils import get_all_subclasses
def get_all_pkg_managers():
- return {obj.__name__.lower(): obj for obj in get_all_subclasses(PkgMgr) if obj not in (CLIMgr, LibMgr)}
+ return {obj.__name__.lower(): obj for obj in get_all_subclasses(PkgMgr) if obj not in (CLIMgr, LibMgr, RespawningLibMgr)}
class PkgMgr(with_metaclass(ABCMeta, object)): # type: ignore[misc]
@abstractmethod
- def is_available(self):
+ def is_available(self, handle_exceptions):
# This method is supposed to return True/False if the package manager is currently installed/usable
# It can also 'prep' the required systems in the process of detecting availability
+ # If handle_exceptions is false it should raise exceptions related to manager discovery instead of handling them.
pass
@abstractmethod
@@ -58,16 +63,50 @@ class LibMgr(PkgMgr):
self._lib = None
super(LibMgr, self).__init__()
- def is_available(self):
+ def is_available(self, handle_exceptions=True):
found = False
try:
self._lib = __import__(self.LIB)
found = True
except ImportError:
- pass
+ if not handle_exceptions:
+ raise Exception(missing_required_lib(self.LIB))
return found
+class RespawningLibMgr(LibMgr):
+
+ CLI_BINARIES = [] # type: t.List[str]
+ INTERPRETERS = ['/usr/bin/python3']
+
+ def is_available(self, handle_exceptions=True):
+ if super(RespawningLibMgr, self).is_available():
+ return True
+
+ for binary in self.CLI_BINARIES:
+ try:
+ bin_path = get_bin_path(binary)
+ except ValueError:
+ # Not an interesting exception to raise, just a speculative probe
+ continue
+ else:
+ # It looks like this package manager is installed
+ if not has_respawned():
+ # See if respawning will help
+ interpreter_path = probe_interpreters_for_module(self.INTERPRETERS, self.LIB)
+ if interpreter_path:
+ respawn_module(interpreter_path)
+ # The module will exit when the respawned copy completes
+
+ if not handle_exceptions:
+ raise Exception(f'Found executable at {bin_path}. {missing_required_lib(self.LIB)}')
+
+ if not handle_exceptions:
+ raise Exception(missing_required_lib(self.LIB))
+
+ return False
+
+
class CLIMgr(PkgMgr):
CLI = None # type: str | None
@@ -77,9 +116,12 @@ class CLIMgr(PkgMgr):
self._cli = None
super(CLIMgr, self).__init__()
- def is_available(self):
+ def is_available(self, handle_exceptions=True):
+ found = False
try:
self._cli = get_bin_path(self.CLI)
+ found = True
except ValueError:
- return False
- return True
+ if not handle_exceptions:
+ raise
+ return found
diff --git a/lib/ansible/module_utils/facts/sysctl.py b/lib/ansible/module_utils/facts/sysctl.py
index 1f94091200b..639e77c41f0 100644
--- a/lib/ansible/module_utils/facts/sysctl.py
+++ b/lib/ansible/module_utils/facts/sysctl.py
@@ -21,41 +21,43 @@ from ansible.module_utils.common.text.converters import to_text
def get_sysctl(module, prefixes):
- sysctl_cmd = module.get_bin_path('sysctl')
- cmd = [sysctl_cmd]
- cmd.extend(prefixes)
sysctl = dict()
-
- try:
- rc, out, err = module.run_command(cmd)
- except (IOError, OSError) as e:
- module.warn('Unable to read sysctl: %s' % to_text(e))
- rc = 1
-
- if rc == 0:
- key = ''
- value = ''
- for line in out.splitlines():
- if not line.strip():
- continue
-
- if line.startswith(' '):
- # handle multiline values, they will not have a starting key
- # Add the newline back in so people can split on it to parse
- # lines if they need to.
- value += '\n' + line
- continue
+ sysctl_cmd = module.get_bin_path('sysctl')
+ if sysctl_cmd is not None:
+
+ cmd = [sysctl_cmd]
+ cmd.extend(prefixes)
+
+ try:
+ rc, out, err = module.run_command(cmd)
+ except (IOError, OSError) as e:
+ module.warn('Unable to read sysctl: %s' % to_text(e))
+ rc = 1
+
+ if rc == 0:
+ key = ''
+ value = ''
+ for line in out.splitlines():
+ if not line.strip():
+ continue
+
+ if line.startswith(' '):
+ # handle multiline values, they will not have a starting key
+ # Add the newline back in so people can split on it to parse
+ # lines if they need to.
+ value += '\n' + line
+ continue
+
+ if key:
+ sysctl[key] = value.strip()
+
+ try:
+ (key, value) = re.split(r'\s?=\s?|: ', line, maxsplit=1)
+ except Exception as e:
+ module.warn('Unable to split sysctl line (%s): %s' % (to_text(line), to_text(e)))
if key:
sysctl[key] = value.strip()
- try:
- (key, value) = re.split(r'\s?=\s?|: ', line, maxsplit=1)
- except Exception as e:
- module.warn('Unable to split sysctl line (%s): %s' % (to_text(line), to_text(e)))
-
- if key:
- sysctl[key] = value.strip()
-
return sysctl
diff --git a/lib/ansible/module_utils/facts/system/distribution.py b/lib/ansible/module_utils/facts/system/distribution.py
index ee20fcb94f0..76f49b6ce84 100644
--- a/lib/ansible/module_utils/facts/system/distribution.py
+++ b/lib/ansible/module_utils/facts/system/distribution.py
@@ -517,7 +517,7 @@ class Distribution(object):
'Linux Mint', 'SteamOS', 'Devuan', 'Kali', 'Cumulus Linux',
'Pop!_OS', 'Parrot', 'Pardus GNU/Linux', 'Uos', 'Deepin', 'OSMC'],
'Suse': ['SuSE', 'SLES', 'SLED', 'openSUSE', 'openSUSE Tumbleweed',
- 'SLES_SAP', 'SUSE_LINUX', 'openSUSE Leap', 'ALP-Dolomite'],
+ 'SLES_SAP', 'SUSE_LINUX', 'openSUSE Leap', 'ALP-Dolomite', 'SL-Micro'],
'Archlinux': ['Archlinux', 'Antergos', 'Manjaro'],
'Mandrake': ['Mandrake', 'Mandriva'],
'Solaris': ['Solaris', 'Nexenta', 'OmniOS', 'OpenIndiana', 'SmartOS'],
diff --git a/lib/ansible/module_utils/facts/system/local.py b/lib/ansible/module_utils/facts/system/local.py
index 3d656f5a345..66ec58a2e7d 100644
--- a/lib/ansible/module_utils/facts/system/local.py
+++ b/lib/ansible/module_utils/facts/system/local.py
@@ -1,17 +1,5 @@
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
@@ -25,7 +13,6 @@ import ansible.module_utils.compat.typing as t
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector
-from ansible.module_utils.six import PY3
from ansible.module_utils.six.moves import configparser, StringIO
@@ -91,12 +78,9 @@ class LocalFactCollector(BaseFactCollector):
# if that fails read it with ConfigParser
cp = configparser.ConfigParser()
try:
- if PY3:
- cp.read_file(StringIO(out))
- else:
- cp.readfp(StringIO(out))
+ cp.read_file(StringIO(out))
except configparser.Error:
- fact = "error loading facts as JSON or ini - please check content: %s" % fn
+ fact = f"error loading facts as JSON or ini - please check content: {fn}"
module.warn(fact)
else:
fact = {}
@@ -104,8 +88,14 @@ class LocalFactCollector(BaseFactCollector):
if sect not in fact:
fact[sect] = {}
for opt in cp.options(sect):
- val = cp.get(sect, opt)
- fact[sect][opt] = val
+ try:
+ val = cp.get(sect, opt)
+ except configparser.Error as ex:
+ fact = f"error loading facts as ini - please check content: {fn} ({ex})"
+ module.warn(fact)
+ continue
+ else:
+ fact[sect][opt] = val
except Exception as e:
fact = "Failed to convert (%s) to JSON: %s" % (fn, to_text(e))
module.warn(fact)
diff --git a/lib/ansible/module_utils/facts/system/service_mgr.py b/lib/ansible/module_utils/facts/system/service_mgr.py
index 4dfa7e99d44..20257967c1e 100644
--- a/lib/ansible/module_utils/facts/system/service_mgr.py
+++ b/lib/ansible/module_utils/facts/system/service_mgr.py
@@ -106,7 +106,7 @@ class ServiceMgrFactCollector(BaseFactCollector):
proc_1 = proc_1.strip()
if proc_1 is not None and (proc_1 == 'init' or proc_1.endswith('sh')):
- # many systems return init, so this cannot be trusted, if it ends in 'sh' it probalby is a shell in a container
+ # many systems return init, so this cannot be trusted, if it ends in 'sh' it probably is a shell in a container
proc_1 = None
# if not init/None it should be an identifiable or custom init, so we are done!
@@ -144,6 +144,8 @@ class ServiceMgrFactCollector(BaseFactCollector):
service_mgr_name = 'systemd'
elif os.path.exists('/etc/init.d/'):
service_mgr_name = 'sysvinit'
+ elif os.path.exists('/etc/dinit.d/'):
+ service_mgr_name = 'dinit'
if not service_mgr_name:
# if we cannot detect, fallback to generic 'service'
diff --git a/lib/ansible/module_utils/facts/system/systemd.py b/lib/ansible/module_utils/facts/system/systemd.py
new file mode 100644
index 00000000000..3ba2bbfcbdf
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/systemd.py
@@ -0,0 +1,47 @@
+# Get systemd version and features
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+from __future__ import annotations
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
+
+
+class SystemdFactCollector(BaseFactCollector):
+ name = "systemd"
+ _fact_ids = set() # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ systemctl_bin = module.get_bin_path("systemctl")
+ systemd_facts = {}
+ if systemctl_bin and ServiceMgrFactCollector.is_systemd_managed(module):
+ rc, stdout, dummy = module.run_command(
+ [systemctl_bin, "--version"],
+ check_rc=False,
+ )
+
+ if rc != 0:
+ return systemd_facts
+
+ systemd_facts["systemd"] = {
+ "features": str(stdout.split("\n")[1]),
+ "version": int(stdout.split(" ")[1]),
+ }
+
+ return systemd_facts
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1
index f40c3384cbc..3a1a317ec66 100644
--- a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1
@@ -37,7 +37,7 @@ Function Add-CSharpType {
.PARAMETER CompileSymbols
[String[]] A list of symbols to be defined during compile time. These are
added to the existing symbols, 'CORECLR', 'WINDOWS', 'UNIX' that are set
- conditionalls in this cmdlet.
+ conditionally in this cmdlet.
.NOTES
The following features were added to control the compiling options from the
@@ -75,7 +75,7 @@ Function Add-CSharpType {
[Switch]$IgnoreWarnings,
[Switch]$PassThru,
[Parameter(Mandatory = $true, ParameterSetName = "Module")][Object]$AnsibleModule,
- [Parameter(ParameterSetName = "Manual")][String]$TempPath = $env:TMP,
+ [Parameter(ParameterSetName = "Manual")][String]$TempPath,
[Parameter(ParameterSetName = "Manual")][Switch]$IncludeDebugInfo,
[String[]]$CompileSymbols = @()
)
@@ -280,9 +280,11 @@ Function Add-CSharpType {
$include_debug = $AnsibleModule.Verbosity -ge 3
}
else {
- $temp_path = $TempPath
+ $temp_path = [System.IO.Path]::GetTempPath()
$include_debug = $IncludeDebugInfo.IsPresent
}
+ $temp_path = Join-Path -Path $temp_path -ChildPath ([Guid]::NewGuid().Guid)
+
$compiler_options = [System.Collections.ArrayList]@("/optimize")
if ($defined_symbols.Count -gt 0) {
$compiler_options.Add("/define:" + ([String]::Join(";", $defined_symbols.ToArray()))) > $null
@@ -304,8 +306,12 @@ Function Add-CSharpType {
)
# create a code snippet for each reference and check if we need
- # to reference any extra assemblies
- $ignore_warnings = [System.Collections.ArrayList]@()
+ # to reference any extra assemblies.
+ # CS1610 is a warning when csc.exe failed to delete temporary files.
+ # We use our own temp dir deletion mechanism so this doesn't become a
+ # fatal error.
+ # https://github.com/ansible-collections/ansible.windows/issues/598
+ $ignore_warnings = [System.Collections.ArrayList]@('1610')
$compile_units = [System.Collections.Generic.List`1[System.CodeDom.CodeSnippetCompileUnit]]@()
foreach ($reference in $References) {
# scan through code and add any assemblies that match
@@ -373,7 +379,26 @@ Function Add-CSharpType {
}
}
- $compile = $provider.CompileAssemblyFromDom($compile_parameters, $compile_units)
+ $null = New-Item -Path $temp_path -ItemType Directory -Force
+ try {
+ $compile = $provider.CompileAssemblyFromDom($compile_parameters, $compile_units)
+ }
+ finally {
+ # Try to delete the temp path, if this fails and we are running
+ # with a module object write a warning instead of failing.
+ try {
+ [System.IO.Directory]::Delete($temp_path, $true)
+ }
+ catch {
+ $msg = "Failed to cleanup temporary directory '$temp_path' used for compiling C# code."
+ if ($AnsibleModule) {
+ $AnsibleModule.Warn("$msg Files may still be present after the task is complete. Error: $_")
+ }
+ else {
+ throw "$msg Error: $_"
+ }
+ }
+ }
}
finally {
foreach ($kvp in $originalEnv.GetEnumerator()) {
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1
index 9b86f84188a..fb9fb11c490 100644
--- a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1
@@ -4,7 +4,7 @@
# used by Convert-DictToSnakeCase to convert a string in camelCase
# format to snake_case
Function Convert-StringToSnakeCase($string) {
- # cope with pluralized abbreaviations such as TargetGroupARNs
+ # cope with pluralized abbreviations such as TargetGroupARNs
if ($string -cmatch "[A-Z]{3,}s") {
$replacement_string = $string -creplace $matches[0], "_$($matches[0].ToLower())"
diff --git a/lib/ansible/module_utils/splitter.py b/lib/ansible/module_utils/splitter.py
index 7bddd32dae1..e2a3da50543 100644
--- a/lib/ansible/module_utils/splitter.py
+++ b/lib/ansible/module_utils/splitter.py
@@ -81,7 +81,7 @@ def split_args(args):
'''
# the list of params parsed out of the arg string
- # this is going to be the result value when we are donei
+ # this is going to be the result value when we are done
params = []
# here we encode the args, so we have a uniform charset to
diff --git a/lib/ansible/modules/add_host.py b/lib/ansible/modules/add_host.py
index de3c8619135..81930eab7d7 100644
--- a/lib/ansible/modules/add_host.py
+++ b/lib/ansible/modules/add_host.py
@@ -61,7 +61,7 @@ notes:
- The alias O(host) of the parameter O(name) is only available on Ansible 2.4 and newer.
- Since Ansible 2.4, the C(inventory_dir) variable is now set to V(None) instead of the 'global inventory source',
because you can now have multiple sources. An example was added that shows how to partially restore the previous behaviour.
-- Though this module does not change the remote host, we do provide 'changed' status as it can be useful for those trying to track inventory changes.
+- Though this module does not change the remote host, we do provide C(changed) status as it can be useful for those trying to track inventory changes.
- The hosts added will not bypass the C(--limit) from the command line, so both of those need to be in agreement to make them available as play targets.
They are still available from hostvars and for delegation as a normal part of the inventory.
seealso:
diff --git a/lib/ansible/modules/apt.py b/lib/ansible/modules/apt.py
index cc5edd0f6d9..70a2a07cc07 100644
--- a/lib/ansible/modules/apt.py
+++ b/lib/ansible/modules/apt.py
@@ -63,21 +63,20 @@ options:
default: 'no'
default_release:
description:
- - Corresponds to the C(-t) option for I(apt) and sets pin priorities
+ - Corresponds to the C(-t) option for I(apt) and sets pin priorities.
aliases: [ default-release ]
type: str
install_recommends:
description:
- - Corresponds to the C(--no-install-recommends) option for I(apt). V(true) installs recommended packages. V(false) does not install
+ - Corresponds to the C(--no-install-recommends) option for C(apt). V(true) installs recommended packages. V(false) does not install
recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed.
aliases: [ install-recommends ]
type: bool
force:
description:
- - 'Corresponds to the C(--force-yes) to I(apt-get) and implies O(allow_unauthenticated=yes) and O(allow_downgrade=yes)'
- - "This option will disable checking both the packages' signatures and the certificates of the
- web servers they are downloaded from."
- - 'This option *is not* the equivalent of passing the C(-f) flag to I(apt-get) on the command line'
+ - 'Corresponds to the C(--force-yes) to C(apt-get) and implies O(allow_unauthenticated=yes) and O(allow_downgrade=yes).'
+ - "This option will disable checking both the packages' signatures and the certificates of the web servers they are downloaded from."
+ - 'This option *is not* the equivalent of passing the C(-f) flag to C(apt-get) on the command line.'
- '**This is a destructive operation with the potential to destroy your system, and it should almost never be used.**
Please also see C(man apt-get) for more information.'
type: bool
@@ -85,7 +84,7 @@ options:
clean:
description:
- Run the equivalent of C(apt-get clean) to clear out the local repository of retrieved package files. It removes everything but
- the lock file from /var/cache/apt/archives/ and /var/cache/apt/archives/partial/.
+ the lock file from C(/var/cache/apt/archives/) and C(/var/cache/apt/archives/partial/).
- Can be run as part of the package installation (clean runs before install) or as a separate step.
type: bool
default: 'no'
@@ -93,7 +92,7 @@ options:
allow_unauthenticated:
description:
- Ignore if packages cannot be authenticated. This is useful for bootstrapping environments that manage their own apt-key setup.
- - 'O(allow_unauthenticated) is only supported with O(state): V(install)/V(present)'
+ - 'O(allow_unauthenticated) is only supported with O(state): V(install)/V(present).'
aliases: [ allow-unauthenticated ]
type: bool
default: 'no'
@@ -111,7 +110,7 @@ options:
version_added: "2.12"
allow_change_held_packages:
description:
- - Allows changing the version of a package which is on the apt hold list
+ - Allows changing the version of a package which is on the apt hold list.
type: bool
default: 'no'
version_added: '2.13'
@@ -128,14 +127,14 @@ options:
type: str
dpkg_options:
description:
- - Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'
- - Options should be supplied as comma separated list
+ - Add C(dpkg) options to C(apt) command. Defaults to C(-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold").
+ - Options should be supplied as comma separated list.
default: force-confdef,force-confold
type: str
deb:
description:
- Path to a .deb package on the remote machine.
- - If :// in the path, ansible will attempt to download deb before installing. (Version added 2.1)
+ - If C(://) is in the path, Ansible will attempt to download the deb before installing. (Version added 2.1)
- Requires the C(xz-utils) package to extract the control file of the deb package to install.
type: path
required: false
@@ -143,7 +142,8 @@ options:
autoremove:
description:
- If V(true), remove unused dependency packages for all module states except V(build-dep). It can also be used as the only option.
- - Previous to version 2.4, autoclean was also an alias for autoremove, now it is its own separate command. See documentation for further information.
+ - Previous to version 2.4, O(autoclean) was also an alias for O(autoremove), now it is its own separate command.
+ See documentation for further information.
type: bool
default: 'no'
version_added: "2.1"
@@ -155,10 +155,10 @@ options:
version_added: "2.4"
policy_rc_d:
description:
- - Force the exit code of /usr/sbin/policy-rc.d.
- - For example, if I(policy_rc_d=101) the installed package will not trigger a service start.
- - If /usr/sbin/policy-rc.d already exists, it is backed up and restored after the package installation.
- - If V(null), the /usr/sbin/policy-rc.d isn't created/changed.
+ - Force the exit code of C(/usr/sbin/policy-rc.d).
+ - For example, if O(policy_rc_d=101) the installed package will not trigger a service start.
+ - If C(/usr/sbin/policy-rc.d) already exists, it is backed up and restored after the package installation.
+ - If V(null), the C(/usr/sbin/policy-rc.d) is not created/changed.
type: int
default: null
version_added: "2.8"
@@ -179,7 +179,7 @@ options:
version_added: "2.11"
force_apt_get:
description:
- - Force usage of apt-get instead of aptitude
+ - Force usage of apt-get instead of aptitude.
type: bool
default: 'no'
version_added: "2.4"
@@ -205,22 +205,22 @@ attributes:
platforms: debian
notes:
- Three of the upgrade modes (V(full), V(safe) and its alias V(true)) required C(aptitude) up to 2.3, since 2.4 C(apt-get) is used as a fall-back.
- - In most cases, packages installed with apt will start newly installed services by default. Most distributions have mechanisms to avoid this.
+ - In most cases, packages installed with C(apt) will start newly installed services by default. Most distributions have mechanisms to avoid this.
For example when installing Postgresql-9.5 in Debian 9, creating an executable shell script (/usr/sbin/policy-rc.d) that throws
- a return code of 101 will stop Postgresql 9.5 starting up after install. Remove the file or its execute permission afterward.
- - The apt-get commandline supports implicit regex matches here but we do not because it can let typos through easier
+ a return code of 101 will stop Postgresql 9.5 starting up after install. Remove the file or its execute permission afterward.
+ - The C(apt-get) commandline supports implicit regex matches here but we do not because it can let typos through easier
(If you typo C(foo) as C(fo) apt-get would install packages that have "fo" in their name with a warning and a prompt for the user.
- Since we don't have warnings and prompts before installing, we disallow this.Use an explicit fnmatch pattern if you want wildcarding)
+ Since there are no warnings and prompts before installing, we disallow this. Use an explicit fnmatch pattern if you want wildcarding).
- When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name) option.
- When O(default_release) is used, an implicit priority of 990 is used. This is the same behavior as C(apt-get -t).
- When an exact version is specified, an implicit priority of 1001 is used.
- - If the interpreter can't import ``python-apt``/``python3-apt`` the module will check for it in system-owned interpreters as well.
+ - If the interpreter can't import C(python-apt)/C(python3-apt) the module will check for it in system-owned interpreters as well.
If the dependency can't be found, the module will attempt to install it.
If the dependency is found or installed, the module will be respawned under the correct interpreter.
'''
EXAMPLES = '''
-- name: Install apache httpd (state=present is optional)
+- name: Install apache httpd (state=present is optional)
ansible.builtin.apt:
name: apache2
state: present
@@ -365,8 +365,8 @@ import datetime
import fnmatch
import locale as locale_module
import os
-import random
import re
+import secrets
import shutil
import sys
import tempfile
@@ -381,8 +381,8 @@ from ansible.module_utils.six import string_types
from ansible.module_utils.urls import fetch_file
DPKG_OPTIONS = 'force-confdef,force-confold'
-APT_GET_ZERO = "\n0 upgraded, 0 newly installed"
-APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed"
+APT_GET_ZERO = "\n0 upgraded, 0 newly installed, 0 to remove"
+APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed, 0 to remove"
APT_LISTS_PATH = "/var/lib/apt/lists"
APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp"
APT_MARK_INVALID_OP = 'Invalid operation'
@@ -507,7 +507,7 @@ def package_best_match(pkgname, version_cmp, version, release, cache):
policy.create_pin('Release', pkgname, release, 990)
if version_cmp == "=":
# Installing a specific version from command line overrides all pinning
- # We don't mimmic this exactly, but instead set a priority which is higher than all APT built-in pin priorities.
+ # We don't mimic this exactly, but instead set a priority which is higher than all APT built-in pin priorities.
policy.create_pin('Version', pkgname, version, 1001)
pkg = cache[pkgname]
pkgver = policy.get_candidate_ver(pkg)
@@ -1252,6 +1252,7 @@ def main():
LC_ALL=locale,
LC_MESSAGES=locale,
LC_CTYPE=locale,
+ LANGUAGE=locale,
)
module.run_command_environ_update = APT_ENV_VARS
@@ -1387,23 +1388,32 @@ def main():
err = ''
update_cache_retries = module.params.get('update_cache_retries')
update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
- randomize = random.randint(0, 1000) / 1000.0
+ randomize = secrets.randbelow(1000) / 1000.0
for retry in range(update_cache_retries):
try:
if not module.check_mode:
cache.update()
break
- except apt.cache.FetchFailedException as e:
- err = to_native(e)
+ except apt.cache.FetchFailedException as fetch_failed_exc:
+ err = fetch_failed_exc
+ module.warn(
+ f"Failed to update cache after {retry + 1} retries due "
+ f"to {to_native(fetch_failed_exc)}, retrying"
+ )
# Use exponential backoff plus a little bit of randomness
delay = 2 ** retry + randomize
if delay > update_cache_retry_max_delay:
delay = update_cache_retry_max_delay + randomize
time.sleep(delay)
+ module.warn(f"Sleeping for {int(round(delay))} seconds, before attempting to refresh the cache again")
else:
- module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason'))
+ msg = (
+ f"Failed to update apt cache after {update_cache_retries} retries: "
+ f"{err if err else 'unknown reason'}"
+ )
+ module.fail_json(msg=msg)
cache.open(progress=None)
mtimestamp, post_cache_update_time = get_updated_cache_time()
diff --git a/lib/ansible/modules/apt_key.py b/lib/ansible/modules/apt_key.py
index 669bad20c6f..ec86e829baa 100644
--- a/lib/ansible/modules/apt_key.py
+++ b/lib/ansible/modules/apt_key.py
@@ -26,13 +26,13 @@ attributes:
platform:
platforms: debian
notes:
- - The apt-key command used by this module has been deprecated. See the L(Debian wiki,https://wiki.debian.org/DebianRepository/UseThirdParty) for details.
- This module is kept for backwards compatibility for systems that still use apt-key as the main way to manage apt repository keys.
+ - The C(apt-key) command used by this module has been deprecated. See the L(Debian wiki,https://wiki.debian.org/DebianRepository/UseThirdParty) for details.
+ This module is kept for backwards compatibility for systems that still use C(apt-key) as the main way to manage apt repository keys.
- As a sanity check, downloaded key id must match the one specified.
- "Use full fingerprint (40 characters) key ids to avoid key collisions.
To generate a full-fingerprint imported key: C(apt-key adv --list-public-keys --with-fingerprint --with-colons)."
- - If you specify both the key id and the URL with O(state=present), the task can verify or add the key as needed.
- - Adding a new key requires an apt cache update (e.g. using the M(ansible.builtin.apt) module's update_cache option).
+ - If you specify both the key O(id) and the O(url) with O(state=present), the task can verify or add the key as needed.
+ - Adding a new key requires an apt cache update (e.g. using the M(ansible.builtin.apt) module's C(update_cache) option).
requirements:
- gpg
seealso:
@@ -42,7 +42,7 @@ options:
description:
- The identifier of the key.
- Including this allows check mode to correctly report the changed state.
- - If specifying a subkey's id be aware that apt-key does not understand how to remove keys via a subkey id. Specify the primary key's id instead.
+ - If specifying a subkey's id, be aware that C(apt-key) does not understand how to remove keys via a subkey id. Specify the primary key's id instead.
- This parameter is required when O(state) is set to V(absent).
type: str
data:
@@ -188,7 +188,7 @@ def lang_env(module):
if not hasattr(lang_env, 'result'):
locale = get_best_parsable_locale(module)
- lang_env.result = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
+ lang_env.result = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LANGUAGE=locale)
return lang_env.result
diff --git a/lib/ansible/modules/apt_repository.py b/lib/ansible/modules/apt_repository.py
index 4d01679db74..3fb027c0c32 100644
--- a/lib/ansible/modules/apt_repository.py
+++ b/lib/ansible/modules/apt_repository.py
@@ -41,13 +41,13 @@ options:
default: "present"
mode:
description:
- - The octal mode for newly created files in sources.list.d.
+ - The octal mode for newly created files in C(sources.list.d).
- Default is what system uses (probably 0644).
type: raw
version_added: "1.6"
update_cache:
description:
- - Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes.
+ - Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes.
type: bool
default: "yes"
aliases: [ update-cache ]
@@ -72,9 +72,9 @@ options:
version_added: '1.8'
filename:
description:
- - Sets the name of the source list file in sources.list.d.
+ - Sets the name of the source list file in C(sources.list.d).
Defaults to a file name based on the repository source url.
- The .list extension will be automatically added.
+ The C(.list) extension will be automatically added.
type: str
version_added: '2.1'
codename:
@@ -90,8 +90,8 @@ options:
Without this library, the module does not work.
- Runs C(apt-get install python-apt) for Python 2, and C(apt-get install python3-apt) for Python 3.
- Only works with the system Python 2 or Python 3. If you are using a Python on the remote that is not
- the system Python, set O(install_python_apt=false) and ensure that the Python apt library
- for your Python version is installed some other way.
+ the system Python, set O(install_python_apt=false) and ensure that the Python apt library
+ for your Python version is installed some other way.
type: bool
default: true
author:
@@ -174,9 +174,9 @@ import glob
import json
import os
import re
+import secrets
import sys
import tempfile
-import random
import time
from ansible.module_utils.basic import AnsibleModule
@@ -504,7 +504,7 @@ class UbuntuSourcesList(SourcesList):
if self.apt_key_bin:
locale = get_best_parsable_locale(self.module)
- APT_ENV = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale)
+ APT_ENV = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale, LANGUAGE=locale)
self.module.run_command_environ_update = APT_ENV
rc, out, err = self.module.run_command([self.apt_key_bin, 'export', key_fingerprint], check_rc=True)
found = bool(not err or 'nothing exported' not in err)
@@ -738,29 +738,38 @@ def main():
if changed and not module.check_mode:
try:
+ err = ''
sourceslist.save()
if update_cache:
- err = ''
update_cache_retries = module.params.get('update_cache_retries')
update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
- randomize = random.randint(0, 1000) / 1000.0
+ randomize = secrets.randbelow(1000) / 1000.0
+ cache = apt.Cache()
for retry in range(update_cache_retries):
try:
- cache = apt.Cache()
cache.update()
break
- except apt.cache.FetchFailedException as e:
- err = to_native(e)
+ except apt.cache.FetchFailedException as fetch_failed_exc:
+ err = fetch_failed_exc
+ module.warn(
+ f"Failed to update cache after {retry + 1} retries due "
+ f"to {to_native(fetch_failed_exc)}, retrying"
+ )
# Use exponential backoff with a max fail count, plus a little bit of randomness
delay = 2 ** retry + randomize
if delay > update_cache_retry_max_delay:
delay = update_cache_retry_max_delay + randomize
time.sleep(delay)
+ module.warn(f"Sleeping for {int(round(delay))} seconds, before attempting to update the cache again")
else:
revert_sources_list(sources_before, sources_after, sourceslist_before)
- module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason'))
+ msg = (
+ f"Failed to update apt cache after {update_cache_retries} retries: "
+ f"{err if err else 'unknown reason'}"
+ )
+ module.fail_json(msg=msg)
except (OSError, IOError) as ex:
revert_sources_list(sources_before, sources_after, sourceslist_before)
diff --git a/lib/ansible/modules/assemble.py b/lib/ansible/modules/assemble.py
index 77c33bef104..bd8ddf6cfff 100644
--- a/lib/ansible/modules/assemble.py
+++ b/lib/ansible/modules/assemble.py
@@ -61,14 +61,14 @@ options:
type: str
ignore_hidden:
description:
- - A boolean that controls if files that start with a '.' will be included or not.
+ - A boolean that controls if files that start with a C(.) will be included or not.
type: bool
default: no
version_added: '2.0'
validate:
description:
- The validation command to run before copying into place.
- - The path to the file to validate is passed in via '%s' which must be present as in the sshd example below.
+ - The path to the file to validate is passed in by C(%s) which must be present as in the sshd example below.
- The command is passed securely so shell features like expansion and pipes won't work.
type: str
version_added: '2.0'
@@ -205,6 +205,11 @@ def main():
regexp=dict(type='str'),
ignore_hidden=dict(type='bool', default=False),
validate=dict(type='str'),
+
+ # Options that are for the action plugin, but ignored by the module itself.
+ # We have them here so that the tests pass without ignores, which
+ # reduces the likelihood of further bugs added.
+ decrypt=dict(type='bool', default=True),
),
add_file_common_args=True,
)
diff --git a/lib/ansible/modules/assert.py b/lib/ansible/modules/assert.py
index 420044203cf..af758a53c51 100644
--- a/lib/ansible/modules/assert.py
+++ b/lib/ansible/modules/assert.py
@@ -17,14 +17,14 @@ version_added: "1.5"
options:
that:
description:
- - A list of string expressions of the same form that can be passed to the 'when' statement.
+ - A list of string expressions of the same form that can be passed to the C(when) statement.
type: list
elements: str
required: true
fail_msg:
description:
- The customized message used for a failing assertion.
- - This argument was called 'msg' before Ansible 2.7, now it is renamed to 'fail_msg' with alias 'msg'.
+ - This argument was called O(msg) before Ansible 2.7, now it is renamed to O(fail_msg) with alias O(msg).
type: str
aliases: [ msg ]
version_added: "2.7"
@@ -85,7 +85,7 @@ EXAMPLES = r'''
- >
"reject" not in some_command_result.stderr
-- name: After version 2.7 both 'msg' and 'fail_msg' can customize failing assertion message
+- name: After version 2.7 both 'msg' and 'fail_msg' can customize failing assertion message
ansible.builtin.assert:
that:
- my_param <= 100
@@ -93,7 +93,7 @@ EXAMPLES = r'''
fail_msg: "'my_param' must be between 0 and 100"
success_msg: "'my_param' is between 0 and 100"
-- name: Please use 'msg' when ansible version is smaller than 2.7
+- name: Please use 'msg' when Ansible version is older than 2.7
ansible.builtin.assert:
that:
- my_param <= 100
diff --git a/lib/ansible/modules/blockinfile.py b/lib/ansible/modules/blockinfile.py
index 6d32e4de30d..602f0f0684d 100644
--- a/lib/ansible/modules/blockinfile.py
+++ b/lib/ansible/modules/blockinfile.py
@@ -33,7 +33,7 @@ options:
marker:
description:
- The marker line template.
- - C({mark}) will be replaced with the values in O(marker_begin) (default="BEGIN") and O(marker_end) (default="END").
+ - C({mark}) will be replaced with the values in O(marker_begin) (default=C(BEGIN)) and O(marker_end) (default=C(END)).
- Using a custom marker without the C({mark}) variable may result in the block being repeatedly inserted on subsequent playbook runs.
- Multi-line markers are not supported and will result in the block being repeatedly inserted on subsequent playbook runs.
- A newline is automatically appended by the module to O(marker_begin) and O(marker_end).
@@ -50,12 +50,10 @@ options:
description:
- If specified and no begin/ending O(marker) lines are found, the block will be inserted after the last match of specified regular expression.
- A special value is available; V(EOF) for inserting the block at the end of the file.
- - If specified regular expression has no matches, V(EOF) will be used instead.
+ - If specified regular expression has no matches or no value is passed, V(EOF) will be used instead.
- The presence of the multiline flag (?m) in the regular expression controls whether the match is done line by line or with multiple lines.
This behaviour was added in ansible-core 2.14.
type: str
- choices: [ EOF, '*regex*' ]
- default: EOF
insertbefore:
description:
- If specified and no begin/ending O(marker) lines are found, the block will be inserted before the last match of specified regular expression.
@@ -64,7 +62,6 @@ options:
- The presence of the multiline flag (?m) in the regular expression controls whether the match is done line by line or with multiple lines.
This behaviour was added in ansible-core 2.14.
type: str
- choices: [ BOF, '*regex*' ]
create:
description:
- Create a new file if it does not exist.
@@ -106,7 +103,7 @@ options:
default: no
version_added: '2.16'
notes:
- - When using 'with_*' loops be aware that if you do not set a unique mark the block will be overwritten on each iteration.
+ - When using C(with_*) loops be aware that if you do not set a unique mark the block will be overwritten on each iteration.
- As of Ansible 2.3, the O(dest) option has been changed to O(path) as default, but O(dest) still works as well.
- Option O(ignore:follow) has been removed in Ansible 2.5, because this module modifies the contents of the file
so O(ignore:follow=no) does not make sense.
diff --git a/lib/ansible/modules/command.py b/lib/ansible/modules/command.py
index 4a3b8e133a0..42d9beeff4b 100644
--- a/lib/ansible/modules/command.py
+++ b/lib/ansible/modules/command.py
@@ -50,7 +50,7 @@ options:
free_form:
description:
- The command module takes a free form string as a command to run.
- - There is no actual parameter named 'free form'.
+ - There is no actual parameter named C(free_form).
cmd:
type: str
description:
diff --git a/lib/ansible/modules/copy.py b/lib/ansible/modules/copy.py
index cb2ccf9c8f8..0a1dc7f7717 100644
--- a/lib/ansible/modules/copy.py
+++ b/lib/ansible/modules/copy.py
@@ -28,8 +28,8 @@ options:
- Local path to a file to copy to the remote server.
- This can be absolute or relative.
- If path is a directory, it is copied recursively. In this case, if path ends
- with "/", only inside contents of that directory are copied to destination.
- Otherwise, if it does not end with "/", the directory itself with all contents
+ with C(/), only inside contents of that directory are copied to destination.
+ Otherwise, if it does not end with C(/), the directory itself with all contents
is copied. This behavior is similar to the C(rsync) command line tool.
type: path
content:
@@ -44,7 +44,7 @@ options:
description:
- Remote absolute path where the file should be copied to.
- If O(src) is a directory, this must be a directory too.
- - If O(dest) is a non-existent path and if either O(dest) ends with "/" or O(src) is a directory, O(dest) is created.
+ - If O(dest) is a non-existent path and if either O(dest) ends with C(/) or O(src) is a directory, O(dest) is created.
- If O(dest) is a relative path, the starting directory is determined by the remote host.
- If O(src) and O(dest) are files, the parent directory of O(dest) is not created and the task fails if it does not already exist.
type: path
@@ -92,7 +92,7 @@ options:
description:
- Influence whether O(src) needs to be transferred or already is present remotely.
- If V(false), it will search for O(src) on the controller node.
- - If V(true) it will search for O(src) on the managed (remote) node.
+ - If V(true), it will search for O(src) on the managed (remote) node.
- O(remote_src) supports recursive copying as of version 2.8.
- O(remote_src) only works with O(mode=preserve) as of version 2.6.
- Auto-decryption of files does not work when O(remote_src=yes).
diff --git a/lib/ansible/modules/cron.py b/lib/ansible/modules/cron.py
index 350077015ab..5c2b2a12c22 100644
--- a/lib/ansible/modules/cron.py
+++ b/lib/ansible/modules/cron.py
@@ -18,19 +18,19 @@ description:
- Use this module to manage crontab and environment variables entries. This module allows
you to create environment variables and named crontab entries, update, or delete them.
- 'When crontab jobs are managed: the module includes one line with the description of the
- crontab entry C("#Ansible: ") corresponding to the "name" passed to the module,
- which is used by future ansible/module calls to find/check the state. The "name"
- parameter should be unique, and changing the "name" value will result in a new cron
+ crontab entry C("#Ansible: ") corresponding to the O(name) passed to the module,
+ which is used by future ansible/module calls to find/check the state. The O(name)
+ parameter should be unique, and changing the O(name) value will result in a new cron
task being created (or a different one being removed).'
- When environment variables are managed, no comment line is added, but, when the module
- needs to find/check the state, it uses the "name" parameter to find the environment
+ needs to find/check the state, it uses the O(name) parameter to find the environment
variable definition line.
- - When using symbols such as %, they must be properly escaped.
+ - When using symbols such as C(%), they must be properly escaped.
version_added: "0.9"
options:
name:
description:
- - Description of a crontab entry or, if env is set, the name of environment variable.
+ - Description of a crontab entry or, if O(env) is set, the name of environment variable.
- This parameter is always required as of ansible-core 2.12.
type: str
required: yes
@@ -41,7 +41,7 @@ options:
type: str
job:
description:
- - The command to execute or, if env is set, the value of environment variable.
+ - The command to execute or, if O(env) is set, the value of environment variable.
- The command should not contain line breaks.
- Required if O(state=present).
type: str
@@ -58,10 +58,10 @@ options:
The assumption is that this file is exclusively managed by the module,
do not use if the file contains multiple entries, NEVER use for /etc/crontab.
- If this is a relative path, it is interpreted with respect to C(/etc/cron.d).
- - Many linux distros expect (and some require) the filename portion to consist solely
+ - Many Linux distros expect (and some require) the filename portion to consist solely
of upper- and lower-case letters, digits, underscores, and hyphens.
- - Using this parameter requires you to specify the O(user) as well, unless O(state) is not V(present).
- - Either this parameter or O(name) is required
+ - Using this parameter requires you to specify the O(user) as well, unless O(state=absent).
+ - Either this parameter or O(name) is required.
type: path
backup:
description:
@@ -131,6 +131,9 @@ options:
version_added: "2.1"
requirements:
- cron (any 'vixie cron' conformant variant, like cronie)
+notes:
+ - If you are experiencing permissions issues with cron and macOS,
+ you should see the official macOS documentation for further information.
author:
- Dane Summers (@dsummersl)
- Mike Grozak (@rhaido)
diff --git a/lib/ansible/modules/deb822_repository.py b/lib/ansible/modules/deb822_repository.py
index aff4fd4d504..0fa33c73d70 100644
--- a/lib/ansible/modules/deb822_repository.py
+++ b/lib/ansible/modules/deb822_repository.py
@@ -8,28 +8,28 @@ DOCUMENTATION = '''
author: 'Ansible Core Team (@ansible)'
short_description: 'Add and remove deb822 formatted repositories'
description:
-- 'Add and remove deb822 formatted repositories in Debian based distributions'
+- 'Add and remove deb822 formatted repositories in Debian based distributions.'
module: deb822_repository
notes:
-- This module will not automatically update caches, call the apt module based
+- This module will not automatically update caches, call the M(ansible.builtin.apt) module based
on the changed state.
options:
allow_downgrade_to_insecure:
description:
- Allow downgrading a package that was previously authenticated but
- is no longer authenticated
+ is no longer authenticated.
type: bool
allow_insecure:
description:
- - Allow insecure repositories
+ - Allow insecure repositories.
type: bool
allow_weak:
description:
- - Allow repositories signed with a key using a weak digest algorithm
+ - Allow repositories signed with a key using a weak digest algorithm.
type: bool
architectures:
description:
- - 'Architectures to search within repository'
+ - Architectures to search within repository.
type: list
elements: str
by_hash:
@@ -51,7 +51,7 @@ options:
components:
description:
- Components specify different sections of one distribution version
- present in a Suite.
+ present in a C(Suite).
type: list
elements: str
date_max_future:
@@ -64,8 +64,8 @@ options:
type: bool
inrelease_path:
description:
- - Determines the path to the InRelease file, relative to the normal
- position of an InRelease file.
+ - Determines the path to the C(InRelease) file, relative to the normal
+ position of an C(InRelease) file.
type: str
languages:
description:
@@ -81,8 +81,8 @@ options:
type: str
pdiffs:
description:
- - Controls if APT should try to use PDiffs to update old indexes
- instead of downloading the new indexes entirely
+ - Controls if APT should try to use C(PDiffs) to update old indexes
+ instead of downloading the new indexes entirely.
type: bool
signed_by:
description:
@@ -97,21 +97,20 @@ options:
Suite can specify an exact path in relation to the URI(s) provided,
in which case the Components: must be omitted and suite must end
with a slash (C(/)). Alternatively, it may take the form of a
- distribution version (e.g. a version codename like disco or artful).
+ distribution version (for example a version codename like C(disco) or C(artful)).
If the suite does not specify a path, at least one component must
be present.
type: list
elements: str
targets:
description:
- - Defines which download targets apt will try to acquire from this
- source.
+ - Defines which download targets apt will try to acquire from this source.
type: list
elements: str
trusted:
description:
- Decides if a source is considered trusted or if warnings should be
- raised before e.g. packages are installed from this source.
+ raised before, for example packages are installed from this source.
type: bool
types:
choices:
@@ -123,7 +122,7 @@ options:
elements: str
description:
- Which types of packages to look for from a given source; either
- binary V(deb) or source code V(deb-src)
+ binary V(deb) or source code V(deb-src).
uris:
description:
- The URIs must specify the base of the Debian distribution archive,
@@ -132,7 +131,7 @@ options:
elements: str
mode:
description:
- - The octal mode for newly created files in sources.list.d.
+ - The octal mode for newly created files in C(sources.list.d).
type: raw
default: '0644'
state:
diff --git a/lib/ansible/modules/debconf.py b/lib/ansible/modules/debconf.py
index 779952ec94d..69bc1534a0f 100644
--- a/lib/ansible/modules/debconf.py
+++ b/lib/ansible/modules/debconf.py
@@ -29,24 +29,24 @@ notes:
- Several questions have to be answered (depending on the package).
Use 'debconf-show ' on any Debian or derivative with the package
installed to see questions/settings available.
- - Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
+ - Some distros will always record tasks involving the setting of passwords as changed. This is due to C(debconf-get-selections) masking passwords.
- It is highly recommended to add C(no_log=True) to the task while handling sensitive information using this module.
- - The debconf module does not reconfigure packages, it just updates the debconf database.
+ - The M(ansible.builtin.debconf) module does not reconfigure packages, it just updates the debconf database.
An additional step is needed (typically with C(notify) if debconf makes a change)
to reconfigure the package and apply the changes.
- debconf is extensively used for pre-seeding configuration prior to installation
+ C(debconf) is extensively used for pre-seeding configuration prior to installation
rather than modifying configurations.
- So, while dpkg-reconfigure does use debconf data, it is not always authoritative
+ So, while C(dpkg-reconfigure) does use debconf data, it is not always authoritative
and you may need to check how your package is handled.
- - Also note dpkg-reconfigure is a 3-phase process. It invokes the
+ - Also note C(dpkg-reconfigure) is a 3-phase process. It invokes the
control scripts from the C(/var/lib/dpkg/info) directory with the
C(.prerm reconfigure ),
C(.config reconfigure ) and C(.postinst control ) arguments.
- The main issue is that the C(.config reconfigure) step for many packages
will first reset the debconf database (overriding changes made by this module) by
checking the on-disk configuration. If this is the case for your package then
- dpkg-reconfigure will effectively ignore changes made by debconf.
- - However as dpkg-reconfigure only executes the C(.config) step if the file
+ C(dpkg-reconfigure) will effectively ignore changes made by debconf.
+ - However as C(dpkg-reconfigure) only executes the C(.config) step if the file
exists, it is possible to rename it to C(/var/lib/dpkg/info/.config.ignore)
before executing C(dpkg-reconfigure -f noninteractive ) and then restore it.
This seems to be compliant with Debian policy for the .config file.
@@ -76,12 +76,12 @@ options:
value:
description:
- Value to set the configuration to.
- - After Ansible 2.17, C(value) is of type 'raw'.
+ - After Ansible 2.17, C(value) is of type C(raw).
type: raw
aliases: [ answer ]
unseen:
description:
- - Do not set 'seen' flag when pre-seeding.
+ - Do not set C(seen) flag when pre-seeding.
type: bool
default: false
author:
@@ -173,8 +173,6 @@ def set_selection(module, pkg, question, vtype, value, unseen):
if unseen:
cmd.append('-u')
- if vtype == 'boolean':
- value = value.lower()
data = ' '.join([pkg, question, vtype, value])
return module.run_command(cmd, data=data)
@@ -209,15 +207,17 @@ def main():
if vtype is None or value is None:
module.fail_json(msg="when supplying a question you must supply a valid vtype and value")
+ # ensure we compare booleans supplied to the way debconf sees them (true/false strings)
+ if vtype == 'boolean':
+ value = to_text(value).lower()
+
# if question doesn't exist, value cannot match
if question not in prev:
changed = True
else:
existing = prev[question]
- # ensure we compare booleans supplied to the way debconf sees them (true/false strings)
if vtype == 'boolean':
- value = to_text(value).lower()
existing = to_text(prev[question]).lower()
elif vtype == 'password':
existing = get_password_value(module, pkg, question, vtype)
diff --git a/lib/ansible/modules/debug.py b/lib/ansible/modules/debug.py
index cdaf118cd2d..325d2541c2c 100644
--- a/lib/ansible/modules/debug.py
+++ b/lib/ansible/modules/debug.py
@@ -14,7 +14,7 @@ description:
- This module prints statements during execution and can be useful
for debugging variables or expressions without necessarily halting
the playbook.
-- Useful for debugging together with the 'when:' directive.
+- Useful for debugging together with the C(when:) directive.
- This module is also supported for Windows targets.
version_added: '0.8'
options:
diff --git a/lib/ansible/modules/dnf.py b/lib/ansible/modules/dnf.py
index 44abe0b7ee0..c9ddbb5ae5e 100644
--- a/lib/ansible/modules/dnf.py
+++ b/lib/ansible/modules/dnf.py
@@ -19,9 +19,15 @@ description:
options:
use_backend:
description:
- - By default, this module will select the backend based on the C(ansible_pkg_mgr) fact.
+ - Backend module to use.
default: "auto"
- choices: [ auto, yum, yum4, dnf4, dnf5 ]
+ choices:
+ auto: Automatically select the backend based on the C(ansible_facts.pkg_mgr) fact.
+ yum: Alias for V(auto) (see Notes)
+ dnf: M(ansible.builtin.dnf)
+ yum4: Alias for V(dnf)
+ dnf4: Alias for V(dnf)
+ dnf5: M(ansible.builtin.dnf5)
type: str
version_added: 2.15
name:
@@ -49,14 +55,14 @@ options:
state:
description:
- Whether to install (V(present), V(latest)), or remove (V(absent)) a package.
- - Default is V(None), however in effect the default action is V(present) unless the O(autoremove) option is
- enabled for this module, then V(absent) is inferred.
+ - Default is V(None), however in effect the default action is V(present) unless the O(autoremove=true),
+ then V(absent) is inferred.
choices: ['absent', 'present', 'installed', 'removed', 'latest']
type: str
enablerepo:
description:
- - I(Repoid) of repositories to enable for the install/update operation.
+ - C(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
type: list
@@ -65,9 +71,9 @@ options:
disablerepo:
description:
- - I(Repoid) of repositories to disable for the install/update operation.
+ - C(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
- When specifying multiple repos, separate them with a ",".
+ When specifying multiple repos, separate them with a C(,).
type: list
elements: str
default: []
@@ -80,7 +86,7 @@ options:
disable_gpg_check:
description:
- Whether to disable the GPG checking of signatures of packages being
- installed. Has an effect only if O(state) is V(present) or V(latest).
+ installed. Has an effect only if O(state=present) or O(state=latest).
- This setting affects packages installed from a repository as well as
"local" packages installed from the filesystem or a URL.
type: bool
@@ -105,13 +111,13 @@ options:
description:
- If V(true), removes all "leaf" packages from the system that were originally
installed as dependencies of user-installed packages but which are no longer
- required by any such package. Should be used alone or when O(state) is V(absent)
+ required by any such package. Should be used alone or when O(state=absent).
type: bool
default: "no"
version_added: "2.4"
exclude:
description:
- - Package name(s) to exclude when state=present, or latest. This can be a
+ - Package name(s) to exclude when O(state=present) or O(state=latest). This can be a
list or a comma separated string.
version_added: "2.7"
type: list
@@ -120,14 +126,14 @@ options:
skip_broken:
description:
- Skip all unavailable packages or packages with broken dependencies
- without raising an error. Equivalent to passing the --skip-broken option.
+ without raising an error. Equivalent to passing the C(--skip-broken) option.
type: bool
default: "no"
version_added: "2.7"
update_cache:
description:
- Force dnf to check if cache is out of date and redownload if needed.
- Has an effect only if O(state) is V(present) or V(latest).
+ Has an effect only if O(state=present) or O(state=latest).
type: bool
default: "no"
aliases: [ expire-cache ]
@@ -135,7 +141,7 @@ options:
update_only:
description:
- When using latest, only update installed packages. Do not install packages.
- - Has an effect only if O(state) is V(latest)
+ - Has an effect only if O(state=present) or O(state=latest).
default: "no"
type: bool
version_added: "2.7"
@@ -155,7 +161,7 @@ options:
version_added: "2.7"
enable_plugin:
description:
- - I(Plugin) name to enable for the install/update operation.
+ - C(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
version_added: "2.7"
type: list
@@ -163,7 +169,7 @@ options:
default: []
disable_plugin:
description:
- - I(Plugin) name to disable for the install/update operation.
+ - C(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction.
version_added: "2.7"
type: list
@@ -173,13 +179,14 @@ options:
description:
- Disable the excludes defined in DNF config files.
- If set to V(all), disables all excludes.
- - If set to V(main), disable excludes defined in [main] in dnf.conf.
+ - If set to V(main), disable excludes defined in C([main]) in C(dnf.conf).
- If set to V(repoid), disable excludes defined for given repo id.
version_added: "2.7"
type: str
validate_certs:
description:
- - This only applies if using a https url as the source of the rpm. e.g. for localinstall. If set to V(false), the SSL certificates will not be validated.
+ - This only applies if using an https url as the source of the rpm. For example, for localinstall.
+ If set to V(false), the SSL certificates will not be validated.
- This should only set to V(false) used on personally controlled sites using self-signed certificates as it avoids verifying the source site.
type: bool
default: "yes"
@@ -195,7 +202,7 @@ options:
description:
- Specify if the named package and version is allowed to downgrade
a maybe already installed higher version of that package.
- Note that setting allow_downgrade=True can make this module
+ Note that setting O(allow_downgrade=true) can make this module
behave in a non-idempotent way. The task could end up with a set
of packages that does not match the complete list of specified
packages to install (because dependencies between the downgraded
@@ -238,7 +245,7 @@ options:
version_added: "2.8"
allowerasing:
description:
- - If V(true) it allows erasing of installed packages to resolve dependencies.
+ - If V(true) it allows erasing of installed packages to resolve dependencies.
required: false
type: bool
default: "no"
@@ -282,14 +289,18 @@ attributes:
platform:
platforms: rhel
notes:
- - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the I(name) option.
+ - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name) option.
- Group removal doesn't work if the group was installed with Ansible because
upstream dnf's API doesn't properly mark groups as installed, therefore upon
removal the module is unable to detect that the group is installed
- (https://bugzilla.redhat.com/show_bug.cgi?id=1620324)
+ U(https://bugzilla.redhat.com/show_bug.cgi?id=1620324).
+ - While O(use_backend=yum) and the ability to call the action plugin as
+ M(ansible.builtin.yum) are provided for syntax compatibility, the YUM
+ backend was removed in ansible-core 2.17 because the required libraries are
+ not available for any supported version of Python. If you rely on this
+ functionality, use an older version of Ansible.
requirements:
- python3-dnf
- - for the autoremove option you need dnf >= 2.0.1"
author:
- Igor Gnatenko (@ignatenkobrain)
- Cristian van Ee (@DJMuggs)
@@ -390,7 +401,6 @@ import sys
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.urls import fetch_file
-from ansible.module_utils.compat.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
@@ -416,11 +426,7 @@ class DnfModule(YumDnf):
self._ensure_dnf()
self.pkg_mgr_name = "dnf"
-
- try:
- self.with_modules = dnf.base.WITH_MODULES
- except AttributeError:
- self.with_modules = False
+ self.with_modules = dnf.base.WITH_MODULES
def _sanitize_dnf_error_msg_install(self, spec, error):
"""
@@ -435,22 +441,6 @@ class DnfModule(YumDnf):
return error
- def _sanitize_dnf_error_msg_remove(self, spec, error):
- """
- For unhandled dnf.exceptions.Error scenarios, there are certain error
- messages we want to ignore in a removal scenario as known benign
- failures. Do that here.
- """
- if (
- 'no package matched' in to_native(error) or
- 'No match for argument:' in to_native(error)
- ):
- return (False, "{0} is not installed".format(spec))
-
- # Return value is tuple of:
- # ("Is this actually a failure?", "Error Message")
- return (True, error)
-
def _package_dict(self, package):
"""Return a dictionary of information for the package."""
# NOTE: This no longer contains the 'dnfstate' field because it is
@@ -645,22 +635,14 @@ class DnfModule(YumDnf):
"""Return a fully configured dnf Base object."""
base = dnf.Base()
self._configure_base(base, conf_file, disable_gpg_check, installroot, sslverify)
- try:
- # this method has been supported in dnf-4.2.17-6 or later
- # https://bugzilla.redhat.com/show_bug.cgi?id=1788212
- base.setup_loggers()
- except AttributeError:
- pass
- try:
- base.init_plugins(set(self.disable_plugin), set(self.enable_plugin))
- base.pre_configure_plugins()
- except AttributeError:
- pass # older versions of dnf didn't require this and don't have these methods
+
+ base.setup_loggers()
+ base.init_plugins(set(self.disable_plugin), set(self.enable_plugin))
+ base.pre_configure_plugins()
+
self._specify_repositories(base, disablerepo, enablerepo)
- try:
- base.configure_plugins()
- except AttributeError:
- pass # older versions of dnf didn't require this and don't have these methods
+
+ base.configure_plugins()
try:
if self.update_cache:
@@ -726,22 +708,34 @@ class DnfModule(YumDnf):
self.module.exit_json(msg="", results=results)
def _is_installed(self, pkg):
- return bool(
- dnf.subject.Subject(pkg).get_best_query(sack=self.base.sack).installed().run()
- )
+ installed_query = dnf.subject.Subject(pkg).get_best_query(sack=self.base.sack).installed()
+ if dnf.util.is_glob_pattern(pkg):
+ available_query = dnf.subject.Subject(pkg).get_best_query(sack=self.base.sack).available()
+ return not (
+ {p.name for p in available_query} - {p.name for p in installed_query}
+ )
+ else:
+ return bool(installed_query)
- def _is_newer_version_installed(self, pkg_name):
+ def _is_newer_version_installed(self, pkg_spec):
try:
- if isinstance(pkg_name, dnf.package.Package):
- available = pkg_name
+ if isinstance(pkg_spec, dnf.package.Package):
+ installed = sorted(self.base.sack.query().installed().filter(name=pkg_spec.name, arch=pkg_spec.arch))[-1]
+ return installed.evr_gt(pkg_spec)
else:
- available = sorted(
- dnf.subject.Subject(pkg_name).get_best_query(sack=self.base.sack).available().run()
- )[-1]
- installed = sorted(self.base.sack.query().installed().filter(name=available.name).run())[-1]
+ available = dnf.subject.Subject(pkg_spec).get_best_query(sack=self.base.sack).available()
+ installed = self.base.sack.query().installed().filter(name=available[0].name)
+ for arch in sorted(set(p.arch for p in installed)): # select only from already-installed arches for this case
+ installed_pkg = sorted(installed.filter(arch=arch))[-1]
+ try:
+ available_pkg = sorted(available.filter(arch=arch))[-1]
+ except IndexError:
+ continue # nothing currently available for this arch; keep going
+ if installed_pkg.evr_gt(available_pkg):
+ return True
+ return False
except IndexError:
return False
- return installed > available
def _mark_package_install(self, pkg_spec, upgrade=False):
"""Mark the package for install."""
@@ -800,16 +794,13 @@ class DnfModule(YumDnf):
"results": []
}
except dnf.exceptions.Error as e:
- if to_text("already installed") in to_text(e):
- return {'failed': False, 'msg': '', 'failure': ''}
- else:
- return {
- 'failed': True,
- 'msg': "Unknown Error occurred for package {0}.".format(pkg_spec),
- 'failure': " ".join((pkg_spec, to_native(e))),
- 'rc': 1,
- "results": []
- }
+ return {
+ 'failed': True,
+ 'msg': "Unknown Error occurred for package {0}.".format(pkg_spec),
+ 'failure': " ".join((pkg_spec, to_native(e))),
+ 'rc': 1,
+ "results": []
+ }
return {'failed': False, 'msg': msg, 'failure': '', 'rc': 0}
@@ -873,36 +864,20 @@ class DnfModule(YumDnf):
return not_installed
def _install_remote_rpms(self, filenames):
- if int(dnf.__version__.split(".")[0]) >= 2:
- pkgs = list(sorted(self.base.add_remote_rpms(list(filenames)), reverse=True))
- else:
- pkgs = []
- try:
- for filename in filenames:
- pkgs.append(self.base.add_remote_rpm(filename))
- except IOError as e:
- if to_text("Can not load RPM file") in to_text(e):
- self.module.fail_json(
- msg="Error occurred attempting remote rpm install of package: {0}. {1}".format(filename, to_native(e)),
- results=[],
- rc=1,
- )
- if self.update_only:
- self._update_only(pkgs)
- else:
- for pkg in pkgs:
- try:
- if self._is_newer_version_installed(pkg):
- if self.allow_downgrade:
- self.base.package_install(pkg, strict=self.base.conf.strict)
- else:
+ try:
+ pkgs = self.base.add_remote_rpms(filenames)
+ if self.update_only:
+ self._update_only(pkgs)
+ else:
+ for pkg in pkgs:
+ if not (self._is_newer_version_installed(pkg) and not self.allow_downgrade):
self.base.package_install(pkg, strict=self.base.conf.strict)
- except Exception as e:
- self.module.fail_json(
- msg="Error occurred attempting remote rpm operation: {0}".format(to_native(e)),
- results=[],
- rc=1,
- )
+ except Exception as e:
+ self.module.fail_json(
+ msg="Error occurred attempting remote rpm operation: {0}".format(to_native(e)),
+ results=[],
+ rc=1,
+ )
def _is_module_installed(self, module_spec):
if self.with_modules:
@@ -919,7 +894,7 @@ class DnfModule(YumDnf):
else:
return True # No stream provided, but module found
- return False # seems like a sane default
+ return False # seems like a logical default
def ensure(self):
@@ -1123,14 +1098,6 @@ class DnfModule(YumDnf):
except dnf.exceptions.CompsError:
# Group is already uninstalled.
pass
- except AttributeError:
- # Group either isn't installed or wasn't marked installed at install time
- # because of DNF bug
- #
- # This is necessary until the upstream dnf API bug is fixed where installing
- # a group via the dnf API doesn't actually mark the group as installed
- # https://bugzilla.redhat.com/show_bug.cgi?id=1620324
- pass
for environment in environments:
try:
@@ -1139,25 +1106,11 @@ class DnfModule(YumDnf):
# Environment is already uninstalled.
pass
- installed = self.base.sack.query().installed()
for pkg_spec in pkg_specs:
- # short-circuit installed check for wildcard matching
- if '*' in pkg_spec:
- try:
- self.base.remove(pkg_spec)
- except dnf.exceptions.MarkingError as e:
- is_failure, handled_remove_error = self._sanitize_dnf_error_msg_remove(pkg_spec, to_native(e))
- if is_failure:
- failure_response['failures'].append('{0} - {1}'.format(pkg_spec, to_native(e)))
- else:
- response['results'].append(handled_remove_error)
- continue
-
- installed_pkg = dnf.subject.Subject(pkg_spec).get_best_query(
- sack=self.base.sack).installed().run()
-
- for pkg in installed_pkg:
- self.base.remove(str(pkg))
+ try:
+ self.base.remove(pkg_spec)
+ except dnf.exceptions.MarkingError as e:
+ response['results'].append(f"{e.value}: {pkg_spec}")
# Like the dnf CLI we want to allow recursive removal of dependent
# packages
@@ -1211,10 +1164,8 @@ class DnfModule(YumDnf):
self.base.download_packages(self.base.transaction.install_set)
except dnf.exceptions.DownloadError as e:
- self.module.fail_json(
- msg="Failed to download packages: {0}".format(to_text(e)),
- results=[],
- )
+ failure_response['msg'] = "Failed to download packages: {0}".format(to_native(e))
+ self.module.fail_json(**failure_response)
# Validate GPG. This is NOT done in dnf.Base (it's done in the
# upstream CLI subclass of dnf.Base)
@@ -1255,33 +1206,10 @@ class DnfModule(YumDnf):
failure_response['msg'] = "Depsolve Error occurred: {0}".format(to_native(e))
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
- if to_text("already installed") in to_text(e):
- response['changed'] = False
- response['results'].append("Package already installed: {0}".format(to_native(e)))
- self.module.exit_json(**response)
- else:
- failure_response['msg'] = "Unknown Error occurred: {0}".format(to_native(e))
- self.module.fail_json(**failure_response)
+ failure_response['msg'] = "Unknown Error occurred: {0}".format(to_native(e))
+ self.module.fail_json(**failure_response)
def run(self):
- """The main function."""
-
- # Check if autoremove is called correctly
- if self.autoremove:
- if LooseVersion(dnf.__version__) < LooseVersion('2.0.1'):
- self.module.fail_json(
- msg="Autoremove requires dnf>=2.0.1. Current dnf version is %s" % dnf.__version__,
- results=[],
- )
-
- # Check if download_dir is called correctly
- if self.download_dir:
- if LooseVersion(dnf.__version__) < LooseVersion('2.6.2'):
- self.module.fail_json(
- msg="download_dir requires dnf>=2.6.2. Current dnf version is %s" % dnf.__version__,
- results=[],
- )
-
if self.update_cache and not self.names and not self.list:
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
@@ -1339,7 +1267,7 @@ def main():
# list=repos
# list=pkgspec
- yumdnf_argument_spec['argument_spec']['use_backend'] = dict(default='auto', choices=['auto', 'yum', 'yum4', 'dnf4', 'dnf5'])
+ yumdnf_argument_spec['argument_spec']['use_backend'] = dict(default='auto', choices=['auto', 'dnf', 'yum', 'yum4', 'dnf4', 'dnf5'])
module = AnsibleModule(
**yumdnf_argument_spec
diff --git a/lib/ansible/modules/dnf5.py b/lib/ansible/modules/dnf5.py
index 2ebc4a1004e..6ce15a047ac 100644
--- a/lib/ansible/modules/dnf5.py
+++ b/lib/ansible/modules/dnf5.py
@@ -17,7 +17,7 @@ options:
name:
description:
- "A package name or package specifier with version, like C(name-1.0).
- When using state=latest, this can be '*' which means run: dnf -y update.
+ When using O(state=latest), this can be C(*) which means run: C(dnf -y update).
You can also pass a url or a local path to an rpm file.
To operate on several packages this can accept a comma separated string of packages or a list of packages."
- Comparison operators for package version are valid here C(>), C(<), C(>=), C(<=). Example - C(name >= 1.0).
@@ -37,15 +37,15 @@ options:
state:
description:
- Whether to install (V(present), V(latest)), or remove (V(absent)) a package.
- - Default is V(None), however in effect the default action is V(present) unless the V(autoremove) option is
- enabled for this module, then V(absent) is inferred.
+ - Default is V(None), however in effect the default action is V(present) unless the O(autoremove=true),
+ then V(absent) is inferred.
choices: ['absent', 'present', 'installed', 'removed', 'latest']
type: str
enablerepo:
description:
- I(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
- When specifying multiple repos, separate them with a ",".
+ When specifying multiple repos, separate them with a C(,).
type: list
elements: str
default: []
@@ -53,7 +53,7 @@ options:
description:
- I(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
- When specifying multiple repos, separate them with a ",".
+ When specifying multiple repos, separate them with a C(,).
type: list
elements: str
default: []
@@ -84,12 +84,12 @@ options:
description:
- If V(true), removes all "leaf" packages from the system that were originally
installed as dependencies of user-installed packages but which are no longer
- required by any such package. Should be used alone or when O(state) is V(absent)
+ required by any such package. Should be used alone or when O(state=absent).
type: bool
default: "no"
exclude:
description:
- - Package name(s) to exclude when state=present, or latest. This can be a
+ - Package name(s) to exclude when O(state=present) or O(state=latest). This can be a
list or a comma separated string.
type: list
elements: str
@@ -97,20 +97,20 @@ options:
skip_broken:
description:
- Skip all unavailable packages or packages with broken dependencies
- without raising an error. Equivalent to passing the --skip-broken option.
+ without raising an error. Equivalent to passing the C(--skip-broken) option.
type: bool
default: "no"
update_cache:
description:
- Force dnf to check if cache is out of date and redownload if needed.
- Has an effect only if O(state) is V(present) or V(latest).
+ Has an effect only if O(state=present) or O(state=latest).
type: bool
default: "no"
aliases: [ expire-cache ]
update_only:
description:
- When using latest, only update installed packages. Do not install packages.
- - Has an effect only if O(state) is V(latest)
+ - Has an effect only if O(state=present) or O(state=latest).
default: "no"
type: bool
security:
@@ -127,17 +127,19 @@ options:
type: bool
enable_plugin:
description:
- - This is currently a no-op as dnf5 itself does not implement this feature.
- I(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
+ - O(disable_plugin) takes precedence in case a plugin is listed in both O(enable_plugin) and O(disable_plugin).
+ - Requires python3-libdnf5 5.2.0.0+.
type: list
elements: str
default: []
disable_plugin:
description:
- - This is currently a no-op as dnf5 itself does not implement this feature.
- I(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction.
+ - O(disable_plugin) takes precedence in case a plugin is listed in both O(enable_plugin) and O(disable_plugin).
+ - Requires python3-libdnf5 5.2.0.0+.
type: list
default: []
elements: str
@@ -145,7 +147,7 @@ options:
description:
- Disable the excludes defined in DNF config files.
- If set to V(all), disables all excludes.
- - If set to V(main), disable excludes defined in [main] in dnf.conf.
+ - If set to V(main), disable excludes defined in C([main]) in C(dnf.conf).
- If set to V(repoid), disable excludes defined for given repo id.
type: str
validate_certs:
@@ -164,7 +166,7 @@ options:
description:
- Specify if the named package and version is allowed to downgrade
a maybe already installed higher version of that package.
- Note that setting allow_downgrade=True can make this module
+ Note that setting O(allow_downgrade=true) can make this module
behave in a non-idempotent way. The task could end up with a set
of packages that does not match the complete list of specified
packages to install (because dependencies between the downgraded
@@ -356,10 +358,23 @@ libdnf5 = None
def is_installed(base, spec):
settings = libdnf5.base.ResolveSpecSettings()
- query = libdnf5.rpm.PackageQuery(base)
- query.filter_installed()
- match, nevra = query.resolve_pkg_spec(spec, settings, True)
- return match
+ installed_query = libdnf5.rpm.PackageQuery(base)
+ installed_query.filter_installed()
+ match, nevra = installed_query.resolve_pkg_spec(spec, settings, True)
+
+ # FIXME use `is_glob_pattern` function when available:
+ # https://github.com/rpm-software-management/dnf5/issues/1563
+ glob_patterns = set("*[?")
+ if any(set(char) & glob_patterns for char in spec):
+ available_query = libdnf5.rpm.PackageQuery(base)
+ available_query.filter_available()
+ available_query.resolve_pkg_spec(spec, settings, True)
+
+ return not (
+ {p.get_name() for p in available_query} - {p.get_name() for p in installed_query}
+ )
+ else:
+ return match
def is_newer_version_installed(base, spec):
@@ -434,6 +449,21 @@ class Dnf5Module(YumDnf):
self.pkg_mgr_name = "dnf5"
+ def fail_on_non_existing_plugins(self, base):
+ # https://github.com/rpm-software-management/dnf5/issues/1460
+ plugin_names = [p.get_name() for p in base.get_plugins_info()]
+ msg = []
+ if enable_unmatched := set(self.enable_plugin).difference(plugin_names):
+ msg.append(
+ f"No matches were found for the following plugin name patterns while enabling libdnf5 plugins: {', '.join(enable_unmatched)}."
+ )
+ if disable_unmatched := set(self.disable_plugin).difference(plugin_names):
+ msg.append(
+ f"No matches were found for the following plugin name patterns while disabling libdnf5 plugins: {', '.join(disable_unmatched)}."
+ )
+ if msg:
+ self.module.fail_json(msg=" ".join(msg))
+
def _ensure_dnf(self):
locale = get_best_parsable_locale(self.module)
os.environ["LC_ALL"] = os.environ["LC_MESSAGES"] = locale
@@ -482,13 +512,6 @@ class Dnf5Module(YumDnf):
rc=1,
)
- if self.enable_plugin or self.disable_plugin:
- self.module.fail_json(
- msg="enable_plugin and disable_plugin options are not yet implemented in DNF5",
- failures=[],
- rc=1,
- )
-
base = libdnf5.base.Base()
conf = base.get_config()
@@ -531,8 +554,23 @@ class Dnf5Module(YumDnf):
if self.download_dir:
conf.destdir = self.download_dir
+ if self.enable_plugin:
+ try:
+ base.enable_disable_plugins(self.enable_plugin, True)
+ except AttributeError:
+ self.module.fail_json(msg="'enable_plugin' requires python3-libdnf5 5.2.0.0+")
+
+ if self.disable_plugin:
+ try:
+ base.enable_disable_plugins(self.disable_plugin, False)
+ except AttributeError:
+ self.module.fail_json(msg="'disable_plugin' requires python3-libdnf5 5.2.0.0+")
+
base.setup()
+ # https://github.com/rpm-software-management/dnf5/issues/1460
+ self.fail_on_non_existing_plugins(base)
+
log_router = base.get_logger()
global_logger = libdnf5.logger.GlobalLogger()
global_logger.set(log_router.get(), libdnf5.logger.Logger.Level_DEBUG)
@@ -617,7 +655,7 @@ class Dnf5Module(YumDnf):
results = []
if self.names == ["*"] and self.state == "latest":
goal.add_rpm_upgrade(settings)
- elif self.state in {"install", "present", "latest"}:
+ elif self.state in {"installed", "present", "latest"}:
upgrade = self.state == "latest"
for spec in self.names:
if is_newer_version_installed(base, spec):
@@ -650,7 +688,7 @@ class Dnf5Module(YumDnf):
if transaction.get_problems():
failures = []
for log_event in transaction.get_resolve_logs():
- if log_event.get_problem() == libdnf5.base.GoalProblem_NOT_FOUND and self.state in {"install", "present", "latest"}:
+ if log_event.get_problem() == libdnf5.base.GoalProblem_NOT_FOUND and self.state in {"installed", "present", "latest"}:
# NOTE dnf module compat
failures.append("No package {} available.".format(log_event.get_spec()))
else:
diff --git a/lib/ansible/modules/dpkg_selections.py b/lib/ansible/modules/dpkg_selections.py
index b591636802d..a1fa672732d 100644
--- a/lib/ansible/modules/dpkg_selections.py
+++ b/lib/ansible/modules/dpkg_selections.py
@@ -11,7 +11,7 @@ DOCUMENTATION = '''
module: dpkg_selections
short_description: Dpkg package selection selections
description:
- - Change dpkg package selection state via --get-selections and --set-selections.
+ - Change dpkg package selection state via C(--get-selections) and C(--set-selections).
version_added: "2.0"
author:
- Brian Brazil (@brian-brazil)
@@ -68,7 +68,7 @@ def main():
dpkg = module.get_bin_path('dpkg', True)
locale = get_best_parsable_locale(module)
- DPKG_ENV = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale)
+ DPKG_ENV = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale, LANGUAGE=locale)
module.run_command_environ_update = DPKG_ENV
name = module.params['name']
diff --git a/lib/ansible/modules/expect.py b/lib/ansible/modules/expect.py
index 614476332b6..760d7148d60 100644
--- a/lib/ansible/modules/expect.py
+++ b/lib/ansible/modules/expect.py
@@ -70,10 +70,10 @@ notes:
- If you want to run a command through the shell (say you are using C(<),
C(>), C(|), and so on), you must specify a shell in the command such as
C(/bin/bash -c "/path/to/something | grep else").
- - Case insensitive searches are indicated with a prefix of C(?i).
+ - Case insensitive searches are indicated with a prefix of C((?i)).
- The C(pexpect) library used by this module operates with a search window
of 2000 bytes, and does not use a multiline regex match. To perform a
- start of line bound match, use a pattern like ``(?m)^pattern``
+ start of line bound match, use a pattern like C((?m)^pattern).
- The M(ansible.builtin.expect) module is designed for simple scenarios.
For more complex needs, consider the use of expect code with the M(ansible.builtin.shell)
or M(ansible.builtin.script) modules. (An example is part of the M(ansible.builtin.shell) module documentation).
diff --git a/lib/ansible/modules/fetch.py b/lib/ansible/modules/fetch.py
index 66726e3ae39..a5edb767df0 100644
--- a/lib/ansible/modules/fetch.py
+++ b/lib/ansible/modules/fetch.py
@@ -28,8 +28,8 @@ options:
dest:
description:
- A directory to save the file into.
- - For example, if the O(dest) directory is C(/backup) a O(src) file named C(/etc/profile) on host
- C(host.example.com), would be saved into C(/backup/host.example.com/etc/profile).
+ - For example, if O(dest=/backup), then O(src=/etc/profile) on host
+ C(host.example.com), would save the file into C(/backup/host.example.com/etc/profile).
The host name is based on the inventory name.
required: yes
fail_on_missing:
diff --git a/lib/ansible/modules/file.py b/lib/ansible/modules/file.py
index bab8deb5488..975ba5fda90 100644
--- a/lib/ansible/modules/file.py
+++ b/lib/ansible/modules/file.py
@@ -63,9 +63,9 @@ options:
force:
description:
- >
- Force the creation of the symlinks in two cases: the source file does
+ Force the creation of the links in two cases: if the link type is symbolic and the source file does
not exist (but will appear later); the destination exists and is a file (so, we need to unlink the
- O(path) file and create a symlink to the O(src) file in place of it).
+ O(path) file and create a link to the O(src) file in place of it).
type: bool
default: no
follow:
@@ -73,7 +73,7 @@ options:
- This flag indicates that filesystem links, if they exist, should be followed.
- O(follow=yes) and O(state=link) can modify O(src) when combined with parameters such as O(mode).
- Previous to Ansible 2.5, this was V(false) by default.
- - While creating a symlink with a non-existent destination, set O(follow) to V(false) to avoid a warning message related to permission issues.
+ - While creating a symlink with a non-existent destination, set O(follow=false) to avoid a warning message related to permission issues.
The warning message is added to notify the user that we can not set permissions to the non-existent destination.
type: bool
default: yes
@@ -877,6 +877,8 @@ def ensure_hardlink(path, src, follow, force, timestamps):
'path': path})
else:
try:
+ if follow and os.path.islink(b_src):
+ b_src = os.readlink(b_src)
os.link(b_src, b_path)
except OSError as e:
raise AnsibleModuleError(results={'msg': 'Error while linking: %s'
diff --git a/lib/ansible/modules/find.py b/lib/ansible/modules/find.py
index 5e8e36a2327..a516b354bc9 100644
--- a/lib/ansible/modules/find.py
+++ b/lib/ansible/modules/find.py
@@ -58,8 +58,8 @@ options:
contains:
description:
- A regular expression or pattern which should be matched against the file content.
- - If O(read_whole_file) is V(false) it matches against the beginning of the line (uses
- V(re.match(\))). If O(read_whole_file) is V(true), it searches anywhere for that pattern
+ - If O(read_whole_file=false) it matches against the beginning of the line (uses
+ V(re.match(\))). If O(read_whole_file=true), it searches anywhere for that pattern
(uses V(re.search(\))).
- Works only when O(file_type) is V(file).
type: str
@@ -75,14 +75,15 @@ options:
paths:
description:
- List of paths of directories to search. All paths must be fully qualified.
+ - From ansible-core 2.18 and onwards, the data type has changed from C(str) to C(path).
type: list
required: true
aliases: [ name, path ]
- elements: str
+ elements: path
file_type:
description:
- Type of file to select.
- - The 'link' and 'any' choices were added in Ansible 2.3.
+ - The V(link) and V(any) choices were added in Ansible 2.3.
type: str
choices: [ any, directory, file, link ]
default: file
@@ -116,7 +117,7 @@ options:
restricted to modes that can be applied using the python
C(os.chmod) function.
- The mode can be provided as an octal such as V("0644") or
- as symbolic such as V(u=rw,g=r,o=r)
+ as symbolic such as V(u=rw,g=r,o=r).
type: raw
version_added: '2.16'
exact_mode:
@@ -145,15 +146,23 @@ options:
depth:
description:
- Set the maximum number of levels to descend into.
- - Setting recurse to V(false) will override this value, which is effectively depth 1.
+ - Setting O(recurse=false) will override this value, which is effectively depth 1.
- Default is unlimited depth.
type: int
version_added: "2.6"
encoding:
description:
- - When doing a C(contains) search, determine the encoding of the files to be searched.
+ - When doing a O(contains) search, determine the encoding of the files to be searched.
type: str
version_added: "2.17"
+ limit:
+ description:
+ - Limit the maximum number of matching paths returned. After finding this many, the find action will stop looking.
+ - Matches are made from the top, down (i.e. shallowest directory first).
+ - If not set, or set to V(null), it will do unlimited matches.
+ - Default is unlimited matches.
+ type: int
+ version_added: "2.18"
extends_documentation_fragment: action_common_attributes
attributes:
check_mode:
@@ -227,6 +236,16 @@ EXAMPLES = r'''
- '^_[0-9]{2,4}_.*.log$'
- '^[a-z]{1,5}_.*log$'
+- name: Find file containing "wally" without necessarily reading all files
+ ansible.builtin.find:
+ paths: /var/log
+ file_type: file
+ contains: wally
+ read_whole_file: true
+ patterns: "^.*\\.log$"
+ use_regex: true
+ recurse: true
+ limit: 1
'''
RETURN = r'''
@@ -450,7 +469,7 @@ def statinfo(st):
def main():
module = AnsibleModule(
argument_spec=dict(
- paths=dict(type='list', required=True, aliases=['name', 'path'], elements='str'),
+ paths=dict(type='list', required=True, aliases=['name', 'path'], elements='path'),
patterns=dict(type='list', default=[], aliases=['pattern'], elements='str'),
excludes=dict(type='list', aliases=['exclude'], elements='str'),
contains=dict(type='str'),
@@ -467,7 +486,8 @@ def main():
depth=dict(type='int'),
mode=dict(type='raw'),
exact_mode=dict(type='bool', default=True),
- encoding=dict(type='str')
+ encoding=dict(type='str'),
+ limit=dict(type='int')
),
supports_check_mode=True,
)
@@ -520,17 +540,20 @@ def main():
else:
module.fail_json(size=params['size'], msg="failed to process size")
+ if params['limit'] is not None and params['limit'] <= 0:
+ module.fail_json(msg="limit cannot be %d (use None for unlimited)" % params['limit'])
+
now = time.time()
msg = 'All paths examined'
looked = 0
has_warnings = False
for npath in params['paths']:
- npath = os.path.expanduser(os.path.expandvars(npath))
try:
if not os.path.isdir(npath):
raise Exception("'%s' is not a directory" % to_native(npath))
- for root, dirs, files in os.walk(npath, onerror=handle_walk_errors, followlinks=params['follow']):
+ # Setting `topdown=True` to explicitly guarantee matches are made from the shallowest directory first
+ for root, dirs, files in os.walk(npath, onerror=handle_walk_errors, followlinks=params['follow'], topdown=True):
looked = looked + len(files) + len(dirs)
for fsobj in (files + dirs):
fsname = os.path.normpath(os.path.join(root, fsobj))
@@ -596,7 +619,12 @@ def main():
r.update(statinfo(st))
filelist.append(r)
- if not params['recurse']:
+ if len(filelist) == params["limit"]:
+ # Breaks out of directory files loop only
+ msg = "Limit of matches reached"
+ break
+
+ if not params['recurse'] or len(filelist) == params["limit"]:
break
except Exception as e:
skipped[npath] = to_text(e)
diff --git a/lib/ansible/modules/gather_facts.py b/lib/ansible/modules/gather_facts.py
index 561275f2844..85fbe873326 100644
--- a/lib/ansible/modules/gather_facts.py
+++ b/lib/ansible/modules/gather_facts.py
@@ -27,6 +27,8 @@ options:
- By default it will be true if more than one fact module is used.
- For low cost/delay fact modules parallelism overhead might end up meaning the whole process takes longer.
Test your specific case to see if it is a speed improvement or not.
+ - The C(ansible_facts_parallel) variable can be used to set this option,
+ overriding the default, but not the direct assignment of the option in the task.
type: bool
attributes:
action:
@@ -49,8 +51,8 @@ attributes:
notes:
- This is mostly a wrapper around other fact gathering modules.
- Options passed into this action must be supported by all the underlying fact modules configured.
- - If using C(gather_timeout) and parallel execution, it will limit the total execution time of
- modules that do not accept C(gather_timeout) themselves.
+ - If using O(ignore:gather_timeout) and parallel execution, it will limit the total execution time of
+ modules that do not accept O(ignore:gather_timeout) themselves.
- Facts returned by each module will be merged, conflicts will favor 'last merged'.
Order is not guaranteed, when doing parallel gathering on multiple modules.
author:
diff --git a/lib/ansible/modules/get_url.py b/lib/ansible/modules/get_url.py
index 920b986b580..959998c9591 100644
--- a/lib/ansible/modules/get_url.py
+++ b/lib/ansible/modules/get_url.py
@@ -13,7 +13,7 @@ short_description: Downloads files from HTTP, HTTPS, or FTP to node
description:
- Downloads files from HTTP, HTTPS, or FTP to the remote server. The remote
server I(must) have direct access to the remote resource.
- - By default, if an environment variable C(_proxy) is set on
+ - By default, if an environment variable E(_proxy) is set on
the target host, requests will be sent through that proxy. This
behaviour can be overridden by setting a variable for this task
(see R(setting the environment,playbooks_environment)),
@@ -27,23 +27,23 @@ version_added: '0.6'
options:
ciphers:
description:
- - SSL/TLS Ciphers to use for the request
- - 'When a list is provided, all ciphers are joined in order with V(:)'
+ - SSL/TLS Ciphers to use for the request.
+ - 'When a list is provided, all ciphers are joined in order with C(:).'
- See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT)
for more details.
- - The available ciphers is dependent on the Python and OpenSSL/LibreSSL versions
+ - The available ciphers is dependent on the Python and OpenSSL/LibreSSL versions.
type: list
elements: str
version_added: '2.14'
decompress:
description:
- - Whether to attempt to decompress gzip content-encoded responses
+ - Whether to attempt to decompress gzip content-encoded responses.
type: bool
default: true
version_added: '2.14'
url:
description:
- - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
+ - HTTP, HTTPS, or FTP URL in the form C((http|https|ftp)://[user[:pass]]@host.domain[:port]/path).
type: str
required: true
dest:
@@ -60,9 +60,9 @@ options:
tmp_dest:
description:
- Absolute path of where temporary file is downloaded to.
- - When run on Ansible 2.5 or greater, path defaults to ansible's remote_tmp setting
+ - When run on Ansible 2.5 or greater, path defaults to ansible's C(remote_tmp) setting.
- When run on Ansible prior to 2.5, it defaults to E(TMPDIR), E(TEMP) or E(TMP) env variables or a platform specific value.
- - U(https://docs.python.org/3/library/tempfile.html#tempfile.tempdir)
+ - U(https://docs.python.org/3/library/tempfile.html#tempfile.tempdir).
type: path
version_added: '2.1'
force:
@@ -87,18 +87,20 @@ options:
- 'If a checksum is passed to this parameter, the digest of the
destination file will be calculated after it is downloaded to ensure
its integrity and verify that the transfer completed successfully.
- Format: :, e.g. checksum="sha256:D98291AC[...]B6DC7B97",
- checksum="sha256:http://example.com/path/sha256sum.txt"'
+ Format: <algorithm>:<checksum|url>, for example C(checksum="sha256:D98291AC[...]B6DC7B97"),
+ C(checksum="sha256:http://example.com/path/sha256sum.txt").'
- If you worry about portability, only the sha1 algorithm is available
on all platforms and python versions.
- - The Python ``hashlib`` module is responsible for providing the available algorithms.
+ - The Python C(hashlib) module is responsible for providing the available algorithms.
The choices vary based on Python version and OpenSSL version.
- - On systems running in FIPS compliant mode, the ``md5`` algorithm may be unavailable.
+ - On systems running in FIPS compliant mode, the C(md5) algorithm may be unavailable.
- Additionally, if a checksum is passed to this parameter, and the file exist under
the O(dest) location, the C(destination_checksum) would be calculated, and if
checksum equals C(destination_checksum), the file download would be skipped
- (unless O(force) is V(true)). If the checksum does not equal C(destination_checksum),
+ (unless O(force=true)). If the checksum does not equal C(destination_checksum),
the destination file is deleted.
+ - If the checksum URL requires username and password, O(url_username) and O(url_password) are used
+ to download the checksum file.
type: str
default: ''
version_added: "2.0"
@@ -185,16 +187,16 @@ options:
authentication.
- Requires the Python library L(gssapi,https://github.com/pythongssapi/python-gssapi) to be installed.
- Credentials for GSSAPI can be specified with O(url_username)/O(url_password) or with the GSSAPI env var
- C(KRB5CCNAME) that specified a custom Kerberos credential cache.
+ E(KRB5CCNAME) that specified a custom Kerberos credential cache.
- NTLM authentication is I(not) supported even if the GSSAPI mech for NTLM has been installed.
type: bool
default: no
version_added: '2.11'
use_netrc:
description:
- - Determining whether to use credentials from ``~/.netrc`` file
- - By default .netrc is used with Basic authentication headers
- - When set to False, .netrc credentials are ignored
+ - Determining whether to use credentials from C(~/.netrc) file.
+ - By default C(.netrc) is used with Basic authentication headers.
+ - When V(false), C(.netrc) credentials are ignored.
type: bool
default: true
version_added: '2.14'
@@ -661,6 +663,16 @@ def main():
result['checksum_src'] != result['checksum_dest'])
module.exit_json(msg=info.get('msg', ''), **result)
+ # If a checksum was provided, ensure that the temporary file matches this checksum
+ # before moving it to the destination.
+ if checksum != '':
+ tmpsrc_checksum = module.digest_from_file(tmpsrc, algorithm)
+
+ if checksum != tmpsrc_checksum:
+ os.remove(tmpsrc)
+ module.fail_json(msg=f"The checksum for {tmpsrc} did not match {checksum}; it was {tmpsrc_checksum}.", **result)
+
+ # Copy temporary file to destination if necessary
backup_file = None
if result['checksum_src'] != result['checksum_dest']:
try:
@@ -679,13 +691,6 @@ def main():
if os.path.exists(tmpsrc):
os.remove(tmpsrc)
- if checksum != '':
- destination_checksum = module.digest_from_file(dest, algorithm)
-
- if checksum != destination_checksum:
- os.remove(dest)
- module.fail_json(msg="The checksum for %s did not match %s; it was %s." % (dest, checksum, destination_checksum), **result)
-
# allow file attribute changes
file_args = module.load_file_common_arguments(module.params, path=dest)
result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'])
diff --git a/lib/ansible/modules/git.py b/lib/ansible/modules/git.py
index 26d4c59f924..c9ccff4bfc8 100644
--- a/lib/ansible/modules/git.py
+++ b/lib/ansible/modules/git.py
@@ -42,19 +42,19 @@ options:
default: "HEAD"
accept_hostkey:
description:
- - Will ensure or not that "-o StrictHostKeyChecking=no" is present as an ssh option.
+ - Will ensure or not that C(-o StrictHostKeyChecking=no) is present as an ssh option.
- Be aware that this disables a protection against MITM attacks.
- - Those using OpenSSH >= 7.5 might want to set O(ssh_opts) to V(StrictHostKeyChecking=accept-new)
+ - Those using OpenSSH >= 7.5 might want to use O(accept_newhostkey) or set O(ssh_opts) to V(StrictHostKeyChecking=accept-new)
instead, it does not remove the MITM issue but it does restrict it to the first attempt.
type: bool
default: 'no'
version_added: "1.5"
accept_newhostkey:
description:
- - As of OpenSSH 7.5, "-o StrictHostKeyChecking=accept-new" can be
+ - As of OpenSSH 7.5, C(-o StrictHostKeyChecking=accept-new) can be
used which is safer and will only accepts host keys which are
- not present or are the same. if V(true), ensure that
- "-o StrictHostKeyChecking=accept-new" is present as an ssh option.
+ not present or are the same. If V(true), ensure that
+ C(-o StrictHostKeyChecking=accept-new) is present as an ssh option.
type: bool
default: 'no'
version_added: "2.12"
@@ -65,21 +65,21 @@ options:
- For older versions it appends E(GIT_SSH_OPTS) (specific to this module) to the
variables above or via a wrapper script.
- Other options can add to this list, like O(key_file) and O(accept_hostkey).
- - An example value could be "-o StrictHostKeyChecking=no" (although this particular
+ - An example value could be C(-o StrictHostKeyChecking=no) (although this particular
option is better set by O(accept_hostkey)).
- - The module ensures that 'BatchMode=yes' is always present to avoid prompts.
+ - The module ensures that C(BatchMode=yes) is always present to avoid prompts.
type: str
version_added: "1.5"
key_file:
description:
- Specify an optional private key file path, on the target host, to use for the checkout.
- - This ensures 'IdentitiesOnly=yes' is present in O(ssh_opts).
+ - This ensures C(IdentitiesOnly=yes) is present in O(ssh_opts).
type: path
version_added: "1.5"
reference:
description:
- - Reference repository (see "git clone --reference ...").
+ - Reference repository (see C(git clone --reference ...)).
type: str
version_added: "1.4"
remote:
@@ -165,7 +165,7 @@ options:
track_submodules:
description:
- If V(true), submodules will track the latest commit on their
- master branch (or other branch specified in .gitmodules). If
+ master branch (or other branch specified in C(.gitmodules)). If
V(false), submodules will be kept at the revision specified by the
main project. This is equivalent to specifying the C(--remote) flag
to git submodule update.
@@ -235,63 +235,63 @@ notes:
SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
one solution is to use the option accept_hostkey. Another solution is to
add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
- the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts."
+ the git module, with the following command: C(ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts)."
'''
EXAMPLES = '''
- name: Git checkout
ansible.builtin.git:
- repo: 'https://foosball.example.org/path/to/repo.git'
- dest: /srv/checkout
+ repo: 'https://github.com/ansible/ansible.git'
+ dest: /tmp/checkout
version: release-0.22
- name: Read-write git checkout from github
ansible.builtin.git:
- repo: git@github.com:mylogin/hello.git
- dest: /home/mylogin/hello
+ repo: git@github.com:ansible/ansible.git
+ dest: /tmp/checkout
- name: Just ensuring the repo checkout exists
ansible.builtin.git:
- repo: 'https://foosball.example.org/path/to/repo.git'
- dest: /srv/checkout
+ repo: 'https://github.com/ansible/ansible.git'
+ dest: /tmp/checkout
update: no
- name: Just get information about the repository whether or not it has already been cloned locally
ansible.builtin.git:
- repo: 'https://foosball.example.org/path/to/repo.git'
- dest: /srv/checkout
+ repo: git@github.com:ansible/ansible.git
+ dest: /tmp/checkout
clone: no
update: no
- name: Checkout a github repo and use refspec to fetch all pull requests
ansible.builtin.git:
- repo: https://github.com/ansible/ansible-examples.git
- dest: /src/ansible-examples
+ repo: 'https://github.com/ansible/ansible.git'
+ dest: /tmp/checkout
refspec: '+refs/pull/*:refs/heads/*'
- name: Create git archive from repo
ansible.builtin.git:
- repo: https://github.com/ansible/ansible-examples.git
- dest: /src/ansible-examples
- archive: /tmp/ansible-examples.zip
+ repo: git@github.com:ansible/ansible.git
+ dest: /tmp/checkout
+ archive: /tmp/ansible.zip
- name: Clone a repo with separate git directory
ansible.builtin.git:
- repo: https://github.com/ansible/ansible-examples.git
- dest: /src/ansible-examples
- separate_git_dir: /src/ansible-examples.git
+ repo: 'https://github.com/ansible/ansible.git'
+ dest: /tmp/checkout
+ separate_git_dir: /tmp/repo
- name: Example clone of a single branch
ansible.builtin.git:
- repo: https://github.com/ansible/ansible-examples.git
- dest: /src/ansible-examples
+ repo: git@github.com:ansible/ansible.git
+ dest: /tmp/checkout
single_branch: yes
version: master
- name: Avoid hanging when http(s) password is missing
ansible.builtin.git:
- repo: https://github.com/ansible/could-be-a-private-repo
- dest: /src/from-private-repo
+ repo: 'https://github.com/ansible/ansible.git'
+ dest: /tmp/checkout
environment:
GIT_TERMINAL_PROMPT: 0 # reports "terminal prompts disabled" on missing password
# or GIT_ASKPASS: /bin/true # for git before version 2.3.0, reports "Authentication failed" on missing password
@@ -468,7 +468,7 @@ def write_ssh_wrapper(module):
def set_git_ssh_env(key_file, ssh_opts, git_version, module):
'''
use environment variables to configure git's ssh execution,
- which varies by version but this functino should handle all.
+ which varies by version but this function should handle all.
'''
# initialise to existing ssh opts and/or append user provided
@@ -500,7 +500,7 @@ def set_git_ssh_env(key_file, ssh_opts, git_version, module):
# older than 2.3 does not know how to use git_ssh_command,
# so we force it into get_ssh var
# https://github.com/gitster/git/commit/09d60d785c68c8fa65094ecbe46fbc2a38d0fc1f
- if git_version < LooseVersion('2.3.0'):
+ if git_version is not None and git_version < LooseVersion('2.3.0'):
# for use in wrapper
os.environ["GIT_SSH_OPTS"] = ssh_opts
@@ -913,7 +913,7 @@ def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, g
refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*']
else:
# ensure all tags are fetched
- if git_version_used >= LooseVersion('1.9'):
+ if git_version_used is not None and git_version_used >= LooseVersion('1.9'):
fetch_cmd.append('--tags')
else:
# old git versions have a bug in --tags that prevents updating existing tags
@@ -1308,7 +1308,7 @@ def main():
# GIT_SSH= as an environment variable, might create sh wrapper script for older versions.
set_git_ssh_env(key_file, ssh_opts, git_version_used, module)
- if depth is not None and git_version_used < LooseVersion('1.9.1'):
+ if depth is not None and git_version_used is not None and git_version_used < LooseVersion('1.9.1'):
module.warn("git version is too old to fully support the depth argument. Falling back to full checkouts.")
depth = None
diff --git a/lib/ansible/modules/group.py b/lib/ansible/modules/group.py
index 100d211584c..716e7e0a515 100644
--- a/lib/ansible/modules/group.py
+++ b/lib/ansible/modules/group.py
@@ -37,7 +37,7 @@ options:
force:
description:
- Whether to delete a group even if it is the primary group of a user.
- - Only applicable on platforms which implement a --force flag on the group deletion command.
+ - Only applicable on platforms which implement a C(--force) flag on the group deletion command.
type: bool
default: false
version_added: "2.15"
@@ -62,6 +62,22 @@ options:
type: bool
default: no
version_added: "2.8"
+ gid_min:
+ description:
+ - Sets the GID_MIN value for group creation.
+ - Overwrites /etc/login.defs default value.
+ - Currently supported on Linux. Does nothing when used with other platforms.
+ - Requires O(local) is omitted or V(false).
+ type: int
+ version_added: "2.18"
+ gid_max:
+ description:
+ - Sets the GID_MAX value for group creation.
+ - Overwrites /etc/login.defs default value.
+ - Currently supported on Linux. Does nothing when used with other platforms.
+ - Requires O(local) is omitted or V(false).
+ type: int
+ version_added: "2.18"
extends_documentation_fragment: action_common_attributes
attributes:
check_mode:
@@ -151,6 +167,14 @@ class Group(object):
self.system = module.params['system']
self.local = module.params['local']
self.non_unique = module.params['non_unique']
+ self.gid_min = module.params['gid_min']
+ self.gid_max = module.params['gid_max']
+
+ if self.local:
+ if self.gid_min is not None:
+ module.fail_json(msg="'gid_min' can not be used with 'local'")
+ if self.gid_max is not None:
+ module.fail_json(msg="'gid_max' can not be used with 'local'")
def execute_command(self, cmd):
return self.module.run_command(cmd)
@@ -184,6 +208,12 @@ class Group(object):
cmd.append('-o')
elif key == 'system' and kwargs[key] is True:
cmd.append('-r')
+ if self.gid_min is not None:
+ cmd.append('-K')
+ cmd.append('GID_MIN=' + str(self.gid_min))
+ if self.gid_max is not None:
+ cmd.append('-K')
+ cmd.append('GID_MAX=' + str(self.gid_max))
cmd.append(self.name)
return self.execute_command(cmd)
@@ -292,6 +322,12 @@ class SunOS(Group):
cmd.append(str(kwargs[key]))
if self.non_unique:
cmd.append('-o')
+ if self.gid_min is not None:
+ cmd.append('-K')
+ cmd.append('GID_MIN=' + str(self.gid_min))
+ if self.gid_max is not None:
+ cmd.append('-K')
+ cmd.append('GID_MAX=' + str(self.gid_max))
cmd.append(self.name)
return self.execute_command(cmd)
@@ -323,6 +359,12 @@ class AIX(Group):
cmd.append('id=' + str(kwargs[key]))
elif key == 'system' and kwargs[key] is True:
cmd.append('-a')
+ if self.gid_min is not None:
+ cmd.append('-K')
+ cmd.append('GID_MIN=' + str(self.gid_min))
+ if self.gid_max is not None:
+ cmd.append('-K')
+ cmd.append('GID_MAX=' + str(self.gid_max))
cmd.append(self.name)
return self.execute_command(cmd)
@@ -368,6 +410,12 @@ class FreeBsdGroup(Group):
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
+ if self.gid_min is not None:
+ cmd.append('-K')
+ cmd.append('GID_MIN=' + str(self.gid_min))
+ if self.gid_max is not None:
+ cmd.append('-K')
+ cmd.append('GID_MAX=' + str(self.gid_max))
return self.execute_command(cmd)
def group_mod(self, **kwargs):
@@ -492,6 +540,12 @@ class OpenBsdGroup(Group):
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
+ if self.gid_min is not None:
+ cmd.append('-K')
+ cmd.append('GID_MIN=' + str(self.gid_min))
+ if self.gid_max is not None:
+ cmd.append('-K')
+ cmd.append('GID_MAX=' + str(self.gid_max))
cmd.append(self.name)
return self.execute_command(cmd)
@@ -538,6 +592,12 @@ class NetBsdGroup(Group):
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
+ if self.gid_min is not None:
+ cmd.append('-K')
+ cmd.append('GID_MIN=' + str(self.gid_min))
+ if self.gid_max is not None:
+ cmd.append('-K')
+ cmd.append('GID_MAX=' + str(self.gid_max))
cmd.append(self.name)
return self.execute_command(cmd)
@@ -578,6 +638,14 @@ class BusyBoxGroup(Group):
if self.system:
cmd.append('-S')
+ if self.gid_min is not None:
+ cmd.append('-K')
+ cmd.append('GID_MIN=' + str(self.gid_min))
+
+ if self.gid_max is not None:
+ cmd.append('-K')
+ cmd.append('GID_MAX=' + str(self.gid_max))
+
cmd.append(self.name)
return self.execute_command(cmd)
@@ -626,6 +694,8 @@ def main():
system=dict(type='bool', default=False),
local=dict(type='bool', default=False),
non_unique=dict(type='bool', default=False),
+ gid_min=dict(type='int'),
+ gid_max=dict(type='int'),
),
supports_check_mode=True,
required_if=[
diff --git a/lib/ansible/modules/hostname.py b/lib/ansible/modules/hostname.py
index 1f0bfa03c23..4b2ee682f2e 100644
--- a/lib/ansible/modules/hostname.py
+++ b/lib/ansible/modules/hostname.py
@@ -36,7 +36,7 @@ options:
description:
- Which strategy to use to update the hostname.
- If not set we try to autodetect, but this can be problematic, particularly with containers as they can present misleading information.
- - Note that 'systemd' should be specified for RHEL/EL/CentOS 7+. Older distributions should use 'redhat'.
+ - Note that V(systemd) should be specified for RHEL/EL/CentOS 7+. Older distributions should use V(redhat).
choices: ['alpine', 'debian', 'freebsd', 'generic', 'macos', 'macosx', 'darwin', 'openbsd', 'openrc', 'redhat', 'sles', 'solaris', 'systemd']
type: str
version_added: '2.9'
@@ -516,7 +516,7 @@ class DarwinStrategy(BaseStrategy):
However, macOS also has LocalHostName and ComputerName settings.
LocalHostName controls the Bonjour/ZeroConf name, used by services
like AirDrop. This class implements a method, _scrub_hostname(), that mimics
- the transformations macOS makes on hostnames when enterened in the Sharing
+ the transformations macOS makes on hostnames when entered in the Sharing
preference pane. It replaces spaces with dashes and removes all special
characters.
@@ -886,8 +886,6 @@ def main():
if name != current_hostname:
name_before = current_hostname
- elif name != permanent_hostname:
- name_before = permanent_hostname
else:
name_before = permanent_hostname
diff --git a/lib/ansible/modules/include_vars.py b/lib/ansible/modules/include_vars.py
index 99e77cb5faa..9238682dead 100644
--- a/lib/ansible/modules/include_vars.py
+++ b/lib/ansible/modules/include_vars.py
@@ -21,20 +21,20 @@ options:
file:
description:
- The file name from which variables should be loaded.
- - If the path is relative, it will look for the file in vars/ subdirectory of a role or relative to playbook.
+ - If the path is relative, it will look for the file in C(vars/) subdirectory of a role or relative to playbook.
type: path
version_added: "2.2"
dir:
description:
- The directory name from which the variables should be loaded.
- - If the path is relative and the task is inside a role, it will look inside the role's vars/ subdirectory.
+ - If the path is relative and the task is inside a role, it will look inside the role's C(vars/) subdirectory.
- If the path is relative and not inside a role, it will be parsed relative to the playbook.
type: path
version_added: "2.2"
name:
description:
- The name of a variable into which assign the included vars.
- - If omitted (null) they will be made top level vars.
+ - If omitted (V(null)) they will be made top level vars.
type: str
version_added: "2.2"
depth:
@@ -81,8 +81,8 @@ options:
version_added: "2.12"
free-form:
description:
- - This module allows you to specify the 'file' option directly without any other options.
- - There is no 'free-form' option, this is just an indicator, see example below.
+ - This module allows you to specify the O(file) option directly without any other options.
+ - There is no O(ignore:free-form) option, this is just an indicator, see example below.
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.conn
diff --git a/lib/ansible/modules/iptables.py b/lib/ansible/modules/iptables.py
index b7fd778df91..fad9b405ae4 100644
--- a/lib/ansible/modules/iptables.py
+++ b/lib/ansible/modules/iptables.py
@@ -39,7 +39,7 @@ options:
description:
- This option specifies the packet matching table on which the command should operate.
- If the kernel is configured with automatic module loading, an attempt will be made
- to load the appropriate module for that table if it is not already there.
+ to load the appropriate module for that table if it is not already there.
type: str
choices: [ filter, nat, mangle, raw, security ]
default: filter
@@ -135,7 +135,7 @@ options:
a specific property.
- The set of matches makes up the condition under which a target is invoked.
- Matches are evaluated first to last if specified as an array and work in short-circuit
- fashion, i.e. if one extension yields false, the evaluation will stop.
+ fashion, in other words if one extension yields false, the evaluation will stop.
type: list
elements: str
default: []
@@ -153,7 +153,7 @@ options:
gateway:
description:
- This specifies the IP address of the host to send the cloned packets.
- - This option is only valid when O(jump) is set to V(TEE).
+ - This option is only valid when O(jump=TEE).
type: str
version_added: "2.8"
log_prefix:
@@ -165,7 +165,7 @@ options:
description:
- Logging level according to the syslogd-defined priorities.
- The value can be strings or numbers from 1-8.
- - This parameter is only applicable if O(jump) is set to V(LOG).
+ - This parameter is only applicable if O(jump=LOG).
type: str
version_added: "2.8"
choices: [ '0', '1', '2', '3', '4', '5', '6', '7', 'emerg', 'alert', 'crit', 'error', 'warning', 'notice', 'info', 'debug' ]
@@ -242,13 +242,13 @@ options:
type: str
to_destination:
description:
- - This specifies a destination address to use with C(DNAT).
+ - This specifies a destination address to use with O(ctstate=DNAT).
- Without this, the destination address is never altered.
type: str
version_added: "2.1"
to_source:
description:
- - This specifies a source address to use with C(SNAT).
+ - This specifies a source address to use with O(ctstate=SNAT).
- Without this, the source address is never altered.
type: str
version_added: "2.2"
@@ -342,14 +342,14 @@ options:
reject_with:
description:
- 'Specifies the error packet type to return while rejecting. It implies
- "jump: REJECT".'
+ C(jump=REJECT).'
type: str
version_added: "2.1"
icmp_type:
description:
- This allows specification of the ICMP type, which can be a numeric
ICMP type, type/code pair, or one of the ICMP type names shown by the
- command 'iptables -p icmp -h'
+ command C(iptables -p icmp -h).
type: str
version_added: "2.2"
flush:
@@ -387,10 +387,10 @@ options:
version_added: "2.13"
numeric:
description:
- - This parameter controls the running of the list -action of iptables, which is used internally by the module
- - Does not affect the actual functionality. Use this if iptables hang when creating a chain or altering policy
- - If V(true), then iptables skips the DNS-lookup of the IP addresses in a chain when it uses the list -action
- - Listing is used internally for example when setting a policy or creating a chain
+ - This parameter controls the running of the list -action of iptables, which is used internally by the module.
+ - Does not affect the actual functionality. Use this if iptables hang when creating a chain or altering policy.
+ - If V(true), then iptables skips the DNS-lookup of the IP addresses in a chain when it uses the list -action.
+ - Listing is used internally for example when setting a policy or creating a chain.
type: bool
default: false
version_added: "2.15"
@@ -848,6 +848,7 @@ def main():
required_if=[
['jump', 'TEE', ['gateway']],
['jump', 'tee', ['gateway']],
+ ['flush', False, ['chain']],
]
)
args = dict(
@@ -865,10 +866,6 @@ def main():
ip_version = module.params['ip_version']
iptables_path = module.get_bin_path(BINS[ip_version], True)
- # Check if chain option is required
- if args['flush'] is False and args['chain'] is None:
- module.fail_json(msg="Either chain or flush parameter must be specified.")
-
if module.params.get('log_prefix', None) or module.params.get('log_level', None):
if module.params['jump'] is None:
module.params['jump'] = 'LOG'
diff --git a/lib/ansible/modules/known_hosts.py b/lib/ansible/modules/known_hosts.py
index 8235258c664..b4155660e1b 100644
--- a/lib/ansible/modules/known_hosts.py
+++ b/lib/ansible/modules/known_hosts.py
@@ -10,7 +10,7 @@ DOCUMENTATION = r'''
module: known_hosts
short_description: Add or remove a host from the C(known_hosts) file
description:
- - The M(ansible.builtin.known_hosts) module lets you add or remove a host keys from the C(known_hosts) file.
+ - The M(ansible.builtin.known_hosts) module lets you add or remove host keys from the C(known_hosts) file.
- Starting at Ansible 2.2, multiple entries per host are allowed, but only one for each key type supported by ssh.
This is useful if you're going to want to use the M(ansible.builtin.git) module over ssh, for example.
- If you have a very large number of host keys to manage, you will find the M(ansible.builtin.template) module more useful.
@@ -19,7 +19,7 @@ options:
name:
aliases: [ 'host' ]
description:
- - The host to add or remove (must match a host specified in key). It will be converted to lowercase so that ssh-keygen can find it.
+ - The host to add or remove (must match a host specified in key). It will be converted to lowercase so that C(ssh-keygen) can find it.
- Must match with or present in key attribute.
- For custom SSH port, O(name) needs to specify port as well. See example section.
type: str
@@ -49,8 +49,8 @@ options:
version_added: "2.3"
state:
description:
- - V(present) to add the host key.
- - V(absent) to remove it.
+ - V(present) to add host keys.
+ - V(absent) to remove host keys.
choices: [ "absent", "present" ]
default: "present"
type: str
@@ -101,6 +101,7 @@ EXAMPLES = r'''
# state = absent|present (default: present)
import base64
+import copy
import errno
import hashlib
import hmac
@@ -118,6 +119,7 @@ def enforce_state(module, params):
Add or remove key.
"""
+ results = dict(changed=False)
host = params["name"].lower()
key = params.get("key", None)
path = params.get("path")
@@ -140,13 +142,12 @@ def enforce_state(module, params):
found, replace_or_add, found_line = search_for_host_key(module, host, key, path, sshkeygen)
- params['diff'] = compute_diff(path, found_line, replace_or_add, state, key)
+ results['diff'] = compute_diff(path, found_line, replace_or_add, state, key)
# check if we are trying to remove a non matching key,
# in that case return with no change to the host
if state == 'absent' and not found_line and key:
- params['changed'] = False
- return params
+ return results
# We will change state if found==True & state!="present"
# or found==False & state=="present"
@@ -154,15 +155,15 @@ def enforce_state(module, params):
# Alternatively, if replace is true (i.e. key present, and we must change
# it)
if module.check_mode:
- module.exit_json(changed=replace_or_add or (state == "present") != found,
- diff=params['diff'])
+ results['changed'] = replace_or_add or (state == "present") != found
+ module.exit_json(**results)
# Now do the work.
# Only remove whole host if found and no key provided
if found and not key and state == "absent":
module.run_command([sshkeygen, '-R', host, '-f', path], check_rc=True)
- params['changed'] = True
+ results['changed'] = True
# Next, add a new (or replacing) entry
if replace_or_add or found != (state == "present"):
@@ -188,9 +189,9 @@ def enforce_state(module, params):
else:
module.atomic_move(outf.name, path)
- params['changed'] = True
+ results['changed'] = True
- return params
+ return results
def sanity_check(module, host, key, sshkeygen):
@@ -364,7 +365,9 @@ def main():
supports_check_mode=True
)
- results = enforce_state(module, module.params)
+ # TODO: deprecate returning everything that was passed in
+ results = copy.copy(module.params)
+ results.update(enforce_state(module, module.params))
module.exit_json(**results)
diff --git a/lib/ansible/modules/lineinfile.py b/lib/ansible/modules/lineinfile.py
index 9e9fdd9b8ef..105fcc86604 100644
--- a/lib/ansible/modules/lineinfile.py
+++ b/lib/ansible/modules/lineinfile.py
@@ -87,13 +87,11 @@ options:
- If specified, the line will be inserted after the last match of specified regular expression.
- If the first match is required, use(firstmatch=yes).
- A special value is available; V(EOF) for inserting the line at the end of the file.
- - If specified regular expression has no matches, EOF will be used instead.
+ - If specified regular expression has no matches or no value is passed, V(EOF) will be used instead.
- If O(insertbefore) is set, default value V(EOF) will be ignored.
- If regular expressions are passed to both O(regexp) and O(insertafter), O(insertafter) is only honored if no match for O(regexp) is found.
- May not be used with O(backrefs) or O(insertbefore).
type: str
- choices: [ EOF, '*regex*' ]
- default: EOF
insertbefore:
description:
- Used with O(state=present).
@@ -104,7 +102,6 @@ options:
- If regular expressions are passed to both O(regexp) and O(insertbefore), O(insertbefore) is only honored if no match for O(regexp) is found.
- May not be used with O(backrefs) or O(insertafter).
type: str
- choices: [ BOF, '*regex*' ]
version_added: "1.1"
create:
description:
diff --git a/lib/ansible/modules/meta.py b/lib/ansible/modules/meta.py
index 0baea37d677..91b3f0403f9 100644
--- a/lib/ansible/modules/meta.py
+++ b/lib/ansible/modules/meta.py
@@ -33,7 +33,12 @@ options:
- V(end_host) (added in Ansible 2.8) is a per-host variation of V(end_play). Causes the play to end for the current host without failing it.
- V(end_batch) (added in Ansible 2.12) causes the current batch (see C(serial)) to end without failing the host(s).
Note that with C(serial=0) or undefined this behaves the same as V(end_play).
- choices: [ clear_facts, clear_host_errors, end_host, end_play, flush_handlers, noop, refresh_inventory, reset_connection, end_batch ]
+ - V(end_role) (added in Ansible 2.18) causes the currently executing role to end without failing the host(s).
+ Effectively all tasks from within a role after V(end_role) is executed are ignored. Since handlers live in a global,
+ play scope, all handlers added via the role are unaffected and are still executed if notified. It is an error
+ to call V(end_role) from outside of a role or from a handler. Note that V(end_role) does not have an effect to
+ the parent roles or roles that depend (via dependencies in meta/main.yml) on a role executing V(end_role).
+ choices: [ clear_facts, clear_host_errors, end_host, end_play, flush_handlers, noop, refresh_inventory, reset_connection, end_batch, end_role ]
required: true
extends_documentation_fragment:
- action_common_attributes
diff --git a/lib/ansible/modules/mount_facts.py b/lib/ansible/modules/mount_facts.py
new file mode 100644
index 00000000000..5982ae580ae
--- /dev/null
+++ b/lib/ansible/modules/mount_facts.py
@@ -0,0 +1,651 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2024 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import annotations
+
+
+DOCUMENTATION = """
+---
+module: mount_facts
+version_added: 2.18
+short_description: Retrieve mount information.
+description:
+ - Retrieve information about mounts from preferred sources and filter the results based on the filesystem type and device.
+options:
+ devices:
+ description: A list of fnmatch patterns to filter mounts by the special device or remote file system.
+ default: ~
+ type: list
+ elements: str
+ fstypes:
+ description: A list of fnmatch patterns to filter mounts by the type of the file system.
+ default: ~
+ type: list
+ elements: str
+ sources:
+ description:
+ - A list of sources used to determine the mounts. Missing file sources (or empty files) are skipped. Repeat sources, including symlinks, are skipped.
+ - The C(mount_points) return value contains the first definition found for a mount point.
+ - Additional mounts to the same mount point are available from C(aggregate_mounts) (if enabled).
+ - By default, mounts are retrieved from all of the standard locations, which have the predefined aliases V(all)/V(static)/V(dynamic).
+ - V(all) contains V(dynamic) and V(static).
+ - V(dynamic) contains V(/etc/mtab), V(/proc/mounts), V(/etc/mnttab), and the value of O(mount_binary) if it is not None.
+ This allows platforms like BSD or AIX, which don't have an equivalent to V(/proc/mounts), to collect the current mounts by default.
+ See the O(mount_binary) option to disable the fall back or configure a different executable.
+ - V(static) contains V(/etc/fstab), V(/etc/vfstab), and V(/etc/filesystems).
+ Note that V(/etc/filesystems) is specific to AIX. The Linux file by this name has a different format/purpose and is ignored.
+ - The value of O(mount_binary) can be configured as a source, which will cause it to always execute.
+ Depending on the other sources configured, this could be inefficient/redundant.
+ For example, if V(/proc/mounts) and V(mount) are listed as O(sources), Linux hosts will retrieve the same mounts twice.
+ default: ~
+ type: list
+ elements: str
+ mount_binary:
+ description:
+ - The O(mount_binary) is used if O(sources) contain the value "mount", or if O(sources) contains a dynamic
+ source, and none were found (as can be expected on BSD or AIX hosts).
+ - Set to V(null) to stop after no dynamic file source is found instead.
+ type: raw
+ default: mount
+ timeout:
+ description:
+ - This is the maximum number of seconds to wait for each mount to complete. When this is V(null), wait indefinitely.
+ - Configure in conjunction with O(on_timeout) to skip unresponsive mounts.
+ - This timeout also applies to the O(mount_binary) command to list mounts.
+ - If the module is configured to run during the play's fact gathering stage, set a timeout using module_defaults to prevent a hang (see example).
+ type: float
+ on_timeout:
+ description:
+ - The action to take when gathering mount information exceeds O(timeout).
+ type: str
+ default: error
+ choices:
+ - error
+ - warn
+ - ignore
+ include_aggregate_mounts:
+ description:
+ - Whether or not the module should return the C(aggregate_mounts) list in C(ansible_facts).
+ - When this is V(null), a warning will be emitted if multiple mounts for the same mount point are found.
+ default: ~
+ type: bool
+extends_documentation_fragment:
+ - action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+author:
+ - Ansible Core Team
+ - Sloane Hertel (@s-hertel)
+"""
+
+EXAMPLES = """
+- name: Get non-local devices
+ mount_facts:
+ devices: "[!/]*"
+
+- name: Get FUSE subtype mounts
+ mount_facts:
+ fstypes:
+ - "fuse.*"
+
+- name: Get NFS mounts during gather_facts with timeout
+ hosts: all
+ gather_facts: true
+ vars:
+ ansible_facts_modules:
+ - ansible.builtin.mount_facts
+ module_defaults:
+ ansible.builtin.mount_facts:
+ timeout: 10
+ fstypes:
+ - nfs
+ - nfs4
+
+- name: Get mounts from a non-default location
+ mount_facts:
+ sources:
+ - /usr/etc/fstab
+
+- name: Get mounts from the mount binary
+ mount_facts:
+ sources:
+ - mount
+ mount_binary: /sbin/mount
+"""
+
+RETURN = """
+ansible_facts:
+ description:
+ - An ansible_facts dictionary containing a dictionary of C(mount_points) and list of C(aggregate_mounts) when enabled.
+ - Each key in C(mount_points) is a mount point, and the value contains mount information (similar to C(ansible_facts["mounts"])).
+ Each value also contains the key C(ansible_context), with details about the source and line(s) corresponding to the parsed mount point.
+ - When C(aggregate_mounts) are included, the containing dictionaries are the same format as the C(mount_point) values.
+ returned: on success
+ type: dict
+ sample:
+ mount_points:
+ /proc/sys/fs/binfmt_misc:
+ ansible_context:
+ source: /proc/mounts
+ source_data: "systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=33850 0 0"
+ block_available: 0
+ block_size: 4096
+ block_total: 0
+ block_used: 0
+ device: "systemd-1"
+ dump: 0
+ fstype: "autofs"
+ inode_available: 0
+ inode_total: 0
+ inode_used: 0
+ mount: "/proc/sys/fs/binfmt_misc"
+ options: "rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=33850"
+ passno: 0
+ size_available: 0
+ size_total: 0
+ uuid: null
+ aggregate_mounts:
+ - ansible_context:
+ source: /proc/mounts
+ source_data: "systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=33850 0 0"
+ block_available: 0
+ block_size: 4096
+ block_total: 0
+ block_used: 0
+ device: "systemd-1"
+ dump: 0
+ fstype: "autofs"
+ inode_available: 0
+ inode_total: 0
+ inode_used: 0
+ mount: "/proc/sys/fs/binfmt_misc"
+ options: "rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=33850"
+ passno: 0
+ size_available: 0
+ size_total: 0
+ uuid: null
+ - ansible_context:
+ source: /proc/mounts
+ source_data: "binfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,nosuid,nodev,noexec,relatime 0 0"
+ block_available: 0
+ block_size: 4096
+ block_total: 0
+ block_used: 0
+ device: binfmt_misc
+ dump: 0
+ fstype: binfmt_misc
+ inode_available: 0
+ inode_total: 0
+ inode_used: 0
+ mount: "/proc/sys/fs/binfmt_misc"
+ options: "rw,nosuid,nodev,noexec,relatime"
+ passno: 0
+ size_available: 0
+ size_total: 0
+ uuid: null
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.facts import timeout as _timeout
+from ansible.module_utils.facts.utils import get_mount_size, get_file_content
+
+from contextlib import suppress
+from dataclasses import astuple, dataclass
+from fnmatch import fnmatch
+
+import codecs
+import datetime
+import functools
+import os
+import re
+import subprocess
+import typing as t
+
+STATIC_SOURCES = ["/etc/fstab", "/etc/vfstab", "/etc/filesystems"]
+DYNAMIC_SOURCES = ["/etc/mtab", "/proc/mounts", "/etc/mnttab"]
+
+# AIX and BSD don't have a file-based dynamic source, so the module also supports running a mount binary to collect these.
+# Pattern for Linux, including OpenBSD and NetBSD
+LINUX_MOUNT_RE = re.compile(r"^(?P<device>\S+) on (?P<mount>\S+) type (?P<fstype>\S+) \((?P<options>.+)\)$")
+# Pattern for other BSD including FreeBSD, DragonFlyBSD, and MacOS
+BSD_MOUNT_RE = re.compile(r"^(?P<device>\S+) on (?P<mount>\S+) \((?P<fstype>.+)\)$")
+# Pattern for AIX, example in https://www.ibm.com/docs/en/aix/7.2?topic=m-mount-command
+AIX_MOUNT_RE = re.compile(r"^(?P\S*)\s+(?P\S+)\s+(?P\S+)\s+(?P\S+)\s+(?P