diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml
index 50f92313447..d2078a43cf0 100644
--- a/.azure-pipelines/azure-pipelines.yml
+++ b/.azure-pipelines/azure-pipelines.yml
@@ -31,7 +31,7 @@ variables:
- name: fetchDepth
value: 500
- name: defaultContainer
- value: quay.io/ansible/azure-pipelines-test-container:4.0.1
+ value: quay.io/ansible/azure-pipelines-test-container:6.0.0
pool: Standard
@@ -54,12 +54,12 @@ stages:
nameFormat: Python {0}
testFormat: units/{0}
targets:
- - test: 3.7
- test: 3.8
- test: 3.9
- test: '3.10'
- test: 3.11
- test: 3.12
+ - test: 3.13
- stage: Windows
dependsOn: []
jobs:
@@ -68,35 +68,46 @@ stages:
nameFormat: Server {0}
testFormat: windows/{0}/1
targets:
- - test: 2016
- - test: 2019
- - test: 2022
+ - name: 2016 WinRM HTTP
+ test: 2016/winrm/http
+ - name: 2019 WinRM HTTPS
+ test: 2019/winrm/https
+ - name: 2022 WinRM HTTPS
+ test: 2022/winrm/https
+ - name: 2022 PSRP HTTP
+ test: 2022/psrp/http
+ - name: 2022 SSH Key
+ test: 2022/ssh/key
- stage: Remote
dependsOn: []
jobs:
- template: templates/matrix.yml # context/target
parameters:
targets:
- - name: macOS 13.2
- test: macos/13.2
- - name: RHEL 9.3 py39
- test: rhel/9.3@3.9
- - name: RHEL 9.3 py311
- test: rhel/9.3@3.11
- - name: FreeBSD 13.2
- test: freebsd/13.2
+ - name: macOS 14.3
+ test: macos/14.3
+ - name: RHEL 9.4 py39
+ test: rhel/9.4@3.9
+ - name: RHEL 9.4 py312
+ test: rhel/9.4@3.12
+ - name: FreeBSD 13.3
+ test: freebsd/13.3
+ - name: FreeBSD 14.1
+ test: freebsd/14.1
groups:
- 1
- 2
- template: templates/matrix.yml # context/controller
parameters:
targets:
- - name: macOS 13.2
- test: macos/13.2
- - name: RHEL 9.3
- test: rhel/9.3
- - name: FreeBSD 13.2
- test: freebsd/13.2
+ - name: macOS 14.3
+ test: macos/14.3
+ - name: RHEL 9.4
+ test: rhel/9.4
+ - name: FreeBSD 13.3
+ test: freebsd/13.3
+ - name: FreeBSD 14.1
+ test: freebsd/14.1
groups:
- 3
- 4
@@ -104,44 +115,44 @@ stages:
- template: templates/matrix.yml # context/controller (ansible-test container management)
parameters:
targets:
- - name: Alpine 3.18
- test: alpine/3.18
- - name: Fedora 39
- test: fedora/39
- - name: RHEL 9.3
- test: rhel/9.3
- - name: Ubuntu 22.04
- test: ubuntu/22.04
+ - name: Alpine 3.20
+ test: alpine/3.20
+ - name: Fedora 40
+ test: fedora/40
+ - name: RHEL 9.4
+ test: rhel/9.4
+ - name: Ubuntu 24.04
+ test: ubuntu/24.04
groups:
- 6
- stage: Docker
dependsOn: []
jobs:
- - template: templates/matrix.yml
+ - template: templates/matrix.yml # context/target
parameters:
testFormat: linux/{0}
targets:
- - name: Alpine 3
- test: alpine3
- - name: Fedora 39
- test: fedora39
- - name: Ubuntu 20.04
- test: ubuntu2004
+ - name: Alpine 3.20
+ test: alpine320
+ - name: Fedora 40
+ test: fedora40
- name: Ubuntu 22.04
test: ubuntu2204
+ - name: Ubuntu 24.04
+ test: ubuntu2404
groups:
- 1
- 2
- - template: templates/matrix.yml
+ - template: templates/matrix.yml # context/controller
parameters:
testFormat: linux/{0}
targets:
- - name: Alpine 3
- test: alpine3
- - name: Fedora 39
- test: fedora39
- - name: Ubuntu 22.04
- test: ubuntu2204
+ - name: Alpine 3.20
+ test: alpine320
+ - name: Fedora 40
+ test: fedora40
+ - name: Ubuntu 24.04
+ test: ubuntu2404
groups:
- 3
- 4
@@ -154,9 +165,9 @@ stages:
nameFormat: Python {0}
testFormat: galaxy/{0}/1
targets:
- - test: '3.10'
- test: 3.11
- test: 3.12
+ - test: 3.13
- stage: Generic
dependsOn: []
jobs:
@@ -165,9 +176,9 @@ stages:
nameFormat: Python {0}
testFormat: generic/{0}/1
targets:
- - test: '3.10'
- test: 3.11
- test: 3.12
+ - test: 3.13
- stage: Incidental_Windows
displayName: Incidental Windows
dependsOn: []
@@ -177,9 +188,16 @@ stages:
nameFormat: Server {0}
testFormat: i/windows/{0}
targets:
- - test: 2016
- - test: 2019
- - test: 2022
+ - name: 2016 WinRM HTTP
+ test: 2016/winrm/http
+ - name: 2019 WinRM HTTPS
+ test: 2019/winrm/https
+ - name: 2022 WinRM HTTPS
+ test: 2022/winrm/https
+ - name: 2022 PSRP HTTP
+ test: 2022/psrp/http
+ - name: 2022 SSH Key
+ test: 2022/ssh/key
- stage: Incidental
dependsOn: []
jobs:
@@ -189,8 +207,6 @@ stages:
targets:
- name: IOS Python
test: ios/csr1000v/
- - name: VyOS Python
- test: vyos/1.1.8/
- stage: Summary
condition: succeededOrFailed()
dependsOn:
diff --git a/.azure-pipelines/commands/incidental/vyos.sh b/.azure-pipelines/commands/incidental/vyos.sh
deleted file mode 120000
index cad3e41b707..00000000000
--- a/.azure-pipelines/commands/incidental/vyos.sh
+++ /dev/null
@@ -1 +0,0 @@
-network.sh
\ No newline at end of file
diff --git a/.azure-pipelines/commands/incidental/windows.sh b/.azure-pipelines/commands/incidental/windows.sh
index 24272f62baf..f5a3070c457 100755
--- a/.azure-pipelines/commands/incidental/windows.sh
+++ b/.azure-pipelines/commands/incidental/windows.sh
@@ -6,6 +6,8 @@ declare -a args
IFS='/:' read -ra args <<< "$1"
version="${args[1]}"
+connection="${args[2]}"
+connection_setting="${args[3]}"
target="shippable/windows/incidental/"
@@ -26,11 +28,7 @@ if [ -s /tmp/windows.txt ] || [ "${CHANGED:+$CHANGED}" == "" ]; then
echo "Detected changes requiring integration tests specific to Windows:"
cat /tmp/windows.txt
- echo "Running Windows integration tests for multiple versions concurrently."
-
- platforms=(
- --windows "${version}"
- )
+ echo "Running Windows integration tests for the version ${version}."
else
echo "No changes requiring integration tests specific to Windows were detected."
echo "Running Windows integration tests for a single version only: ${single_version}"
@@ -39,14 +37,10 @@ else
echo "Skipping this job since it is for: ${version}"
exit 0
fi
-
- platforms=(
- --windows "${version}"
- )
fi
# shellcheck disable=SC2086
ansible-test windows-integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
- "${platforms[@]}" \
- --docker default --python "${python_default}" \
- --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
+ --controller "docker:default,python=${python_default}" \
+ --target "remote:windows/${version},connection=${connection}+${connection_setting},provider=${provider}" \
+ --remote-terminate always --remote-stage "${stage}"
diff --git a/.azure-pipelines/commands/windows.sh b/.azure-pipelines/commands/windows.sh
index 693d4f24bdc..622eb9e2d5e 100755
--- a/.azure-pipelines/commands/windows.sh
+++ b/.azure-pipelines/commands/windows.sh
@@ -6,7 +6,9 @@ declare -a args
IFS='/:' read -ra args <<< "$1"
version="${args[1]}"
-group="${args[2]}"
+connection="${args[2]}"
+connection_setting="${args[3]}"
+group="${args[4]}"
target="shippable/windows/group${group}/"
@@ -31,11 +33,7 @@ if [ -s /tmp/windows.txt ] || [ "${CHANGED:+$CHANGED}" == "" ]; then
echo "Detected changes requiring integration tests specific to Windows:"
cat /tmp/windows.txt
- echo "Running Windows integration tests for multiple versions concurrently."
-
- platforms=(
- --windows "${version}"
- )
+ echo "Running Windows integration tests for the version ${version}."
else
echo "No changes requiring integration tests specific to Windows were detected."
echo "Running Windows integration tests for a single version only: ${single_version}"
@@ -44,17 +42,13 @@ else
echo "Skipping this job since it is for: ${version}"
exit 0
fi
-
- platforms=(
- --windows "${version}"
- )
fi
-for version in "${python_versions[@]}"; do
+for py_version in "${python_versions[@]}"; do
changed_all_target="all"
changed_all_mode="default"
- if [ "${version}" == "${python_default}" ]; then
+ if [ "${py_version}" == "${python_default}" ]; then
# smoketest tests
if [ "${CHANGED}" ]; then
# with change detection enabled run tests for anything changed
@@ -80,7 +74,7 @@ for version in "${python_versions[@]}"; do
fi
# terminate remote instances on the final python version tested
- if [ "${version}" = "${python_versions[-1]}" ]; then
+ if [ "${py_version}" = "${python_versions[-1]}" ]; then
terminate="always"
else
terminate="never"
@@ -88,7 +82,8 @@ for version in "${python_versions[@]}"; do
# shellcheck disable=SC2086
ansible-test windows-integration --color -v --retry-on-error "${ci}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
- "${platforms[@]}" --changed-all-target "${changed_all_target}" --changed-all-mode "${changed_all_mode}" \
- --docker default --python "${version}" \
- --remote-terminate "${terminate}" --remote-stage "${stage}" --remote-provider "${provider}"
+ --changed-all-target "${changed_all_target}" --changed-all-mode "${changed_all_mode}" \
+ --controller "docker:default,python=${py_version}" \
+ --target "remote:windows/${version},connection=${connection}+${connection_setting},provider=${provider}" \
+ --remote-terminate "${terminate}" --remote-stage "${stage}"
done
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 35b4523e017..fc15ea5dfc2 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -4,9 +4,14 @@ Hi! Nice to see you here!
## QUESTIONS ?
-Please see the [Community Guide](https://docs.ansible.com/ansible/latest/community/index.html) for information on how to ask questions on the [mailing lists](https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information) and IRC.
+If you have questions about anything related to Ansible, get in touch with us!
+See [Communicating with the Ansible community](https://docs.ansible.com/ansible/devel/community/communication.html) to find out how.
-The GitHub issue tracker is not the best place for questions for various reasons, but both IRC and the mailing list are very helpful places for those things, as the community page explains best.
+The [Community Guide](https://docs.ansible.com/ansible/devel/community/index.html) also explains how to contribute
+and interact with the project, including how to submit bug reports and code to Ansible.
+
+Please note that the GitHub issue tracker is not the best place to ask questions for several reasons.
+You'll get more helpful, and quicker, responses in the forum.
## CONTRIBUTING ?
@@ -14,15 +19,18 @@ By contributing to this project you agree to the [Developer Certificate of Origi
The Ansible project is licensed under the [GPL-3.0](COPYING) or later. Some portions of the code fall under other licenses as noted in individual files.
-The Ansible project accepts contributions through GitHub pull requests. Please review the [Community Guide](https://docs.ansible.com/ansible/latest/community/index.html) for more information on contributing to Ansible.
+The Ansible project accepts contributions through GitHub pull requests.
+Please review the [Community Guide](https://docs.ansible.com/ansible/devel/community/index.html) for more information on contributing to Ansible.
## BUG TO REPORT ?
-First and foremost, also check the [Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).
+First and foremost, also check the [Community Guide](https://docs.ansible.com/ansible/devel/community/index.html).
-You can report bugs or make enhancement requests at the [Ansible GitHub issue page](http://github.com/ansible/ansible/issues/new/choose) by filling out the issue template that will be presented.
+You can report bugs or make enhancement requests at
+the [Ansible GitHub issue page](http://github.com/ansible/ansible/issues/new/choose) by filling out the issue template that will be presented.
-Also please make sure you are testing on the latest released version of Ansible or the development branch; see the [Installation Guide](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) for details.
+Also please make sure you are testing on the latest released version of Ansible or the development branch.
+See the [Installation Guide](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) for details.
Thanks!
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index 3159784d158..8f4944c43c0 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -19,13 +19,14 @@ body:
Also test if the latest release and devel branch are affected too.
- **Tip:** If you are seeking community support, please consider
- [starting a mailing list thread or chatting in IRC][ML||IRC].
+ **Tip:** If you are seeking community support, please see
+ [Communicating with the Ansible community][communication] to
+ get in touch and ask questions.
- [ML||IRC]:
- https://docs.ansible.com/ansible-core/devel/community/communication.html?utm_medium=github&utm_source=issue_form--bug_report.yml#mailing-list-information
+ [communication]:
+ https://docs.ansible.com/ansible/devel/community/communication.html
[issue search]: ../search?q=is%3Aissue&type=issues
@@ -54,7 +55,7 @@ body:
Why?
- We would do it by ourselves but unfortunatelly, the curent
+ We would do it by ourselves but unfortunately, the current
edition of GitHub Issue Forms Alpha does not support this yet 🤷
@@ -258,7 +259,7 @@ body:
description: |
Read the [Ansible Code of Conduct][CoC] first.
- [CoC]: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--bug_report.yml
+ [CoC]: https://docs.ansible.com/ansible/devel/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--bug_report.yml
options:
- label: I agree to follow the Ansible Code of Conduct
required: true
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index 74ec5696fdf..6aa4a2b7647 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -2,7 +2,7 @@
blank_issues_enabled: false # default: true
contact_links:
- name: 🔐 Security bug report 🔥
- url: https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser
+ url: https://docs.ansible.com/ansible/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser
about: |
Please learn how to report security vulnerabilities here.
@@ -11,12 +11,12 @@ contact_links:
a prompt response.
For more information, see
- https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html
+ https://docs.ansible.com/ansible/devel/community/reporting_bugs_and_features.html
- name: 📝 Ansible Code of Conduct
- url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser
+ url: https://docs.ansible.com/ansible/devel/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser
about: ❤ Be nice to other members of the community. ☮ Behave.
-- name: 💬 Talks to the community
- url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information
+- name: 💬 Talk to the community
+ url: https://docs.ansible.com/ansible/devel/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information
about: Please ask and answer usage questions here
- name: ⚡ Working groups
url: https://github.com/ansible/community/wiki
diff --git a/.github/ISSUE_TEMPLATE/documentation_report.yml b/.github/ISSUE_TEMPLATE/documentation_report.yml
index ca62bb55a77..efe8d1c2035 100644
--- a/.github/ISSUE_TEMPLATE/documentation_report.yml
+++ b/.github/ISSUE_TEMPLATE/documentation_report.yml
@@ -22,12 +22,14 @@ body:
Also test if the latest release and devel branch are affected too.
- **Tip:** If you are seeking community support, please consider
- [starting a mailing list thread or chatting in IRC][ML||IRC].
+ **Tip:** If you are seeking community support, please see
+ [Communicating with the Ansible community][communication] to
+ get in touch and ask questions.
- [ML||IRC]:
- https://docs.ansible.com/ansible-core/devel/community/communication.html?utm_medium=github&utm_source=issue_form--documentation_report.yml#mailing-list-information
+
+ [communication]:
+ https://docs.ansible.com/ansible/devel/community/communication.html
[issue search]: ../search?q=is%3Aissue&type=issues
@@ -205,7 +207,7 @@ body:
description: |
Read the [Ansible Code of Conduct][CoC] first.
- [CoC]: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--documentation_report.yml
+ [CoC]: https://docs.ansible.com/ansible/devel/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--documentation_report.yml
options:
- label: I agree to follow the Ansible Code of Conduct
required: true
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
index dd39c40de1c..2fce680fe64 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.yml
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -21,8 +21,7 @@ body:
If unsure, consider filing a [new proposal] instead outlining your
use-cases, the research and implementation considerations. Then,
- start a discussion on one of the public [IRC meetings] we have just
- for this.
+ start a discussion in the [Ansible forum][forum].
@@ -44,21 +43,22 @@ body:
Also test if the devel branch does not already implement this.
- **Tip:** If you are seeking community support, please consider
- [starting a mailing list thread or chatting in IRC][ML||IRC].
+ **Tip:** If you are seeking community support, please see
+ [Communicating with the Ansible community][communication] to
+ get in touch and ask questions.
[contribute to collections]:
https://docs.ansible.com/ansible-core/devel/community/contributing_maintained_collections.html?utm_medium=github&utm_source=issue_form--feature_request.yml
- [IRC meetings]:
- https://docs.ansible.com/ansible-core/devel/community/communication.html?utm_medium=github&utm_source=issue_form--feature_request.yml#irc-meetings
+ [communication]:
+ https://docs.ansible.com/ansible/devel/community/communication.html
[issue search]: ../search?q=is%3Aissue&type=issues
- [ML||IRC]:
- https://docs.ansible.com/ansible-core/devel/community/communication.html?utm_medium=github&utm_source=issue_form--feature_request.yml#mailing-list-information
+  [forum]:
+ https://forum.ansible.com/c/help/6
[new proposal]: ../../proposals/issues/new
@@ -109,7 +109,7 @@ body:
Why?
- We would do it by ourselves but unfortunatelly, the curent
+ We would do it by ourselves but unfortunately, the current
edition of GitHub Issue Forms Alpha does not support this yet 🤷
@@ -185,7 +185,7 @@ body:
description: |
Read the [Ansible Code of Conduct][CoC] first.
- [CoC]: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--feature_request.yml
+ [CoC]: https://docs.ansible.com/ansible/devel/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--feature_request.yml
options:
- label: I agree to follow the Ansible Code of Conduct
required: true
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
deleted file mode 100644
index 33504c1d708..00000000000
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ /dev/null
@@ -1,25 +0,0 @@
-##### SUMMARY
-
-
-
-
-
-##### ISSUE TYPE
-
-
-
-- Bugfix Pull Request
-- Docs Pull Request
-- Feature Pull Request
-- Test Pull Request
-
-##### ADDITIONAL INFORMATION
-
-
-
-
-
-
-```paste below
-
-```
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 120000
index 00000000000..c8ecb720058
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1 @@
+PULL_REQUEST_TEMPLATE/Unclear purpose or motivation.md
\ No newline at end of file
diff --git a/.github/PULL_REQUEST_TEMPLATE/Bug fix.md b/.github/PULL_REQUEST_TEMPLATE/Bug fix.md
new file mode 100644
index 00000000000..b400b336dff
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE/Bug fix.md
@@ -0,0 +1,20 @@
+##### SUMMARY
+
+
+
+
+
+##### ISSUE TYPE
+
+- Bugfix Pull Request
+
+##### ADDITIONAL INFORMATION
+
+
+
+
+
+
+```paste below
+
+```
diff --git a/.github/PULL_REQUEST_TEMPLATE/Documentation change.md b/.github/PULL_REQUEST_TEMPLATE/Documentation change.md
new file mode 100644
index 00000000000..c62ff7bfc55
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE/Documentation change.md
@@ -0,0 +1,19 @@
+##### SUMMARY
+
+
+
+
+
+##### ISSUE TYPE
+
+- Docs Pull Request
+
+##### ADDITIONAL INFORMATION
+
+
+
+
+
+```paste below
+
+```
diff --git a/.github/PULL_REQUEST_TEMPLATE/New feature.md b/.github/PULL_REQUEST_TEMPLATE/New feature.md
new file mode 100644
index 00000000000..9e10c45d5d4
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE/New feature.md
@@ -0,0 +1,19 @@
+##### SUMMARY
+
+
+
+
+
+##### ISSUE TYPE
+
+- Feature Pull Request
+
+##### ADDITIONAL INFORMATION
+
+
+
+
+
+```paste below
+
+```
diff --git a/.github/PULL_REQUEST_TEMPLATE/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE/PULL_REQUEST_TEMPLATE.md
new file mode 120000
index 00000000000..3df4f489ad7
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1 @@
+Unclear purpose or motivation.md
\ No newline at end of file
diff --git a/.github/PULL_REQUEST_TEMPLATE/Tests.md b/.github/PULL_REQUEST_TEMPLATE/Tests.md
new file mode 100644
index 00000000000..b059793b49a
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE/Tests.md
@@ -0,0 +1,20 @@
+##### SUMMARY
+
+
+
+
+
+##### ISSUE TYPE
+
+- Test Pull Request
+
+##### ADDITIONAL INFORMATION
+
+
+
+
+
+
+```paste below
+
+```
diff --git a/.github/PULL_REQUEST_TEMPLATE/Unclear purpose or motivation.md b/.github/PULL_REQUEST_TEMPLATE/Unclear purpose or motivation.md
new file mode 100644
index 00000000000..33504c1d708
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE/Unclear purpose or motivation.md
@@ -0,0 +1,25 @@
+##### SUMMARY
+
+
+
+
+
+##### ISSUE TYPE
+
+
+
+- Bugfix Pull Request
+- Docs Pull Request
+- Feature Pull Request
+- Test Pull Request
+
+##### ADDITIONAL INFORMATION
+
+
+
+
+
+
+```paste below
+
+```
diff --git a/.github/RELEASE_NAMES.txt b/.github/RELEASE_NAMES.txt
index 86e1d1454a3..17d96a6897e 100644
--- a/.github/RELEASE_NAMES.txt
+++ b/.github/RELEASE_NAMES.txt
@@ -1,3 +1,5 @@
+2.19.0 What Is and What Should Never Be
+2.18.0 Fool in the Rain
2.17.0 Gallows Pole
2.16.0 All My Love
2.15.0 Ten Years Gone
diff --git a/.gitignore b/.gitignore
index 8b244f60ee7..57019fd1ab6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -92,6 +92,8 @@ Vagrantfile
/lib/ansible_base.egg-info/
# First used in the `devel` branch during Ansible 2.11 development.
/lib/ansible_core.egg-info/
+# First used in the `devel` branch during Ansible 2.18 development.
+/ansible_core.egg-info/
# vendored lib dir
lib/ansible/_vendor/*
!lib/ansible/_vendor/__init__.py
diff --git a/MANIFEST.in b/MANIFEST.in
index bf7a6a047e2..fa609f52e9a 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,11 +1,10 @@
include COPYING
-include bin/*
include changelogs/CHANGELOG*.rst
include changelogs/changelog.yaml
include licenses/*.txt
include requirements.txt
recursive-include packaging *.py *.j2
recursive-include test/integration *
-recursive-include test/sanity *.in *.json *.py *.txt
+recursive-include test/sanity *.in *.json *.py *.txt *.ini
recursive-include test/support *.py *.ps1 *.psm1 *.cs *.md
recursive-include test/units *
diff --git a/README.md b/README.md
index baf22c0e052..9685e77748d 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,9 @@
[![PyPI version](https://img.shields.io/pypi/v/ansible-core.svg)](https://pypi.org/project/ansible-core)
[![Docs badge](https://img.shields.io/badge/docs-latest-brightgreen.svg)](https://docs.ansible.com/ansible/latest/)
-[![Chat badge](https://img.shields.io/badge/chat-IRC-brightgreen.svg)](https://docs.ansible.com/ansible/latest/community/communication.html)
+[![Chat badge](https://img.shields.io/badge/chat-IRC-brightgreen.svg)](https://docs.ansible.com/ansible/devel/community/communication.html)
[![Build Status](https://dev.azure.com/ansible/ansible/_apis/build/status/CI?branchName=devel)](https://dev.azure.com/ansible/ansible/_build/latest?definitionId=20&branchName=devel)
-[![Ansible Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-Ansible-silver.svg)](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html)
-[![Ansible mailing lists](https://img.shields.io/badge/mailing%20lists-Ansible-orange.svg)](https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information)
+[![Ansible Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-Ansible-silver.svg)](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html)
+[![Ansible mailing lists](https://img.shields.io/badge/mailing%20lists-Ansible-orange.svg)](https://docs.ansible.com/ansible/devel/community/communication.html#mailing-list-information)
[![Repository License](https://img.shields.io/badge/license-GPL%20v3.0-brightgreen.svg)](COPYING)
[![Ansible CII Best Practices certification](https://bestpractices.coreinfrastructure.org/projects/2372/badge)](https://bestpractices.coreinfrastructure.org/projects/2372)
@@ -40,21 +40,33 @@ features and fixes, directly. Although it is reasonably stable, you are more lik
breaking changes when running the `devel` branch. We recommend getting involved
in the Ansible community if you want to run the `devel` branch.
-## Get Involved
+## Communication
-* Read [Community Information](https://docs.ansible.com/ansible/latest/community) for all
+Join the Ansible forum to ask questions, get help, and interact with the
+community.
+
+* [Get Help](https://forum.ansible.com/c/help/6): Find help or share your Ansible knowledge to help others.
+ Use tags to filter and subscribe to posts, such as the following:
+ * Posts tagged with [ansible](https://forum.ansible.com/tag/ansible)
+ * Posts tagged with [ansible-core](https://forum.ansible.com/tag/ansible-core)
+ * Posts tagged with [playbook](https://forum.ansible.com/tag/playbook)
+* [Social Spaces](https://forum.ansible.com/c/chat/4): Meet and interact with fellow enthusiasts.
+* [News & Announcements](https://forum.ansible.com/c/news/5): Track project-wide announcements including social events.
+* [Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn): Get release announcements and important changes.
+
+For more ways to get in touch, see [Communicating with the Ansible community](https://docs.ansible.com/ansible/devel/community/communication.html).
+
+## Contribute to Ansible
+
+* Check out the [Contributor's Guide](./.github/CONTRIBUTING.md).
+* Read [Community Information](https://docs.ansible.com/ansible/devel/community) for all
kinds of ways to contribute to and interact with the project,
- including mailing list information and how to submit bug reports and
- code to Ansible.
-* Join a [Working Group](https://github.com/ansible/community/wiki),
- an organized community devoted to a specific technology domain or platform.
+ including how to submit bug reports and code to Ansible.
* Submit a proposed code update through a pull request to the `devel` branch.
* Talk to us before making larger changes
to avoid duplicate efforts. This not only helps everyone
know what is going on, but it also helps save time and effort if we decide
some changes are needed.
-* For a list of email lists, IRC channels and Working Groups, see the
- [Communication page](https://docs.ansible.com/ansible/latest/community/communication.html)
## Coding Guidelines
@@ -67,7 +79,7 @@ We document our Coding Guidelines in the [Developer Guide](https://docs.ansible.
* The `devel` branch corresponds to the release actively under development.
* The `stable-2.X` branches correspond to stable releases.
-* Create a branch based on `devel` and set up a [dev environment](https://docs.ansible.com/ansible/latest/dev_guide/developing_modules_general.html#common-environment-setup) if you want to open a PR.
+* Create a branch based on `devel` and set up a [dev environment](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_general.html#common-environment-setup) if you want to open a PR.
* See the [Ansible release and maintenance](https://docs.ansible.com/ansible/devel/reference_appendices/release_and_maintenance.html) page for information about active branches.
## Roadmap
diff --git a/bin/ansible-connection b/bin/ansible-connection
deleted file mode 120000
index a20affdbe6a..00000000000
--- a/bin/ansible-connection
+++ /dev/null
@@ -1 +0,0 @@
-../lib/ansible/cli/scripts/ansible_connection_cli_stub.py
\ No newline at end of file
diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml
index 326d4c1567c..231ace8c768 100644
--- a/changelogs/changelog.yaml
+++ b/changelogs/changelog.yaml
@@ -1,2 +1,2 @@
-ancestor: 2.16.0
+ancestor: 2.18.0
releases: {}
diff --git a/changelogs/fragments/49809_apt_repository.yml b/changelogs/fragments/49809_apt_repository.yml
deleted file mode 100644
index 0a37a7ef183..00000000000
--- a/changelogs/fragments/49809_apt_repository.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-bugfixes:
-- apt_repository - do not modify repo files if the file is a symlink (https://github.com/ansible/ansible/issues/49809).
diff --git a/changelogs/fragments/76727-chattr-fix-for-backups-of-symlinks.yml b/changelogs/fragments/76727-chattr-fix-for-backups-of-symlinks.yml
deleted file mode 100644
index fe2c0946411..00000000000
--- a/changelogs/fragments/76727-chattr-fix-for-backups-of-symlinks.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - check if there are attributes to set before attempting to set them (https://github.com/ansible/ansible/issues/76727)
diff --git a/changelogs/fragments/77077_iptables.yml b/changelogs/fragments/77077_iptables.yml
deleted file mode 100644
index 5060b231868..00000000000
--- a/changelogs/fragments/77077_iptables.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-bugfixes:
-- iptables - set jump to DSCP when set_dscp_mark or set_dscp_mark_class is set (https://github.com/ansible/ansible/issues/77077).
diff --git a/changelogs/fragments/77336-include_role-callbacks-stats.yml b/changelogs/fragments/77336-include_role-callbacks-stats.yml
deleted file mode 100644
index 55133d49e3e..00000000000
--- a/changelogs/fragments/77336-include_role-callbacks-stats.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - "``include_role`` - properly execute ``v2_playbook_on_include`` and ``v2_runner_on_failed`` callbacks as well as increase ``ok`` and ``failed`` stats in the play recap, when appropriate (https://github.com/ansible/ansible/issues/77336)"
diff --git a/changelogs/fragments/78703_undefined.yml b/changelogs/fragments/78703_undefined.yml
deleted file mode 100644
index d9e4a48770e..00000000000
--- a/changelogs/fragments/78703_undefined.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-bugfixes:
- - Do not print undefined error message twice (https://github.com/ansible/ansible/issues/78703).
diff --git a/changelogs/fragments/79945-host_group_vars-improvements.yml b/changelogs/fragments/79945-host_group_vars-improvements.yml
deleted file mode 100644
index 684ecdb72dc..00000000000
--- a/changelogs/fragments/79945-host_group_vars-improvements.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-bugfixes:
- - Cache host_group_vars after instantiating it once and limit the amount of repetitive work it needs to do every time it runs.
- - Call PluginLoader.all() once for vars plugins, and load vars plugins that run automatically or are enabled specifically by name subsequently.
-deprecated_features:
- - Old style vars plugins which use the entrypoints `get_host_vars` or `get_group_vars` are deprecated. The plugin should be updated to inherit from `BaseVarsPlugin` and define a `get_vars` method as the entrypoint.
diff --git a/changelogs/fragments/80110-repos-gpgcheck.yml b/changelogs/fragments/80110-repos-gpgcheck.yml
deleted file mode 100644
index 5a6d56f605d..00000000000
--- a/changelogs/fragments/80110-repos-gpgcheck.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - dnf - properly set gpg check options on enabled repositories according to the ``disable_gpg_check`` option (https://github.com/ansible/ansible/issues/80110)
diff --git a/changelogs/fragments/80267-ansible_builtin_user-remove-user-not-found-warning.yml b/changelogs/fragments/80267-ansible_builtin_user-remove-user-not-found-warning.yml
deleted file mode 100644
index 6bff3c000d8..00000000000
--- a/changelogs/fragments/80267-ansible_builtin_user-remove-user-not-found-warning.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - ansible.builtin.user - Remove user not found warning (https://github.com/ansible/ansible/issues/80267)
diff --git a/changelogs/fragments/80478-extend-mount-info.yml b/changelogs/fragments/80478-extend-mount-info.yml
deleted file mode 100644
index fcb20e15116..00000000000
--- a/changelogs/fragments/80478-extend-mount-info.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - "Add ``dump`` and ``passno`` mount information to facts component (https://github.com/ansible/ansible/issues/80478)"
diff --git a/changelogs/fragments/80561.yml b/changelogs/fragments/80561.yml
deleted file mode 100644
index af8ce3f0937..00000000000
--- a/changelogs/fragments/80561.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-bugfixes:
-- Provide additional information about the alternative plugin in the deprecation message (https://github.com/ansible/ansible/issues/80561).
diff --git a/changelogs/fragments/80590-dnf-skip_broken-unavailable-pkgs.yml b/changelogs/fragments/80590-dnf-skip_broken-unavailable-pkgs.yml
deleted file mode 100644
index f82c7aef72a..00000000000
--- a/changelogs/fragments/80590-dnf-skip_broken-unavailable-pkgs.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - dnf - properly skip unavailable packages when ``skip_broken`` is enabled (https://github.com/ansible/ansible/issues/80590)
diff --git a/changelogs/fragments/80882-Amazon-os-family-compat.yaml b/changelogs/fragments/80882-Amazon-os-family-compat.yaml
deleted file mode 100644
index cb838736550..00000000000
--- a/changelogs/fragments/80882-Amazon-os-family-compat.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-- Interpreter discovery - Add ``Amzn`` to ``OS_FAMILY_MAP`` for correct family fallback for interpreter discovery (https://github.com/ansible/ansible/issues/80882).
diff --git a/changelogs/fragments/80975-systemd-detect.yml b/changelogs/fragments/80975-systemd-detect.yml
deleted file mode 100644
index da4bbb986a9..00000000000
--- a/changelogs/fragments/80975-systemd-detect.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-bugfixes:
- - Consolidate systemd detection logic into one place (https://github.com/ansible/ansible/issues/80975).
diff --git a/changelogs/fragments/80995-include-all-var-files.yml b/changelogs/fragments/80995-include-all-var-files.yml
deleted file mode 100644
index f1ac72b8970..00000000000
--- a/changelogs/fragments/80995-include-all-var-files.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-- include_vars - fix calculating ``depth`` relative to the root and ensure all files are included (https://github.com/ansible/ansible/issues/80987).
diff --git a/changelogs/fragments/81053-templated-tags-inheritance.yml b/changelogs/fragments/81053-templated-tags-inheritance.yml
deleted file mode 100644
index 64cacdd6b8a..00000000000
--- a/changelogs/fragments/81053-templated-tags-inheritance.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - Properly template tags in parent blocks (https://github.com/ansible/ansible/issues/81053)
diff --git a/changelogs/fragments/81188_better_error.yml b/changelogs/fragments/81188_better_error.yml
deleted file mode 100644
index a581dbeb858..00000000000
--- a/changelogs/fragments/81188_better_error.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-bugfixes:
- - Reword the error message when the module fails to parse parameters in JSON format (https://github.com/ansible/ansible/issues/81188).
diff --git a/changelogs/fragments/81532-fix-nested-flush_handlers.yml b/changelogs/fragments/81532-fix-nested-flush_handlers.yml
deleted file mode 100644
index e43aa2e1df7..00000000000
--- a/changelogs/fragments/81532-fix-nested-flush_handlers.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - flush_handlers - properly handle a handler failure in a nested block when ``force_handlers`` is set (http://github.com/ansible/ansible/issues/81532)
diff --git a/changelogs/fragments/81584-daemonize-follow-up-fixes.yml b/changelogs/fragments/81584-daemonize-follow-up-fixes.yml
deleted file mode 100644
index 5842a0a37ca..00000000000
--- a/changelogs/fragments/81584-daemonize-follow-up-fixes.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - "``ansible.module_utils.service`` - ensure binary data transmission in ``daemonize()``"
diff --git a/changelogs/fragments/81606-ansible-galaxy-collection-pre-releases.yml b/changelogs/fragments/81606-ansible-galaxy-collection-pre-releases.yml
deleted file mode 100644
index 129ccfd2507..00000000000
--- a/changelogs/fragments/81606-ansible-galaxy-collection-pre-releases.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-
-bugfixes:
-- >-
- ansible-galaxy - started allowing the use of pre-releases
- for dependencies on any level of the dependency tree that
- specifically demand exact pre-release versions of
- collections and not version ranges.
- (https://github.com/ansible/ansible/pull/81606)
-- >-
- ansible-galaxy - started allowing the use of pre-releases
- for collections that do not have any stable versions
- published.
- (https://github.com/ansible/ansible/pull/81606)
-
-...
diff --git a/changelogs/fragments/81659_varswithsources.yml b/changelogs/fragments/81659_varswithsources.yml
deleted file mode 100644
index 32133e1c4e3..00000000000
--- a/changelogs/fragments/81659_varswithsources.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-bugfixes:
-- vars - handle exception while combining VarsWithSources and dict (https://github.com/ansible/ansible/issues/81659).
diff --git a/changelogs/fragments/81666-handlers-run_once.yml b/changelogs/fragments/81666-handlers-run_once.yml
deleted file mode 100644
index e5cac9e2161..00000000000
--- a/changelogs/fragments/81666-handlers-run_once.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - Fix ``run_once`` being incorrectly interpreted on handlers (https://github.com/ansible/ansible/issues/81666)
diff --git a/changelogs/fragments/81699-zip-permission.yml b/changelogs/fragments/81699-zip-permission.yml
deleted file mode 100644
index d7ca23fd9bc..00000000000
--- a/changelogs/fragments/81699-zip-permission.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - unarchive - add support for 8 character permission strings for zip archives (https://github.com/ansible/ansible/pull/81705).
\ No newline at end of file
diff --git a/changelogs/fragments/81714-remove-deprecated-jinja2_native_warning.yml b/changelogs/fragments/81714-remove-deprecated-jinja2_native_warning.yml
deleted file mode 100644
index 53f9ce330ad..00000000000
--- a/changelogs/fragments/81714-remove-deprecated-jinja2_native_warning.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-removed_features:
- - Remove deprecated JINJA2_NATIVE_WARNING environment variable (https://github.com/ansible/ansible/issues/81714)
diff --git a/changelogs/fragments/81716-ansible-doc.yml b/changelogs/fragments/81716-ansible-doc.yml
deleted file mode 100644
index 7dcb2901f94..00000000000
--- a/changelogs/fragments/81716-ansible-doc.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-removed_features:
- - Remove deprecated APIs from ansible-docs (https://github.com/ansible/ansible/issues/81716).
diff --git a/changelogs/fragments/81717-remove-deprecated-crypt-support.yml b/changelogs/fragments/81717-remove-deprecated-crypt-support.yml
deleted file mode 100644
index b6b0b71c842..00000000000
--- a/changelogs/fragments/81717-remove-deprecated-crypt-support.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-removed_features:
- - Remove deprecated crypt support from ansible.utils.encrypt (https://github.com/ansible/ansible/issues/81717)
diff --git a/changelogs/fragments/81722-handler-subdir-include_tasks.yml b/changelogs/fragments/81722-handler-subdir-include_tasks.yml
deleted file mode 100644
index 97607818a8f..00000000000
--- a/changelogs/fragments/81722-handler-subdir-include_tasks.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - Allow for searching handler subdir for included task via include_role (https://github.com/ansible/ansible/issues/81722)
diff --git a/changelogs/fragments/81732-cloudstack-test-container-1.7.0.yml b/changelogs/fragments/81732-cloudstack-test-container-1.7.0.yml
deleted file mode 100644
index c37b8bfb911..00000000000
--- a/changelogs/fragments/81732-cloudstack-test-container-1.7.0.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-minor_changes:
- - ansible-test - Updated the CloudStack test container to version 1.7.0.
diff --git a/changelogs/fragments/81775-add-regex_replace-parameters.yml b/changelogs/fragments/81775-add-regex_replace-parameters.yml
deleted file mode 100644
index 1bca5c7ded0..00000000000
--- a/changelogs/fragments/81775-add-regex_replace-parameters.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-minor_changes:
- - filter plugin - Add the count and mandatory_count parameters in the regex_replace filter
diff --git a/changelogs/fragments/81901-galaxy-requirements-format.yml b/changelogs/fragments/81901-galaxy-requirements-format.yml
deleted file mode 100644
index 2e57a955503..00000000000
--- a/changelogs/fragments/81901-galaxy-requirements-format.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-- ansible-galaxy - Provide a better error message when using a requirements file with an invalid format - https://github.com/ansible/ansible/issues/81901
diff --git a/changelogs/fragments/81931-locale-related-parsing-error-git.yml b/changelogs/fragments/81931-locale-related-parsing-error-git.yml
deleted file mode 100644
index 8ec3e9c5082..00000000000
--- a/changelogs/fragments/81931-locale-related-parsing-error-git.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-bugfixes:
- - Fix setting proper locale for git executable when running on non english systems, ensuring git output can always be parsed.
diff --git a/changelogs/fragments/81978-launchpad-api-endpoint.yml b/changelogs/fragments/81978-launchpad-api-endpoint.yml
deleted file mode 100644
index 47d96fe3cd8..00000000000
--- a/changelogs/fragments/81978-launchpad-api-endpoint.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-minor_changes:
- - apt_repository.py - use api.launchpad.net endpoint instead of launchpad.net/api
diff --git a/changelogs/fragments/82027_find.yml b/changelogs/fragments/82027_find.yml
deleted file mode 100644
index a1b5cdbba37..00000000000
--- a/changelogs/fragments/82027_find.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-bugfixes:
- - find - do not fail on Permission errors (https://github.com/ansible/ansible/issues/82027).
diff --git a/changelogs/fragments/82066.yml b/changelogs/fragments/82066.yml
deleted file mode 100644
index 5f2e34d0ca8..00000000000
--- a/changelogs/fragments/82066.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-minor_changes:
- - removed the unused argument ``create_new_password`` from ``CLI.build_vault_ids`` (https://github.com/ansible/ansible/pull/82066).
\ No newline at end of file
diff --git a/changelogs/fragments/82175-fix-ansible-galaxy-role-import-rc.yml b/changelogs/fragments/82175-fix-ansible-galaxy-role-import-rc.yml
deleted file mode 100644
index ec225b65cbb..00000000000
--- a/changelogs/fragments/82175-fix-ansible-galaxy-role-import-rc.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - ansible-galaxy role import - exit with 1 when the import fails (https://github.com/ansible/ansible/issues/82175).
diff --git a/changelogs/fragments/82187-uri-handle-force.yml b/changelogs/fragments/82187-uri-handle-force.yml
deleted file mode 100644
index 752c259e546..00000000000
--- a/changelogs/fragments/82187-uri-handle-force.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-bugfixes:
- - ansible.builtin.uri - the module was ignoring the ``force`` parameter and always
- requesting a cached copy (via the ``If-Modified-Since`` header) when downloading
- to an existing local file. Disable caching when ``force`` is ``true``, as
- documented (https://github.com/ansible/ansible/issues/82166).
diff --git a/changelogs/fragments/82241-handler-include-tasks-from.yml b/changelogs/fragments/82241-handler-include-tasks-from.yml
deleted file mode 100644
index 276a612bf7e..00000000000
--- a/changelogs/fragments/82241-handler-include-tasks-from.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - Fix issue where an ``include_tasks`` handler in a role was not able to locate a file in ``tasks/`` when ``tasks_from`` was used as a role entry point and ``main.yml`` was not present (https://github.com/ansible/ansible/issues/82241)
diff --git a/changelogs/fragments/82353-ansible-sanity-examples.yml b/changelogs/fragments/82353-ansible-sanity-examples.yml
deleted file mode 100644
index 66f65bc629d..00000000000
--- a/changelogs/fragments/82353-ansible-sanity-examples.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-minor_changes:
- - ansible-test - sanity test allows ``EXAMPLES`` to be multi-document YAML (https://github.com/ansible/ansible/issues/82353).
- - ansible-test - document block name now included in error message for YAML parsing errors (https://github.com/ansible/ansible/issues/82353).
diff --git a/changelogs/fragments/82359_assemble_diff.yml b/changelogs/fragments/82359_assemble_diff.yml
deleted file mode 100644
index e3bf64f1a46..00000000000
--- a/changelogs/fragments/82359_assemble_diff.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-bugfixes:
- - assemble - fixed missing parameter 'content' in _get_diff_data API (https://github.com/ansible/ansible/issues/82359).
diff --git a/changelogs/fragments/82363-multiple-handlers-with-recursive-notification.yml b/changelogs/fragments/82363-multiple-handlers-with-recursive-notification.yml
deleted file mode 100644
index 92bd37b486e..00000000000
--- a/changelogs/fragments/82363-multiple-handlers-with-recursive-notification.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - Run all handlers with the same ``listen`` topic, even when notified from another handler (https://github.com/ansible/ansible/issues/82363).
diff --git a/changelogs/fragments/82455-new-vmware-productname.yml b/changelogs/fragments/82455-new-vmware-productname.yml
deleted file mode 100644
index 497c506adb4..00000000000
--- a/changelogs/fragments/82455-new-vmware-productname.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - "facts - detect VMware ESXi 8.0 virtualization by product name VMware20,1"
diff --git a/changelogs/fragments/82465-ansible-doc-paragraphs.yml b/changelogs/fragments/82465-ansible-doc-paragraphs.yml
deleted file mode 100644
index a9fb63245e3..00000000000
--- a/changelogs/fragments/82465-ansible-doc-paragraphs.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - "ansible-doc - treat double newlines in documentation strings as paragraph breaks. This is useful to create multi-paragraph notes in module/plugin documentation (https://github.com/ansible/ansible/pull/82465)."
diff --git a/changelogs/fragments/82496-add-alp-dolomite-suse-family.yaml b/changelogs/fragments/82496-add-alp-dolomite-suse-family.yaml
deleted file mode 100644
index 3d3590a3410..00000000000
--- a/changelogs/fragments/82496-add-alp-dolomite-suse-family.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - distribution.py - Recognize ALP-Dolomite as part of the SUSE OS family in Ansible, fixing its previous misidentification (https://github.com/ansible/ansible/pull/82496).
diff --git a/changelogs/fragments/82580_constructed.yml b/changelogs/fragments/82580_constructed.yml
deleted file mode 100644
index 22a57a7287e..00000000000
--- a/changelogs/fragments/82580_constructed.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - "constructed inventory plugin - Adding a note that only group_vars of explicit groups are loaded (https://github.com/ansible/ansible/pull/82580)."
diff --git a/changelogs/fragments/82606-template-python-syntax-error.yml b/changelogs/fragments/82606-template-python-syntax-error.yml
deleted file mode 100644
index 4bb13714193..00000000000
--- a/changelogs/fragments/82606-template-python-syntax-error.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - templating - ensure syntax errors originating from a template being compiled into Python code object result in a failure (https://github.com/ansible/ansible/issues/82606)
diff --git a/changelogs/fragments/83643-fix-sanity-ignore-for-copy.yml b/changelogs/fragments/83643-fix-sanity-ignore-for-copy.yml
new file mode 100644
index 00000000000..07d6312cb4d
--- /dev/null
+++ b/changelogs/fragments/83643-fix-sanity-ignore-for-copy.yml
@@ -0,0 +1,3 @@
+minor_changes:
+ - copy - parameter ``local_follow`` was incorrectly documented as having default value ``True`` (https://github.com/ansible/ansible/pull/83643).
+ - copy - fix sanity test failures (https://github.com/ansible/ansible/pull/83643).
diff --git a/changelogs/fragments/83690-get_url-content-disposition-filename.yml b/changelogs/fragments/83690-get_url-content-disposition-filename.yml
new file mode 100644
index 00000000000..47f9734c35e
--- /dev/null
+++ b/changelogs/fragments/83690-get_url-content-disposition-filename.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - get_url - fix honoring ``filename`` from the ``content-disposition`` header even when the type is ``inline`` (https://github.com/ansible/ansible/issues/83690)
diff --git a/changelogs/fragments/83965-action-groups-schema.yml b/changelogs/fragments/83965-action-groups-schema.yml
new file mode 100644
index 00000000000..cd4a439044d
--- /dev/null
+++ b/changelogs/fragments/83965-action-groups-schema.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - "runtime-metadata sanity test - improve validation of ``action_groups`` (https://github.com/ansible/ansible/pull/83965)."
diff --git a/changelogs/fragments/84008-additional-logging.yml b/changelogs/fragments/84008-additional-logging.yml
new file mode 100644
index 00000000000..80bd3a7ddd9
--- /dev/null
+++ b/changelogs/fragments/84008-additional-logging.yml
@@ -0,0 +1,3 @@
+minor_changes:
+ - Added a -vvvvv log message indicating when a host fails to produce output within the timeout period.
+ - SSH escalation-related -vvv log messages now include the associated host information.
diff --git a/changelogs/fragments/ansible-galaxy-role-install-symlink.yml b/changelogs/fragments/ansible-galaxy-role-install-symlink.yml
deleted file mode 100644
index 856c501455c..00000000000
--- a/changelogs/fragments/ansible-galaxy-role-install-symlink.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - ansible-galaxy role install - normalize tarfile paths and symlinks using ``ansible.utils.path.unfrackpath`` and consider them valid as long as the realpath is in the tarfile's role directory (https://github.com/ansible/ansible/issues/81965).
diff --git a/changelogs/fragments/ansible-test-added-fedora-39.yml b/changelogs/fragments/ansible-test-added-fedora-39.yml
deleted file mode 100644
index 192536c3e16..00000000000
--- a/changelogs/fragments/ansible-test-added-fedora-39.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-minor_changes:
- - ansible-test - Add Fedora 39 remote.
- - ansible-test - Add Fedora 39 container.
diff --git a/changelogs/fragments/ansible-test-cgroup-split.yml b/changelogs/fragments/ansible-test-cgroup-split.yml
deleted file mode 100644
index c9dec0cf5cb..00000000000
--- a/changelogs/fragments/ansible-test-cgroup-split.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - ansible-test - Fix parsing of cgroup entries which contain a ``:`` in the path (https://github.com/ansible/ansible/issues/81977).
diff --git a/changelogs/fragments/ansible-test-container-sleep.yml b/changelogs/fragments/ansible-test-container-sleep.yml
deleted file mode 100644
index e862530a1d4..00000000000
--- a/changelogs/fragments/ansible-test-container-sleep.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-minor_changes:
-- ansible-test - When invoking ``sleep`` in containers during container setup, the ``env`` command is used to avoid invoking
- the shell builtin, if present.
diff --git a/changelogs/fragments/ansible-test-coverage-update.yml b/changelogs/fragments/ansible-test-coverage-update.yml
deleted file mode 100644
index e406cea14c6..00000000000
--- a/changelogs/fragments/ansible-test-coverage-update.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-
-bugfixes:
-- ansible-test — Python 3.8–3.12 will use ``coverage`` v7.3.2.
-
-...
diff --git a/changelogs/fragments/ansible-test-cryptography.yml b/changelogs/fragments/ansible-test-cryptography.yml
deleted file mode 100644
index f4b4457b108..00000000000
--- a/changelogs/fragments/ansible-test-cryptography.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - ansible-test - Special handling for installation of ``cryptography`` has been removed, as it is no longer necessary.
diff --git a/changelogs/fragments/ansible-test-default-containers.yml b/changelogs/fragments/ansible-test-default-containers.yml
deleted file mode 100644
index a48520bad79..00000000000
--- a/changelogs/fragments/ansible-test-default-containers.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-minor_changes:
- - ansible-test - Update the base and default containers to use Ubuntu 22.04 for the base image.
- This also updates PowerShell to version 7.4.0 with .NET 8.0.0 and ShellCheck to version 0.8.0.
diff --git a/changelogs/fragments/ansible-test-distro-containers.yml b/changelogs/fragments/ansible-test-distro-containers.yml
deleted file mode 100644
index 4b9fc793ebc..00000000000
--- a/changelogs/fragments/ansible-test-distro-containers.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-minor_changes:
-- ansible-test - Updated the distro test containers to version 6.3.0 to include coverage 7.3.2 for Python 3.8+.
- The alpine3 container is now based on 3.18 instead of 3.17 and includes Python 3.11 instead of Python 3.10.
diff --git a/changelogs/fragments/ansible-test-docker-forwards.yml b/changelogs/fragments/ansible-test-docker-forwards.yml
deleted file mode 100644
index 9936714056f..00000000000
--- a/changelogs/fragments/ansible-test-docker-forwards.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-bugfixes:
-- ansible-test - Properly detect docker host when using ``ssh://`` protocol for connecting to the docker daemon.
-- ansible-test - Explicitly supply ``ControlPath=none`` when setting up port forwarding over SSH to address
- the scenario where the local ssh configuration uses ``ControlPath`` for all hosts, and would prevent
- ports to be forwarded after the initial connection to the host.
diff --git a/changelogs/fragments/ansible-test-nios-container.yml b/changelogs/fragments/ansible-test-nios-container.yml
index c01817370c3..f4b2a99acdd 100644
--- a/changelogs/fragments/ansible-test-nios-container.yml
+++ b/changelogs/fragments/ansible-test-nios-container.yml
@@ -1,2 +1,2 @@
minor_changes:
-- ansible-test - Bump the ``nios-test-container`` from version 2.0.0 to version 3.0.0.
+ - ansible-test - Update ``nios-test-container`` to version 6.0.0.
diff --git a/changelogs/fragments/ansible-test-pep-668.yml b/changelogs/fragments/ansible-test-pep-668.yml
deleted file mode 100644
index b17428c1463..00000000000
--- a/changelogs/fragments/ansible-test-pep-668.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-minor_changes:
-- ansible-test - When ansible-test installs requirements, it now instructs pip to allow installs on externally managed environments as defined by PEP 668.
- This only occurs in ephemeral environments managed by ansible-test, such as containers, or when the `--requirements` option is used.
diff --git a/changelogs/fragments/ansible-test-pylint-update.yml b/changelogs/fragments/ansible-test-pylint-update.yml
deleted file mode 100644
index d4eb4105c5e..00000000000
--- a/changelogs/fragments/ansible-test-pylint-update.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-bugfixes:
- - ansible-test - Update ``pylint`` to version 3.0.1.
- - ansible-test - Include missing ``pylint`` requirements for Python 3.10.
diff --git a/changelogs/fragments/ansible-test-python-3.12-compat.yml b/changelogs/fragments/ansible-test-python-3.12-compat.yml
deleted file mode 100644
index e77d7f8e00d..00000000000
--- a/changelogs/fragments/ansible-test-python-3.12-compat.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - ansible-test - Add a ``pylint`` plugin to work around a known issue on Python 3.12.
diff --git a/changelogs/fragments/ansible-test-python-default.yml b/changelogs/fragments/ansible-test-python-default.yml
deleted file mode 100644
index e03855442cb..00000000000
--- a/changelogs/fragments/ansible-test-python-default.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - ansible-test - Make Python 3.12 the default version used in the ``base`` and ``default`` containers.
diff --git a/changelogs/fragments/ansible-test-remove-rhel-9_2-remote.yml b/changelogs/fragments/ansible-test-remove-rhel-9_2-remote.yml
deleted file mode 100644
index 1a34cfa2bd4..00000000000
--- a/changelogs/fragments/ansible-test-remove-rhel-9_2-remote.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - "ansible-test - Remove rhel/9.2 test remote"
diff --git a/changelogs/fragments/ansible-test-rhel-9.3.yml b/changelogs/fragments/ansible-test-rhel-9.3.yml
deleted file mode 100644
index b8d1a3f3b49..00000000000
--- a/changelogs/fragments/ansible-test-rhel-9.3.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - ansible-test - Add support for RHEL 9.3 remotes.
diff --git a/changelogs/fragments/ansible-test-rmv-fedora-38.yml b/changelogs/fragments/ansible-test-rmv-fedora-38.yml
deleted file mode 100644
index 6ca4ed65e90..00000000000
--- a/changelogs/fragments/ansible-test-rmv-fedora-38.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-minor_changes:
- - ansible-test - Remove Fedora 38 remote support.
- - ansible-test - Remove Fedora 38 test container.
diff --git a/changelogs/fragments/ansible-test-sanity-no-basestring.yml b/changelogs/fragments/ansible-test-sanity-no-basestring.yml
deleted file mode 100644
index 4190bb6e1b6..00000000000
--- a/changelogs/fragments/ansible-test-sanity-no-basestring.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-minor_changes:
- - ansible-test - Removed the ``no-basestring`` sanity test.
- The test is no longer necessary now that Python 3 is required.
diff --git a/changelogs/fragments/ansible-test-sanity-no-dict.yml b/changelogs/fragments/ansible-test-sanity-no-dict.yml
deleted file mode 100644
index b985ace466e..00000000000
--- a/changelogs/fragments/ansible-test-sanity-no-dict.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-minor_changes:
- - ansible-test - Removed the ``no-dict-iteritems``, ``no-dict-iterkeys`` and ``no-dict-itervalues`` sanity tests.
- The tests are no longer necessary since Python 3 is required.
diff --git a/changelogs/fragments/ansible-test-sanity-no-main-display.yml b/changelogs/fragments/ansible-test-sanity-no-main-display.yml
deleted file mode 100644
index e59a72b29ae..00000000000
--- a/changelogs/fragments/ansible-test-sanity-no-main-display.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-minor_changes:
- - ansible-test - Removed the ``no-main-display`` sanity test.
- The unwanted pattern is unlikely to occur, since the test has existed since Ansible 2.8.
diff --git a/changelogs/fragments/ansible-test-sanity-unicode-literals.yml b/changelogs/fragments/ansible-test-sanity-unicode-literals.yml
deleted file mode 100644
index 7a41660bd1a..00000000000
--- a/changelogs/fragments/ansible-test-sanity-unicode-literals.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-minor_changes:
- - ansible-test - Removed the ``no-unicode-literals`` sanity test.
- The test is unnecessary now that Python 3 is required and the ``unicode_literals`` feature has no effect.
diff --git a/changelogs/fragments/ansible-test-shellcheck-exclude.yml b/changelogs/fragments/ansible-test-shellcheck-exclude.yml
deleted file mode 100644
index 8bf9aa9b812..00000000000
--- a/changelogs/fragments/ansible-test-shellcheck-exclude.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-minor_changes:
- - ansible-test - The ``shellcheck`` sanity test no longer disables the ``SC2164`` check.
- In most cases, seeing this error means the script is missing ``set -e``.
diff --git a/changelogs/fragments/ansible-test-unidiomatic-type-check.yml b/changelogs/fragments/ansible-test-unidiomatic-type-check.yml
deleted file mode 100644
index 8f39e236b3c..00000000000
--- a/changelogs/fragments/ansible-test-unidiomatic-type-check.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-minor_changes:
- - ansible-test - The ``unidiomatic-typecheck`` rule has been removed from the ``validate-modules`` sanity test.
- - ansible-test - The ``unidiomatic-typecheck`` rule has been enabled in the ``pylint`` sanity test.
diff --git a/changelogs/fragments/ansible-test-update.yml b/changelogs/fragments/ansible-test-update.yml
new file mode 100644
index 00000000000..8431887dedb
--- /dev/null
+++ b/changelogs/fragments/ansible-test-update.yml
@@ -0,0 +1,5 @@
+minor_changes:
+ - ansible-test - Update ``pylint`` sanity test to use version 3.3.1.
+ - ansible-test - Default to Python 3.13 in the ``base`` and ``default`` containers.
+ - ansible-test - Disable the ``deprecated-`` prefixed ``pylint`` rules as their results vary by Python version.
+ - ansible-test - Update the ``base`` and ``default`` containers.
diff --git a/changelogs/fragments/ansible-test-venv.yml b/changelogs/fragments/ansible-test-venv.yml
deleted file mode 100644
index 0efdc945515..00000000000
--- a/changelogs/fragments/ansible-test-venv.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - ansible-test - Removed fallback to ``virtualenv`` when ``-m venv`` is non-functional.
diff --git a/changelogs/fragments/any_errors_fatal-fixes.yml b/changelogs/fragments/any_errors_fatal-fixes.yml
deleted file mode 100644
index 10d9783ff82..00000000000
--- a/changelogs/fragments/any_errors_fatal-fixes.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-bugfixes:
- - Fix for when ``any_errors_fatal`` was ignored if error occurred in a block with always (https://github.com/ansible/ansible/issues/31543)
- - Fix ``force_handlers`` not working with ``any_errors_fatal`` (https://github.com/ansible/ansible/issues/36308)
- - Fix tasks in always section not being executed for nested blocks with ``any_errors_fatal`` (https://github.com/ansible/ansible/issues/73246)
- - "``any_errors_fatal`` should fail all hosts and rescue all of them when a ``rescue`` section is specified (https://github.com/ansible/ansible/issues/80981)"
- - Fix issues when tasks withing nested blocks wouldn't run when ``force_handlers`` is set (https://github.com/ansible/ansible/issues/81533)
diff --git a/changelogs/fragments/argument-spec-context.yml b/changelogs/fragments/argument-spec-context.yml
deleted file mode 100644
index 985a2050dd8..00000000000
--- a/changelogs/fragments/argument-spec-context.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-minor_changes:
-- module argument spec - Allow module authors to include arbitrary additional context in the argument spec, by making use of a new top level key
- called ``context``. This key should be a dict type. This allows for users to customize what they place in the argument spec, without having to
- ignore sanity tests that validate the schema.
diff --git a/changelogs/fragments/config_set_option_fix.yml b/changelogs/fragments/config_set_option_fix.yml
deleted file mode 100644
index 57ebec932a3..00000000000
--- a/changelogs/fragments/config_set_option_fix.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - set_option method for plugins to update config now properly passes through type casting and validation.
diff --git a/changelogs/fragments/config_validate_updates.yml b/changelogs/fragments/config_validate_updates.yml
deleted file mode 100644
index 548467a0bf7..00000000000
--- a/changelogs/fragments/config_validate_updates.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - validate-modules tests now correctly handles ``choices`` in dictionary format.
diff --git a/changelogs/fragments/copy_keep_suffix_temp.yml b/changelogs/fragments/copy_keep_suffix_temp.yml
deleted file mode 100644
index d6dc36dc54e..00000000000
--- a/changelogs/fragments/copy_keep_suffix_temp.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-bugfixes:
- - copy action now ensures that tempfiles use the same suffix as destination, to allow for ``validate`` to work with utilities that check extensions.
- - copy action now also generates temprary files as hidden ('.' prefixed) to avoid accidental pickup by running services that glob by extension.
- - template action will also inherit the behavior from copy (as it uses it internally).
diff --git a/changelogs/fragments/cron_err.yml b/changelogs/fragments/cron_err.yml
new file mode 100644
index 00000000000..5e65a7b68ec
--- /dev/null
+++ b/changelogs/fragments/cron_err.yml
@@ -0,0 +1,3 @@
+---
+minor_changes:
+ - cron - Provide additional error information while writing cron file (https://github.com/ansible/ansible/issues/83223).
diff --git a/changelogs/fragments/csvfile-keycol.yml b/changelogs/fragments/csvfile-keycol.yml
deleted file mode 100644
index 66819ba2f73..00000000000
--- a/changelogs/fragments/csvfile-keycol.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-
-minor_changes:
- - csvfile - add a keycol parameter to specify in which column to search.
diff --git a/changelogs/fragments/cve-2023-5115.yml b/changelogs/fragments/cve-2023-5115.yml
deleted file mode 100644
index 69e0ddb7659..00000000000
--- a/changelogs/fragments/cve-2023-5115.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-security_fixes:
-- ansible-galaxy - Prevent roles from using symlinks to overwrite
- files outside of the installation directory (CVE-2023-5115)
diff --git a/changelogs/fragments/cve-2024-0690.yml b/changelogs/fragments/cve-2024-0690.yml
deleted file mode 100644
index 0e030d88864..00000000000
--- a/changelogs/fragments/cve-2024-0690.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-security_fixes:
-- ANSIBLE_NO_LOG - Address issue where ANSIBLE_NO_LOG was ignored (CVE-2024-0690)
diff --git a/changelogs/fragments/deb822_repo_idem.yml b/changelogs/fragments/deb822_repo_idem.yml
deleted file mode 100644
index 5f60a78f75f..00000000000
--- a/changelogs/fragments/deb822_repo_idem.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-bugfixes:
- - deb822_repository - handle idempotency if the order of parameters is changed (https://github.com/ansible/ansible/issues/82454).
diff --git a/changelogs/fragments/debconf_empty_password.yml b/changelogs/fragments/debconf_empty_password.yml
new file mode 100644
index 00000000000..473dc53e0d5
--- /dev/null
+++ b/changelogs/fragments/debconf_empty_password.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - debconf - set empty password values (https://github.com/ansible/ansible/issues/83214).
diff --git a/changelogs/fragments/dedupe_config_init.yml b/changelogs/fragments/dedupe_config_init.yml
deleted file mode 100644
index 16306fc13fa..00000000000
--- a/changelogs/fragments/dedupe_config_init.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - ansible-config init will now dedupe ini entries from plugins.
diff --git a/changelogs/fragments/delegate_to_invalid.yml b/changelogs/fragments/delegate_to_invalid.yml
deleted file mode 100644
index 5eca5f189ba..00000000000
--- a/changelogs/fragments/delegate_to_invalid.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - delegate_to when set to an empty or undefined variable will now give a proper error.
diff --git a/changelogs/fragments/distlib-dataclass-annotation.yml b/changelogs/fragments/distlib-dataclass-annotation.yml
deleted file mode 100644
index d86496dfac0..00000000000
--- a/changelogs/fragments/distlib-dataclass-annotation.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-bugfixes:
-- ansible-galaxy - Resolve issue with the dataclass used for galaxy.yml manifest caused by using
- future annotations
diff --git a/changelogs/fragments/dnf5-from-rpm-allow_downgrade.yml b/changelogs/fragments/dnf5-from-rpm-allow_downgrade.yml
deleted file mode 100644
index 97716d04693..00000000000
--- a/changelogs/fragments/dnf5-from-rpm-allow_downgrade.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - dnf5 - respect ``allow_downgrade`` when installing packages directly from rpm files
diff --git a/changelogs/fragments/dnf5-plugins-compat.yml b/changelogs/fragments/dnf5-plugins-compat.yml
new file mode 100644
index 00000000000..5d42b0f99f1
--- /dev/null
+++ b/changelogs/fragments/dnf5-plugins-compat.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "dnf5 - fix traceback when ``enable_plugins``/``disable_plugins`` is used on ``python3-libdnf5`` versions that do not support this functionality"
diff --git a/changelogs/fragments/dwim_is_role_fix.yml b/changelogs/fragments/dwim_is_role_fix.yml
deleted file mode 100644
index bab02012ff8..00000000000
--- a/changelogs/fragments/dwim_is_role_fix.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - dwim functions for lookups should be better at detectging role context even in abscense of tasks/main.
diff --git a/changelogs/fragments/file_simplify.yml b/changelogs/fragments/file_simplify.yml
new file mode 100644
index 00000000000..63e48fbdb9a
--- /dev/null
+++ b/changelogs/fragments/file_simplify.yml
@@ -0,0 +1,3 @@
+---
+minor_changes:
+ - file - make code more readable and simple.
diff --git a/changelogs/fragments/find-encoding.yml b/changelogs/fragments/find-encoding.yml
deleted file mode 100644
index 77449455517..00000000000
--- a/changelogs/fragments/find-encoding.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - find - add a encoding parameter to specify which encoding of the files to be searched.
diff --git a/changelogs/fragments/fix-ansible-galaxy-ignore-certs.yml b/changelogs/fragments/fix-ansible-galaxy-ignore-certs.yml
new file mode 100644
index 00000000000..aba789bdadd
--- /dev/null
+++ b/changelogs/fragments/fix-ansible-galaxy-ignore-certs.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - Fix disabling SSL verification when installing collections and roles from git repositories. If ``--ignore-certs`` isn't provided, the value for the ``GALAXY_IGNORE_CERTS`` configuration option will be used (https://github.com/ansible/ansible/issues/83326).
diff --git a/changelogs/fragments/fix-ansible-galaxy-info-no-role-found.yml b/changelogs/fragments/fix-ansible-galaxy-info-no-role-found.yml
deleted file mode 100644
index 96c1ef7a0f4..00000000000
--- a/changelogs/fragments/fix-ansible-galaxy-info-no-role-found.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - ansible-galaxy info - fix reporting no role found when lookup_role_by_name returns None.
diff --git a/changelogs/fragments/fix-build-files-manifest-walk.yml b/changelogs/fragments/fix-build-files-manifest-walk.yml
deleted file mode 100644
index 3d1000698bf..00000000000
--- a/changelogs/fragments/fix-build-files-manifest-walk.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-minor_changes:
- - >-
- ansible-galaxy - Started normalizing build directory with a trailing separator when building collections, internally.
- (https://github.com/ansible/ansible/pull/81619).
diff --git a/changelogs/fragments/fix-default-ansible-galaxy-role-import-name.yml b/changelogs/fragments/fix-default-ansible-galaxy-role-import-name.yml
deleted file mode 100644
index d8ef1264d97..00000000000
--- a/changelogs/fragments/fix-default-ansible-galaxy-role-import-name.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-bugfixes:
- - >-
- ``ansible-galaxy role import`` - fix using the ``role_name`` in a standalone role's
- ``galaxy_info`` metadata by disabling automatic removal of the ``ansible-role-`` prefix.
- This matches the behavior of the Galaxy UI which also no longer implicitly removes the
- ``ansible-role-`` prefix.
- Use the ``--role-name`` option or add a ``role_name`` to the ``galaxy_info`` dictionary
- in the role's ``meta/main.yml`` to use an alternate role name.
diff --git a/changelogs/fragments/fix-dnf-install-missing-url.yml b/changelogs/fragments/fix-dnf-install-missing-url.yml
deleted file mode 100644
index c327f59b84c..00000000000
--- a/changelogs/fragments/fix-dnf-install-missing-url.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - Fix handling missing urls in ansible.module_utils.urls.fetch_file for Python 3.
diff --git a/changelogs/fragments/fix-expect-indefinite-timeout.yml b/changelogs/fragments/fix-expect-indefinite-timeout.yml
deleted file mode 100644
index 32bf6c274aa..00000000000
--- a/changelogs/fragments/fix-expect-indefinite-timeout.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - expect - fix argument spec error using timeout=null (https://github.com/ansible/ansible/issues/80982).
diff --git a/changelogs/fragments/fix-module-utils-facts-timeout.yml b/changelogs/fragments/fix-module-utils-facts-timeout.yml
new file mode 100644
index 00000000000..3ecc95dfab3
--- /dev/null
+++ b/changelogs/fragments/fix-module-utils-facts-timeout.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - Use the requested error message in the ansible.module_utils.facts.timeout timeout function instead of hardcoding one.
diff --git a/changelogs/fragments/fix-reboot-plugin.yml b/changelogs/fragments/fix-reboot-plugin.yml
deleted file mode 100644
index 6dfd2b6bf16..00000000000
--- a/changelogs/fragments/fix-reboot-plugin.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - Stopped misleadingly advertising ``async`` mode support in the ``reboot`` module (https://github.com/ansible/ansible/issues/71517).
diff --git a/changelogs/fragments/fix-runtime-metadata-modules-action_plugin.yml b/changelogs/fragments/fix-runtime-metadata-modules-action_plugin.yml
deleted file mode 100644
index 7826c8c0d24..00000000000
--- a/changelogs/fragments/fix-runtime-metadata-modules-action_plugin.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-bugfixes:
-- >-
- ``ansible-test sanity --test runtime-metadata`` - add ``action_plugin`` as a valid field
- for modules in the schema (https://github.com/ansible/ansible/pull/82562).
diff --git a/changelogs/fragments/fix-vars-plugins-in-roles.yml b/changelogs/fragments/fix-vars-plugins-in-roles.yml
deleted file mode 100644
index b64d586b9ef..00000000000
--- a/changelogs/fragments/fix-vars-plugins-in-roles.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - Fix loading vars_plugins in roles (https://github.com/ansible/ansible/issues/82239).
diff --git a/changelogs/fragments/fix_errors.yml b/changelogs/fragments/fix_errors.yml
new file mode 100644
index 00000000000..995cc28ffda
--- /dev/null
+++ b/changelogs/fragments/fix_errors.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - Errors now preserve stacked error messages even when YAML is involved.
diff --git a/changelogs/fragments/functools-update-wrapper.yml b/changelogs/fragments/functools-update-wrapper.yml
deleted file mode 100644
index ebf9b60c1b1..00000000000
--- a/changelogs/fragments/functools-update-wrapper.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - Use Python's built-in ``functools.update_wrapper`` instead an inline copy from Python 3.7.
diff --git a/changelogs/fragments/galaxy_dep_res_msgs.yml b/changelogs/fragments/galaxy_dep_res_msgs.yml
deleted file mode 100644
index 733262b107d..00000000000
--- a/changelogs/fragments/galaxy_dep_res_msgs.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-minor_changes:
- - ansible-galaxy dependency resolution messages have changed the unexplained 'virtual' collection for the specific type ('scm', 'dir', etc) that is more user friendly
-bugfixes:
- - ansible-galaxy error on dependency resolution will not error itself due to 'virtual' collections not having a name/namespace.
diff --git a/changelogs/fragments/get_bin_path_required_parameter.yml b/changelogs/fragments/get_bin_path_required_parameter.yml
deleted file mode 100644
index 8c3ca61c644..00000000000
--- a/changelogs/fragments/get_bin_path_required_parameter.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-deprecated_features:
- - The 'required' parameter in 'ansible.module_utils.common.process.get_bin_path' API is deprecated (https://github.com/ansible/ansible/issues/82464).
diff --git a/changelogs/fragments/import_role_goes_public.yml b/changelogs/fragments/import_role_goes_public.yml
deleted file mode 100644
index 29a08271961..00000000000
--- a/changelogs/fragments/import_role_goes_public.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-minor_changes:
- - "``import_role`` action now also gets a ``public`` option that controls variable exports, default depending on ``DEFAULT_PRIVATE_ROLE_VARS`` (if using defaults equates to ``public=True``)."
- - "``DEFAULT_PRIVATE_ROLE_VARS`` is now overridden by explicit setting of ``public`` for ``include_roles`` and ``import_roles``."
diff --git a/changelogs/fragments/improve-tombstone-error.yml b/changelogs/fragments/improve-tombstone-error.yml
deleted file mode 100644
index faee68b083d..00000000000
--- a/changelogs/fragments/improve-tombstone-error.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-bugfixes:
- - Include the task location when a module or action plugin is deprecated (https://github.com/ansible/ansible/issues/82450).
- - Give the tombstone error for ``include`` pre-fork like other tombstoned action/module plugins.
diff --git a/changelogs/fragments/interpreter_discovery.yml b/changelogs/fragments/interpreter_discovery.yml
deleted file mode 100644
index 0e8a7c1093c..00000000000
--- a/changelogs/fragments/interpreter_discovery.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-bugfixes:
-- interpreter_discovery - handle AnsibleError exception raised while interpreter discovery (https://github.com/ansible/ansible/issues/78264).
diff --git a/changelogs/fragments/inv_available_hosts_to_frozenset.yml b/changelogs/fragments/inv_available_hosts_to_frozenset.yml
deleted file mode 100644
index 094fe6ddd54..00000000000
--- a/changelogs/fragments/inv_available_hosts_to_frozenset.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - ansible-inventory - index available_hosts for major performance boost when dumping large inventories
\ No newline at end of file
diff --git a/changelogs/fragments/j2_load_fix.yml b/changelogs/fragments/j2_load_fix.yml
deleted file mode 100644
index 4fd33b964a8..00000000000
--- a/changelogs/fragments/j2_load_fix.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-bugfixes:
- - Plugin loader does not dedupe nor cache filter/test plugins by file basename, but full path name.
- - Restoring the ability of filters/tests can have same file base name but different tests/filters defined inside.
diff --git a/changelogs/fragments/known_hosts_cert-authority_keys.yml b/changelogs/fragments/known_hosts_cert-authority_keys.yml
deleted file mode 100644
index 0f3e31709c4..00000000000
--- a/changelogs/fragments/known_hosts_cert-authority_keys.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - known_hosts - Fix issue with `@cert-authority` entries in known_hosts incorrectly being removed.
diff --git a/changelogs/fragments/log_id.yml b/changelogs/fragments/log_id.yml
deleted file mode 100644
index 3233bf796c4..00000000000
--- a/changelogs/fragments/log_id.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - added configuration item ``TARGET_LOG_INFO`` that allows the user/author to add an information string to the log output on targets.
diff --git a/changelogs/fragments/log_verbosity.yml b/changelogs/fragments/log_verbosity.yml
deleted file mode 100644
index 3e67aac8d94..00000000000
--- a/changelogs/fragments/log_verbosity.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - User can now set ansible.log to record higher verbosity than what is specified for display via new configuration item LOG_VERBOSITY.
diff --git a/changelogs/fragments/lookups_updated.yml b/changelogs/fragments/lookups_updated.yml
deleted file mode 100644
index 15104bc23d8..00000000000
--- a/changelogs/fragments/lookups_updated.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - All core lookups now use set_option(s) even when doing their own custom parsing. This ensures that the options are always the proper type.
diff --git a/changelogs/fragments/module-ignore-unknown-options.yml b/changelogs/fragments/module-ignore-unknown-options.yml
deleted file mode 100644
index c2d380f0901..00000000000
--- a/changelogs/fragments/module-ignore-unknown-options.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-minor_changes:
-- >-
- modules - Add the ability for an action plugin to call ``self._execute_module(*, ignore_unknown_opts=True)`` to execute a module with options that may not be
- supported for the version being called. This tells the module basic wrapper to ignore validating the options provided match the arg spec.
-bugfixes:
-- fetch - Do not calculate the file size for Windows fetch targets to improve performance.
diff --git a/changelogs/fragments/module_utils-basic-deprecations.yml b/changelogs/fragments/module_utils-basic-deprecations.yml
deleted file mode 100644
index b44d9371036..00000000000
--- a/changelogs/fragments/module_utils-basic-deprecations.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-deprecated_features:
- - >-
- ``module_utils`` - importing the following convenience helpers from ``ansible.module_utils.basic`` has been deprecated:
- ``get_exception``, ``literal_eval``, ``_literal_eval``, ``datetime``, ``signal``, ``types``, ``chain``, ``repeat``,
- ``PY2``, ``PY3``, ``b``, ``binary_type``, ``integer_types``, ``iteritems``, ``string_types``, ``test_type``, ``map`` and ``shlex_quote``.
diff --git a/changelogs/fragments/no_log_booly.yml b/changelogs/fragments/no_log_booly.yml
deleted file mode 100644
index 2fc499ddd9f..00000000000
--- a/changelogs/fragments/no_log_booly.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - module no_log will no longer affect top level booleans, for example ``no_log_module_parameter='a'`` will no longer hide ``changed=False`` as a 'no log value' (matches 'a').
diff --git a/changelogs/fragments/os_family.yml b/changelogs/fragments/os_family.yml
new file mode 100644
index 00000000000..7126a00c27b
--- /dev/null
+++ b/changelogs/fragments/os_family.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - facts - skip if distribution file path is directory, instead of raising error (https://github.com/ansible/ansible/issues/84006).
diff --git a/changelogs/fragments/package-dnf-action-plugins-facts-fail-msg.yml b/changelogs/fragments/package-dnf-action-plugins-facts-fail-msg.yml
new file mode 100644
index 00000000000..8dd037a4e02
--- /dev/null
+++ b/changelogs/fragments/package-dnf-action-plugins-facts-fail-msg.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "``package``/``dnf`` action plugins - provide the reason behind the failure to gather the ``ansible_pkg_mgr`` fact to identify the package backend"
diff --git a/changelogs/fragments/paramiko_globals.yml b/changelogs/fragments/paramiko_globals.yml
deleted file mode 100644
index 1a3b1da3a27..00000000000
--- a/changelogs/fragments/paramiko_globals.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-deprecated_features:
- - paramiko connection plugin, configuration items in the global scope are being deprecated and will be removed in favor or the existing same options in the plugin itself.
- Users should not need to change anything (how to configure them are the same) but plugin authors using the global constants should move to using the plugin's get_option().
diff --git a/changelogs/fragments/pc_fixes.yml b/changelogs/fragments/pc_fixes.yml
deleted file mode 100644
index 951c34a23dd..00000000000
--- a/changelogs/fragments/pc_fixes.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - moved assemble, raw, copy, fetch, reboot, script and wait_for_connection to query task instead of play_context ensuring they get the lastest and most correct data.
diff --git a/changelogs/fragments/prettydoc.yml b/changelogs/fragments/prettydoc.yml
deleted file mode 100644
index d34b539e1c3..00000000000
--- a/changelogs/fragments/prettydoc.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
- - ansible-doc output has been revamped to make it more visually pleasing when going to a terminal, also more concise, use -v to show extra information.
diff --git a/changelogs/fragments/pull_file_secrets.yml b/changelogs/fragments/pull_file_secrets.yml
deleted file mode 100644
index d8ea3554904..00000000000
--- a/changelogs/fragments/pull_file_secrets.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - ansible-pull will now correctly handle become and connection password file options for ansible-playbook.
diff --git a/changelogs/fragments/pull_unfrack_dest.yml b/changelogs/fragments/pull_unfrack_dest.yml
deleted file mode 100644
index 71724232033..00000000000
--- a/changelogs/fragments/pull_unfrack_dest.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - ansible-pull now will expand relative paths for the ``-d|--directory`` option is now expanded before use.
diff --git a/changelogs/fragments/py-tmpl-hardening.yml b/changelogs/fragments/py-tmpl-hardening.yml
deleted file mode 100644
index 4d95e66d595..00000000000
--- a/changelogs/fragments/py-tmpl-hardening.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-- Harden python templates for respawn and ansiballz around str literal quoting
diff --git a/changelogs/fragments/reboot_timeout_fix.yml b/changelogs/fragments/reboot_timeout_fix.yml
deleted file mode 100644
index 74fdd41b5dc..00000000000
--- a/changelogs/fragments/reboot_timeout_fix.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-- reboot action now handles connections with 'timeout' vs only 'connection_timeout' settings.
diff --git a/changelogs/fragments/remove_ini_ignored_dir.yml b/changelogs/fragments/remove_ini_ignored_dir.yml
new file mode 100644
index 00000000000..10a5a8e61ce
--- /dev/null
+++ b/changelogs/fragments/remove_ini_ignored_dir.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - INVENTORY_IGNORE_EXTS config, removed ``ini`` from the default list, inventory scripts using a corresponding .ini configuration are rare now and inventory.ini files are more common. Those that need to ignore the ini files for inventory scripts can still add it to configuration.
diff --git a/changelogs/fragments/restore_role_param_precedence.yml b/changelogs/fragments/restore_role_param_precedence.yml
deleted file mode 100644
index a3ac0b097f9..00000000000
--- a/changelogs/fragments/restore_role_param_precedence.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - role params now have higher precedence than host facts again, matching documentation, this had unintentionally changed in 2.15.
diff --git a/changelogs/fragments/role_fixes.yml b/changelogs/fragments/role_fixes.yml
deleted file mode 100644
index ef68d0f2762..00000000000
--- a/changelogs/fragments/role_fixes.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-bugfixes:
- - roles, the ``static`` property is now correctly set, this will fix issues with ``public`` and ``DEFAULT_PRIVATE_ROLE_VARS`` controls on exporting vars.
- - roles, code cleanup and performance optimization of dependencies, now cached, and ``public`` setting is now determined once, at role instantiation.
diff --git a/changelogs/fragments/skip-handlers-tagged-play.yml b/changelogs/fragments/skip-handlers-tagged-play.yml
new file mode 100644
index 00000000000..755308eafbe
--- /dev/null
+++ b/changelogs/fragments/skip-handlers-tagged-play.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "Do not run implicit ``flush_handlers`` meta tasks when the whole play is excluded from the run due to tags specified."
diff --git a/changelogs/fragments/skip-implicit-flush_handlers-no-notify.yml b/changelogs/fragments/skip-implicit-flush_handlers-no-notify.yml
new file mode 100644
index 00000000000..a4c913791d2
--- /dev/null
+++ b/changelogs/fragments/skip-implicit-flush_handlers-no-notify.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "Improve performance on large inventories by reducing the number of implicit meta tasks."
diff --git a/changelogs/fragments/skip-role-task-iterator.yml b/changelogs/fragments/skip-role-task-iterator.yml
new file mode 100644
index 00000000000..1cf6b4cbb84
--- /dev/null
+++ b/changelogs/fragments/skip-role-task-iterator.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - PlayIterator - do not return tasks from already executed roles so specific strategy plugins do not have to do the filtering of such tasks themselves
diff --git a/changelogs/fragments/syslog_exception.yml b/changelogs/fragments/syslog_exception.yml
deleted file mode 100644
index dc47ca6cb97..00000000000
--- a/changelogs/fragments/syslog_exception.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-bugfixes:
-- syslog - Handle ValueError exception raised when sending Null Characters to syslog with Python 3.12.
diff --git a/changelogs/fragments/thread_counts.yml b/changelogs/fragments/thread_counts.yml
deleted file mode 100644
index c5df554c2d3..00000000000
--- a/changelogs/fragments/thread_counts.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - fact gathering on linux now handles thread count by using rounding vs dropping decimals, it should give slightly more accurate numbers.
diff --git a/changelogs/fragments/unarchive.yml b/changelogs/fragments/unarchive.yml
deleted file mode 100644
index 421497236d6..00000000000
--- a/changelogs/fragments/unarchive.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-bugfixes:
- - unarchive - force unarchive if symlink target changes (https://github.com/ansible/ansible/issues/30420).
diff --git a/changelogs/fragments/uri_action_cmode.yml b/changelogs/fragments/uri_action_cmode.yml
deleted file mode 100644
index f47cf71e4a4..00000000000
--- a/changelogs/fragments/uri_action_cmode.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - uri action plugin now skipped during check mode (not supported) instead of even trying to execute the module, which already skipped, this does not really change the result, but returns much faster.
diff --git a/changelogs/fragments/url_credentials_decode.yml b/changelogs/fragments/url_credentials_decode.yml
deleted file mode 100644
index b23362bb293..00000000000
--- a/changelogs/fragments/url_credentials_decode.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - support url-encoded credentials in URLs like http://x%40:%40@example.com (https://github.com/ansible/ansible/pull/82552)
diff --git a/changelogs/fragments/urls-no-py2.yml b/changelogs/fragments/urls-no-py2.yml
deleted file mode 100644
index 08a54eda64a..00000000000
--- a/changelogs/fragments/urls-no-py2.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-major_changes:
-- urls.py - Removed support for Python 2
diff --git a/changelogs/fragments/urls-tls13-post-handshake-auth.yml b/changelogs/fragments/urls-tls13-post-handshake-auth.yml
deleted file mode 100644
index c7eaba742bb..00000000000
--- a/changelogs/fragments/urls-tls13-post-handshake-auth.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-- urls - Add support for TLS 1.3 post handshake certificate authentication - https://github.com/ansible/ansible/issues/81782
diff --git a/changelogs/fragments/user-accept-yescrypt-hash.yml b/changelogs/fragments/user-accept-yescrypt-hash.yml
deleted file mode 100644
index 2588fd4ade8..00000000000
--- a/changelogs/fragments/user-accept-yescrypt-hash.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-minor_changes:
-- user - accept yescrypt hash as user password
diff --git a/changelogs/fragments/user_action_fix.yml b/changelogs/fragments/user_action_fix.yml
new file mode 100644
index 00000000000..64ee997d688
--- /dev/null
+++ b/changelogs/fragments/user_action_fix.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - user module now avoids changing ownership of files symlinked in provided home dir skeleton
diff --git a/changelogs/fragments/v2.17.0-initial-commit.yaml b/changelogs/fragments/v2.19.0-initial-commit.yaml
similarity index 100%
rename from changelogs/fragments/v2.17.0-initial-commit.yaml
rename to changelogs/fragments/v2.19.0-initial-commit.yaml
diff --git a/changelogs/fragments/wait_for_mmap.yml b/changelogs/fragments/wait_for_mmap.yml
deleted file mode 100644
index c5ee952fce5..00000000000
--- a/changelogs/fragments/wait_for_mmap.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
- - wait_for should not handle 'non mmapable files' again.
diff --git a/changelogs/fragments/winrm-send-input.yml b/changelogs/fragments/winrm-send-input.yml
deleted file mode 100644
index a007bbd48ad..00000000000
--- a/changelogs/fragments/winrm-send-input.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-bugfixes:
-- winrm - Better handle send input failures when communicating with hosts under load
diff --git a/changelogs/fragments/yum-removal.yml b/changelogs/fragments/yum-removal.yml
deleted file mode 100644
index d9f9dc3ff4a..00000000000
--- a/changelogs/fragments/yum-removal.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-removed_features:
- - "With the removal of Python 2 support, the yum module and yum action plugin are removed and redirected to ``dnf``."
diff --git a/hacking/README.md b/hacking/README.md
index 51f17202ed5..534a7e4db0e 100644
--- a/hacking/README.md
+++ b/hacking/README.md
@@ -5,7 +5,7 @@ env-setup
---------
The 'env-setup' script modifies your environment to allow you to run
-ansible from a git checkout using python >= 3.10.
+ansible from a git checkout using python >= 3.11.
First, set up your environment to run from the checkout:
@@ -18,7 +18,7 @@ and do not wish to install them from your operating system package manager, you
can install them from pip
```shell
-easy_install pip # if pip is not already available
+python -Im ensurepip # if pip is not already available
pip install -r requirements.txt
```
diff --git a/hacking/backport/README.md b/hacking/backport/README.md
index 3fb212b33e0..ce7112b22f9 100644
--- a/hacking/backport/README.md
+++ b/hacking/backport/README.md
@@ -4,7 +4,7 @@ This directory contains scripts useful for dealing with and maintaining
backports. Scripts in it depend on pygithub, and expect a valid environment
variable called `GITHUB_TOKEN`.
-To generate a Github token, go to https://github.com/settings/tokens/new
+To generate a Github token, go to <https://github.com/settings/tokens/new>
## `backport_of_line_adder.py`
diff --git a/hacking/env-setup b/hacking/env-setup
index 0a86e0fe4fb..df1ea4020f2 100644
--- a/hacking/env-setup
+++ b/hacking/env-setup
@@ -57,22 +57,6 @@ expr "$PYTHONPATH" : "${ANSIBLE_TEST_PREFIX_PYTHONPATH}.*" > /dev/null || prepen
expr "$PATH" : "${PREFIX_PATH}.*" > /dev/null || prepend_path PATH "$PREFIX_PATH"
expr "$MANPATH" : "${PREFIX_MANPATH}.*" > /dev/null || prepend_path MANPATH "$PREFIX_MANPATH"
-#
-# Generate egg_info so that pkg_resources works
-#
-
-# Do the work in a function so we don't repeat ourselves later
-gen_egg_info()
-{
- # check for current and past egg-info directory names
- if ls "$PREFIX_PYTHONPATH"/ansible*.egg-info >/dev/null 2>&1; then
- # bypass shell aliases with leading backslash
- # see https://github.com/ansible/ansible/pull/11967
- \rm -rf "$PREFIX_PYTHONPATH"/ansible*.egg-info
- fi
- "$PYTHON_BIN" setup.py egg_info
-}
-
if [ "$ANSIBLE_DEV_HOME" != "$PWD" ] ; then
current_dir="$PWD"
else
@@ -81,10 +65,8 @@ fi
(
cd "$ANSIBLE_DEV_HOME"
if [ "$verbosity" = silent ] ; then
- gen_egg_info > /dev/null 2>&1 &
find . -type f -name "*.pyc" -exec rm -f {} \; > /dev/null 2>&1
else
- gen_egg_info
find . -type f -name "*.pyc" -exec rm -f {} \;
fi
cd "$current_dir"
diff --git a/hacking/env-setup.fish b/hacking/env-setup.fish
index 529b57333ef..ee945ec1452 100644
--- a/hacking/env-setup.fish
+++ b/hacking/env-setup.fish
@@ -64,25 +64,11 @@ if not set -q PYTHON_BIN
end
end
-# Generate egg_info so that pkg_resources works
-function gen_egg_info
- # Check if ansible*.egg-info directory exists and remove if found
- if test -d $PREFIX_PYTHONPATH/ansible*.egg-info
- rm -rf $PREFIX_PYTHONPATH/ansible*.egg-info
- end
- # Execute setup.py egg_info using the chosen Python interpreter
- eval $PYTHON_BIN setup.py egg_info
-end
-
pushd $ANSIBLE_HOME
if test -n "$QUIET"
- # Run gen_egg_info in the background and redirect output to /dev/null
- gen_egg_info &> /dev/null
# Remove any .pyc files found
find . -type f -name "*.pyc" -exec rm -f '{}' ';' &> /dev/null
else
- # Run gen_egg_info
- gen_egg_info
# Remove any .pyc files found
find . -type f -name "*.pyc" -exec rm -f '{}' ';'
# Display setup details
diff --git a/hacking/ticket_stubs/bug_internal_api.md b/hacking/ticket_stubs/bug_internal_api.md
index 76a3bb085ca..89162558ca0 100644
--- a/hacking/ticket_stubs/bug_internal_api.md
+++ b/hacking/ticket_stubs/bug_internal_api.md
@@ -13,11 +13,11 @@ but this does not seem to match that case.
If you really need a stable API target to use Ansible, consider using ansible-runner:
-* https://github.com/ansible/ansible-runner
+*
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
If you or anyone else has any further questions, please let us know by using any of the communication methods listed in the page below:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+*
Thank you once again for this and your interest in Ansible!
diff --git a/hacking/ticket_stubs/bug_wrong_repo.md b/hacking/ticket_stubs/bug_wrong_repo.md
index b711e85f3ae..ed115232a20 100644
--- a/hacking/ticket_stubs/bug_wrong_repo.md
+++ b/hacking/ticket_stubs/bug_wrong_repo.md
@@ -8,29 +8,28 @@ This appears to be something that should be filed against another project or bug
<< CHOOSE AS APPROPRIATE >>
-* https://github.com/ansible-community/ansible-lint
-* https://github.com/ansible/ansible-runner
-* https://github.com/ansible/ansible-navigator
-* https://github.com/ansible-community/antsibull
-* https://github.com/ansible-community/ara
-* https://github.com/ansible/awx
-* https://github.com/ansible-collections/community.general
-* https://github.com/ansible-community/molecule
-* For AAP or Tower licensees report issues via your Red Hat representative or https://issues.redhat.com
+*
+*
+*
+*
+*
+*
+*
+*
+* For AAP Customer issues please see
If you can stop by the tracker or forum for one of those projects, we'd appreciate it.
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
Should you still wish to discuss things further, or if you disagree with our thought process, please stop by one of our two mailing lists:
-* https://groups.google.com/forum/#!forum/ansible-devel
+* [ansible-core on the Ansible Forum](https://forum.ansible.com/tag/ansible-core)
* Matrix: [#devel:ansible.im](https://matrix.to/#/#devel:ansible.im)
-* IRC: #ansible-devel on [irc.libera.chat](https://libera.chat/)
We'd be happy to discuss things.
See this page for a complete list of communication channels and their purposes:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+*
Thank you once again!
diff --git a/hacking/ticket_stubs/collections.md b/hacking/ticket_stubs/collections.md
index eecd8151f15..3698ea14bd9 100644
--- a/hacking/ticket_stubs/collections.md
+++ b/hacking/ticket_stubs/collections.md
@@ -2,15 +2,16 @@ Hi!
Thank you very much for your submission to Ansible. It means a lot to us that you've taken the time to contribute.
-Since Ansible 2.10 we are no longer accepting new modules/plugins into Ansible core. However, we recommend looking into providing this functionality through Ansible Galaxy via Ansible Collections. You can find more information about collections at:
+Since Ansible 2.10 we are no longer accepting new modules/plugins into Ansible core.
+However, we recommend looking into providing this functionality through Ansible Galaxy via Ansible Collections. You can find more information about collections at:
-* https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html.
+* .
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
The mailing list and irc are great ways to ask questions, or post if you don't think this particular issue is resolved.
See this page for a complete and up to date list of communication channels and their purposes:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+*
Thank you once again for this and your interest in Ansible!
diff --git a/hacking/ticket_stubs/guide_newbie_about_gh_and_contributing_to_ansible.md b/hacking/ticket_stubs/guide_newbie_about_gh_and_contributing_to_ansible.md
index 3f4de70d1ff..708eedc53d1 100644
--- a/hacking/ticket_stubs/guide_newbie_about_gh_and_contributing_to_ansible.md
+++ b/hacking/ticket_stubs/guide_newbie_about_gh_and_contributing_to_ansible.md
@@ -9,13 +9,13 @@ Assuming that you wanted to create actual contribution, I think that
you may want to learn and read through the following articles I've
gathered for you:
-• https://opensource.guide/how-to-contribute/
-• https://docs.ansible.com/ansible/devel/community/
+•
+•
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
If you or anyone else has any further questions, please let us know by using any of the communication methods listed in the page below:
- https://docs.ansible.com/ansible/latest/community/communication.html
+
Have a nice day!
diff --git a/hacking/ticket_stubs/no_thanks.md b/hacking/ticket_stubs/no_thanks.md
index 2e2143fe619..8c32b6bc4f9 100644
--- a/hacking/ticket_stubs/no_thanks.md
+++ b/hacking/ticket_stubs/no_thanks.md
@@ -11,8 +11,9 @@ However, we're absolutely always up for discussion.
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
If you or anyone else has any further questions, please let us know by using any of the communication methods listed in the page below:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+*
-In the future, sometimes starting a discussion on the development list prior to implementing a feature can make getting things included a little easier, but it's not always necessary.
+In the future, sometimes starting a discussion on the development list prior to implementing
+a feature can make getting things included a little easier, but it's not always necessary.
Thank you once again for this and your interest in Ansible!
diff --git a/hacking/ticket_stubs/pr_duplicate.md b/hacking/ticket_stubs/pr_duplicate.md
index 01a2a72809f..080e4e4abf1 100644
--- a/hacking/ticket_stubs/pr_duplicate.md
+++ b/hacking/ticket_stubs/pr_duplicate.md
@@ -15,6 +15,6 @@ In the future, sometimes starting a discussion on the development list prior to
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
If you or anyone else has any further questions, please let us know by using any of the communication methods listed in the page below:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+*
Thank you once again for this and your interest in Ansible!
diff --git a/hacking/ticket_stubs/pr_merged.md b/hacking/ticket_stubs/pr_merged.md
index 0183ee90630..5d354e3586f 100644
--- a/hacking/ticket_stubs/pr_merged.md
+++ b/hacking/ticket_stubs/pr_merged.md
@@ -1,7 +1,7 @@
Hi!
This has been merged in, and will also be included in the next major release.
-For more info on our process see https://docs.ansible.com/ansible/devel/reference_appendices/release_and_maintenance.html#ansible-core-workflow
+For more info on our process see
If you or anyone else has any further questions, please let us know by stopping by one of the mailing lists or chat channels, as appropriate.
@@ -10,6 +10,6 @@ The mailing list and irc are great ways to ask questions, or post if you don't t
See this page for a complete and up to date list of communication channels and their purposes:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+*
Thank you!
diff --git a/hacking/ticket_stubs/proposal.md b/hacking/ticket_stubs/proposal.md
index 25d4cb403fe..2d8182f12be 100644
--- a/hacking/ticket_stubs/proposal.md
+++ b/hacking/ticket_stubs/proposal.md
@@ -3,16 +3,15 @@ Hi!
Ansible has a Proposal process for large feature ideas or changes in current design and functionality, such as this.
If you are still interested in seeing this new feature get into Ansible, please submit a proposal for it using this process.
-https://github.com/ansible/proposals/blob/master/proposals_process_proposal.md
+
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
-The mailing list and irc are great ways to ask questions, or post if you don't think this particular issue is resolved.
+The Forum is the best way to ask questions, or post if you don't think this particular issue is resolved.
-* #ansible-devel on [irc.libera.chat](https://libera.chat/)
-* https://groups.google.com/forum/#!forum/ansible-devel
+*
Or check this page for a more complete list of communication channels and their purposes:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+*
Thank you!
diff --git a/hacking/ticket_stubs/question_not_bug.md b/hacking/ticket_stubs/question_not_bug.md
index f4b143fbb60..dab0d2edba1 100644
--- a/hacking/ticket_stubs/question_not_bug.md
+++ b/hacking/ticket_stubs/question_not_bug.md
@@ -2,14 +2,13 @@ Hi!
Thanks very much for your interest in Ansible. It means a lot to us.
-This appears to be a user question, and we'd like to direct these kinds of things to either the mailing list or the IRC channel.
+This appears to be a user question, and we'd like to direct these topics to the Ansible Forum.
-* IRC: #ansible on [irc.libera.chat](https://libera.chat/)
-* mailing list: https://groups.google.com/forum/#!forum/ansible-project
+* [Ansible Forum](https://forum.ansible.com)
-See this page for a complete and up to date list of communication channels and their purposes:
+See this page for a complete and up to date list of communication channels and their purposes:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+*
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
If don't you think this particular issue is resolved, you should still stop by there first, we'd appreciate it.
diff --git a/hacking/ticket_stubs/resolved.md b/hacking/ticket_stubs/resolved.md
index 8eedbcfc156..f040d6d05a4 100644
--- a/hacking/ticket_stubs/resolved.md
+++ b/hacking/ticket_stubs/resolved.md
@@ -11,6 +11,6 @@ The mailing list and irc are great ways to ask questions, or post if you don't t
See this page for a complete list of communication channels and their purposes:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+*
Thank you!
diff --git a/hacking/ticket_stubs/wider_discussion.md b/hacking/ticket_stubs/wider_discussion.md
index 74585816fc7..3ab9073f443 100644
--- a/hacking/ticket_stubs/wider_discussion.md
+++ b/hacking/ticket_stubs/wider_discussion.md
@@ -8,14 +8,13 @@ Reasons for this include:
* INSERT REASONS!
Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
-Can you please post on ansible-development list so we can talk about this idea with the wider group?
+Can you please post on the Ansible Forum so we can talk about this idea with the wider group?
-* https://groups.google.com/forum/#!forum/ansible-devel
+* [Ansible Core on the Ansible Forum](https://forum.ansible.com/tag/ansible-core)
* Matrix: [#devel:ansible.im](https://matrix.to/#/#devel:ansible.im)
-* #ansible-devel on [irc.libera.chat](https://libera.chat/)
For other alternatives, check this page for a more complete list of communication channels and their purposes:
-* https://docs.ansible.com/ansible/latest/community/communication.html
+*
Thank you once again for this and your interest in Ansible!
diff --git a/hacking/update-sanity-requirements.py b/hacking/update-sanity-requirements.py
index 997d6dbf87a..aaaa803cde8 100755
--- a/hacking/update-sanity-requirements.py
+++ b/hacking/update-sanity-requirements.py
@@ -52,7 +52,6 @@ class SanityTest:
if pip_freeze.stdout:
raise Exception(f'Initial virtual environment is not empty:\n{pip_freeze.stdout}')
- subprocess.run(pip + ['install', 'wheel'], env=env, check=True) # make bdist_wheel available during pip install
subprocess.run(pip + ['install', '-r', self.source_path], env=env, check=True)
freeze_options = ['--all']
diff --git a/lib/ansible/__main__.py b/lib/ansible/__main__.py
index cb7006285b4..afdd2849739 100644
--- a/lib/ansible/__main__.py
+++ b/lib/ansible/__main__.py
@@ -3,9 +3,6 @@
from __future__ import annotations
import argparse
-import importlib
-import os
-import sys
from importlib.metadata import distribution
@@ -19,22 +16,10 @@ def main():
ep_map = {_short_name(ep.name): ep for ep in dist.entry_points if ep.group == 'console_scripts'}
parser = argparse.ArgumentParser(prog='python -m ansible', add_help=False)
- parser.add_argument('entry_point', choices=list(ep_map) + ['test'])
+ parser.add_argument('entry_point', choices=list(ep_map))
args, extra = parser.parse_known_args()
- if args.entry_point == 'test':
- ansible_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
- source_root = os.path.join(ansible_root, 'test', 'lib')
-
- if os.path.exists(os.path.join(source_root, 'ansible_test', '_internal', '__init__.py')):
- # running from source, use that version of ansible-test instead of any version that may already be installed
- sys.path.insert(0, source_root)
-
- module = importlib.import_module('ansible_test._util.target.cli.ansible_test_cli_stub')
- main = module.main
- else:
- main = ep_map[args.entry_point].load()
-
+ main = ep_map[args.entry_point].load()
main([args.entry_point] + extra)
diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py
index 64ab78fb0ab..8b12aec17f4 100644
--- a/lib/ansible/cli/__init__.py
+++ b/lib/ansible/cli/__init__.py
@@ -11,9 +11,9 @@ import sys
# Used for determining if the system is running a new enough python version
# and should only restrict on our documented minimum versions
-if sys.version_info < (3, 10):
+if sys.version_info < (3, 11):
raise SystemExit(
- 'ERROR: Ansible requires Python 3.10 or newer on the controller. '
+ 'ERROR: Ansible requires Python 3.11 or newer on the controller. '
'Current version: %s' % ''.join(sys.version.splitlines())
)
@@ -167,19 +167,7 @@ class CLI(ABC):
else:
display.v(u"No config file found; using defaults")
- # warn about deprecated config options
- for deprecated in C.config.DEPRECATED:
- name = deprecated[0]
- why = deprecated[1]['why']
- if 'alternatives' in deprecated[1]:
- alt = ', use %s instead' % deprecated[1]['alternatives']
- else:
- alt = ''
- ver = deprecated[1].get('version')
- date = deprecated[1].get('date')
- collection_name = deprecated[1].get('collection_name')
- display.deprecated("%s option, %s%s" % (name, why, alt),
- version=ver, date=date, collection_name=collection_name)
+ C.handle_config_noise(display)
@staticmethod
def split_vault_id(vault_id):
@@ -425,6 +413,10 @@ class CLI(ABC):
skip_tags.add(tag.strip())
options.skip_tags = list(skip_tags)
+ # Make sure path argument doesn't have a backslash
+ if hasattr(options, 'action') and options.action in ['install', 'download'] and hasattr(options, 'args'):
+ options.args = [path.rstrip("/") for path in options.args]
+
# process inventory options except for CLIs that require their own processing
if hasattr(options, 'inventory') and not self.SKIP_INVENTORY_DEFAULTS:
diff --git a/lib/ansible/cli/config.py b/lib/ansible/cli/config.py
index e7457c18f43..da9231af74b 100755
--- a/lib/ansible/cli/config.py
+++ b/lib/ansible/cli/config.py
@@ -9,9 +9,10 @@ from __future__ import annotations
from ansible.cli import CLI
import os
-import yaml
import shlex
import subprocess
+import sys
+import yaml
from collections.abc import Mapping
@@ -21,7 +22,7 @@ import ansible.plugins.loader as plugin_loader
from ansible import constants as C
from ansible.cli.arguments import option_helpers as opt_help
from ansible.config.manager import ConfigManager, Setting
-from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleRequiredOptionError
from ansible.module_utils.common.text.converters import to_native, to_text, to_bytes
from ansible.module_utils.common.json import json_dump
from ansible.module_utils.six import string_types
@@ -34,6 +35,9 @@ from ansible.utils.path import unfrackpath
display = Display()
+_IGNORE_CHANGED = frozenset({'_terms', '_input'})
+
+
def yaml_dump(data, default_flow_style=False, default_style=None):
return yaml.dump(data, Dumper=AnsibleDumper, default_flow_style=default_flow_style, default_style=default_style)
@@ -49,6 +53,37 @@ def get_constants():
return get_constants.cvars
+def _ansible_env_vars(varname):
+ ''' return True or False depending on whether the variable name is possibly a 'configurable' ansible env variable '''
+ return all(
+ [
+ varname.startswith("ANSIBLE_"),
+ not varname.startswith(("ANSIBLE_TEST_", "ANSIBLE_LINT_")),
+ varname not in ("ANSIBLE_CONFIG", "ANSIBLE_DEV_HOME"),
+ ]
+ )
+
+
+def _get_evar_list(settings):
+ data = []
+ for setting in settings:
+ if 'env' in settings[setting] and settings[setting]['env']:
+ for varname in settings[setting]['env']:
+ data.append(varname.get('name'))
+ return data
+
+
+def _get_ini_entries(settings):
+ data = {}
+ for setting in settings:
+ if 'ini' in settings[setting] and settings[setting]['ini']:
+ for kv in settings[setting]['ini']:
+ if not kv['section'] in data:
+ data[kv['section']] = set()
+ data[kv['section']].add(kv['key'])
+ return data
+
+
class ConfigCLI(CLI):
""" Config command line class """
@@ -99,9 +134,13 @@ class ConfigCLI(CLI):
init_parser.add_argument('--disabled', dest='commented', action='store_true', default=False,
help='Prefixes all entries with a comment character to disable them')
- # search_parser = subparsers.add_parser('find', help='Search configuration')
- # search_parser.set_defaults(func=self.execute_search)
- # search_parser.add_argument('args', help='Search term', metavar='')
+ validate_parser = subparsers.add_parser('validate',
+ help='Validate the configuration file and environment variables. '
+ 'By default it only checks the base settings without accounting for plugins (see -t).',
+ parents=[common])
+ validate_parser.set_defaults(func=self.execute_validate)
+ validate_parser.add_argument('--format', '-f', dest='format', action='store', choices=['ini', 'env'], default='ini',
+ help='Output format for validate')
def post_process_args(self, options):
options = super(ConfigCLI, self).post_process_args(options)
@@ -113,6 +152,10 @@ class ConfigCLI(CLI):
super(ConfigCLI, self).run()
+ # initialize each galaxy server's options from known listed servers
+ self._galaxy_servers = [s for s in C.GALAXY_SERVER_LIST or [] if s] # clean list, reused later here
+ C.config.load_galaxy_server_defs(self._galaxy_servers)
+
if context.CLIARGS['config_file']:
self.config_file = unfrackpath(context.CLIARGS['config_file'], follow=False)
b_config = to_bytes(self.config_file)
@@ -226,11 +269,17 @@ class ConfigCLI(CLI):
'''
build a dict with the list requested configs
'''
+
config_entries = {}
if context.CLIARGS['type'] in ('base', 'all'):
# this dumps main/common configs
config_entries = self.config.get_configuration_definitions(ignore_private=True)
+ # for base and all, we include galaxy servers
+ config_entries['GALAXY_SERVERS'] = {}
+ for server in self._galaxy_servers:
+ config_entries['GALAXY_SERVERS'][server] = self.config.get_configuration_definitions('galaxy_server', server)
+
if context.CLIARGS['type'] != 'base':
config_entries['PLUGINS'] = {}
@@ -239,6 +288,7 @@ class ConfigCLI(CLI):
for ptype in C.CONFIGURABLE_PLUGINS:
config_entries['PLUGINS'][ptype.upper()] = self._list_plugin_settings(ptype)
elif context.CLIARGS['type'] != 'base':
+ # only for requested types
config_entries['PLUGINS'][context.CLIARGS['type']] = self._list_plugin_settings(context.CLIARGS['type'], context.CLIARGS['args'])
return config_entries
@@ -269,7 +319,7 @@ class ConfigCLI(CLI):
if not settings[setting].get('description'):
continue
- default = settings[setting].get('default', '')
+ default = self.config.template_default(settings[setting].get('default', ''), get_constants())
if subkey == 'env':
stype = settings[setting].get('type', '')
if stype == 'boolean':
@@ -351,14 +401,14 @@ class ConfigCLI(CLI):
if entry['key'] not in seen[entry['section']]:
seen[entry['section']].append(entry['key'])
- default = opt.get('default', '')
+ default = self.config.template_default(opt.get('default', ''), get_constants())
if opt.get('type', '') == 'list' and not isinstance(default, string_types):
# python lists are not valid ini ones
default = ', '.join(default)
elif default is None:
default = ''
- if context.CLIARGS['commented']:
+ if context.CLIARGS.get('commented', False):
entry['key'] = ';%s' % entry['key']
key = desc + '\n%s=%s' % (entry['key'], default)
@@ -408,19 +458,21 @@ class ConfigCLI(CLI):
entries = []
for setting in sorted(config):
- changed = (config[setting].origin not in ('default', 'REQUIRED'))
+ changed = (config[setting].origin not in ('default', 'REQUIRED') and setting not in _IGNORE_CHANGED)
if context.CLIARGS['format'] == 'display':
if isinstance(config[setting], Setting):
# proceed normally
- if config[setting].origin == 'default':
+ value = config[setting].value
+ if config[setting].origin == 'default' or setting in _IGNORE_CHANGED:
color = 'green'
+ value = self.config.template_default(value, get_constants())
elif config[setting].origin == 'REQUIRED':
# should include '_terms', '_input', etc
color = 'red'
else:
color = 'yellow'
- msg = "%s(%s) = %s" % (setting, config[setting].origin, config[setting].value)
+ msg = "%s(%s) = %s" % (setting, config[setting].origin, value)
else:
color = 'green'
msg = "%s(%s) = %s" % (setting, 'default', config[setting].get('default'))
@@ -429,6 +481,8 @@ class ConfigCLI(CLI):
else:
entry = {}
for key in config[setting]._fields:
+ if key == 'type':
+ continue
entry[key] = getattr(config[setting], key)
if not context.CLIARGS['only_changed'] or changed:
@@ -437,7 +491,10 @@ class ConfigCLI(CLI):
return entries
def _get_global_configs(self):
- config = self.config.get_configuration_definitions(ignore_private=True).copy()
+
+ # Add base
+ config = self.config.get_configuration_definitions(ignore_private=True)
+ # convert to settings
for setting in config.keys():
v, o = C.config.get_config_value_and_origin(setting, cfile=self.config_file, variables=get_constants())
config[setting] = Setting(setting, v, o, None)
@@ -449,7 +506,7 @@ class ConfigCLI(CLI):
# prep loading
loader = getattr(plugin_loader, '%s_loader' % ptype)
- # acumulators
+ # accumulators
output = []
config_entries = {}
@@ -466,7 +523,7 @@ class ConfigCLI(CLI):
plugin_cs = loader.all(class_only=True)
for plugin in plugin_cs:
- # in case of deprecastion they diverge
+ # in case of deprecation they diverge
finalname = name = plugin._load_name
if name.startswith('_'):
if os.path.islink(plugin._original_path):
@@ -489,12 +546,9 @@ class ConfigCLI(CLI):
for setting in config_entries[finalname].keys():
try:
v, o = C.config.get_config_value_and_origin(setting, cfile=self.config_file, plugin_type=ptype, plugin_name=name, variables=get_constants())
- except AnsibleError as e:
- if to_text(e).startswith('No setting was provided for required configuration'):
- v = None
- o = 'REQUIRED'
- else:
- raise e
+ except AnsibleRequiredOptionError:
+ v = None
+ o = 'REQUIRED'
if v is None and o is None:
# not all cases will be error
@@ -514,17 +568,60 @@ class ConfigCLI(CLI):
return output
+ def _get_galaxy_server_configs(self):
+
+ output = []
+ # add galaxy servers
+ for server in self._galaxy_servers:
+ server_config = {}
+ s_config = self.config.get_configuration_definitions('galaxy_server', server)
+ for setting in s_config.keys():
+ try:
+ v, o = C.config.get_config_value_and_origin(setting, plugin_type='galaxy_server', plugin_name=server, cfile=self.config_file)
+ except AnsibleError as e:
+ if s_config[setting].get('required', False):
+ v = None
+ o = 'REQUIRED'
+ else:
+ raise e
+ if v is None and o is None:
+ # not all cases will be error
+ o = 'REQUIRED'
+ server_config[setting] = Setting(setting, v, o, None)
+ if context.CLIARGS['format'] == 'display':
+ if not context.CLIARGS['only_changed'] or server_config:
+ equals = '=' * len(server)
+ output.append(f'\n{server}\n{equals}')
+ output.extend(self._render_settings(server_config))
+ else:
+ output.append({server: server_config})
+
+ return output
+
def execute_dump(self):
'''
Shows the current settings, merges ansible.cfg if specified
'''
- if context.CLIARGS['type'] == 'base':
- # deal with base
- output = self._get_global_configs()
- elif context.CLIARGS['type'] == 'all':
+ output = []
+ if context.CLIARGS['type'] in ('base', 'all'):
# deal with base
output = self._get_global_configs()
- # deal with plugins
+
+ # add galaxy servers
+ server_config_list = self._get_galaxy_server_configs()
+ if context.CLIARGS['format'] == 'display':
+ output.append('\nGALAXY_SERVERS:\n')
+ output.extend(server_config_list)
+ else:
+ configs = {}
+ for server_config in server_config_list:
+ server = list(server_config.keys())[0]
+ server_reduced_config = server_config.pop(server)
+ configs[server] = server_reduced_config
+ output.append({'GALAXY_SERVERS': configs})
+
+ if context.CLIARGS['type'] == 'all':
+ # add all plugins
for ptype in C.CONFIGURABLE_PLUGINS:
plugin_list = self._get_plugin_configs(ptype, context.CLIARGS['args'])
if context.CLIARGS['format'] == 'display':
@@ -537,8 +634,9 @@ class ConfigCLI(CLI):
else:
pname = '%s_PLUGINS' % ptype.upper()
output.append({pname: plugin_list})
- else:
- # deal with plugins
+
+ elif context.CLIARGS['type'] != 'base':
+ # deal with specific plugin
output = self._get_plugin_configs(context.CLIARGS['type'], context.CLIARGS['args'])
if context.CLIARGS['format'] == 'display':
@@ -550,6 +648,73 @@ class ConfigCLI(CLI):
self.pager(to_text(text, errors='surrogate_or_strict'))
+ def execute_validate(self):
+
+ found = False
+ config_entries = self._list_entries_from_args()
+ plugin_types = config_entries.pop('PLUGINS', None)
+ galaxy_servers = config_entries.pop('GALAXY_SERVERS', None)
+
+ if context.CLIARGS['format'] == 'ini':
+ if C.CONFIG_FILE is not None:
+ # validate ini config since it is found
+
+ sections = _get_ini_entries(config_entries)
+ # Also from plugins
+ if plugin_types:
+ for ptype in plugin_types:
+ for plugin in plugin_types[ptype].keys():
+ plugin_sections = _get_ini_entries(plugin_types[ptype][plugin])
+ for s in plugin_sections:
+ if s in sections:
+ sections[s].update(plugin_sections[s])
+ else:
+ sections[s] = plugin_sections[s]
+ if galaxy_servers:
+ for server in galaxy_servers:
+ server_sections = _get_ini_entries(galaxy_servers[server])
+ for s in server_sections:
+ if s in sections:
+ sections[s].update(server_sections[s])
+ else:
+ sections[s] = server_sections[s]
+ if sections:
+ p = C.config._parsers[C.CONFIG_FILE]
+ for s in p.sections():
+ # check for valid sections
+ if s not in sections:
+ display.error(f"Found unknown section '{s}' in '{C.CONFIG_FILE}'.")
+ found = True
+ continue
+
+ # check keys in valid sections
+ for k in p.options(s):
+ if k not in sections[s]:
+ display.error(f"Found unknown key '{k}' in section '{s}' in '{C.CONFIG_FILE}'.")
+ found = True
+
+ elif context.CLIARGS['format'] == 'env':
+ # validate any 'ANSIBLE_' env vars found
+ evars = [varname for varname in os.environ.keys() if _ansible_env_vars(varname)]
+ if evars:
+ data = _get_evar_list(config_entries)
+ if plugin_types:
+ for ptype in plugin_types:
+ for plugin in plugin_types[ptype].keys():
+ data.extend(_get_evar_list(plugin_types[ptype][plugin]))
+
+ for evar in evars:
+ if evar not in data:
+ display.error(f"Found unknown environment variable '{evar}'.")
+ found = True
+
+ # we found discrepancies!
+ if found:
+ sys.exit(1)
+
+ # all good
+ display.display("All configurations seem valid!")
+
def main(args=None):
ConfigCLI.cli_executor(args)
diff --git a/lib/ansible/cli/console.py b/lib/ansible/cli/console.py
index 5805b97fce8..60f9cdd84a7 100755
--- a/lib/ansible/cli/console.py
+++ b/lib/ansible/cli/console.py
@@ -545,7 +545,7 @@ class ConsoleCLI(CLI, cmd.Cmd):
if path:
module_loader.add_directory(path)
- # dynamically add 'cannonical' modules as commands, aliases coudld be used and dynamically loaded
+ # dynamically add 'canonical' modules as commands, aliases could be used and dynamically loaded
self.modules = self.list_modules()
for module in self.modules:
setattr(self, 'do_' + module, lambda arg, module=module: self.default(module + ' ' + arg))
diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py
index 8d07391d4f0..a6a73b50b7b 100755
--- a/lib/ansible/cli/doc.py
+++ b/lib/ansible/cli/doc.py
@@ -50,7 +50,7 @@ PB_OBJECTS = ['Play', 'Role', 'Block', 'Task', 'Handler']
PB_LOADED = {}
SNIPPETS = ['inventory', 'lookup', 'module']
-# harcoded from ascii values
+# hardcoded from ascii values
STYLE = {
'BLINK': '\033[5m',
'BOLD': '\033[1m',
@@ -71,8 +71,14 @@ NOCOLOR = {
'PLUGIN': r'[%s]',
}
-# TODO: make configurable
-ref_style = {'MODULE': 'yellow', 'REF': 'magenta', 'LINK': 'cyan', 'DEP': 'magenta', 'CONSTANT': 'dark gray', 'PLUGIN': 'yellow'}
+ref_style = {
+ 'MODULE': C.COLOR_DOC_MODULE,
+ 'REF': C.COLOR_DOC_REFERENCE,
+ 'LINK': C.COLOR_DOC_LINK,
+ 'DEP': C.COLOR_DOC_DEPRECATED,
+ 'CONSTANT': C.COLOR_DOC_CONSTANT,
+ 'PLUGIN': C.COLOR_DOC_PLUGIN,
+}
def jdump(text):
@@ -381,6 +387,12 @@ class RoleMixin(object):
for role, collection, role_path in (roles | collroles):
argspec = self._load_argspec(role, role_path, collection)
+ if 'error' in argspec:
+ if fail_on_errors:
+ raise argspec['exception']
+ else:
+ display.warning('Skipping role (%s) due to: %s' % (role, argspec['error']), True)
+ continue
fqcn, doc = self._build_doc(role, role_path, collection, argspec, entry_point)
if doc:
result[fqcn] = doc
@@ -881,6 +893,7 @@ class DocCLI(CLI, RoleMixin):
plugin_type = context.CLIARGS['type'].lower()
do_json = context.CLIARGS['json_format'] or context.CLIARGS['dump']
listing = context.CLIARGS['list_files'] or context.CLIARGS['list_dir']
+ no_fail = bool(not context.CLIARGS['no_fail_on_errors'])
if context.CLIARGS['list_files']:
content = 'files'
@@ -903,7 +916,6 @@ class DocCLI(CLI, RoleMixin):
docs['all'] = {}
for ptype in ptypes:
- no_fail = bool(not context.CLIARGS['no_fail_on_errors'])
if ptype == 'role':
roles = self._create_role_list(fail_on_errors=no_fail)
docs['all'][ptype] = self._create_role_doc(roles.keys(), context.CLIARGS['entry_point'], fail_on_errors=no_fail)
@@ -929,7 +941,7 @@ class DocCLI(CLI, RoleMixin):
if plugin_type == 'keyword':
docs = DocCLI._get_keywords_docs(context.CLIARGS['args'])
elif plugin_type == 'role':
- docs = self._create_role_doc(context.CLIARGS['args'], context.CLIARGS['entry_point'])
+ docs = self._create_role_doc(context.CLIARGS['args'], context.CLIARGS['entry_point'], fail_on_errors=no_fail)
else:
# display specific plugin docs
docs = self._get_plugins_docs(plugin_type, context.CLIARGS['args'])
@@ -1083,7 +1095,7 @@ class DocCLI(CLI, RoleMixin):
text = DocCLI.get_man_text(doc, collection_name, plugin_type)
except Exception as e:
display.vvv(traceback.format_exc())
- raise AnsibleError("Unable to retrieve documentation from '%s' due to: %s" % (plugin, to_native(e)), orig_exc=e)
+ raise AnsibleError("Unable to retrieve documentation from '%s'" % (plugin), orig_exc=e)
return text
@@ -1189,7 +1201,7 @@ class DocCLI(CLI, RoleMixin):
opt_leadin = "-"
key = "%s%s %s" % (base_indent, opt_leadin, _format(o, 'yellow'))
- # description is specifically formated and can either be string or list of strings
+ # description is specifically formatted and can either be string or list of strings
if 'description' not in opt:
raise AnsibleError("All (sub-)options and return values must have a 'description' field")
text.append('')
@@ -1296,16 +1308,35 @@ class DocCLI(CLI, RoleMixin):
if doc.get('description'):
if isinstance(doc['description'], list):
- desc = " ".join(doc['description'])
+ descs = doc['description']
else:
- desc = doc['description']
- text.append("%s" % DocCLI.warp_fill(DocCLI.tty_ify(desc), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
+ descs = [doc['description']]
+ for desc in descs:
+ text.append("%s" % DocCLI.warp_fill(DocCLI.tty_ify(desc), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
text.append('')
if doc.get('options'):
- text.append(_format("Options", 'bold') + " (%s inicates it is required):" % ("=" if C.ANSIBLE_NOCOLOR else 'red'))
+ text.append(_format("Options", 'bold') + " (%s indicates it is required):" % ("=" if C.ANSIBLE_NOCOLOR else 'red'))
DocCLI.add_fields(text, doc.pop('options'), limit, opt_indent)
+ if doc.get('attributes', False):
+ display.deprecated(
+ f'The role {role}\'s argument spec {entry_point} contains the key "attributes", '
+ 'which will not be displayed by ansible-doc in the future. '
+ 'This was unintentionally allowed when plugin attributes were added, '
+ 'but the feature does not map well to role argument specs.',
+ version='2.20',
+ collection_name='ansible.builtin',
+ )
+ text.append("")
+ text.append(_format("ATTRIBUTES:", 'bold'))
+ for k in doc['attributes'].keys():
+ text.append('')
+ text.append(DocCLI.warp_fill(DocCLI.tty_ify(_format('%s:' % k, 'UNDERLINE')), limit - 6, initial_indent=opt_indent,
+ subsequent_indent=opt_indent))
+ text.append(DocCLI._indent_lines(DocCLI._dump_yaml(doc['attributes'][k]), opt_indent))
+ del doc['attributes']
+
# generic elements we will handle identically
for k in ('author',):
if k not in doc:
@@ -1320,6 +1351,17 @@ class DocCLI(CLI, RoleMixin):
# use empty indent since this affects the start of the yaml doc, not it's keys
text.append(DocCLI._indent_lines(DocCLI._dump_yaml({k.upper(): doc[k]}), ''))
+ if doc.get('examples', False):
+ text.append('')
+ text.append(_format("EXAMPLES:", 'bold'))
+ if isinstance(doc['examples'], string_types):
+ text.append(doc.pop('examples').strip())
+ else:
+ try:
+ text.append(yaml_dump(doc.pop('examples'), indent=2, default_flow_style=False))
+ except Exception as e:
+ raise AnsibleParserError("Unable to parse examples section", orig_exc=e)
+
return text
@staticmethod
@@ -1337,12 +1379,13 @@ class DocCLI(CLI, RoleMixin):
text.append("> %s %s (%s)" % (plugin_type.upper(), _format(doc.pop('plugin_name'), 'bold'), doc.pop('filename')))
if isinstance(doc['description'], list):
- desc = " ".join(doc.pop('description'))
+ descs = doc.pop('description')
else:
- desc = doc.pop('description')
+ descs = [doc.pop('description')]
text.append('')
- text.append(DocCLI.warp_fill(DocCLI.tty_ify(desc), limit, initial_indent=base_indent, subsequent_indent=base_indent))
+ for desc in descs:
+ text.append(DocCLI.warp_fill(DocCLI.tty_ify(desc), limit, initial_indent=base_indent, subsequent_indent=base_indent))
if display.verbosity > 0:
doc['added_in'] = DocCLI._format_version_added(doc.pop('version_added', 'historical'), doc.pop('version_added_collection', 'ansible-core'))
@@ -1350,16 +1393,15 @@ class DocCLI(CLI, RoleMixin):
if doc.get('deprecated', False):
text.append(_format("DEPRECATED: ", 'bold', 'DEP'))
if isinstance(doc['deprecated'], dict):
- if 'removed_at_date' in doc['deprecated']:
- text.append(
- "\tReason: %(why)s\n\tWill be removed in a release after %(removed_at_date)s\n\tAlternatives: %(alternative)s" % doc.pop('deprecated')
- )
- else:
- if 'version' in doc['deprecated'] and 'removed_in' not in doc['deprecated']:
- doc['deprecated']['removed_in'] = doc['deprecated']['version']
- text.append("\tReason: %(why)s\n\tWill be removed in: Ansible %(removed_in)s\n\tAlternatives: %(alternative)s" % doc.pop('deprecated'))
+ if 'removed_at_date' not in doc['deprecated'] and 'version' in doc['deprecated'] and 'removed_in' not in doc['deprecated']:
+ doc['deprecated']['removed_in'] = doc['deprecated']['version']
+ try:
+ text.append('\t' + C.config.get_deprecated_msg_from_config(doc['deprecated'], True))
+ except KeyError as e:
+ raise AnsibleError("Invalid deprecation documentation structure", orig_exc=e)
else:
- text.append("%s" % doc.pop('deprecated'))
+ text.append("%s" % doc['deprecated'])
+ del doc['deprecated']
if doc.pop('has_action', False):
text.append("")
@@ -1367,7 +1409,7 @@ class DocCLI(CLI, RoleMixin):
if doc.get('options', False):
text.append("")
- text.append(_format("OPTIONS", 'bold') + " (%s inicates it is required):" % ("=" if C.ANSIBLE_NOCOLOR else 'red'))
+ text.append(_format("OPTIONS", 'bold') + " (%s indicates it is required):" % ("=" if C.ANSIBLE_NOCOLOR else 'red'))
DocCLI.add_fields(text, doc.pop('options'), limit, opt_indent, man=(display.verbosity == 0))
if doc.get('attributes', False):
diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py
index 7803bdd9a3f..17c923209d4 100755
--- a/lib/ansible/cli/galaxy.py
+++ b/lib/ansible/cli/galaxy.py
@@ -55,37 +55,16 @@ from ansible.module_utils.common.yaml import yaml_dump, yaml_load
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils import six
from ansible.parsing.dataloader import DataLoader
-from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.playbook.role.requirement import RoleRequirement
from ansible.template import Templar
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_versioned_doclink
+from ansible.utils.vars import load_extra_vars
display = Display()
urlparse = six.moves.urllib.parse.urlparse
-# config definition by position: name, required, type
-SERVER_DEF = [
- ('url', True, 'str'),
- ('username', False, 'str'),
- ('password', False, 'str'),
- ('token', False, 'str'),
- ('auth_url', False, 'str'),
- ('api_version', False, 'int'),
- ('validate_certs', False, 'bool'),
- ('client_id', False, 'str'),
- ('timeout', False, 'int'),
-]
-
-# config definition fields
-SERVER_ADDITIONAL = {
- 'api_version': {'default': None, 'choices': [2, 3]},
- 'validate_certs': {'cli': [{'name': 'validate_certs'}]},
- 'timeout': {'default': C.GALAXY_SERVER_TIMEOUT, 'cli': [{'name': 'timeout'}]},
- 'token': {'default': None},
-}
-
def with_collection_artifacts_manager(wrapped_method):
"""Inject an artifacts manager if not passed explicitly.
@@ -366,6 +345,7 @@ class GalaxyCLI(CLI):
init_parser.add_argument('--type', dest='role_type', action='store', default='default',
help="Initialize using an alternate role type. Valid types include: 'container', "
"'apb' and 'network'.")
+ opt_help.add_runtask_options(init_parser)
def add_remove_options(self, parser, parents=None):
remove_parser = parser.add_parser('remove', parents=parents, help='Delete roles from roles_path.')
@@ -488,12 +468,31 @@ class GalaxyCLI(CLI):
ignore_errors_help = 'Ignore errors during installation and continue with the next specified ' \
'collection. This will not ignore dependency conflict errors.'
else:
- args_kwargs['help'] = 'Role name, URL or tar file'
+ args_kwargs['help'] = 'Role name, URL or tar file. This is mutually exclusive with -r.'
ignore_errors_help = 'Ignore errors and continue with the next specified role.'
+ if self._implicit_role:
+ # might install both roles and collections
+ description_text = (
+ 'Install roles and collections from file(s), URL(s) or Ansible '
+ 'Galaxy to the first entry in the config COLLECTIONS_PATH for collections '
+ 'and first entry in the config ROLES_PATH for roles. '
+ 'The first entry in the config ROLES_PATH can be overridden by --roles-path '
+ 'or -p, but this will result in only roles being installed.'
+ )
+ prog = 'ansible-galaxy install'
+ else:
+ prog = f"ansible-galaxy {galaxy_type} install"
+ description_text = (
+ 'Install {0}(s) from file(s), URL(s) or Ansible '
+ 'Galaxy to the first entry in the config {1}S_PATH '
+ 'unless overridden by --{0}s-path.'.format(galaxy_type, galaxy_type.upper())
+ )
install_parser = parser.add_parser('install', parents=parents,
help='Install {0}(s) from file(s), URL(s) or Ansible '
- 'Galaxy'.format(galaxy_type))
+ 'Galaxy'.format(galaxy_type),
+ description=description_text,
+ prog=prog,)
install_parser.set_defaults(func=self.execute_install)
install_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', **args_kwargs)
@@ -546,8 +545,12 @@ class GalaxyCLI(CLI):
'This does not apply to collections in remote Git repositories or URLs to remote tarballs.'
)
else:
- install_parser.add_argument('-r', '--role-file', dest='requirements',
- help='A file containing a list of roles to be installed.')
+ if self._implicit_role:
+ install_parser.add_argument('-r', '--role-file', dest='requirements',
+ help='A file containing a list of collections and roles to be installed.')
+ else:
+ install_parser.add_argument('-r', '--role-file', dest='requirements',
+ help='A file containing a list of roles to be installed.')
- # Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the
- # section [galaxy_server.<server_key>] for the values url, username, password, and token.
- config_dict = dict((k, server_config_def(server_key, k, req, ensure_type)) for k, req, ensure_type in SERVER_DEF)
- defs = AnsibleLoader(yaml_dump(config_dict)).get_single_data()
- C.config.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs)
# resolve the config created options above with existing config and user options
- server_options = C.config.get_plugin_options('galaxy_server', server_key)
+ server_options = C.config.get_plugin_options(plugin_type='galaxy_server', name=server_key)
# auth_url is used to create the token, but not directly by GalaxyAPI, so
# it doesn't need to be passed as kwarg to GalaxyApi, same for others we pop here
@@ -1171,6 +1150,7 @@ class GalaxyCLI(CLI):
)
loader = DataLoader()
+ inject_data.update(load_extra_vars(loader))
templar = Templar(loader, variables=inject_data)
# create role directory
@@ -1214,7 +1194,11 @@ class GalaxyCLI(CLI):
src_template = os.path.join(root, f)
dest_file = os.path.join(obj_path, rel_root, filename)
template_data = to_text(loader._get_file_contents(src_template)[0], errors='surrogate_or_strict')
- b_rendered = to_bytes(templar.template(template_data), errors='surrogate_or_strict')
+ try:
+ b_rendered = to_bytes(templar.template(template_data), errors='surrogate_or_strict')
+ except AnsibleError as e:
+ shutil.rmtree(b_obj_path)
+ raise AnsibleError(f"Failed to create {galaxy_type.title()} {obj_name}. Templating {src_template} failed with the error: {e}") from e
with open(dest_file, 'wb') as df:
df.write(b_rendered)
else:
diff --git a/lib/ansible/cli/inventory.py b/lib/ansible/cli/inventory.py
index c92b1300cb9..6a4ee9241f7 100755
--- a/lib/ansible/cli/inventory.py
+++ b/lib/ansible/cli/inventory.py
@@ -24,26 +24,6 @@ from ansible.vars.plugins import get_vars_from_inventory_sources, get_vars_from_
display = Display()
-INTERNAL_VARS = frozenset(['ansible_diff_mode',
- 'ansible_config_file',
- 'ansible_facts',
- 'ansible_forks',
- 'ansible_inventory_sources',
- 'ansible_limit',
- 'ansible_playbook_python',
- 'ansible_run_tags',
- 'ansible_skip_tags',
- 'ansible_verbosity',
- 'ansible_version',
- 'inventory_dir',
- 'inventory_file',
- 'inventory_hostname',
- 'inventory_hostname_short',
- 'groups',
- 'group_names',
- 'omit',
- 'playbook_dir', ])
-
class InventoryCLI(CLI):
''' used to display or dump the configured inventory as Ansible sees it '''
@@ -93,12 +73,12 @@ class InventoryCLI(CLI):
# list
self.parser.add_argument("--export", action="store_true", default=C.INVENTORY_EXPORT, dest='export',
- help="When doing an --list, represent in a way that is optimized for export,"
+ help="When doing --list, represent in a way that is optimized for export,"
"not as an accurate representation of how Ansible has processed it")
self.parser.add_argument('--output', default=None, dest='output_file',
help="When doing --list, send the inventory to a file instead of to the screen")
# self.parser.add_argument("--ignore-vars-plugins", action="store_true", default=False, dest='ignore_vars_plugins',
- # help="When doing an --list, skip vars data from vars plugins, by default, this would include group_vars/ and host_vars/")
+ # help="When doing --list, skip vars data from vars plugins, by default, this would include group_vars/ and host_vars/")
def post_process_args(self, options):
options = super(InventoryCLI, self).post_process_args(options)
@@ -245,7 +225,7 @@ class InventoryCLI(CLI):
@staticmethod
def _remove_internal(dump):
- for internal in INTERNAL_VARS:
+ for internal in C.INTERNAL_STATIC_VARS:
if internal in dump:
del dump[internal]
diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py
index fb3321efa9a..eb8436636e2 100755
--- a/lib/ansible/cli/pull.py
+++ b/lib/ansible/cli/pull.py
@@ -12,7 +12,7 @@ from ansible.cli import CLI
import datetime
import os
import platform
-import random
+import secrets
import shlex
import shutil
import socket
@@ -140,7 +140,7 @@ class PullCLI(CLI):
if options.sleep:
try:
- secs = random.randint(0, int(options.sleep))
+ secs = secrets.randbelow(int(options.sleep))
options.sleep = secs
except ValueError:
raise AnsibleOptionsError("%s is not a number." % options.sleep)
diff --git a/lib/ansible/cli/scripts/ansible_connection_cli_stub.py b/lib/ansible/cli/scripts/ansible_connection_cli_stub.py
old mode 100755
new mode 100644
index 9455b9851a9..701dcdaa198
--- a/lib/ansible/cli/scripts/ansible_connection_cli_stub.py
+++ b/lib/ansible/cli/scripts/ansible_connection_cli_stub.py
@@ -1,10 +1,8 @@
-#!/usr/bin/env python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
import fcntl
-import hashlib
import io
import os
import pickle
@@ -40,13 +38,6 @@ def read_stream(byte_stream):
if len(data) < size:
raise Exception("EOF found before data was complete")
- data_hash = to_text(byte_stream.readline().strip())
- if data_hash != hashlib.sha1(data).hexdigest():
- raise Exception("Read {0} bytes, but data did not match checksum".format(size))
-
- # restore escaped loose \r characters
- data = data.replace(br'\r', b'\r')
-
return data
@@ -221,7 +212,7 @@ def main(args=None):
""" Called to initiate the connect to the remote device
"""
- parser = opt_help.create_base_parser(prog='ansible-connection')
+ parser = opt_help.create_base_parser(prog=None)
opt_help.add_verbosity_options(parser)
parser.add_argument('playbook_pid')
parser.add_argument('task_uuid')
diff --git a/lib/ansible/config/base.yml b/lib/ansible/config/base.yml
index 079cb55c0d7..ed7007b3d1e 100644
--- a/lib/ansible/config/base.yml
+++ b/lib/ansible/config/base.yml
@@ -1,6 +1,14 @@
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
---
+_ANSIBLE_CONNECTION_PATH:
+ env:
+ - name: _ANSIBLE_CONNECTION_PATH
+ name: Overrides the location of the Ansible persistent connection helper script.
+ description:
+ - For internal use only.
+ type: path
+ version_added: "2.18"
ANSIBLE_HOME:
name: The Ansible home path
description:
@@ -25,6 +33,9 @@ ANSIBLE_CONNECTION_PATH:
- {key: ansible_connection_path, section: persistent_connection}
yaml: {key: persistent_connection.ansible_connection_path}
version_added: "2.8"
+ deprecated:
+ why: This setting has no effect.
+ version: "2.22"
ANSIBLE_COW_SELECTION:
name: Cowsay filter selection
default: default
@@ -293,6 +304,14 @@ COLOR_HIGHLIGHT:
env: [{name: ANSIBLE_COLOR_HIGHLIGHT}]
ini:
- {key: highlight, section: colors}
+COLOR_INCLUDED:
+ name: Color for 'included' task status
+ default: cyan
+ description: Defines the color to use when showing 'Included' task status.
+ env: [{name: ANSIBLE_COLOR_INCLUDED}]
+ ini:
+ - {key: included, section: colors}
+ version_added: '2.18'
COLOR_OK:
name: Color for 'ok' task status
default: green
@@ -328,6 +347,54 @@ COLOR_WARN:
env: [{name: ANSIBLE_COLOR_WARN}]
ini:
- {key: warn, section: colors}
+COLOR_DOC_MODULE:
+ name: Color for module name in the ansible-doc output
+ default: yellow
+ description: Defines the color to use when emitting a module name in the ansible-doc output.
+ env: [{name: ANSIBLE_COLOR_DOC_MODULE}]
+ ini:
+ - {key: doc_module, section: colors}
+ version_added: '2.18'
+COLOR_DOC_REFERENCE:
+ name: Color for cross-reference in the ansible-doc output
+ default: magenta
+ description: Defines the color to use when emitting cross-reference in the ansible-doc output.
+ env: [{name: ANSIBLE_COLOR_DOC_REFERENCE}]
+ ini:
+ - {key: doc_reference, section: colors}
+ version_added: '2.18'
+COLOR_DOC_LINK:
+ name: Color for Link in ansible-doc output
+ default: cyan
+ description: Defines the color to use when emitting a link in the ansible-doc output.
+ env: [{name: ANSIBLE_COLOR_DOC_LINK}]
+ ini:
+ - {key: doc_link, section: colors}
+ version_added: '2.18'
+COLOR_DOC_DEPRECATED:
+ name: Color for deprecated value in ansible-doc output
+ default: magenta
+ description: Defines the color to use when emitting a deprecated value in the ansible-doc output.
+ env: [{name: ANSIBLE_COLOR_DOC_DEPRECATED}]
+ ini:
+ - {key: doc_deprecated, section: colors}
+ version_added: '2.18'
+COLOR_DOC_CONSTANT:
+ name: Color for constant in ansible-doc output
+ default: dark gray
+ description: Defines the color to use when emitting a constant in the ansible-doc output.
+ env: [{name: ANSIBLE_COLOR_DOC_CONSTANT}]
+ ini:
+ - {key: doc_constant, section: colors}
+ version_added: '2.18'
+COLOR_DOC_PLUGIN:
+ name: Color for the plugin in ansible-doc output
+ default: yellow
+ description: Defines the color to use when emitting a plugin name in the ansible-doc output.
+ env: [{name: ANSIBLE_COLOR_DOC_PLUGIN}]
+ ini:
+ - {key: doc_plugin, section: colors}
+ version_added: '2.18'
CONNECTION_PASSWORD_FILE:
name: Connection password file
default: ~
@@ -582,24 +649,6 @@ DEFAULT_EXECUTABLE:
env: [{name: ANSIBLE_EXECUTABLE}]
ini:
- {key: executable, section: defaults}
-DEFAULT_FACT_PATH:
- name: local fact path
- description:
- - "This option allows you to globally configure a custom path for 'local_facts' for the implied :ref:`ansible_collections.ansible.builtin.setup_module` task when using fact gathering."
- - "If not set, it will fall back to the default from the ``ansible.builtin.setup`` module: ``/etc/ansible/facts.d``."
- - "This does **not** affect user defined tasks that use the ``ansible.builtin.setup`` module."
- - The real action being created by the implicit task is currently ``ansible.legacy.gather_facts`` module, which then calls the configured fact modules,
- by default this will be ``ansible.builtin.setup`` for POSIX systems but other platforms might have different defaults.
- env: [{name: ANSIBLE_FACT_PATH}]
- ini:
- - {key: fact_path, section: defaults}
- type: string
- deprecated:
- # TODO: when removing set playbook/play.py to default=None
- why: the module_defaults keyword is a more generic version and can apply to all calls to the
- M(ansible.builtin.gather_facts) or M(ansible.builtin.setup) actions
- version: "2.18"
- alternatives: module_defaults
DEFAULT_FILTER_PLUGIN_PATH:
name: Jinja2 Filter Plugins Path
default: '{{ ANSIBLE_HOME ~ "/plugins/filter:/usr/share/ansible/plugins/filter" }}'
@@ -643,39 +692,6 @@ DEFAULT_GATHERING:
implicit: "the cache plugin will be ignored and facts will be gathered per play unless 'gather_facts: False' is set."
explicit: facts will not be gathered unless directly requested in the play.
smart: each new host that has no facts discovered will be scanned, but if the same host is addressed in multiple plays it will not be contacted again in the run.
-DEFAULT_GATHER_SUBSET:
- name: Gather facts subset
- description:
- - Set the `gather_subset` option for the :ref:`ansible_collections.ansible.builtin.setup_module` task in the implicit fact gathering.
- See the module documentation for specifics.
- - "It does **not** apply to user defined ``ansible.builtin.setup`` tasks."
- env: [{name: ANSIBLE_GATHER_SUBSET}]
- ini:
- - key: gather_subset
- section: defaults
- version_added: "2.1"
- type: list
- deprecated:
- # TODO: when removing set playbook/play.py to default=None
- why: the module_defaults keyword is a more generic version and can apply to all calls to the
- M(ansible.builtin.gather_facts) or M(ansible.builtin.setup) actions
- version: "2.18"
- alternatives: module_defaults
-DEFAULT_GATHER_TIMEOUT:
- name: Gather facts timeout
- description:
- - Set the timeout in seconds for the implicit fact gathering, see the module documentation for specifics.
- - "It does **not** apply to user defined :ref:`ansible_collections.ansible.builtin.setup_module` tasks."
- env: [{name: ANSIBLE_GATHER_TIMEOUT}]
- ini:
- - {key: gather_timeout, section: defaults}
- type: integer
- deprecated:
- # TODO: when removing set playbook/play.py to default=None
- why: the module_defaults keyword is a more generic version and can apply to all calls to the
- M(ansible.builtin.gather_facts) or M(ansible.builtin.setup) actions
- version: "2.18"
- alternatives: module_defaults
DEFAULT_HASH_BEHAVIOUR:
name: Hash merge behaviour
default: replace
@@ -810,7 +826,9 @@ DEFAULT_LOCAL_TMP:
DEFAULT_LOG_PATH:
name: Ansible log file path
default: ~
- description: File to which Ansible will log on the controller. When empty logging is disabled.
+ description:
+ - File to which Ansible will log on the controller.
+ - When not set the logging is disabled.
env: [{name: ANSIBLE_LOG_PATH}]
ini:
- {key: log_path, section: defaults}
@@ -855,8 +873,8 @@ DEFAULT_MODULE_COMPRESSION:
env: []
ini:
- {key: module_compression, section: defaults}
-# vars:
-# - name: ansible_module_compression
+ vars:
+ - name: ansible_module_compression
DEFAULT_MODULE_NAME:
name: Default adhoc module
default: command
@@ -948,7 +966,7 @@ DEFAULT_PRIVATE_ROLE_VARS:
- This was introduced as a way to reset role variables to default values if a role is used more than once
in a playbook.
- Starting in version '2.17' M(ansible.builtin.include_roles) and M(ansible.builtin.import_roles) can
- indivudually override this via the C(public) parameter.
+ individually override this via the C(public) parameter.
- Included roles only make their variables public at execution, unlike imported roles which happen at playbook compile time.
env: [{name: ANSIBLE_PRIVATE_ROLE_VARS}]
ini:
@@ -1008,7 +1026,7 @@ DEFAULT_STDOUT_CALLBACK:
EDITOR:
name: editor application to use
default: vi
- descrioption:
+ description:
- for the cases in which Ansible needs to return a file within an editor, this chooses the application to use.
ini:
- section: defaults
@@ -1510,6 +1528,23 @@ GALAXY_REQUIRED_VALID_SIGNATURE_COUNT:
- The number of signatures that must be successful during GPG signature verification while installing or verifying collections.
- This should be a positive integer or all to indicate all signatures must successfully validate the collection.
- Prepend + to the value to fail if no valid signatures are found for the collection.
+GALAXY_COLLECTION_IMPORT_POLL_INTERVAL:
+ description:
+ - The initial interval in seconds for polling the import status of a collection.
+ - This interval increases exponentially based on the :ref:`galaxy_collection_import_poll_factor`, with a maximum delay of 30 seconds.
+ type: float
+ default: 2.0
+ env:
+ - name: ANSIBLE_GALAXY_COLLECTION_IMPORT_POLL_INTERVAL
+ version_added: '2.18'
+GALAXY_COLLECTION_IMPORT_POLL_FACTOR:
+ description:
+ - The multiplier used to increase the :ref:`galaxy_collection_import_poll_interval` when checking the collection import status.
+ type: float
+ default: 1.5
+ env:
+ - name: ANSIBLE_GALAXY_COLLECTION_IMPORT_POLL_FACTOR
+ version_added: "2.18"
HOST_KEY_CHECKING:
# NOTE: constant not in use by ssh/paramiko plugins anymore, but they do support the same configuration sources
# TODO: check non ssh connection plugins for use/migration
@@ -1553,34 +1588,23 @@ INTERPRETER_PYTHON:
_INTERPRETER_PYTHON_DISTRO_MAP:
name: Mapping of known included platform pythons for various Linux distros
default:
- redhat:
- '6': /usr/bin/python
- '8': /usr/libexec/platform-python
- '9': /usr/bin/python3
- debian:
- '8': /usr/bin/python
- '10': /usr/bin/python3
- fedora:
- '23': /usr/bin/python3
- ubuntu:
- '14': /usr/bin/python
- '16': /usr/bin/python3
+ # Entry only for testing
+ ansible test:
+ '99': /usr/bin/python99
version_added: "2.8"
# FUTURE: add inventory override once we're sure it can't be abused by a rogue target
# FUTURE: add a platform layer to the map so we could use for, eg, freebsd/macos/etc?
INTERPRETER_PYTHON_FALLBACK:
name: Ordered list of Python interpreters to check for in discovery
default:
+ - python3.13
- python3.12
- python3.11
- python3.10
- python3.9
- python3.8
- - python3.7
- /usr/bin/python3
- - /usr/libexec/platform-python
- - /usr/bin/python
- - python
+ - python3
vars:
- name: ansible_interpreter_python_fallback
type: list
@@ -1695,7 +1719,7 @@ INVENTORY_EXPORT:
type: bool
INVENTORY_IGNORE_EXTS:
name: Inventory ignore extensions
- default: "{{(REJECT_EXTS + ('.orig', '.ini', '.cfg', '.retry'))}}"
+ default: "{{(REJECT_EXTS + ('.orig', '.cfg', '.retry'))}}"
description: List of extensions to ignore when using a directory as an inventory source.
env: [{name: ANSIBLE_INVENTORY_IGNORE}]
ini:
@@ -1744,7 +1768,7 @@ INJECT_FACTS_AS_VARS:
default: True
description:
- Facts are available inside the `ansible_facts` variable, this setting also pushes them as their own vars in the main namespace.
- - Unlike inside the `ansible_facts` dictionary, these will have an `ansible_` prefix.
+ - Unlike inside the `ansible_facts` dictionary where the prefix `ansible_` is removed from fact names, these will have the exact names that are returned by the module.
env: [{name: ANSIBLE_INJECT_FACT_VARS}]
ini:
- {key: inject_facts_as_vars, section: defaults}
@@ -1782,7 +1806,7 @@ OLD_PLUGIN_CACHE_CLEARING:
PAGER:
name: pager application to use
default: less
- descrioption:
+ description:
- for the cases in which Ansible needs to return output in a pageable fashion, this chooses the application to use.
ini:
- section: defaults
@@ -1984,7 +2008,11 @@ TASK_TIMEOUT:
name: Task Timeout
default: 0
description:
- - Set the maximum time (in seconds) that a task can run for.
+ - Set the maximum time (in seconds) for a task action to execute in.
+ - Timeout runs independently from templating or looping.
+ It applies per each attempt of executing the task's action and remains unchanged by the total time spent on a task.
+ - When the action execution exceeds the timeout, Ansible interrupts the process.
+ This is registered as a failure due to outside circumstances, not a task failure, to receive appropriate response and recovery process.
- If set to 0 (the default) there is no timeout.
env: [{name: ANSIBLE_TASK_TIMEOUT}]
ini:
@@ -2115,4 +2143,35 @@ VERBOSE_TO_STDERR:
- section: defaults
key: verbose_to_stderr
type: bool
-...
+_Z_TEST_ENTRY:
+ name: testentry
+ description: for tests
+ env:
+ - name: ANSIBLE_TEST_ENTRY
+ - name: ANSIBLE_TEST_ENTRY_D
+ deprecated:
+ why: for testing
+ version: '3.30'
+ alternatives: nothing
+ ini:
+ - section: testing
+ key: valid
+ - section: testing
+ key: deprecated
+ deprecated:
+ why: for testing
+ version: '3.30'
+ alternatives: nothing
+_Z_TEST_ENTRY_2:
+ version_added: '2.18'
+ name: testentry
+ description: for tests
+ deprecated:
+ why: for testing
+ version: '3.30'
+ alternatives: nothing
+ env:
+ - name: ANSIBLE_TEST_ENTRY2
+ ini:
+ - section: testing
+ key: valid2
diff --git a/lib/ansible/config/manager.py b/lib/ansible/config/manager.py
index 52ffb569286..4bbd9cbf9d0 100644
--- a/lib/ansible/config/manager.py
+++ b/lib/ansible/config/manager.py
@@ -4,6 +4,7 @@
from __future__ import annotations
import atexit
+import decimal
import configparser
import os
import os.path
@@ -15,14 +16,14 @@ from collections import namedtuple
from collections.abc import Mapping, Sequence
from jinja2.nativetypes import NativeEnvironment
-from ansible.errors import AnsibleOptionsError, AnsibleError
+from ansible.errors import AnsibleOptionsError, AnsibleError, AnsibleRequiredOptionError
+from ansible.module_utils.common.sentinel import Sentinel
from ansible.module_utils.common.text.converters import to_text, to_bytes, to_native
from ansible.module_utils.common.yaml import yaml_load
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.parsing.quoting import unquote
from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
-from ansible.utils import py3compat
from ansible.utils.path import cleanup_tmp_file, makedirs_safe, unfrackpath
@@ -30,6 +31,26 @@ Setting = namedtuple('Setting', 'name value origin type')
INTERNAL_DEFS = {'lookup': ('_terms',)}
+GALAXY_SERVER_DEF = [
+ ('url', True, 'str'),
+ ('username', False, 'str'),
+ ('password', False, 'str'),
+ ('token', False, 'str'),
+ ('auth_url', False, 'str'),
+ ('api_version', False, 'int'),
+ ('validate_certs', False, 'bool'),
+ ('client_id', False, 'str'),
+ ('timeout', False, 'int'),
+]
+
+# config definition fields
+GALAXY_SERVER_ADDITIONAL = {
+ 'api_version': {'default': None, 'choices': [2, 3]},
+ 'validate_certs': {'cli': [{'name': 'validate_certs'}]},
+ 'timeout': {'cli': [{'name': 'timeout'}]},
+ 'token': {'default': None},
+}
+
def _get_entry(plugin_type, plugin_name, config):
''' construct entry for requested config '''
@@ -43,7 +64,7 @@ def _get_entry(plugin_type, plugin_name, config):
# FIXME: see if we can unify in module_utils with similar function used by argspec
-def ensure_type(value, value_type, origin=None):
+def ensure_type(value, value_type, origin=None, origin_ftype=None):
''' return a configuration variable with casting
:arg value: The value to ensure correct typing of
:kwarg value_type: The type of the value. This can be any of the following strings:
@@ -82,10 +103,18 @@ def ensure_type(value, value_type, origin=None):
value = boolean(value, strict=False)
elif value_type in ('integer', 'int'):
- value = int(value)
+ if not isinstance(value, int):
+ try:
+ if (decimal_value := decimal.Decimal(value)) == (int_part := int(decimal_value)):
+ value = int_part
+ else:
+ errmsg = 'int'
+ except decimal.DecimalException as e:
+ raise ValueError from e
elif value_type == 'float':
- value = float(value)
+ if not isinstance(value, float):
+ value = float(value)
elif value_type == 'list':
if isinstance(value, string_types):
@@ -142,7 +171,7 @@ def ensure_type(value, value_type, origin=None):
elif value_type in ('str', 'string'):
if isinstance(value, (string_types, AnsibleVaultEncryptedUnicode, bool, int, float, complex)):
value = to_text(value, errors='surrogate_or_strict')
- if origin == 'ini':
+ if origin_ftype and origin_ftype == 'ini':
value = unquote(value)
else:
errmsg = 'string'
@@ -150,11 +179,11 @@ def ensure_type(value, value_type, origin=None):
# defaults to string type
elif isinstance(value, (string_types, AnsibleVaultEncryptedUnicode)):
value = to_text(value, errors='surrogate_or_strict')
- if origin == 'ini':
+ if origin_ftype and origin_ftype == 'ini':
value = unquote(value)
if errmsg:
- raise ValueError('Invalid type provided for "%s": %s' % (errmsg, to_native(value)))
+ raise ValueError(f'Invalid type provided for "{errmsg}": {value!r}')
return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
@@ -204,15 +233,13 @@ def find_ini_config_file(warnings=None):
# Note: In this case, warnings does nothing
warnings = set()
- # A value that can never be a valid path so that we can tell if ANSIBLE_CONFIG was set later
- # We can't use None because we could set path to None.
- SENTINEL = object
-
potential_paths = []
+ # A value that can never be a valid path so that we can tell if ANSIBLE_CONFIG was set later
+ # We can't use None because we could set path to None.
# Environment setting
- path_from_env = os.getenv("ANSIBLE_CONFIG", SENTINEL)
- if path_from_env is not SENTINEL:
+ path_from_env = os.getenv("ANSIBLE_CONFIG", Sentinel)
+ if path_from_env is not Sentinel:
path_from_env = unfrackpath(path_from_env, follow=False)
if os.path.isdir(to_bytes(path_from_env)):
path_from_env = os.path.join(path_from_env, "ansible.cfg")
@@ -303,6 +330,53 @@ class ConfigManager(object):
# ensure we always have config def entry
self._base_defs['CONFIG_FILE'] = {'default': None, 'type': 'path'}
+ def load_galaxy_server_defs(self, server_list):
+
+ def server_config_def(section, key, required, option_type):
+ config_def = {
+ 'description': 'The %s of the %s Galaxy server' % (key, section),
+ 'ini': [
+ {
+ 'section': 'galaxy_server.%s' % section,
+ 'key': key,
+ }
+ ],
+ 'env': [
+ {'name': 'ANSIBLE_GALAXY_SERVER_%s_%s' % (section.upper(), key.upper())},
+ ],
+ 'required': required,
+ 'type': option_type,
+ }
+ if key in GALAXY_SERVER_ADDITIONAL:
+ config_def.update(GALAXY_SERVER_ADDITIONAL[key])
+ # ensure we always have a default timeout
+ if key == 'timeout' and 'default' not in config_def:
+ config_def['default'] = self.get_config_value('GALAXY_SERVER_TIMEOUT')
+
+ return config_def
+
+ if server_list:
+ for server_key in server_list:
+ if not server_key:
+ # To filter out empty strings or non truthy values as an empty server list env var is equal to [''].
+ continue
+
+ # Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the
+ # section [galaxy_server.] for the values url, username, password, and token.
+ defs = dict((k, server_config_def(server_key, k, req, value_type)) for k, req, value_type in GALAXY_SERVER_DEF)
+ self.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs)
+
+ def template_default(self, value, variables):
+ if isinstance(value, string_types) and (value.startswith('{{') and value.endswith('}}')) and variables is not None:
+ # template default values if possible
+ # NOTE: cannot use is_template due to circular dep
+ try:
+ t = NativeEnvironment().from_string(value)
+ value = t.render(variables)
+ except Exception:
+ pass # not templatable
+ return value
+
def _read_config_yaml_file(self, yml_file):
# TODO: handle relative paths as relative to the directory containing the current playbook instead of CWD
# Currently this is only used with absolute paths to the `ansible/config` directory
@@ -347,7 +421,7 @@ class ConfigManager(object):
def get_plugin_options(self, plugin_type, name, keys=None, variables=None, direct=None):
options = {}
- defs = self.get_configuration_definitions(plugin_type, name)
+ defs = self.get_configuration_definitions(plugin_type=plugin_type, name=name)
for option in defs:
options[option] = self.get_config_value(option, plugin_type=plugin_type, plugin_name=name, keys=keys, variables=variables, direct=direct)
@@ -356,7 +430,7 @@ class ConfigManager(object):
def get_plugin_vars(self, plugin_type, name):
pvars = []
- for pdef in self.get_configuration_definitions(plugin_type, name).values():
+ for pdef in self.get_configuration_definitions(plugin_type=plugin_type, name=name).values():
if 'vars' in pdef and pdef['vars']:
for var_entry in pdef['vars']:
pvars.append(var_entry['name'])
@@ -365,7 +439,7 @@ class ConfigManager(object):
def get_plugin_options_from_var(self, plugin_type, name, variable):
options = []
- for option_name, pdef in self.get_configuration_definitions(plugin_type, name).items():
+ for option_name, pdef in self.get_configuration_definitions(plugin_type=plugin_type, name=name).items():
if 'vars' in pdef and pdef['vars']:
for var_entry in pdef['vars']:
if variable == var_entry['name']:
@@ -407,7 +481,6 @@ class ConfigManager(object):
for cdef in list(ret.keys()):
if cdef.startswith('_'):
del ret[cdef]
-
return ret
def _loop_entries(self, container, entry_list):
@@ -460,8 +533,9 @@ class ConfigManager(object):
# Note: sources that are lists listed in low to high precedence (last one wins)
value = None
origin = None
+ origin_ftype = None
- defs = self.get_configuration_definitions(plugin_type, plugin_name)
+ defs = self.get_configuration_definitions(plugin_type=plugin_type, name=plugin_name)
if config in defs:
aliases = defs[config].get('aliases', [])
@@ -512,58 +586,58 @@ class ConfigManager(object):
# env vars are next precedence
if value is None and defs[config].get('env'):
- value, origin = self._loop_entries(py3compat.environ, defs[config]['env'])
+ value, origin = self._loop_entries(os.environ, defs[config]['env'])
origin = 'env: %s' % origin
# try config file entries next, if we have one
if self._parsers.get(cfile, None) is None:
self._parse_config_file(cfile)
+ # attempt to read from config file
if value is None and cfile is not None:
ftype = get_config_type(cfile)
if ftype and defs[config].get(ftype):
- if ftype == 'ini':
- # load from ini config
- try: # FIXME: generalize _loop_entries to allow for files also, most of this code is dupe
- for ini_entry in defs[config]['ini']:
- temp_value = get_ini_config_value(self._parsers[cfile], ini_entry)
- if temp_value is not None:
- value = temp_value
- origin = cfile
- if 'deprecated' in ini_entry:
- self.DEPRECATED.append(('[%s]%s' % (ini_entry['section'], ini_entry['key']), ini_entry['deprecated']))
- except Exception as e:
- sys.stderr.write("Error while loading ini config %s: %s" % (cfile, to_native(e)))
- elif ftype == 'yaml':
- # FIXME: implement, also , break down key from defs (. notation???)
- origin = cfile
+ try:
+ for entry in defs[config][ftype]:
+ # load from config
+ if ftype == 'ini':
+ temp_value = get_ini_config_value(self._parsers[cfile], entry)
+ elif ftype == 'yaml':
+ raise AnsibleError('YAML configuration type has not been implemented yet')
+ else:
+ raise AnsibleError('Invalid configuration file type: %s' % ftype)
+
+ if temp_value is not None:
+ # set value and origin
+ value = temp_value
+ origin = cfile
+ origin_ftype = ftype
+ if 'deprecated' in entry:
+ if ftype == 'ini':
+ self.DEPRECATED.append(('[%s]%s' % (entry['section'], entry['key']), entry['deprecated']))
+ else:
+ raise AnsibleError('Unimplemented file type: %s' % ftype)
+
+ except Exception as e:
+ sys.stderr.write("Error while loading config %s: %s" % (cfile, to_native(e)))
# set default if we got here w/o a value
if value is None:
if defs[config].get('required', False):
if not plugin_type or config not in INTERNAL_DEFS.get(plugin_type, {}):
- raise AnsibleError("No setting was provided for required configuration %s" %
- to_native(_get_entry(plugin_type, plugin_name, config)))
+ raise AnsibleRequiredOptionError("No setting was provided for required configuration %s" %
+ to_native(_get_entry(plugin_type, plugin_name, config)))
else:
origin = 'default'
- value = defs[config].get('default')
- if isinstance(value, string_types) and (value.startswith('{{') and value.endswith('}}')) and variables is not None:
- # template default values if possible
- # NOTE: cannot use is_template due to circular dep
- try:
- t = NativeEnvironment().from_string(value)
- value = t.render(variables)
- except Exception:
- pass # not templatable
-
- # ensure correct type, can raise exceptions on mismatched types
+ value = self.template_default(defs[config].get('default'), variables)
try:
- value = ensure_type(value, defs[config].get('type'), origin=origin)
+ # ensure correct type, can raise exceptions on mismatched types
+ value = ensure_type(value, defs[config].get('type'), origin=origin, origin_ftype=origin_ftype)
except ValueError as e:
if origin.startswith('env:') and value == '':
# this is empty env var for non string so we can set to default
origin = 'default'
- value = ensure_type(defs[config].get('default'), defs[config].get('type'), origin=origin)
+ value = ensure_type(defs[config].get('default'), defs[config].get('type'), origin=origin, origin_ftype=origin_ftype)
else:
raise AnsibleOptionsError('Invalid type for configuration option %s (from %s): %s' %
(to_native(_get_entry(plugin_type, plugin_name, config)).strip(), origin, to_native(e)))
@@ -606,3 +680,17 @@ class ConfigManager(object):
self._plugins[plugin_type] = {}
self._plugins[plugin_type][name] = defs
+
+ @staticmethod
+ def get_deprecated_msg_from_config(dep_docs, include_removal=False):
+
+ removal = ''
+ if include_removal:
+ if 'removed_at_date' in dep_docs:
+ removal = f"Will be removed in a release after {dep_docs['removed_at_date']}\n\t"
+ else:
+ removal = f"Will be removed in: Ansible {dep_docs['removed_in']}\n\t"
+
+ # TODO: choose to deprecate either singular or plural
+ alt = dep_docs.get('alternatives', dep_docs.get('alternative', 'none'))
+ return f"Reason: {dep_docs['why']}\n\t{removal}Alternatives: {alt}"
diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py
index 3bf952f271a..34f91db54ea 100644
--- a/lib/ansible/constants.py
+++ b/lib/ansible/constants.py
@@ -15,6 +15,10 @@ from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
from ansible.release import __version__
from ansible.utils.fqcn import add_internal_fqcns
+# initialize config manager/config data to read/store global settings
+# and generate 'pseudo constants' for app consumption.
+config = ConfigManager()
+
def _warning(msg):
''' display is not guaranteed here, nor it being the full class, but try anyways, fallback to sys.stderr.write '''
@@ -36,6 +40,28 @@ def _deprecated(msg, version):
sys.stderr.write(' [DEPRECATED] %s, to be removed in %s\n' % (msg, version))
+def handle_config_noise(display=None):
+
+ if display is not None:
+ w = display.warning
+ d = display.deprecated
+ else:
+ w = _warning
+ d = _deprecated
+
+ while config.WARNINGS:
+ warn = config.WARNINGS.pop()
+ w(warn)
+
+ while config.DEPRECATED:
+ # tuple with name and options
+ dep = config.DEPRECATED.pop(0)
+ msg = config.get_deprecated_msg_from_config(dep[1])
+ # use tabs only for ansible-doc?
+ msg = msg.replace("\t", "")
+ d(f"{dep[0]} option. {msg}", version=dep[1]['version'])
+
+
def set_constant(name, value, export=vars()):
''' sets constants and returns resolved options dict '''
export[name] = value
@@ -111,11 +137,51 @@ CONFIGURABLE_PLUGINS = ('become', 'cache', 'callback', 'cliconf', 'connection',
DOCUMENTABLE_PLUGINS = CONFIGURABLE_PLUGINS + ('module', 'strategy', 'test', 'filter')
IGNORE_FILES = ("COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES", "MANIFEST", "Makefile") # ignore during module search
INTERNAL_RESULT_KEYS = ('add_host', 'add_group')
+INTERNAL_STATIC_VARS = frozenset(
+ [
+ "ansible_async_path",
+ "ansible_collection_name",
+ "ansible_config_file",
+ "ansible_dependent_role_names",
+ "ansible_diff_mode",
+ "ansible_config_file",
+ "ansible_facts",
+ "ansible_forks",
+ "ansible_inventory_sources",
+ "ansible_limit",
+ "ansible_play_batch",
+ "ansible_play_hosts",
+ "ansible_play_hosts_all",
+ "ansible_play_role_names",
+ "ansible_playbook_python",
+ "ansible_role_name",
+ "ansible_role_names",
+ "ansible_run_tags",
+ "ansible_skip_tags",
+ "ansible_verbosity",
+ "ansible_version",
+ "inventory_dir",
+ "inventory_file",
+ "inventory_hostname",
+ "inventory_hostname_short",
+ "groups",
+ "group_names",
+ "omit",
+ "hostvars",
+ "playbook_dir",
+ "play_hosts",
+ "role_name",
+ "role_names",
+ "role_path",
+ "role_uuid",
+ "role_names",
+ ]
+)
LOCALHOST = ('127.0.0.1', 'localhost', '::1')
-MODULE_REQUIRE_ARGS = tuple(add_internal_fqcns(('command', 'win_command', 'ansible.windows.win_command', 'shell', 'win_shell',
- 'ansible.windows.win_shell', 'raw', 'script')))
-MODULE_NO_JSON = tuple(add_internal_fqcns(('command', 'win_command', 'ansible.windows.win_command', 'shell', 'win_shell',
- 'ansible.windows.win_shell', 'raw')))
+WIN_MOVED = ['ansible.windows.win_command', 'ansible.windows.win_shell']
+MODULE_REQUIRE_ARGS_SIMPLE = ['command', 'raw', 'script', 'shell', 'win_command', 'win_shell']
+MODULE_REQUIRE_ARGS = tuple(add_internal_fqcns(MODULE_REQUIRE_ARGS_SIMPLE) + WIN_MOVED)
+MODULE_NO_JSON = tuple(add_internal_fqcns(('command', 'win_command', 'shell', 'win_shell', 'raw')) + WIN_MOVED)
RESTRICTED_RESULT_KEYS = ('ansible_rsync_path', 'ansible_playbook_python', 'ansible_facts')
SYNTHETIC_COLLECTIONS = ('ansible.builtin', 'ansible.legacy')
TREE_DIR = None
@@ -178,11 +244,8 @@ MAGIC_VARIABLE_MAPPING = dict(
)
# POPULATE SETTINGS FROM CONFIG ###
-config = ConfigManager()
-
-# Generate constants from config
for setting in config.get_configuration_definitions():
set_constant(setting, config.get_config_value(setting, variables=vars()))
-for warn in config.WARNINGS:
- _warning(warn)
+# emit any warnings or deprecations
+handle_config_noise()
diff --git a/lib/ansible/errors/__init__.py b/lib/ansible/errors/__init__.py
index 8e33bef120b..78853757f8a 100644
--- a/lib/ansible/errors/__init__.py
+++ b/lib/ansible/errors/__init__.py
@@ -66,14 +66,18 @@ class AnsibleError(Exception):
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
message = [self._message]
+
+ # Add from previous exceptions
+ if self.orig_exc:
+ message.append('. %s' % to_native(self.orig_exc))
+
+ # Add from yaml to give specific file/line no
if isinstance(self.obj, AnsibleBaseYAMLObject):
extended_error = self._get_extended_error()
if extended_error and not self._suppress_extended_error:
message.append(
'\n\n%s' % to_native(extended_error)
)
- elif self.orig_exc:
- message.append('. %s' % to_native(self.orig_exc))
return ''.join(message)
@@ -227,6 +231,11 @@ class AnsibleOptionsError(AnsibleError):
pass
+class AnsibleRequiredOptionError(AnsibleOptionsError):
+ ''' bad or incomplete options passed '''
+ pass
+
+
class AnsibleParserError(AnsibleError):
''' something was detected early that is wrong about a playbook or data file '''
pass
diff --git a/lib/ansible/executor/interpreter_discovery.py b/lib/ansible/executor/interpreter_discovery.py
index deba1a5515f..24b2174d3c8 100644
--- a/lib/ansible/executor/interpreter_discovery.py
+++ b/lib/ansible/executor/interpreter_discovery.py
@@ -41,7 +41,7 @@ class InterpreterDiscoveryRequiredError(Exception):
def discover_interpreter(action, interpreter_name, discovery_mode, task_vars):
# interpreter discovery is a 2-step process with the target. First, we use a simple shell-agnostic bootstrap to
# get the system type from uname, and find any random Python that can get us the info we need. For supported
- # target OS types, we'll dispatch a Python script that calls plaform.dist() (for older platforms, where available)
+ # target OS types, we'll dispatch a Python script that calls platform.dist() (for older platforms, where available)
# and brings back /etc/os-release (if present). The proper Python path is looked up in a table of known
# distros/versions with included Pythons; if nothing is found, depending on the discovery mode, either the
# default fallback of /usr/bin/python is used (if we know it's there), or discovery fails.
@@ -53,7 +53,7 @@ def discover_interpreter(action, interpreter_name, discovery_mode, task_vars):
host = task_vars.get('inventory_hostname', 'unknown')
res = None
platform_type = 'unknown'
- found_interpreters = [u'/usr/bin/python'] # fallback value
+ found_interpreters = [u'/usr/bin/python3'] # fallback value
is_auto_legacy = discovery_mode.startswith('auto_legacy')
is_silent = discovery_mode.endswith('_silent')
@@ -89,7 +89,7 @@ def discover_interpreter(action, interpreter_name, discovery_mode, task_vars):
action._discovery_warnings.append(u'No python interpreters found for '
u'host {0} (tried {1})'.format(host, bootstrap_python_list))
# this is lame, but returning None or throwing an exception is uglier
- return u'/usr/bin/python'
+ return u'/usr/bin/python3'
if platform_type != 'linux':
raise NotImplementedError('unsupported platform for extended discovery: {0}'.format(to_native(platform_type)))
@@ -106,7 +106,6 @@ def discover_interpreter(action, interpreter_name, discovery_mode, task_vars):
platform_info = json.loads(res.get('stdout'))
distro, version = _get_linux_distro(platform_info)
-
if not distro or not version:
raise NotImplementedError('unable to get Linux distribution/version info')
@@ -120,15 +119,15 @@ def discover_interpreter(action, interpreter_name, discovery_mode, task_vars):
# provide a transition period for hosts that were using /usr/bin/python previously (but shouldn't have been)
if is_auto_legacy:
- if platform_interpreter != u'/usr/bin/python' and u'/usr/bin/python' in found_interpreters:
+ if platform_interpreter != u'/usr/bin/python3' and u'/usr/bin/python3' in found_interpreters:
if not is_silent:
action._discovery_warnings.append(
u"Distribution {0} {1} on host {2} should use {3}, but is using "
- u"/usr/bin/python for backward compatibility with prior Ansible releases. "
+ u"/usr/bin/python3 for backward compatibility with prior Ansible releases. "
u"See {4} for more information"
.format(distro, version, host, platform_interpreter,
get_versioned_doclink('reference_appendices/interpreter_discovery.html')))
- return u'/usr/bin/python'
+ return u'/usr/bin/python3'
if platform_interpreter not in found_interpreters:
if platform_interpreter not in bootstrap_python_list:
diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py
index 58eebfaea57..1ef7451095e 100644
--- a/lib/ansible/executor/play_iterator.py
+++ b/lib/ansible/executor/play_iterator.py
@@ -427,27 +427,51 @@ class PlayIterator:
# might be there from previous flush
state.handlers = self.handlers[:]
state.update_handlers = False
- state.cur_handlers_task = 0
while True:
try:
task = state.handlers[state.cur_handlers_task]
except IndexError:
task = None
+ state.cur_handlers_task = 0
state.run_state = state.pre_flushing_run_state
state.update_handlers = True
break
else:
state.cur_handlers_task += 1
if task.is_host_notified(host):
- break
+ return state, task
elif state.run_state == IteratingStates.COMPLETE:
return (state, None)
# if something above set the task, break out of the loop now
if task:
- break
+ # skip implicit flush_handlers if there are no handlers notified
+ if (
+ task.implicit
+ and task.action in C._ACTION_META
+ and task.args.get('_raw_params', None) == 'flush_handlers'
+ and (
+ # the state store in the `state` variable could be a nested state,
+ # notifications are always stored in the top level state, get it here
+ not self.get_state_for_host(host.name).handler_notifications
+ # in case handlers notifying other handlers, the notifications are not
+ # saved in `handler_notifications` and handlers are notified directly
+ # to prevent duplicate handler runs, so check whether any handler
+ # is notified
+ and all(not h.notified_hosts for h in self.handlers)
+ )
+ ):
+ display.debug("No handler notifications for %s, skipping." % host.name)
+ elif (
+ (role := task._role)
+ and role._metadata.allow_duplicates is False
+ and host.name in self._play._get_cached_role(role)._completed
+ ):
+ display.debug("'%s' skipped because role has already run" % task)
+ else:
+ break
return (state, task)
@@ -635,3 +659,19 @@ class PlayIterator:
def clear_notification(self, hostname: str, notification: str) -> None:
self._host_states[hostname].handler_notifications.remove(notification)
+
+ def end_host(self, hostname: str) -> None:
+ """Used by ``end_host``, ``end_batch`` and ``end_play`` meta tasks to end executing given host."""
+ state = self.get_active_state(self.get_state_for_host(hostname))
+ if state.run_state == IteratingStates.RESCUE:
+ # This is a special case for when ending a host occurs in rescue.
+ # By definition the meta task responsible for ending the host
+ # is the last task, so we need to clear the fail state to mark
+ # the host as rescued.
+ # The reason we need to do that is because this operation is
+ # normally done when PlayIterator transitions from rescue to
+ # always when only then we can say that rescue didn't fail
+ # but with ending a host via meta task, we don't get to that transition.
+ self.set_fail_state_for_host(hostname, FailedStates.NONE)
+ self.set_run_state_for_host(hostname, IteratingStates.COMPLETE)
+ self._play._removed_hosts.append(hostname)
diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py
index 7c3ac4113bf..f439967838b 100644
--- a/lib/ansible/executor/playbook_executor.py
+++ b/lib/ansible/executor/playbook_executor.py
@@ -195,10 +195,7 @@ class PlaybookExecutor:
result = self._tqm.RUN_FAILED_HOSTS
break_play = True
- # check the number of failures here, to see if they're above the maximum
- # failure percentage allowed, or if any errors are fatal. If either of those
- # conditions are met, we break out, otherwise we only break out if the entire
- # batch failed
+ # check the number of failures here and break out if the entire batch failed
failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
(previously_failed + previously_unreachable)
diff --git a/lib/ansible/executor/powershell/become_wrapper.ps1 b/lib/ansible/executor/powershell/become_wrapper.ps1
index f40e2658f5f..cea42c128aa 100644
--- a/lib/ansible/executor/powershell/become_wrapper.ps1
+++ b/lib/ansible/executor/powershell/become_wrapper.ps1
@@ -116,12 +116,11 @@ Write-AnsibleLog "INFO - parsed become input, user: '$username', type: '$logon_t
# set to Stop and cannot be changed. Also need to split the payload from the wrapper to prevent potentially
# sensitive content from being logged by the scriptblock logger.
$bootstrap_wrapper = {
- &chcp.com 65001 > $null
- $exec_wrapper_str = [System.Console]::In.ReadToEnd()
- $split_parts = $exec_wrapper_str.Split(@("`0`0`0`0"), 2, [StringSplitOptions]::RemoveEmptyEntries)
+ [Console]::InputEncoding = [Console]::OutputEncoding = New-Object System.Text.UTF8Encoding
+ $ew = [System.Console]::In.ReadToEnd()
+ $split_parts = $ew.Split(@("`0`0`0`0"), 2, [StringSplitOptions]::RemoveEmptyEntries)
Set-Variable -Name json_raw -Value $split_parts[1]
- $exec_wrapper = [ScriptBlock]::Create($split_parts[0])
- &$exec_wrapper
+ &([ScriptBlock]::Create($split_parts[0]))
}
$exec_command = [System.Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($bootstrap_wrapper.ToString()))
$lp_command_line = "powershell.exe -NonInteractive -NoProfile -ExecutionPolicy Bypass -EncodedCommand $exec_command"
diff --git a/lib/ansible/executor/powershell/bootstrap_wrapper.ps1 b/lib/ansible/executor/powershell/bootstrap_wrapper.ps1
index cdba80cbb01..8e7141eb515 100644
--- a/lib/ansible/executor/powershell/bootstrap_wrapper.ps1
+++ b/lib/ansible/executor/powershell/bootstrap_wrapper.ps1
@@ -1,4 +1,4 @@
-&chcp.com 65001 > $null
+try { [Console]::InputEncoding = [Console]::OutputEncoding = New-Object System.Text.UTF8Encoding } catch { $null = $_ }
if ($PSVersionTable.PSVersion -lt [Version]"3.0") {
'{"failed":true,"msg":"Ansible requires PowerShell v3.0 or newer"}'
@@ -9,5 +9,4 @@ $exec_wrapper_str = $input | Out-String
$split_parts = $exec_wrapper_str.Split(@("`0`0`0`0"), 2, [StringSplitOptions]::RemoveEmptyEntries)
If (-not $split_parts.Length -eq 2) { throw "invalid payload" }
Set-Variable -Name json_raw -Value $split_parts[1]
-$exec_wrapper = [ScriptBlock]::Create($split_parts[0])
-&$exec_wrapper
+& ([ScriptBlock]::Create($split_parts[0]))
diff --git a/lib/ansible/executor/powershell/exec_wrapper.ps1 b/lib/ansible/executor/powershell/exec_wrapper.ps1
index 0f97bdfb8a5..4ecc1367c84 100644
--- a/lib/ansible/executor/powershell/exec_wrapper.ps1
+++ b/lib/ansible/executor/powershell/exec_wrapper.ps1
@@ -16,7 +16,7 @@ begin {
.SYNOPSIS
Converts a JSON string to a Hashtable/Array in the fastest way
possible. Unfortunately ConvertFrom-Json is still faster but outputs
- a PSCustomObject which is combersone for module consumption.
+ a PSCustomObject which is cumbersome for module consumption.
.PARAMETER InputObject
[String] The JSON string to deserialize.
@@ -178,6 +178,7 @@ $($ErrorRecord.InvocationInfo.PositionMessage)
Write-AnsibleLog "INFO - converting json raw to a payload" "exec_wrapper"
$payload = ConvertFrom-AnsibleJson -InputObject $json_raw
+ $payload.module_args._ansible_exec_wrapper_warnings = [System.Collections.Generic.List[string]]@()
# TODO: handle binary modules
# TODO: handle persistence
diff --git a/lib/ansible/executor/powershell/module_manifest.py b/lib/ansible/executor/powershell/module_manifest.py
index 99b18e5ff4b..93c5c8c643e 100644
--- a/lib/ansible/executor/powershell/module_manifest.py
+++ b/lib/ansible/executor/powershell/module_manifest.py
@@ -8,7 +8,7 @@ import errno
import json
import os
import pkgutil
-import random
+import secrets
import re
from importlib import import_module
@@ -318,7 +318,7 @@ def _create_powershell_wrapper(b_module_data, module_path, module_args,
exec_manifest["actions"].insert(0, 'async_watchdog')
exec_manifest["actions"].insert(0, 'async_wrapper')
- exec_manifest["async_jid"] = f'j{random.randint(0, 999999999999)}'
+ exec_manifest["async_jid"] = f'j{secrets.randbelow(999999999999)}'
exec_manifest["async_timeout_sec"] = async_timeout
exec_manifest["async_startup_timeout"] = C.config.get_config_value("WIN_ASYNC_STARTUP_TIMEOUT", variables=task_vars)
diff --git a/lib/ansible/executor/powershell/module_powershell_wrapper.ps1 b/lib/ansible/executor/powershell/module_powershell_wrapper.ps1
index c35c84cfc86..f79dd6fbc86 100644
--- a/lib/ansible/executor/powershell/module_powershell_wrapper.ps1
+++ b/lib/ansible/executor/powershell/module_powershell_wrapper.ps1
@@ -29,7 +29,18 @@ if ($csharp_utils.Count -gt 0) {
# add any C# references so the module does not have to do so
$new_tmp = [System.Environment]::ExpandEnvironmentVariables($Payload.module_args["_ansible_remote_tmp"])
- Add-CSharpType -References $csharp_utils -TempPath $new_tmp -IncludeDebugInfo
+
+ # We use a fake module object to capture warnings
+ $fake_module = [PSCustomObject]@{
+ Tmpdir = $new_tmp
+ Verbosity = 3
+ }
+ $warning_func = New-Object -TypeName System.Management.Automation.PSScriptMethod -ArgumentList Warn, {
+ param($message)
+ $Payload.module_args._ansible_exec_wrapper_warnings.Add($message)
+ }
+ $fake_module.PSObject.Members.Add($warning_func)
+ Add-CSharpType -References $csharp_utils -AnsibleModule $fake_module
}
if ($Payload.ContainsKey("coverage") -and $null -ne $host.Runspace -and $null -ne $host.Runspace.Debugger) {
diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py
index 5b3e5326b6e..932a33cfec0 100644
--- a/lib/ansible/executor/task_executor.py
+++ b/lib/ansible/executor/task_executor.py
@@ -4,23 +4,24 @@
from __future__ import annotations
import os
-import pty
import time
import json
+import pathlib
import signal
import subprocess
import sys
-import termios
import traceback
from ansible import constants as C
+from ansible.cli import scripts
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure, AnsibleActionFail, AnsibleActionSkip
from ansible.executor.task_result import TaskResult
from ansible.executor.module_common import get_action_args_with_defaults
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import binary_type
from ansible.module_utils.common.text.converters import to_text, to_native
-from ansible.module_utils.connection import write_to_file_descriptor
+from ansible.module_utils.connection import write_to_stream
+from ansible.module_utils.six import string_types
from ansible.playbook.conditional import Conditional
from ansible.playbook.task import Task
from ansible.plugins import get_plugin_class
@@ -31,7 +32,7 @@ from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.unsafe_proxy import to_unsafe_text, wrap_var
from ansible.vars.clean import namespace_facts, clean_facts
from ansible.utils.display import Display
-from ansible.utils.vars import combine_vars, isidentifier
+from ansible.utils.vars import combine_vars
display = Display()
@@ -42,11 +43,21 @@ __all__ = ['TaskExecutor']
class TaskTimeoutError(BaseException):
- pass
+ def __init__(self, message="", frame=None):
+
+ if frame is not None:
+ orig = frame
+ root = pathlib.Path(__file__).parent
+ while not pathlib.Path(frame.f_code.co_filename).is_relative_to(root):
+ frame = frame.f_back
+
+ self.frame = 'Interrupted at %s called from %s' % (orig, frame)
+
+ super(TaskTimeoutError, self).__init__(message)
def task_timeout(signum, frame):
- raise TaskTimeoutError
+ raise TaskTimeoutError(frame=frame)
def remove_omit(task_args, omit_token):
@@ -332,6 +343,13 @@ class TaskExecutor:
(self._task, tmp_task) = (tmp_task, self._task)
(self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
res = self._execute(variables=task_vars)
+
+ if self._task.register:
+ # Ensure per loop iteration results are registered in case `_execute()`
+ # returns early (when conditional, failure, ...).
+ # This is needed in case the registered variable is used in the loop label template.
+ task_vars[self._task.register] = res
+
task_fields = self._task.dump_attrs()
(self._task, tmp_task) = (tmp_task, self._task)
(self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
@@ -362,12 +380,17 @@ class TaskExecutor:
'msg': 'Failed to template loop_control.label: %s' % to_text(e)
})
+ # if plugin is loaded, get resolved name, otherwise leave original task connection
+ if self._connection and not isinstance(self._connection, string_types):
+ task_fields['connection'] = getattr(self._connection, 'ansible_name')
+
tr = TaskResult(
self._host.name,
self._task._uuid,
res,
task_fields=task_fields,
)
+
if tr.is_failed() or tr.is_unreachable():
self._final_q.send_callback('v2_runner_item_on_failed', tr)
elif tr.is_skipped():
@@ -379,6 +402,19 @@ class TaskExecutor:
self._final_q.send_callback('v2_runner_item_on_ok', tr)
results.append(res)
+
+ # break loop if break_when conditions are met
+ if self._task.loop_control and self._task.loop_control.break_when:
+ cond = Conditional(loader=self._loader)
+ cond.when = self._task.loop_control.get_validated_value(
+ 'break_when', self._task.loop_control.fattributes.get('break_when'), self._task.loop_control.break_when, templar
+ )
+ if cond.evaluate_conditional(templar, task_vars):
+ # delete loop vars before exiting loop
+ del task_vars[loop_var]
+ break
+
+ # done with loop var, remove for next iteration
del task_vars[loop_var]
# clear 'connection related' plugin variables for next iteration
@@ -640,7 +676,7 @@ class TaskExecutor:
return dict(unreachable=True, msg=to_text(e))
except TaskTimeoutError as e:
msg = 'The %s action failed to execute in the expected time frame (%d) and was terminated' % (self._task.action, self._task.timeout)
- return dict(failed=True, msg=msg)
+ return dict(failed=True, msg=msg, timedout={'frame': e.frame, 'period': self._task.timeout})
finally:
if self._task.timeout:
signal.alarm(0)
@@ -657,9 +693,6 @@ class TaskExecutor:
# update the local copy of vars with the registered value, if specified,
# or any facts which may have been generated by the module execution
if self._task.register:
- if not isidentifier(self._task.register):
- raise AnsibleError("Invalid variable name in 'register' specified: '%s'" % self._task.register)
-
vars_copy[self._task.register] = result
if self._task.async_val > 0:
@@ -840,7 +873,12 @@ class TaskExecutor:
# that (with a sleep for "poll" seconds between each retry) until the
# async time limit is exceeded.
- async_task = Task.load(dict(action='async_status', args={'jid': async_jid}, environment=self._task.environment))
+ async_task = Task.load(dict(
+ action='async_status',
+ args={'jid': async_jid},
+ check_mode=self._task.check_mode,
+ environment=self._task.environment,
+ ))
# FIXME: this is no longer the case, normal takes care of all, see if this can just be generalized
# Because this is an async task, the action handler is async. However,
@@ -912,6 +950,7 @@ class TaskExecutor:
'jid': async_jid,
'mode': 'cleanup',
},
+ 'check_mode': self._task.check_mode,
'environment': self._task.environment,
}
)
@@ -1042,7 +1081,7 @@ class TaskExecutor:
# add extras if plugin supports them
if getattr(self._connection, 'allow_extras', False):
for k in variables:
- if k.startswith('ansible_%s_' % self._connection._load_name) and k not in options:
+ if k.startswith('ansible_%s_' % self._connection.extras_prefix) and k not in options:
options['_extras'][k] = templar.template(variables[k])
task_keys = self._task.dump_attrs()
@@ -1085,7 +1124,7 @@ class TaskExecutor:
# deals with networking sub_plugins (network_cli/httpapi/netconf)
sub = getattr(self._connection, '_sub_plugin', None)
- if sub is not None and sub.get('type') != 'external':
+ if sub and sub.get('type') != 'external':
plugin_type = get_plugin_class(sub.get("obj"))
varnames.extend(self._set_plugin_options(plugin_type, variables, templar, task_keys))
sub_conn = getattr(self._connection, 'ssh_type_conn', None)
@@ -1173,26 +1212,19 @@ class TaskExecutor:
return handler, module
+CLI_STUB_NAME = 'ansible_connection_cli_stub.py'
+
+
def start_connection(play_context, options, task_uuid):
'''
Starts the persistent connection
'''
- candidate_paths = [C.ANSIBLE_CONNECTION_PATH or os.path.dirname(sys.argv[0])]
- candidate_paths.extend(os.environ.get('PATH', '').split(os.pathsep))
- for dirname in candidate_paths:
- ansible_connection = os.path.join(dirname, 'ansible-connection')
- if os.path.isfile(ansible_connection):
- display.vvvv("Found ansible-connection at path {0}".format(ansible_connection))
- break
- else:
- raise AnsibleError("Unable to find location of 'ansible-connection'. "
- "Please set or check the value of ANSIBLE_CONNECTION_PATH")
env = os.environ.copy()
env.update({
# HACK; most of these paths may change during the controller's lifetime
# (eg, due to late dynamic role includes, multi-playbook execution), without a way
- # to invalidate/update, ansible-connection won't always see the same plugins the controller
+ # to invalidate/update, the persistent connection helper won't always see the same plugins the controller
# can.
'ANSIBLE_BECOME_PLUGINS': become_loader.print_paths(),
'ANSIBLE_CLICONF_PLUGINS': cliconf_loader.print_paths(),
@@ -1205,30 +1237,19 @@ def start_connection(play_context, options, task_uuid):
verbosity = []
if display.verbosity:
verbosity.append('-%s' % ('v' * display.verbosity))
- python = sys.executable
- master, slave = pty.openpty()
+
+ if not (cli_stub_path := C.config.get_config_value('_ANSIBLE_CONNECTION_PATH')):
+ cli_stub_path = str(pathlib.Path(scripts.__file__).parent / CLI_STUB_NAME)
+
p = subprocess.Popen(
- [python, ansible_connection, *verbosity, to_text(os.getppid()), to_text(task_uuid)],
- stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
+ [sys.executable, cli_stub_path, *verbosity, to_text(os.getppid()), to_text(task_uuid)],
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env,
)
- os.close(slave)
-
- # We need to set the pty into noncanonical mode. This ensures that we
- # can receive lines longer than 4095 characters (plus newline) without
- # truncating.
- old = termios.tcgetattr(master)
- new = termios.tcgetattr(master)
- new[3] = new[3] & ~termios.ICANON
-
- try:
- termios.tcsetattr(master, termios.TCSANOW, new)
- write_to_file_descriptor(master, options)
- write_to_file_descriptor(master, play_context.serialize())
-
- (stdout, stderr) = p.communicate()
- finally:
- termios.tcsetattr(master, termios.TCSANOW, old)
- os.close(master)
+
+ write_to_stream(p.stdin, options)
+ write_to_stream(p.stdin, play_context.serialize())
+
+ (stdout, stderr) = p.communicate()
if p.returncode == 0:
result = json.loads(to_text(stdout, errors='surrogate_then_replace'))
diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py
index f6e8c8bf7e0..3b9e251da81 100644
--- a/lib/ansible/executor/task_queue_manager.py
+++ b/lib/ansible/executor/task_queue_manager.py
@@ -223,7 +223,7 @@ class TaskQueueManager:
callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', '')
callback_needs_enabled = getattr(callback_plugin, 'CALLBACK_NEEDS_ENABLED', getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False))
- # try to get colleciotn world name first
+ # try to get collection world name first
cnames = getattr(callback_plugin, '_redirected_names', [])
if cnames:
# store the name the plugin was loaded as, as that's what we'll need to compare to the configured callback list later
diff --git a/lib/ansible/executor/task_result.py b/lib/ansible/executor/task_result.py
index 2690f3a52bb..821189367d1 100644
--- a/lib/ansible/executor/task_result.py
+++ b/lib/ansible/executor/task_result.py
@@ -139,7 +139,7 @@ class TaskResult:
elif self._result:
result._result = module_response_deepcopy(self._result)
- # actualy remove
+ # actually remove
for remove_key in ignore:
if remove_key in result._result:
del result._result[remove_key]
diff --git a/lib/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py
index ad340415867..6765b087b35 100644
--- a/lib/ansible/galaxy/api.py
+++ b/lib/ansible/galaxy/api.py
@@ -62,8 +62,7 @@ def should_retry_error(exception):
if isinstance(orig_exc, URLError):
orig_exc = orig_exc.reason
- # Handle common URL related errors such as TimeoutError, and BadStatusLine
- # Note: socket.timeout is only required for Py3.9
+ # Handle common URL related errors
if isinstance(orig_exc, (TimeoutError, BadStatusLine, IncompleteRead)):
return True
@@ -133,6 +132,15 @@ def g_connect(versions):
% (method.__name__, ", ".join(versions), ", ".join(available_versions),
self.name, self.api_server))
+ # Warn only when we know we are talking to a collections API
+ if common_versions == {'v2'}:
+ display.deprecated(
+ 'The v2 Ansible Galaxy API is deprecated and no longer supported. '
+ 'Ensure that you have configured the ansible-galaxy CLI to utilize an '
+ 'updated and supported version of Ansible Galaxy.',
+ version='2.20'
+ )
+
return method(self, *args, **kwargs)
return wrapped
return decorator
@@ -711,7 +719,7 @@ class GalaxyAPI:
display.display("Waiting until Galaxy import task %s has completed" % full_url)
start = time.time()
- wait = 2
+ wait = C.GALAXY_COLLECTION_IMPORT_POLL_INTERVAL
while timeout == 0 or (time.time() - start) < timeout:
try:
@@ -735,7 +743,7 @@ class GalaxyAPI:
time.sleep(wait)
# poor man's exponential backoff algo so we don't flood the Galaxy API, cap at 30 seconds.
- wait = min(30, wait * 1.5)
+ wait = min(30, wait * C.GALAXY_COLLECTION_IMPORT_POLL_FACTOR)
if state == 'waiting':
raise AnsibleError("Timeout while waiting for the Galaxy import process to finish, check progress at '%s'"
% to_native(full_url))
diff --git a/lib/ansible/galaxy/collection/__init__.py b/lib/ansible/galaxy/collection/__init__.py
index 24707b1e731..829f7aa19d2 100644
--- a/lib/ansible/galaxy/collection/__init__.py
+++ b/lib/ansible/galaxy/collection/__init__.py
@@ -8,6 +8,7 @@ from __future__ import annotations
import errno
import fnmatch
import functools
+import glob
import inspect
import json
import os
@@ -124,13 +125,14 @@ from ansible.galaxy.dependency_resolution.dataclasses import (
)
from ansible.galaxy.dependency_resolution.versioning import meets_requirements
from ansible.plugins.loader import get_all_plugin_loaders
+from ansible.module_utils.common.file import S_IRWU_RG_RO, S_IRWXU_RXG_RXO, S_IXANY
+from ansible.module_utils.common.sentinel import Sentinel
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.common.yaml import yaml_dump
from ansible.utils.collection_loader import AnsibleCollectionRef
from ansible.utils.display import Display
from ansible.utils.hashing import secure_hash, secure_hash_s
-from ansible.utils.sentinel import Sentinel
display = Display()
@@ -333,11 +335,18 @@ def verify_local_collection(local_collection, remote_collection, artifacts_manag
os.path.join(b_collection_path, to_bytes(name, errors='surrogate_or_strict'))
)
+ b_ignore_patterns = [
+ b'*.pyc',
+ ]
+
# Find any paths not in the FILES.json
for root, dirs, files in os.walk(b_collection_path):
for name in files:
full_path = os.path.join(root, name)
path = to_text(full_path[len(b_collection_path) + 1::], errors='surrogate_or_strict')
+ if any(fnmatch.fnmatch(full_path, b_pattern) for b_pattern in b_ignore_patterns):
+ display.v("Ignoring verification for %s" % full_path)
+ continue
if full_path not in collection_files:
modified_content.append(
@@ -1310,7 +1319,7 @@ def _build_collection_tar(
tar_info = tarfile.TarInfo(name)
tar_info.size = len(b)
tar_info.mtime = int(time.time())
- tar_info.mode = 0o0644
+ tar_info.mode = S_IRWU_RG_RO
tar_file.addfile(tarinfo=tar_info, fileobj=b_io)
for file_info in file_manifest['files']: # type: ignore[union-attr]
@@ -1324,7 +1333,7 @@ def _build_collection_tar(
def reset_stat(tarinfo):
if tarinfo.type != tarfile.SYMTYPE:
existing_is_exec = tarinfo.mode & stat.S_IXUSR
- tarinfo.mode = 0o0755 if existing_is_exec or tarinfo.isdir() else 0o0644
+ tarinfo.mode = S_IRWXU_RXG_RXO if existing_is_exec or tarinfo.isdir() else S_IRWU_RG_RO
tarinfo.uid = tarinfo.gid = 0
tarinfo.uname = tarinfo.gname = ''
@@ -1366,7 +1375,7 @@ def _build_collection_dir(b_collection_path, b_collection_output, collection_man
This should follow the same pattern as _build_collection_tar.
"""
- os.makedirs(b_collection_output, mode=0o0755)
+ os.makedirs(b_collection_output, mode=S_IRWXU_RXG_RXO)
files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict')
collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256)
@@ -1378,7 +1387,7 @@ def _build_collection_dir(b_collection_path, b_collection_output, collection_man
with open(b_path, 'wb') as file_obj, BytesIO(b) as b_io:
shutil.copyfileobj(b_io, file_obj)
- os.chmod(b_path, 0o0644)
+ os.chmod(b_path, S_IRWU_RG_RO)
base_directories = []
for file_info in sorted(file_manifest['files'], key=lambda x: x['name']):
@@ -1389,11 +1398,11 @@ def _build_collection_dir(b_collection_path, b_collection_output, collection_man
dest_file = os.path.join(b_collection_output, to_bytes(file_info['name'], errors='surrogate_or_strict'))
existing_is_exec = os.stat(src_file, follow_symlinks=False).st_mode & stat.S_IXUSR
- mode = 0o0755 if existing_is_exec else 0o0644
+ mode = S_IRWXU_RXG_RXO if existing_is_exec else S_IRWU_RG_RO
# ensure symlinks to dirs are not translated to empty dirs
if os.path.isdir(src_file) and not os.path.islink(src_file):
- mode = 0o0755
+ mode = S_IRWXU_RXG_RXO
base_directories.append(src_file)
os.mkdir(dest_file, mode)
else:
@@ -1517,6 +1526,7 @@ def install(collection, path, artifacts_manager): # FIXME: mv to dataclasses?
artifacts_manager.required_successful_signature_count,
artifacts_manager.ignore_signature_errors,
)
+ remove_source_metadata(collection, b_collection_path)
if (collection.is_online_index_pointer and isinstance(collection.src, GalaxyAPI)):
write_source_metadata(
collection,
@@ -1542,10 +1552,10 @@ def write_source_metadata(collection, b_collection_path, artifacts_manager):
shutil.rmtree(b_info_dir)
try:
- os.mkdir(b_info_dir, mode=0o0755)
+ os.mkdir(b_info_dir, mode=S_IRWXU_RXG_RXO)
with open(b_info_dest, mode='w+b') as fd:
fd.write(b_yaml_source_data)
- os.chmod(b_info_dest, 0o0644)
+ os.chmod(b_info_dest, S_IRWU_RG_RO)
except Exception:
# Ensure we don't leave the dir behind in case of a failure.
if os.path.isdir(b_info_dir):
@@ -1553,6 +1563,22 @@ def write_source_metadata(collection, b_collection_path, artifacts_manager):
raise
+def remove_source_metadata(collection, b_collection_path):
+ pattern = f"{collection.namespace}.{collection.name}-*.info"
+ info_path = os.path.join(
+ b_collection_path,
+ b'../../',
+ to_bytes(pattern, errors='surrogate_or_strict')
+ )
+ if (outdated_info := glob.glob(info_path)):
+ display.vvvv(f"Removing {pattern} metadata from previous installations")
+ for info_dir in outdated_info:
+ try:
+ shutil.rmtree(info_dir)
+ except Exception:
+ pass
+
+
def verify_artifact_manifest(manifest_file, signatures, keyring, required_signature_count, ignore_signature_errors):
# type: (str, list[str], str, str, list[str]) -> None
failed_verify = False
@@ -1576,13 +1602,6 @@ def install_artifact(b_coll_targz_path, b_collection_path, b_temp_path, signatur
"""
try:
with tarfile.open(b_coll_targz_path, mode='r') as collection_tar:
- # Remove this once py3.11 is our controller minimum
- # Workaround for https://bugs.python.org/issue47231
- # See _extract_tar_dir
- collection_tar._ansible_normalized_cache = {
- m.name.removesuffix(os.path.sep): m for m in collection_tar.getmembers()
- } # deprecated: description='TarFile member index' core_version='2.18' python_version='3.11'
-
# Verify the signature on the MANIFEST.json before extracting anything else
_extract_tar_file(collection_tar, MANIFEST_FILENAME, b_collection_path, b_temp_path)
@@ -1663,10 +1682,10 @@ def install_src(collection, b_collection_path, b_collection_output_path, artifac
def _extract_tar_dir(tar, dirname, b_dest):
""" Extracts a directory from a collection tar. """
- dirname = to_native(dirname, errors='surrogate_or_strict').removesuffix(os.path.sep)
+ dirname = to_native(dirname, errors='surrogate_or_strict')
try:
- tar_member = tar._ansible_normalized_cache[dirname]
+ tar_member = tar.getmember(dirname)
except KeyError:
raise AnsibleError("Unable to extract '%s' from collection" % dirname)
@@ -1674,7 +1693,7 @@ def _extract_tar_dir(tar, dirname, b_dest):
b_parent_path = os.path.dirname(b_dir_path)
try:
- os.makedirs(b_parent_path, mode=0o0755)
+ os.makedirs(b_parent_path, mode=S_IRWXU_RXG_RXO)
except OSError as e:
if e.errno != errno.EEXIST:
raise
@@ -1689,7 +1708,7 @@ def _extract_tar_dir(tar, dirname, b_dest):
else:
if not os.path.isdir(b_dir_path):
- os.mkdir(b_dir_path, 0o0755)
+ os.mkdir(b_dir_path, S_IRWXU_RXG_RXO)
def _extract_tar_file(tar, filename, b_dest, b_temp_path, expected_hash=None):
@@ -1715,7 +1734,7 @@ def _extract_tar_file(tar, filename, b_dest, b_temp_path, expected_hash=None):
if not os.path.exists(b_parent_dir):
# Seems like Galaxy does not validate if all file entries have a corresponding dir ftype entry. This check
# makes sure we create the parent directory even if it wasn't set in the metadata.
- os.makedirs(b_parent_dir, mode=0o0755)
+ os.makedirs(b_parent_dir, mode=S_IRWXU_RXG_RXO)
if tar_member.type == tarfile.SYMTYPE:
b_link_path = to_bytes(tar_member.linkname, errors='surrogate_or_strict')
@@ -1730,9 +1749,9 @@ def _extract_tar_file(tar, filename, b_dest, b_temp_path, expected_hash=None):
# Default to rw-r--r-- and only add execute if the tar file has execute.
tar_member = tar.getmember(to_native(filename, errors='surrogate_or_strict'))
- new_mode = 0o644
+ new_mode = S_IRWU_RG_RO
if stat.S_IMODE(tar_member.mode) & stat.S_IXUSR:
- new_mode |= 0o0111
+ new_mode |= S_IXANY
os.chmod(b_dest_filepath, new_mode)
@@ -1888,7 +1907,7 @@ def _resolve_depenency_map(
for req in dep_exc.criterion.iter_requirement():
error_msg_lines.append(
- '* {req.fqcn!s}:{req.ver!s}'.format(req=req)
+ f'* {req.fqcn!s}:{req.ver!s}'
)
error_msg_lines.append(pre_release_hint)
diff --git a/lib/ansible/galaxy/collection/concrete_artifact_manager.py b/lib/ansible/galaxy/collection/concrete_artifact_manager.py
index 27ce287af3c..fb807766f5c 100644
--- a/lib/ansible/galaxy/collection/concrete_artifact_manager.py
+++ b/lib/ansible/galaxy/collection/concrete_artifact_manager.py
@@ -10,6 +10,7 @@ import os
import tarfile
import subprocess
import typing as t
+import yaml
from contextlib import contextmanager
from hashlib import sha256
@@ -24,6 +25,7 @@ if t.TYPE_CHECKING:
)
from ansible.galaxy.token import GalaxyToken
+from ansible import context
from ansible.errors import AnsibleError
from ansible.galaxy import get_collections_galaxy_meta_info
from ansible.galaxy.api import should_retry_error
@@ -33,12 +35,12 @@ from ansible.module_utils.common.text.converters import to_bytes, to_native, to_
from ansible.module_utils.api import retry_with_delays_and_condition
from ansible.module_utils.api import generate_jittered_backoff
from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.sentinel import Sentinel
from ansible.module_utils.common.yaml import yaml_load
from ansible.module_utils.urls import open_url
from ansible.utils.display import Display
-from ansible.utils.sentinel import Sentinel
-import yaml
+import ansible.constants as C
display = Display()
@@ -61,7 +63,7 @@ class ConcreteArtifactsManager:
"""
def __init__(self, b_working_directory, validate_certs=True, keyring=None, timeout=60, required_signature_count=None, ignore_signature_errors=None):
# type: (bytes, bool, str, int, str, list[str]) -> None
- """Initialize ConcreteArtifactsManager caches and costraints."""
+ """Initialize ConcreteArtifactsManager caches and constraints."""
self._validate_certs = validate_certs # type: bool
self._artifact_cache = {} # type: dict[bytes, bytes]
self._galaxy_artifact_cache = {} # type: dict[Candidate | Requirement, bytes]
@@ -413,7 +415,7 @@ def _extract_collection_from_git(repo_url, coll_ver, b_path):
b_checkout_path = mkdtemp(
dir=b_path,
prefix=to_bytes(name, errors='surrogate_or_strict'),
- ) # type: bytes
+ )
try:
git_executable = get_bin_path('git')
@@ -425,11 +427,14 @@ def _extract_collection_from_git(repo_url, coll_ver, b_path):
# Perform a shallow clone if simply cloning HEAD
if version == 'HEAD':
- git_clone_cmd = git_executable, 'clone', '--depth=1', git_url, to_text(b_checkout_path)
+ git_clone_cmd = [git_executable, 'clone', '--depth=1', git_url, to_text(b_checkout_path)]
else:
- git_clone_cmd = git_executable, 'clone', git_url, to_text(b_checkout_path)
+ git_clone_cmd = [git_executable, 'clone', git_url, to_text(b_checkout_path)]
# FIXME: '--branch', version
+ if context.CLIARGS['ignore_certs'] or C.GALAXY_IGNORE_CERTS:
+ git_clone_cmd.extend(['-c', 'http.sslVerify=false'])
+
try:
subprocess.check_call(git_clone_cmd)
except subprocess.CalledProcessError as proc_err:
diff --git a/lib/ansible/galaxy/collection/galaxy_api_proxy.py b/lib/ansible/galaxy/collection/galaxy_api_proxy.py
index 0c1b7df0bec..046354a395d 100644
--- a/lib/ansible/galaxy/collection/galaxy_api_proxy.py
+++ b/lib/ansible/galaxy/collection/galaxy_api_proxy.py
@@ -27,8 +27,7 @@ display = Display()
class MultiGalaxyAPIProxy:
"""A proxy that abstracts talking to multiple Galaxy instances."""
- def __init__(self, apis, concrete_artifacts_manager, offline=False):
- # type: (t.Iterable[GalaxyAPI], ConcreteArtifactsManager, bool) -> None
+ def __init__(self, apis: t.Iterable[GalaxyAPI], concrete_artifacts_manager: ConcreteArtifactsManager, offline: bool = False) -> None:
"""Initialize the target APIs list."""
self._apis = apis
self._concrete_art_mgr = concrete_artifacts_manager
@@ -38,22 +37,21 @@ class MultiGalaxyAPIProxy:
def is_offline_mode_requested(self):
return self._offline
- def _assert_that_offline_mode_is_not_requested(self): # type: () -> None
+ def _assert_that_offline_mode_is_not_requested(self) -> None:
if self.is_offline_mode_requested:
raise NotImplementedError("The calling code is not supposed to be invoked in 'offline' mode.")
- def _get_collection_versions(self, requirement):
- # type: (Requirement) -> t.Iterator[tuple[GalaxyAPI, str]]
+ def _get_collection_versions(self, requirement: Requirement) -> t.Iterator[tuple[GalaxyAPI, str]]:
"""Helper for get_collection_versions.
Yield api, version pairs for all APIs,
and reraise the last error if no valid API was found.
"""
if self._offline:
- return []
+ return
found_api = False
- last_error = None # type: Exception | None
+ last_error: Exception | None = None
api_lookup_order = (
(requirement.src, )
@@ -86,8 +84,7 @@ class MultiGalaxyAPIProxy:
if not found_api and last_error is not None:
raise last_error
- def get_collection_versions(self, requirement):
- # type: (Requirement) -> t.Iterable[tuple[str, GalaxyAPI]]
+ def get_collection_versions(self, requirement: Requirement) -> t.Iterable[tuple[str, GalaxyAPI]]:
"""Get a set of unique versions for FQCN on Galaxy servers."""
if requirement.is_concrete_artifact:
return {
@@ -110,8 +107,7 @@ class MultiGalaxyAPIProxy:
)
)
- def get_collection_version_metadata(self, collection_candidate):
- # type: (Candidate) -> CollectionVersionMetadata
+ def get_collection_version_metadata(self, collection_candidate: Candidate) -> CollectionVersionMetadata:
"""Retrieve collection metadata of a given candidate."""
self._assert_that_offline_mode_is_not_requested()
@@ -160,8 +156,7 @@ class MultiGalaxyAPIProxy:
raise last_err
- def get_collection_dependencies(self, collection_candidate):
- # type: (Candidate) -> dict[str, str]
+ def get_collection_dependencies(self, collection_candidate: Candidate) -> dict[str, str]:
# FIXME: return Requirement instances instead?
"""Retrieve collection dependencies of a given candidate."""
if collection_candidate.is_concrete_artifact:
@@ -177,13 +172,12 @@ class MultiGalaxyAPIProxy:
dependencies
)
- def get_signatures(self, collection_candidate):
- # type: (Candidate) -> list[str]
+ def get_signatures(self, collection_candidate: Candidate) -> list[str]:
self._assert_that_offline_mode_is_not_requested()
namespace = collection_candidate.namespace
name = collection_candidate.name
version = collection_candidate.ver
- last_err = None # type: Exception | None
+ last_err: Exception | None = None
api_lookup_order = (
(collection_candidate.src, )
diff --git a/lib/ansible/galaxy/collection/gpg.py b/lib/ansible/galaxy/collection/gpg.py
index 38ec189ddd0..9d41cdcde8c 100644
--- a/lib/ansible/galaxy/collection/gpg.py
+++ b/lib/ansible/galaxy/collection/gpg.py
@@ -12,20 +12,14 @@ import contextlib
import inspect
import os
import subprocess
-import sys
import typing as t
from dataclasses import dataclass, fields as dc_fields
-from functools import partial
from urllib.error import HTTPError, URLError
if t.TYPE_CHECKING:
from ansible.utils.display import Display
-IS_PY310_PLUS = sys.version_info[:2] >= (3, 10)
-
-frozen_dataclass = partial(dataclass, frozen=True, **({'slots': True} if IS_PY310_PLUS else {}))
-
def get_signature_from_source(source, display=None): # type: (str, t.Optional[Display]) -> str
if display is not None:
@@ -128,7 +122,7 @@ def parse_gpg_errors(status_out): # type: (str) -> t.Iterator[GpgBaseError]
yield cls(*fields)
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgBaseError(Exception):
status: str
@@ -142,35 +136,35 @@ class GpgBaseError(Exception):
super(GpgBaseError, self).__setattr__(field_name, field_type(getattr(self, field_name)))
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgExpSig(GpgBaseError):
"""The signature with the keyid is good, but the signature is expired."""
keyid: str
username: str
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgExpKeySig(GpgBaseError):
"""The signature with the keyid is good, but the signature was made by an expired key."""
keyid: str
username: str
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgRevKeySig(GpgBaseError):
"""The signature with the keyid is good, but the signature was made by a revoked key."""
keyid: str
username: str
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgBadSig(GpgBaseError):
"""The signature with the keyid has not been verified okay."""
keyid: str
username: str
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgErrSig(GpgBaseError):
""""It was not possible to check the signature. This may be caused by
a missing public key or an unsupported algorithm. A RC of 4
@@ -186,24 +180,24 @@ class GpgErrSig(GpgBaseError):
fpr: str
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgNoPubkey(GpgBaseError):
"""The public key is not available."""
keyid: str
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgMissingPassPhrase(GpgBaseError):
"""No passphrase was supplied."""
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgBadPassphrase(GpgBaseError):
"""The supplied passphrase was wrong or not given."""
keyid: str
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgNoData(GpgBaseError):
"""No data has been found. Codes for WHAT are:
- 1 :: No armored data.
@@ -215,7 +209,7 @@ class GpgNoData(GpgBaseError):
what: str
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgUnexpected(GpgBaseError):
"""No data has been found. Codes for WHAT are:
- 1 :: No armored data.
@@ -227,7 +221,7 @@ class GpgUnexpected(GpgBaseError):
what: str
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgError(GpgBaseError):
"""This is a generic error status message, it might be followed by error location specific data."""
location: str
@@ -235,30 +229,30 @@ class GpgError(GpgBaseError):
more: str = ""
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgFailure(GpgBaseError):
"""This is the counterpart to SUCCESS and used to indicate a program failure."""
location: str
code: int
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgBadArmor(GpgBaseError):
"""The ASCII armor is corrupted."""
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgKeyExpired(GpgBaseError):
"""The key has expired."""
timestamp: int
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgKeyRevoked(GpgBaseError):
"""The used key has been revoked by its owner."""
-@frozen_dataclass
+@dataclass(frozen=True, slots=True)
class GpgNoSecKey(GpgBaseError):
"""The secret key is not available."""
keyid: str
diff --git a/lib/ansible/galaxy/data/COPYING b/lib/ansible/galaxy/data/COPYING
new file mode 100644
index 00000000000..87a9639c92f
--- /dev/null
+++ b/lib/ansible/galaxy/data/COPYING
@@ -0,0 +1,7 @@
+All templates, files and files generated from them in the subdirectories of this one
+are subject to the MIT license when applicable.
+
+MIT License:
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/lib/ansible/galaxy/data/apb/Dockerfile.j2 b/lib/ansible/galaxy/data/apb/Dockerfile.j2
index 4d99a8b0c37..f39abc3bd89 100644
--- a/lib/ansible/galaxy/data/apb/Dockerfile.j2
+++ b/lib/ansible/galaxy/data/apb/Dockerfile.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
FROM ansibleplaybookbundle/apb-base
LABEL "com.redhat.apb.spec"=\
diff --git a/lib/ansible/galaxy/data/apb/Makefile.j2 b/lib/ansible/galaxy/data/apb/Makefile.j2
index ebeaa61f168..9278d246094 100644
--- a/lib/ansible/galaxy/data/apb/Makefile.j2
+++ b/lib/ansible/galaxy/data/apb/Makefile.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
DOCKERHOST = DOCKERHOST
DOCKERORG = DOCKERORG
IMAGENAME = {{ role_name }}
diff --git a/lib/ansible/galaxy/data/apb/README.md b/lib/ansible/galaxy/data/apb/README.md
index 2e350a03fde..0f51845fbd3 100644
--- a/lib/ansible/galaxy/data/apb/README.md
+++ b/lib/ansible/galaxy/data/apb/README.md
@@ -6,17 +6,21 @@ A brief description of the APB goes here.
Requirements
------------
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here.
+For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
APB Variables
--------------
-A description of the settable variables for this APB should go here, including any variables that are in defaults/main.yml, vars/main.yml, apb.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.
+A description of the settable variables for this APB should go here, including any variables that are in defaults/main.yml, vars/main.yml, apb.yml, and
+any variables that can/should be set via parameters to the role.
+Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
-A list of other APBs/roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+A list of other APBs/roles hosted on Galaxy should go here, plus any details in regards to
+parameters that may need to be set for other roles, or variables that are used from other roles.
Example Playbook
----------------
diff --git a/lib/ansible/galaxy/data/apb/apb.yml.j2 b/lib/ansible/galaxy/data/apb/apb.yml.j2
index f96880196fe..e9405dcc359 100644
--- a/lib/ansible/galaxy/data/apb/apb.yml.j2
+++ b/lib/ansible/galaxy/data/apb/apb.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
version: '1.0.0'
name: {{ role_name }}
description: {{ description }}
diff --git a/lib/ansible/galaxy/data/apb/defaults/main.yml.j2 b/lib/ansible/galaxy/data/apb/defaults/main.yml.j2
index 3818e64c335..8842d94e345 100644
--- a/lib/ansible/galaxy/data/apb/defaults/main.yml.j2
+++ b/lib/ansible/galaxy/data/apb/defaults/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# defaults file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/apb/handlers/main.yml.j2 b/lib/ansible/galaxy/data/apb/handlers/main.yml.j2
index 3f4c49674d4..89371a09bab 100644
--- a/lib/ansible/galaxy/data/apb/handlers/main.yml.j2
+++ b/lib/ansible/galaxy/data/apb/handlers/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# handlers file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/apb/meta/main.yml.j2 b/lib/ansible/galaxy/data/apb/meta/main.yml.j2
index 862f8ef8b4c..23f870c4c50 100644
--- a/lib/ansible/galaxy/data/apb/meta/main.yml.j2
+++ b/lib/ansible/galaxy/data/apb/meta/main.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
galaxy_info:
author: {{ author }}
description: {{ description }}
@@ -16,21 +17,6 @@ galaxy_info:
# - CC-BY-4.0
license: {{ license }}
- #
- # platforms is a list of platforms, and each platform has a name and a list of versions.
- #
- # platforms:
- # - name: Fedora
- # versions:
- # - all
- # - 25
- # - name: SomePlatform
- # versions:
- # - all
- # - 1.0
- # - 7
- # - 99.99
-
galaxy_tags:
- apb
# List tags for your role here, one per line. A tag is a keyword that describes
diff --git a/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2 b/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2
index 19527310a59..0a863784990 100644
--- a/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2
+++ b/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
- name: "{{ role_name }} playbook to deprovision the application"
hosts: localhost
gather_facts: false
diff --git a/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2 b/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2
index 7b08605ec58..f0691e2b875 100644
--- a/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2
+++ b/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
- name: "{{ role_name }} playbook to provision the application"
hosts: localhost
gather_facts: false
diff --git a/lib/ansible/galaxy/data/apb/tasks/main.yml.j2 b/lib/ansible/galaxy/data/apb/tasks/main.yml.j2
index a9880650590..1bba65a7566 100644
--- a/lib/ansible/galaxy/data/apb/tasks/main.yml.j2
+++ b/lib/ansible/galaxy/data/apb/tasks/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# tasks file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/apb/tests/ansible.cfg b/lib/ansible/galaxy/data/apb/tests/ansible.cfg
index 2f74f1b2722..e2b73526706 100644
--- a/lib/ansible/galaxy/data/apb/tests/ansible.cfg
+++ b/lib/ansible/galaxy/data/apb/tests/ansible.cfg
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
[defaults]
inventory=./inventory
diff --git a/lib/ansible/galaxy/data/apb/tests/inventory b/lib/ansible/galaxy/data/apb/tests/inventory
index ea69cbf1225..a24f8243f1f 100644
--- a/lib/ansible/galaxy/data/apb/tests/inventory
+++ b/lib/ansible/galaxy/data/apb/tests/inventory
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
localhost
diff --git a/lib/ansible/galaxy/data/apb/tests/test.yml.j2 b/lib/ansible/galaxy/data/apb/tests/test.yml.j2
index fb14f85c97b..1b03869978c 100644
--- a/lib/ansible/galaxy/data/apb/tests/test.yml.j2
+++ b/lib/ansible/galaxy/data/apb/tests/test.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
---
- hosts: localhost
gather_facts: no
diff --git a/lib/ansible/galaxy/data/apb/vars/main.yml.j2 b/lib/ansible/galaxy/data/apb/vars/main.yml.j2
index 092d511a1e6..8fc2f46c5e0 100644
--- a/lib/ansible/galaxy/data/apb/vars/main.yml.j2
+++ b/lib/ansible/galaxy/data/apb/vars/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# vars file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/collections_galaxy_meta.yml b/lib/ansible/galaxy/data/collections_galaxy_meta.yml
index 5c4472cda1a..f47f1a7efff 100644
--- a/lib/ansible/galaxy/data/collections_galaxy_meta.yml
+++ b/lib/ansible/galaxy/data/collections_galaxy_meta.yml
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
diff --git a/lib/ansible/galaxy/data/container/defaults/main.yml.j2 b/lib/ansible/galaxy/data/container/defaults/main.yml.j2
index 3818e64c335..8842d94e345 100644
--- a/lib/ansible/galaxy/data/container/defaults/main.yml.j2
+++ b/lib/ansible/galaxy/data/container/defaults/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# defaults file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/container/handlers/main.yml.j2 b/lib/ansible/galaxy/data/container/handlers/main.yml.j2
index 3f4c49674d4..89371a09bab 100644
--- a/lib/ansible/galaxy/data/container/handlers/main.yml.j2
+++ b/lib/ansible/galaxy/data/container/handlers/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# handlers file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/container/meta/container.yml.j2 b/lib/ansible/galaxy/data/container/meta/container.yml.j2
index f033d34110e..97b39617192 100644
--- a/lib/ansible/galaxy/data/container/meta/container.yml.j2
+++ b/lib/ansible/galaxy/data/container/meta/container.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
# Add your Ansible Container service definitions here.
# For example:
#
diff --git a/lib/ansible/galaxy/data/container/meta/main.yml.j2 b/lib/ansible/galaxy/data/container/meta/main.yml.j2
index 72fc9a22e8a..d3fe1495a25 100644
--- a/lib/ansible/galaxy/data/container/meta/main.yml.j2
+++ b/lib/ansible/galaxy/data/container/meta/main.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
galaxy_info:
author: {{ author }}
description: {{ description }}
@@ -21,24 +22,6 @@ galaxy_info:
# If Ansible is required outside of the build container, provide the minimum version:
# min_ansible_version:
- #
- # Provide a list of supported platforms, and for each platform a list of versions.
- # If you don't wish to enumerate all versions for a particular platform, use 'all'.
- # To view available platforms and versions (or releases), visit:
- # https://galaxy.ansible.com/api/v1/platforms/
- #
- # platforms:
- # - name: Fedora
- # versions:
- # - all
- # - 25
- # - name: SomePlatform
- # versions:
- # - all
- # - 1.0
- # - 7
- # - 99.99
-
galaxy_tags:
- container
# List tags for your role here, one per line. A tag is a keyword that describes
diff --git a/lib/ansible/galaxy/data/container/tasks/main.yml.j2 b/lib/ansible/galaxy/data/container/tasks/main.yml.j2
index a9880650590..1bba65a7566 100644
--- a/lib/ansible/galaxy/data/container/tasks/main.yml.j2
+++ b/lib/ansible/galaxy/data/container/tasks/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# tasks file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/container/tests/ansible.cfg b/lib/ansible/galaxy/data/container/tests/ansible.cfg
index 2f74f1b2722..e2b73526706 100644
--- a/lib/ansible/galaxy/data/container/tests/ansible.cfg
+++ b/lib/ansible/galaxy/data/container/tests/ansible.cfg
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
[defaults]
inventory=./inventory
diff --git a/lib/ansible/galaxy/data/container/tests/inventory b/lib/ansible/galaxy/data/container/tests/inventory
index ea69cbf1225..a24f8243f1f 100644
--- a/lib/ansible/galaxy/data/container/tests/inventory
+++ b/lib/ansible/galaxy/data/container/tests/inventory
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
localhost
diff --git a/lib/ansible/galaxy/data/container/tests/test.yml.j2 b/lib/ansible/galaxy/data/container/tests/test.yml.j2
index fb14f85c97b..1b03869978c 100644
--- a/lib/ansible/galaxy/data/container/tests/test.yml.j2
+++ b/lib/ansible/galaxy/data/container/tests/test.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
---
- hosts: localhost
gather_facts: no
diff --git a/lib/ansible/galaxy/data/container/vars/main.yml.j2 b/lib/ansible/galaxy/data/container/vars/main.yml.j2
index 092d511a1e6..8fc2f46c5e0 100644
--- a/lib/ansible/galaxy/data/container/vars/main.yml.j2
+++ b/lib/ansible/galaxy/data/container/vars/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# vars file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/default/collection/README.md.j2 b/lib/ansible/galaxy/data/default/collection/README.md.j2
index 5e5162206ec..ff8d7a3e8b1 100644
--- a/lib/ansible/galaxy/data/default/collection/README.md.j2
+++ b/lib/ansible/galaxy/data/default/collection/README.md.j2
@@ -1,3 +1,4 @@
+{# SPDX-License-Identifier: MIT-0 #}
# Ansible Collection - {{ namespace }}.{{ collection_name }}
Documentation for the collection.
diff --git a/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2 b/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2
index 7821491b257..842bdb10ce1 100644
--- a/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2
+++ b/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
### REQUIRED
{% for option in required_config %}
{{ option.description | comment_ify }}
diff --git a/lib/ansible/galaxy/data/default/collection/meta/runtime.yml b/lib/ansible/galaxy/data/default/collection/meta/runtime.yml
index 20f709edff5..936cae9f714 100644
--- a/lib/ansible/galaxy/data/default/collection/meta/runtime.yml
+++ b/lib/ansible/galaxy/data/default/collection/meta/runtime.yml
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
---
# Collections must specify a minimum required ansible version to upload
# to galaxy
diff --git a/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2 b/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2
index 7c006cfa76f..795e371cd60 100644
--- a/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2
+++ b/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2
@@ -1,3 +1,4 @@
+{# SPDX-License-Identifier: MIT-0 #}
# Collections Plugins Directory
This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder that
diff --git a/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2 b/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2
index 3818e64c335..8842d94e345 100644
--- a/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2
+++ b/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# defaults file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2 b/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2
index 3f4c49674d4..89371a09bab 100644
--- a/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2
+++ b/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# handlers file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/default/role/meta/main.yml.j2 b/lib/ansible/galaxy/data/default/role/meta/main.yml.j2
index 4891a68b490..b23f47cc5bc 100644
--- a/lib/ansible/galaxy/data/default/role/meta/main.yml.j2
+++ b/lib/ansible/galaxy/data/default/role/meta/main.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
galaxy_info:
author: {{ author }}
description: {{ description }}
@@ -21,24 +22,6 @@ galaxy_info:
# If this a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
- #
- # Provide a list of supported platforms, and for each platform a list of versions.
- # If you don't wish to enumerate all versions for a particular platform, use 'all'.
- # To view available platforms and versions (or releases), visit:
- # https://galaxy.ansible.com/api/v1/platforms/
- #
- # platforms:
- # - name: Fedora
- # versions:
- # - all
- # - 25
- # - name: SomePlatform
- # versions:
- # - all
- # - 1.0
- # - 7
- # - 99.99
-
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
diff --git a/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2 b/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2
index a9880650590..1bba65a7566 100644
--- a/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2
+++ b/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# tasks file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/default/role/tests/inventory b/lib/ansible/galaxy/data/default/role/tests/inventory
index 878877b0776..03ca42fd173 100644
--- a/lib/ansible/galaxy/data/default/role/tests/inventory
+++ b/lib/ansible/galaxy/data/default/role/tests/inventory
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
localhost
diff --git a/lib/ansible/galaxy/data/default/role/tests/test.yml.j2 b/lib/ansible/galaxy/data/default/role/tests/test.yml.j2
index 0c40f95a697..bf4f028593e 100644
--- a/lib/ansible/galaxy/data/default/role/tests/test.yml.j2
+++ b/lib/ansible/galaxy/data/default/role/tests/test.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
---
- hosts: localhost
remote_user: root
diff --git a/lib/ansible/galaxy/data/default/role/vars/main.yml.j2 b/lib/ansible/galaxy/data/default/role/vars/main.yml.j2
index 092d511a1e6..8fc2f46c5e0 100644
--- a/lib/ansible/galaxy/data/default/role/vars/main.yml.j2
+++ b/lib/ansible/galaxy/data/default/role/vars/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# vars file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2 b/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2
index cf434d750c3..51e41111117 100644
--- a/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2
+++ b/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
diff --git a/lib/ansible/galaxy/data/network/defaults/main.yml.j2 b/lib/ansible/galaxy/data/network/defaults/main.yml.j2
index 3818e64c335..8842d94e345 100644
--- a/lib/ansible/galaxy/data/network/defaults/main.yml.j2
+++ b/lib/ansible/galaxy/data/network/defaults/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# defaults file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/network/library/example_command.py.j2 b/lib/ansible/galaxy/data/network/library/example_command.py.j2
index dff5b153a64..9aa6ef62d4d 100644
--- a/lib/ansible/galaxy/data/network/library/example_command.py.j2
+++ b/lib/ansible/galaxy/data/network/library/example_command.py.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
diff --git a/lib/ansible/galaxy/data/network/library/example_config.py.j2 b/lib/ansible/galaxy/data/network/library/example_config.py.j2
index 0a8479b36cb..2913af08a0c 100644
--- a/lib/ansible/galaxy/data/network/library/example_config.py.j2
+++ b/lib/ansible/galaxy/data/network/library/example_config.py.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
diff --git a/lib/ansible/galaxy/data/network/library/example_facts.py.j2 b/lib/ansible/galaxy/data/network/library/example_facts.py.j2
index 2f0bafa1ccb..f90f456eab0 100644
--- a/lib/ansible/galaxy/data/network/library/example_facts.py.j2
+++ b/lib/ansible/galaxy/data/network/library/example_facts.py.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
diff --git a/lib/ansible/galaxy/data/network/meta/main.yml.j2 b/lib/ansible/galaxy/data/network/meta/main.yml.j2
index d0184ae8cea..0cd67263113 100644
--- a/lib/ansible/galaxy/data/network/meta/main.yml.j2
+++ b/lib/ansible/galaxy/data/network/meta/main.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
galaxy_info:
author: {{ author }}
description: {{ description }}
@@ -21,21 +22,6 @@ galaxy_info:
# If this a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
- #
- # platforms is a list of platforms, and each platform has a name and a list of versions.
- #
- # platforms:
- # - name: VYOS
- # versions:
- # - all
- # - 25
- # - name: SomePlatform
- # versions:
- # - all
- # - 1.0
- # - 7
- # - 99.99
-
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
diff --git a/lib/ansible/galaxy/data/network/module_utils/example.py.j2 b/lib/ansible/galaxy/data/network/module_utils/example.py.j2
index 9422e747024..a3d9aeac236 100644
--- a/lib/ansible/galaxy/data/network/module_utils/example.py.j2
+++ b/lib/ansible/galaxy/data/network/module_utils/example.py.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
diff --git a/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2 b/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2
index 69c90c990af..fb9ddfad86c 100644
--- a/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2
+++ b/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
diff --git a/lib/ansible/galaxy/data/network/tasks/main.yml.j2 b/lib/ansible/galaxy/data/network/tasks/main.yml.j2
index a9880650590..1bba65a7566 100644
--- a/lib/ansible/galaxy/data/network/tasks/main.yml.j2
+++ b/lib/ansible/galaxy/data/network/tasks/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# tasks file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2 b/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2
index f44e79f3dcf..d3562d15136 100644
--- a/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2
+++ b/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
#
# (c) 2018 Red Hat Inc.
#
diff --git a/lib/ansible/galaxy/data/network/tests/inventory b/lib/ansible/galaxy/data/network/tests/inventory
index 878877b0776..03ca42fd173 100644
--- a/lib/ansible/galaxy/data/network/tests/inventory
+++ b/lib/ansible/galaxy/data/network/tests/inventory
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
localhost
diff --git a/lib/ansible/galaxy/data/network/tests/test.yml.j2 b/lib/ansible/galaxy/data/network/tests/test.yml.j2
index 11284eb5b8e..93263043bf1 100644
--- a/lib/ansible/galaxy/data/network/tests/test.yml.j2
+++ b/lib/ansible/galaxy/data/network/tests/test.yml.j2
@@ -1,3 +1,4 @@
+#SPDX-License-Identifier: MIT-0
---
- hosts: localhost
connection: network_cli
diff --git a/lib/ansible/galaxy/data/network/vars/main.yml.j2 b/lib/ansible/galaxy/data/network/vars/main.yml.j2
index 092d511a1e6..8fc2f46c5e0 100644
--- a/lib/ansible/galaxy/data/network/vars/main.yml.j2
+++ b/lib/ansible/galaxy/data/network/vars/main.yml.j2
@@ -1,2 +1,3 @@
+#SPDX-License-Identifier: MIT-0
---
# vars file for {{ role_name }}
diff --git a/lib/ansible/galaxy/dependency_resolution/providers.py b/lib/ansible/galaxy/dependency_resolution/providers.py
index 716f5423b37..7578cae785c 100644
--- a/lib/ansible/galaxy/dependency_resolution/providers.py
+++ b/lib/ansible/galaxy/dependency_resolution/providers.py
@@ -126,7 +126,7 @@ class CollectionDependencyProviderBase(AbstractProvider):
the current candidate list
* ``parent`` specifies the candidate that provides
- (dependend on) the requirement, or `None`
+ (depended on) the requirement, or `None`
to indicate a root requirement.
resolvelib >=0.7.0, < 0.8.0
@@ -202,7 +202,7 @@ class CollectionDependencyProviderBase(AbstractProvider):
remote archives), the one-and-only match is returned
For a "named" requirement, Galaxy-compatible APIs are consulted
- to find concrete candidates for this requirement. Of theres a
+ to find concrete candidates for this requirement. If there's a
pre-installed candidate, it's prepended in front of others.
resolvelib >=0.5.3, <0.6.0
@@ -437,7 +437,7 @@ class CollectionDependencyProviderBase(AbstractProvider):
# FIXME: differs. So how do we resolve this case? Priority?
# FIXME: Taking into account a pinned hash? Exploding on
# FIXME: any differences?
- # NOTE: The underlying implmentation currently uses first found
+ # NOTE: The underlying implementation currently uses first found
req_map = self._api_proxy.get_collection_dependencies(candidate)
# NOTE: This guard expression MUST perform an early exit only
diff --git a/lib/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py
index 77334f8630e..806a9996ad4 100644
--- a/lib/ansible/galaxy/role.py
+++ b/lib/ansible/galaxy/role.py
@@ -256,7 +256,7 @@ class GalaxyRole(object):
display.display("- downloading role from %s" % archive_url)
try:
- url_file = open_url(archive_url, validate_certs=self._validate_certs, http_agent=user_agent())
+ url_file = open_url(archive_url, validate_certs=self._validate_certs, http_agent=user_agent(), timeout=60)
temp_file = tempfile.NamedTemporaryFile(delete=False)
data = url_file.read()
while data:
@@ -297,7 +297,7 @@ class GalaxyRole(object):
# are no versions in the list, we'll grab the head
# of the master branch
if len(role_versions) > 0:
- loose_versions = [LooseVersion(a.get('name', None)) for a in role_versions]
+ loose_versions = [v for a in role_versions if (v := LooseVersion()) and v.parse(a.get('name') or '') is None]
try:
loose_versions.sort()
except TypeError:
@@ -386,6 +386,8 @@ class GalaxyRole(object):
else:
os.makedirs(self.path)
+ resolved_archive = unfrackpath(archive_parent_dir, follow=False)
+
# We strip off any higher-level directories for all of the files
# contained within the tar file here. The default is 'github_repo-target'.
# Gerrit instances, on the other hand, does not have a parent directory at all.
@@ -400,33 +402,29 @@ class GalaxyRole(object):
if not (attr_value := getattr(member, attr, None)):
continue
- if attr_value.startswith(os.sep) and not is_subpath(attr_value, archive_parent_dir):
- err = f"Invalid {attr} for tarfile member: path {attr_value} is not a subpath of the role {archive_parent_dir}"
- raise AnsibleError(err)
-
if attr == 'linkname':
# Symlinks are relative to the link
- relative_to_archive_dir = os.path.dirname(getattr(member, 'name', ''))
- archive_dir_path = os.path.join(archive_parent_dir, relative_to_archive_dir, attr_value)
+ relative_to = os.path.dirname(getattr(member, 'name', ''))
else:
# Normalize paths that start with the archive dir
attr_value = attr_value.replace(archive_parent_dir, "", 1)
attr_value = os.path.join(*attr_value.split(os.sep)) # remove leading os.sep
- archive_dir_path = os.path.join(archive_parent_dir, attr_value)
+ relative_to = ''
- resolved_archive = unfrackpath(archive_parent_dir)
- resolved_path = unfrackpath(archive_dir_path)
- if not is_subpath(resolved_path, resolved_archive):
- err = f"Invalid {attr} for tarfile member: path {resolved_path} is not a subpath of the role {resolved_archive}"
+ full_path = os.path.join(resolved_archive, relative_to, attr_value)
+ if not is_subpath(full_path, resolved_archive, real=True):
+ err = f"Invalid {attr} for tarfile member: path {full_path} is not a subpath of the role {resolved_archive}"
raise AnsibleError(err)
- relative_path = os.path.join(*resolved_path.replace(resolved_archive, "", 1).split(os.sep)) or '.'
+ relative_path_dir = os.path.join(resolved_archive, relative_to)
+ relative_path = os.path.join(*full_path.replace(relative_path_dir, "", 1).split(os.sep))
setattr(member, attr, relative_path)
if _check_working_data_filter():
# deprecated: description='extract fallback without filter' python_version='3.11'
role_tar_file.extract(member, to_native(self.path), filter='data') # type: ignore[call-arg]
else:
+ # Remove along with manual path filter once Python 3.12 is minimum supported version
role_tar_file.extract(member, to_native(self.path))
# write out the install info file for later use
diff --git a/lib/ansible/galaxy/token.py b/lib/ansible/galaxy/token.py
index 183e2af109e..eb06a34181f 100644
--- a/lib/ansible/galaxy/token.py
+++ b/lib/ansible/galaxy/token.py
@@ -21,12 +21,16 @@
from __future__ import annotations
import base64
-import os
import json
+import os
+import time
from stat import S_IRUSR, S_IWUSR
+from urllib.error import HTTPError
from ansible import constants as C
+from ansible.galaxy.api import GalaxyError
from ansible.galaxy.user_agent import user_agent
+from ansible.module_utils.common.sentinel import Sentinel as NoTokenSentinel
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.common.yaml import yaml_dump, yaml_load
from ansible.module_utils.urls import open_url
@@ -35,12 +39,6 @@ from ansible.utils.display import Display
display = Display()
-class NoTokenSentinel(object):
- """ Represents an ansible.cfg server with not token defined (will ignore cmdline and GALAXY_TOKEN_PATH. """
- def __new__(cls, *args, **kwargs):
- return cls
-
-
class KeycloakToken(object):
'''A token granted by a Keycloak server.
@@ -57,12 +55,16 @@ class KeycloakToken(object):
self.client_id = client_id
if self.client_id is None:
self.client_id = 'cloud-services'
+ self._expiration = None
def _form_payload(self):
return 'grant_type=refresh_token&client_id=%s&refresh_token=%s' % (self.client_id,
self.access_token)
def get(self):
+ if self._expiration and time.time() >= self._expiration:
+ self._token = None
+
if self._token:
return self._token
@@ -76,15 +78,20 @@ class KeycloakToken(object):
# or 'azp' (Authorized party - the party to which the ID Token was issued)
payload = self._form_payload()
- resp = open_url(to_native(self.auth_url),
- data=payload,
- validate_certs=self.validate_certs,
- method='POST',
- http_agent=user_agent())
+ try:
+ resp = open_url(to_native(self.auth_url),
+ data=payload,
+ validate_certs=self.validate_certs,
+ method='POST',
+ http_agent=user_agent())
+ except HTTPError as e:
+ raise GalaxyError(e, 'Unable to get access token')
- # TODO: handle auth errors
+ data = json.load(resp)
- data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
+ # So that we have a buffer, expire the token in ~2/3 the given value
+ expires_in = data['expires_in'] // 3 * 2
+ self._expiration = time.time() + expires_in
# - extract 'access_token'
self._token = data.get('access_token')
diff --git a/lib/ansible/keyword_desc.yml b/lib/ansible/keyword_desc.yml
index 22a612ccc1c..4aea8234b61 100644
--- a/lib/ansible/keyword_desc.yml
+++ b/lib/ansible/keyword_desc.yml
@@ -61,7 +61,7 @@ serial: Explicitly define how Ansible batches the execution of the current play
strategy: Allows you to choose the strategy plugin to use for the play. See :ref:`strategy_plugins`.
tags: Tags applied to the task or included tasks, this allows selecting subsets of tasks from the command line.
tasks: Main list of tasks to execute in the play, they run after :term:`roles` and before :term:`post_tasks`.
-timeout: Time limit for the task to execute in, if exceeded Ansible will interrupt and fail the task.
+timeout: Time limit for the task action to execute in, if exceeded, Ansible will interrupt the process. Timeout does not include templating or looping.
throttle: Limit the number of concurrent task runs on task, block and playbook level. This is independent of the forks and serial settings, but cannot be set higher than those limits. For example, if forks is set to 10 and the throttle is set to 15, at most 10 hosts will be operated on in parallel.
until: "This keyword implies a ':term:`retries` loop' that will go on until the condition supplied here is met or we hit the :term:`retries` limit."
vars: Dictionary/map of variables
diff --git a/lib/ansible/module_utils/_internal/__init__.py b/lib/ansible/module_utils/_internal/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/lib/ansible/module_utils/_internal/_concurrent/__init__.py b/lib/ansible/module_utils/_internal/_concurrent/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/lib/ansible/module_utils/_internal/_concurrent/_daemon_threading.py b/lib/ansible/module_utils/_internal/_concurrent/_daemon_threading.py
new file mode 100644
index 00000000000..0b32a062fed
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_concurrent/_daemon_threading.py
@@ -0,0 +1,28 @@
+"""Proxy stdlib threading module that only supports non-joinable daemon threads."""
+# NB: all new local module attrs are _ prefixed to ensure an identical public attribute surface area to the module we're proxying
+
+from __future__ import annotations as _annotations
+
+import threading as _threading
+import typing as _t
+
+
+class _DaemonThread(_threading.Thread):
+ """
+ Daemon-only Thread subclass; prevents running threads of this type from blocking interpreter shutdown and process exit.
+ The join() method is a no-op.
+ """
+
+ def __init__(self, *args, daemon: bool | None = None, **kwargs) -> None:
+ super().__init__(*args, daemon=daemon or True, **kwargs)
+
+ def join(self, timeout=None) -> None:
+ """ThreadPoolExecutor's atexit handler joins all queue threads before allowing shutdown; prevent them from blocking."""
+
+
+Thread = _DaemonThread # shadow the real Thread attr with our _DaemonThread
+
+
+def __getattr__(name: str) -> _t.Any:
+ """Delegate anything not defined locally to the real `threading` module."""
+ return getattr(_threading, name)
diff --git a/lib/ansible/module_utils/_internal/_concurrent/_futures.py b/lib/ansible/module_utils/_internal/_concurrent/_futures.py
new file mode 100644
index 00000000000..2ca493f6873
--- /dev/null
+++ b/lib/ansible/module_utils/_internal/_concurrent/_futures.py
@@ -0,0 +1,21 @@
+"""Utilities for concurrent code execution using futures."""
+
+from __future__ import annotations
+
+import concurrent.futures
+import types
+
+from . import _daemon_threading
+
+
+class DaemonThreadPoolExecutor(concurrent.futures.ThreadPoolExecutor):
+ """ThreadPoolExecutor subclass that creates non-joinable daemon threads for non-blocking pool and process shutdown with abandoned threads."""
+
+ atc = concurrent.futures.ThreadPoolExecutor._adjust_thread_count
+
+ # clone the base class `_adjust_thread_count` method with a copy of its globals dict
+ _adjust_thread_count = types.FunctionType(atc.__code__, atc.__globals__.copy(), name=atc.__name__, argdefs=atc.__defaults__, closure=atc.__closure__)
+ # patch the method closure's `threading` module import to use our daemon-only thread factory instead
+ _adjust_thread_count.__globals__.update(threading=_daemon_threading)
+
+ del atc # don't expose this as a class attribute
diff --git a/lib/ansible/module_utils/api.py b/lib/ansible/module_utils/api.py
index 8f08772278e..2415c38a839 100644
--- a/lib/ansible/module_utils/api.py
+++ b/lib/ansible/module_utils/api.py
@@ -28,7 +28,7 @@ from __future__ import annotations
import copy
import functools
import itertools
-import random
+import secrets
import sys
import time
@@ -131,7 +131,7 @@ def generate_jittered_backoff(retries=10, delay_base=3, delay_threshold=60):
:param delay_threshold: The maximum time in seconds for any delay.
"""
for retry in range(0, retries):
- yield random.randint(0, min(delay_threshold, delay_base * 2 ** retry))
+ yield secrets.randbelow(min(delay_threshold, delay_base * 2 ** retry))
def retry_never(exception_or_result):
diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
index 6e0a53564fc..19dbb1d1541 100644
--- a/lib/ansible/module_utils/basic.py
+++ b/lib/ansible/module_utils/basic.py
@@ -9,7 +9,7 @@ import sys
# Used for determining if the system is running a new enough python version
# and should only restrict on our documented minimum versions
-_PY_MIN = (3, 7)
+_PY_MIN = (3, 8)
if sys.version_info < _PY_MIN:
print(json.dumps(dict(
@@ -120,12 +120,13 @@ from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.file import (
_PERM_BITS as PERM_BITS,
- _EXEC_PERM_BITS as EXEC_PERM_BITS,
_DEFAULT_PERM as DEFAULT_PERM,
is_executable,
format_attributes,
get_flags_from_attributes,
FILE_ATTRIBUTES,
+ S_IXANY,
+ S_IRWU_RWG_RWO,
)
from ansible.module_utils.common.sys_info import (
get_distribution,
@@ -457,7 +458,7 @@ class AnsibleModule(object):
self._selinux_mls_enabled = None
self._selinux_initial_context = None
- # finally, make sure we're in a sane working dir
+ # finally, make sure we're in a logical working dir
self._set_cwd()
@property
@@ -1021,7 +1022,7 @@ class AnsibleModule(object):
if prev_mode is None:
prev_mode = stat.S_IMODE(path_stat.st_mode)
is_directory = stat.S_ISDIR(path_stat.st_mode)
- has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
+ has_x_permissions = (prev_mode & S_IXANY) > 0
apply_X_permission = is_directory or has_x_permissions
# Get the umask, if the 'user' part is empty, the effect is as if (a) were
@@ -1201,6 +1202,7 @@ class AnsibleModule(object):
setattr(self, PASS_VARS[k][0], PASS_VARS[k][1])
def safe_eval(self, value, locals=None, include_exceptions=False):
+ # deprecated: description='no longer used in the codebase' core_version='2.21'
return safe_eval(value, locals, include_exceptions)
def _load_params(self):
@@ -1352,9 +1354,10 @@ class AnsibleModule(object):
Find system executable in PATH.
:param arg: The executable to find.
- :param required: if executable is not found and required is ``True``, fail_json
+ :param required: if the executable is not found and required is ``True``, fail_json
:param opt_dirs: optional list of directories to search in addition to ``PATH``
- :returns: if found return full path; otherwise return None
+ :returns: if found return full path; otherwise return original arg, unless 'warning' then return None
+    :raises: SystemExit: if arg is not found and required=True (via fail_json)
'''
bin_path = None
@@ -1363,8 +1366,6 @@ class AnsibleModule(object):
except ValueError as e:
if required:
self.fail_json(msg=to_text(e))
- else:
- return bin_path
return bin_path
@@ -1431,7 +1432,7 @@ class AnsibleModule(object):
kwargs['deprecations'] = deprecations
# preserve bools/none from no_log
- # TODO: once python version on target high enough, dict comprh
+ # TODO: once python version on target high enough, dict comprehensions
preserved = {}
for k, v in kwargs.items():
if v is None or isinstance(v, bool):
@@ -1556,7 +1557,7 @@ class AnsibleModule(object):
# Similar to shutil.copy(), but metadata is copied as well - in fact,
# this is just shutil.copy() followed by copystat(). This is similar
# to the Unix command cp -p.
- #
+
# shutil.copystat(src, dst)
# Copy the permission bits, last access time, last modification time,
# and flags from src to dst. The file contents, owner, and group are
@@ -1584,7 +1585,7 @@ class AnsibleModule(object):
current_attribs = current_attribs.get('attr_flags', '')
self.set_attributes_if_different(dest, current_attribs, True)
- def atomic_move(self, src, dest, unsafe_writes=False):
+ def atomic_move(self, src, dest, unsafe_writes=False, keep_dest_attrs=True):
'''atomically move src to dest, copying attributes from dest, returns true on success
it uses os.rename to ensure this as it is an atomic operation, rest of the function is
to work around limitations, corner cases and ensure selinux context is saved if possible'''
@@ -1592,24 +1593,12 @@ class AnsibleModule(object):
dest_stat = None
b_src = to_bytes(src, errors='surrogate_or_strict')
b_dest = to_bytes(dest, errors='surrogate_or_strict')
- if os.path.exists(b_dest):
+ if os.path.exists(b_dest) and keep_dest_attrs:
try:
dest_stat = os.stat(b_dest)
-
- # copy mode and ownership
- os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
-
- # try to copy flags if possible
- if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
- try:
- os.chflags(b_src, dest_stat.st_flags)
- except OSError as e:
- for err in 'EOPNOTSUPP', 'ENOTSUP':
- if hasattr(errno, err) and e.errno == getattr(errno, err):
- break
- else:
- raise
+ shutil.copystat(b_dest, b_src)
+ os.utime(b_src, times=(time.time(), time.time()))
except OSError as e:
if e.errno != errno.EPERM:
raise
@@ -1657,19 +1646,24 @@ class AnsibleModule(object):
os.close(tmp_dest_fd)
# leaves tmp file behind when sudo and not root
try:
- shutil.move(b_src, b_tmp_dest_name)
+ shutil.move(b_src, b_tmp_dest_name, copy_function=shutil.copy if keep_dest_attrs else shutil.copy2)
except OSError:
# cleanup will happen by 'rm' of tmpdir
# copy2 will preserve some metadata
- shutil.copy2(b_src, b_tmp_dest_name)
+ if keep_dest_attrs:
+ shutil.copy(b_src, b_tmp_dest_name)
+ else:
+ shutil.copy2(b_src, b_tmp_dest_name)
if self.selinux_enabled():
self.set_context_if_different(
b_tmp_dest_name, context, False)
try:
tmp_stat = os.stat(b_tmp_dest_name)
- if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
- os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
+ if keep_dest_attrs:
+ if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
+ os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
+ os.utime(b_tmp_dest_name, times=(time.time(), time.time()))
except OSError as e:
if e.errno != errno.EPERM:
raise
@@ -1694,9 +1688,13 @@ class AnsibleModule(object):
# based on the current value of umask
umask = os.umask(0)
os.umask(umask)
- os.chmod(b_dest, DEFAULT_PERM & ~umask)
+ os.chmod(b_dest, S_IRWU_RWG_RWO & ~umask)
+ dest_dir_stat = os.stat(os.path.dirname(b_dest))
try:
- os.chown(b_dest, os.geteuid(), os.getegid())
+ if dest_dir_stat.st_mode & stat.S_ISGID:
+ os.chown(b_dest, os.geteuid(), dest_dir_stat.st_gid)
+ else:
+ os.chown(b_dest, os.geteuid(), os.getegid())
except OSError:
# We're okay with trying our best here. If the user is not
# root (or old Unices) they won't be able to chown.
@@ -2063,7 +2061,7 @@ class AnsibleModule(object):
# not as exact as above, but should be good enough for most platforms that fail the previous call
buffer_size = select.PIPE_BUF
except Exception:
- buffer_size = 9000 # use sane default JIC
+ buffer_size = 9000 # use logical default JIC
return buffer_size
diff --git a/lib/ansible/module_utils/common/collections.py b/lib/ansible/module_utils/common/collections.py
index e4cb9ec1d70..28c53e14e2c 100644
--- a/lib/ansible/module_utils/common/collections.py
+++ b/lib/ansible/module_utils/common/collections.py
@@ -65,7 +65,7 @@ class ImmutableDict(Hashable, Mapping):
def is_string(seq):
- """Identify whether the input has a string-like type (inclding bytes)."""
+ """Identify whether the input has a string-like type (including bytes)."""
# AnsibleVaultEncryptedUnicode inherits from Sequence, but is expected to be a string like object
return isinstance(seq, (text_type, binary_type)) or getattr(seq, '__ENCRYPTED__', False)
diff --git a/lib/ansible/module_utils/common/file.py b/lib/ansible/module_utils/common/file.py
index fa05b62899c..1b976fd9329 100644
--- a/lib/ansible/module_utils/common/file.py
+++ b/lib/ansible/module_utils/common/file.py
@@ -7,12 +7,6 @@ import os
import stat
import re
-try:
- import selinux # pylint: disable=unused-import
- HAVE_SELINUX = True
-except ImportError:
- HAVE_SELINUX = False
-
FILE_ATTRIBUTES = {
'A': 'noatime',
@@ -44,9 +38,15 @@ USERS_RE = re.compile(r'[^ugo]')
PERMS_RE = re.compile(r'[^rwxXstugo]')
-_PERM_BITS = 0o7777 # file mode permission bits
-_EXEC_PERM_BITS = 0o0111 # execute permission bits
-_DEFAULT_PERM = 0o0666 # default file permission bits
+S_IRANY = 0o0444 # read by user, group, others
+S_IWANY = 0o0222 # write by user, group, others
+S_IXANY = 0o0111 # execute by user, group, others
+S_IRWU_RWG_RWO = S_IRANY | S_IWANY # read, write by user, group, others
+S_IRWU_RG_RO = S_IRANY | stat.S_IWUSR # read by user, group, others and write only by user
+S_IRWXU_RXG_RXO = S_IRANY | S_IXANY | stat.S_IWUSR # read, execute by user, group, others and write only by user
+_PERM_BITS = 0o7777 # file mode permission bits
+_EXEC_PERM_BITS = S_IXANY # execute permission bits
+_DEFAULT_PERM = S_IRWU_RWG_RWO # default file permission bits
def is_executable(path):
diff --git a/lib/ansible/module_utils/common/parameters.py b/lib/ansible/module_utils/common/parameters.py
index 9e74f59f26d..b9f5be43a70 100644
--- a/lib/ansible/module_utils/common/parameters.py
+++ b/lib/ansible/module_utils/common/parameters.py
@@ -347,7 +347,7 @@ def _list_no_log_values(argument_spec, params):
sub_param = check_type_dict(sub_param)
if not isinstance(sub_param, Mapping):
- raise TypeError("Value '{1}' in the sub parameter field '{0}' must by a {2}, "
+ raise TypeError("Value '{1}' in the sub parameter field '{0}' must be a {2}, "
"not '{1.__class__.__name__}'".format(arg_name, sub_param, wanted_type))
no_log_values.update(_list_no_log_values(sub_argument_spec, sub_param))
@@ -365,12 +365,10 @@ def _return_datastructure_name(obj):
return
elif isinstance(obj, Mapping):
for element in obj.items():
- for subelement in _return_datastructure_name(element[1]):
- yield subelement
+ yield from _return_datastructure_name(element[1])
elif is_iterable(obj):
for element in obj:
- for subelement in _return_datastructure_name(element):
- yield subelement
+ yield from _return_datastructure_name(element)
elif obj is None or isinstance(obj, bool):
# This must come before int because bools are also ints
return
@@ -665,7 +663,7 @@ def _validate_argument_values(argument_spec, parameters, options_context=None, e
diff_list = [item for item in parameters[param] if item not in choices]
if diff_list:
choices_str = ", ".join([to_native(c) for c in choices])
- diff_str = ", ".join(diff_list)
+ diff_str = ", ".join([to_native(c) for c in diff_list])
msg = "value of %s must be one or more of: %s. Got no match for: %s" % (param, choices_str, diff_str)
if options_context:
msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
diff --git a/lib/ansible/module_utils/common/process.py b/lib/ansible/module_utils/common/process.py
index 8e62c5f5d6e..85ffd2195e7 100644
--- a/lib/ansible/module_utils/common/process.py
+++ b/lib/ansible/module_utils/common/process.py
@@ -12,13 +12,18 @@ from ansible.module_utils.common.warnings import deprecate
def get_bin_path(arg, opt_dirs=None, required=None):
'''
Find system executable in PATH. Raises ValueError if the executable is not found.
- Optional arguments:
- - required: [Deprecated] Before 2.10, if executable is not found and required is true it raises an Exception.
- In 2.10 and later, an Exception is always raised. This parameter will be removed in 2.21.
- - opt_dirs: optional list of directories to search in addition to PATH
+
+ :param arg: the executable to find
+ :type arg: string
+ :param opt_dirs: optional list of directories to search in addition to PATH
+ :type opt_dirs: list of strings
+ :param required: DEPRECATED. This parameter will be removed in 2.21
+ :type required: boolean
+ :returns: path to arg (should be abs path unless PATH or opt_dirs are relative paths)
+ :raises: ValueError: if arg is not found
+
In addition to PATH and opt_dirs, this function also looks through /sbin, /usr/sbin and /usr/local/sbin. A lot of
modules, especially for gathering facts, depend on this behaviour.
- If found return full path, otherwise raise ValueError.
'''
if required is not None:
deprecate(
@@ -27,26 +32,34 @@ def get_bin_path(arg, opt_dirs=None, required=None):
collection_name="ansible.builtin",
)
+ paths = []
+ sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
opt_dirs = [] if opt_dirs is None else opt_dirs
- sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
- paths = []
+ # Construct possible paths with precedence
+ # passed in paths
for d in opt_dirs:
if d is not None and os.path.exists(d):
paths.append(d)
+ # system configured paths
paths += os.environ.get('PATH', '').split(os.pathsep)
- bin_path = None
- # mangle PATH to include /sbin dirs
+
+ # existing /sbin dirs, if not there already
for p in sbin_paths:
if p not in paths and os.path.exists(p):
paths.append(p)
+
+ # Search for binary
+ bin_path = None
for d in paths:
if not d:
continue
path = os.path.join(d, arg)
if os.path.exists(path) and not os.path.isdir(path) and is_executable(path):
+ # first found wins
bin_path = path
break
+
if bin_path is None:
raise ValueError('Failed to find required executable "%s" in paths: %s' % (arg, os.pathsep.join(paths)))
diff --git a/lib/ansible/module_utils/common/sentinel.py b/lib/ansible/module_utils/common/sentinel.py
new file mode 100644
index 00000000000..0fdbf4ce318
--- /dev/null
+++ b/lib/ansible/module_utils/common/sentinel.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import annotations
+
+
+class Sentinel:
+ """
+ Object which can be used to mark an entry as being special
+
+ A sentinel value demarcates a value or marks an entry as having a special meaning. In C, the
+ Null byte is used as a sentinel for the end of a string. In Python, None is often used as
+ a Sentinel in optional parameters to mean that the parameter was not set by the user.
+
+ You should use None as a Sentinel value in any Python code where None is not a valid entry. If
+ None is a valid entry, though, then you need to create a different value, which is the purpose
+ of this class.
+
+ Example of using Sentinel as a default parameter value::
+
+ def confirm_big_red_button(tristate=Sentinel):
+ if tristate is Sentinel:
+ print('You must explicitly press the big red button to blow up the base')
+ elif tristate is True:
+ print('Countdown to destruction activated')
+ elif tristate is False:
+ print('Countdown stopped')
+ elif tristate is None:
+ print('Waiting for more input')
+
+ Example of using Sentinel to tell whether a dict which has a default value has been changed::
+
+ values = {'one': Sentinel, 'two': Sentinel}
+ defaults = {'one': 1, 'two': 2}
+
+ # [.. Other code which does things including setting a new value for 'one' ..]
+ values['one'] = None
+ # [..]
+
+ print('You made changes to:')
+ for key, value in values.items():
+ if value is Sentinel:
+ continue
+ print('%s: %s' % (key, value))
+ """
+
+ def __new__(cls):
+ """
+ Return the cls itself. This makes both equality and identity True for comparing the class
+ to an instance of the class, preventing common usage errors.
+
+ Preferred usage::
+
+ a = Sentinel
+ if a is Sentinel:
+ print('Sentinel value')
+
+ However, these are True as well, eliminating common usage errors::
+
+ if Sentinel is Sentinel():
+ print('Sentinel value')
+
+ if Sentinel == Sentinel():
+ print('Sentinel value')
+ """
+ return cls
diff --git a/lib/ansible/module_utils/common/text/formatters.py b/lib/ansible/module_utils/common/text/formatters.py
index 3096abec7c7..d548085c57f 100644
--- a/lib/ansible/module_utils/common/text/formatters.py
+++ b/lib/ansible/module_utils/common/text/formatters.py
@@ -20,6 +20,18 @@ SIZE_RANGES = {
'B': 1,
}
+VALID_UNITS = {
+ 'B': (('byte', 'B'), ('bit', 'b')),
+ 'K': (('kilobyte', 'KB'), ('kilobit', 'Kb')),
+ 'M': (('megabyte', 'MB'), ('megabit', 'Mb')),
+ 'G': (('gigabyte', 'GB'), ('gigabit', 'Gb')),
+ 'T': (('terabyte', 'TB'), ('terabit', 'Tb')),
+ 'P': (('petabyte', 'PB'), ('petabit', 'Pb')),
+ 'E': (('exabyte', 'EB'), ('exabit', 'Eb')),
+ 'Z': (('zettabyte', 'ZB'), ('zettabit', 'Zb')),
+ 'Y': (('yottabyte', 'YB'), ('yottabit', 'Yb')),
+}
+
def lenient_lowercase(lst):
"""Lowercase elements of a list.
@@ -53,7 +65,8 @@ def human_to_bytes(number, default_unit=None, isbits=False):
The function expects 'b' (lowercase) as a bit identifier, e.g. 'Mb'/'Kb'/etc.
if 'MB'/'KB'/... is passed, the ValueError will be rased.
"""
- m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE)
+ m = re.search(r'^([0-9]*\.?[0-9]+)(?:\s*([A-Za-z]+))?\s*$', str(number))
+
if m is None:
raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
try:
@@ -86,10 +99,13 @@ def human_to_bytes(number, default_unit=None, isbits=False):
expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
if range_key == 'B':
expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
-
- if unit_class_name in unit.lower():
+ unit_group = VALID_UNITS.get(range_key, None)
+ if unit_group is None:
+ raise ValueError(f"human_to_bytes() can't interpret a valid unit for {range_key}")
+ isbits_flag = 1 if isbits else 0
+ if unit.lower() == unit_group[isbits_flag][0]:
pass
- elif unit[1] != unit_class:
+ elif unit != unit_group[isbits_flag][1]:
raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))
return int(round(num * limit))
diff --git a/lib/ansible/module_utils/common/validation.py b/lib/ansible/module_utils/common/validation.py
index 69721e47f18..399767e775d 100644
--- a/lib/ansible/module_utils/common/validation.py
+++ b/lib/ansible/module_utils/common/validation.py
@@ -4,6 +4,7 @@
from __future__ import annotations
+import decimal
import json
import os
import re
@@ -13,10 +14,10 @@ from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.collections import is_iterable
from ansible.module_utils.common.text.converters import jsonify
from ansible.module_utils.common.text.formatters import human_to_bytes
+from ansible.module_utils.common.warnings import deprecate
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import (
binary_type,
- integer_types,
string_types,
text_type,
)
@@ -39,6 +40,10 @@ def count_terms(terms, parameters):
def safe_eval(value, locals=None, include_exceptions=False):
+ deprecate(
+ "The safe_eval function should not be used.",
+ version="2.21",
+ )
# do not allow method calls to modules
if not isinstance(value, string_types):
# already templated to a datavaluestructure, perhaps?
@@ -415,7 +420,7 @@ def check_type_dict(value):
Raises :class:`TypeError` if unable to convert to a dict
- :arg value: Dict or string to convert to a dict. Accepts ``k1=v2, k2=v2``.
+ :arg value: Dict or string to convert to a dict. Accepts ``k1=v1, k2=v2`` or ``k1=v1 k2=v2``.
:returns: value converted to a dictionary
"""
@@ -427,10 +432,14 @@ def check_type_dict(value):
try:
return json.loads(value)
except Exception:
- (result, exc) = safe_eval(value, dict(), include_exceptions=True)
- if exc is not None:
- raise TypeError('unable to evaluate string as dictionary')
- return result
+ try:
+ result = literal_eval(value)
+ except Exception:
+ pass
+ else:
+ if isinstance(result, dict):
+ return result
+ raise TypeError('unable to evaluate string as dictionary')
elif '=' in value:
fields = []
field_buffer = []
@@ -457,7 +466,11 @@ def check_type_dict(value):
field = ''.join(field_buffer)
if field:
fields.append(field)
- return dict(x.split("=", 1) for x in fields)
+ try:
+ return dict(x.split("=", 1) for x in fields)
+ except ValueError:
+ # no "=" to split on: "k1=v1, k2"
+ raise TypeError('unable to evaluate string in the "key=value" format as dictionary')
else:
raise TypeError("dictionary requested, could not parse JSON or key=value")
@@ -493,16 +506,15 @@ def check_type_int(value):
:return: int of given value
"""
- if isinstance(value, integer_types):
- return value
-
- if isinstance(value, string_types):
+ if not isinstance(value, int):
try:
- return int(value)
- except ValueError:
- pass
-
- raise TypeError('%s cannot be converted to an int' % type(value))
+ if (decimal_value := decimal.Decimal(value)) != (int_value := int(decimal_value)):
+ raise ValueError("Significant decimal part found")
+ else:
+ value = int_value
+ except (decimal.DecimalException, TypeError, ValueError) as e:
+ raise TypeError(f'"{value!r}" cannot be converted to an int') from e
+ return value
def check_type_float(value):
@@ -514,16 +526,12 @@ def check_type_float(value):
:returns: float of given value.
"""
- if isinstance(value, float):
- return value
-
- if isinstance(value, (binary_type, text_type, int)):
+ if not isinstance(value, float):
try:
- return float(value)
- except ValueError:
- pass
-
- raise TypeError('%s cannot be converted to a float' % type(value))
+ value = float(value)
+ except (TypeError, ValueError) as e:
+ raise TypeError(f'{type(value)} cannot be converted to a float')
+ return value
def check_type_path(value,):
diff --git a/lib/ansible/module_utils/compat/paramiko.py b/lib/ansible/module_utils/compat/paramiko.py
index 8c84261cef8..302309cdaa8 100644
--- a/lib/ansible/module_utils/compat/paramiko.py
+++ b/lib/ansible/module_utils/compat/paramiko.py
@@ -11,7 +11,12 @@ PARAMIKO_IMPORT_ERR = None
try:
with warnings.catch_warnings():
- warnings.filterwarnings('ignore', message='Blowfish has been deprecated', category=UserWarning)
+ # Blowfish has been moved, but the deprecated import is used by paramiko versions older than 2.9.5.
+ # See: https://github.com/paramiko/paramiko/pull/2039
+ warnings.filterwarnings('ignore', message='Blowfish has been ', category=UserWarning)
+ # TripleDES has been moved, but the deprecated import is used by paramiko versions older than 3.3.2 and 3.4.1.
+ # See: https://github.com/paramiko/paramiko/pull/2421
+ warnings.filterwarnings('ignore', message='TripleDES has been ', category=UserWarning)
import paramiko # pylint: disable=unused-import
# paramiko and gssapi are incompatible and raise AttributeError not ImportError
# When running in FIPS mode, cryptography raises InternalError
diff --git a/lib/ansible/module_utils/compat/selinux.py b/lib/ansible/module_utils/compat/selinux.py
index 0900388b761..a7a19cfd63f 100644
--- a/lib/ansible/module_utils/compat/selinux.py
+++ b/lib/ansible/module_utils/compat/selinux.py
@@ -11,8 +11,8 @@ from ctypes import CDLL, c_char_p, c_int, byref, POINTER, get_errno
try:
_selinux_lib = CDLL('libselinux.so.1', use_errno=True)
-except OSError:
- raise ImportError('unable to load libselinux.so')
+except OSError as ex:
+ raise ImportError('unable to load libselinux.so') from ex
def _module_setup():
diff --git a/lib/ansible/module_utils/connection.py b/lib/ansible/module_utils/connection.py
index cc889696e70..b6720125855 100644
--- a/lib/ansible/module_utils/connection.py
+++ b/lib/ansible/module_utils/connection.py
@@ -29,8 +29,8 @@
from __future__ import annotations
import os
-import hashlib
import json
+import pickle
import socket
import struct
import traceback
@@ -40,30 +40,14 @@ from functools import partial
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.module_utils.common.json import AnsibleJSONEncoder
from ansible.module_utils.six import iteritems
-from ansible.module_utils.six.moves import cPickle
-def write_to_file_descriptor(fd, obj):
- """Handles making sure all data is properly written to file descriptor fd.
+def write_to_stream(stream, obj):
+ """Write a length+newline-prefixed pickled object to a stream."""
+ src = pickle.dumps(obj)
- In particular, that data is encoded in a character stream-friendly way and
- that all data gets written before returning.
- """
- # Need to force a protocol that is compatible with both py2 and py3.
- # That would be protocol=2 or less.
- # Also need to force a protocol that excludes certain control chars as
- # stdin in this case is a pty and control chars will cause problems.
- # that means only protocol=0 will work.
- src = cPickle.dumps(obj, protocol=0)
-
- # raw \r characters will not survive pty round-trip
- # They should be rehydrated on the receiving end
- src = src.replace(b'\r', br'\r')
- data_hash = to_bytes(hashlib.sha1(src).hexdigest())
-
- os.write(fd, b'%d\n' % len(src))
- os.write(fd, src)
- os.write(fd, b'%s\n' % data_hash)
+ stream.write(b'%d\n' % len(src))
+ stream.write(src)
def send_data(s, data):
@@ -146,7 +130,7 @@ class Connection(object):
data = json.dumps(req, cls=AnsibleJSONEncoder, vault_to_text=True)
except TypeError as exc:
raise ConnectionError(
- "Failed to encode some variables as JSON for communication with ansible-connection. "
+ "Failed to encode some variables as JSON for communication with the persistent connection helper. "
"The original exception was: %s" % to_text(exc)
)
@@ -176,7 +160,7 @@ class Connection(object):
if response['id'] != reqid:
raise ConnectionError('invalid json-rpc id received')
if "result_type" in response:
- response["result"] = cPickle.loads(to_bytes(response["result"]))
+ response["result"] = pickle.loads(to_bytes(response["result"], errors="surrogateescape"))
return response
diff --git a/lib/ansible/module_utils/csharp/Ansible.Basic.cs b/lib/ansible/module_utils/csharp/Ansible.Basic.cs
index a042af8cecc..085958270d7 100644
--- a/lib/ansible/module_utils/csharp/Ansible.Basic.cs
+++ b/lib/ansible/module_utils/csharp/Ansible.Basic.cs
@@ -1025,7 +1025,16 @@ namespace Ansible.Basic
foreach (DictionaryEntry entry in param)
{
string paramKey = (string)entry.Key;
- if (!legalInputs.Contains(paramKey, StringComparer.OrdinalIgnoreCase))
+ if (paramKey == "_ansible_exec_wrapper_warnings")
+ {
+ // Special key used in module_powershell_wrapper to pass
+ // along any warnings that should be returned back to
+ // Ansible.
+ removedParameters.Add(paramKey);
+ foreach (string warning in (IList)entry.Value)
+ Warn(warning);
+ }
+ else if (!legalInputs.Contains(paramKey, StringComparer.OrdinalIgnoreCase))
unsupportedParameters.Add(paramKey);
else if (!legalInputs.Contains(paramKey))
// For backwards compatibility we do not care about the case but we need to warn the users as this will
diff --git a/lib/ansible/module_utils/csharp/Ansible.Become.cs b/lib/ansible/module_utils/csharp/Ansible.Become.cs
index d3bb1564fa6..68d4d11d7a5 100644
--- a/lib/ansible/module_utils/csharp/Ansible.Become.cs
+++ b/lib/ansible/module_utils/csharp/Ansible.Become.cs
@@ -333,13 +333,12 @@ namespace Ansible.Become
// Grant access to the current Windows Station and Desktop to the become user
GrantAccessToWindowStationAndDesktop(account);
- // Try and impersonate a SYSTEM token, we need a SYSTEM token to either become a well known service
- // account or have administrative rights on the become access token.
- // If we ultimately are becoming the SYSTEM account we want the token with the most privileges available.
- // https://github.com/ansible/ansible/issues/71453
- bool mostPrivileges = becomeSid == "S-1-5-18";
+ // Try and impersonate a SYSTEM token. We need the SeTcbPrivilege for
+ // - LogonUser for a service SID
+ // - S4U logon
+ // - Token elevation
systemToken = GetPrimaryTokenForUser(new SecurityIdentifier("S-1-5-18"),
- new List() { "SeTcbPrivilege" }, mostPrivileges);
+ new List() { "SeTcbPrivilege" });
if (systemToken != null)
{
try
@@ -357,11 +356,9 @@ namespace Ansible.Become
try
{
- if (becomeSid == "S-1-5-18")
- userTokens.Add(systemToken);
// Cannot use String.IsEmptyOrNull() as an empty string is an account that doesn't have a pass.
// We only use S4U if no password was defined or it was null
- else if (!SERVICE_SIDS.Contains(becomeSid) && password == null && logonType != LogonType.NewCredentials)
+ if (!SERVICE_SIDS.Contains(becomeSid) && password == null && logonType != LogonType.NewCredentials)
{
// If no password was specified, try and duplicate an existing token for that user or use S4U to
// generate one without network credentials
@@ -384,6 +381,11 @@ namespace Ansible.Become
string domain = null;
switch (becomeSid)
{
+ case "S-1-5-18":
+ logonType = LogonType.Service;
+ domain = "NT AUTHORITY";
+ username = "SYSTEM";
+ break;
case "S-1-5-19":
logonType = LogonType.Service;
domain = "NT AUTHORITY";
@@ -426,7 +428,7 @@ namespace Ansible.Become
}
private static SafeNativeHandle GetPrimaryTokenForUser(SecurityIdentifier sid,
- List requiredPrivileges = null, bool mostPrivileges = false)
+ List requiredPrivileges = null)
{
// According to CreateProcessWithTokenW we require a token with
// TOKEN_QUERY, TOKEN_DUPLICATE and TOKEN_ASSIGN_PRIMARY
@@ -436,9 +438,6 @@ namespace Ansible.Become
TokenAccessLevels.AssignPrimary |
TokenAccessLevels.Impersonate;
- SafeNativeHandle userToken = null;
- int privilegeCount = 0;
-
foreach (SafeNativeHandle hToken in TokenUtil.EnumerateUserTokens(sid, dwAccess))
{
// Filter out any Network logon tokens, using become with that is useless when S4U
@@ -449,10 +448,6 @@ namespace Ansible.Become
List actualPrivileges = TokenUtil.GetTokenPrivileges(hToken).Select(x => x.Name).ToList();
- // If the token has less or the same number of privileges than the current token, skip it.
- if (mostPrivileges && privilegeCount >= actualPrivileges.Count)
- continue;
-
// Check that the required privileges are on the token
if (requiredPrivileges != null)
{
@@ -464,22 +459,16 @@ namespace Ansible.Become
// Duplicate the token to convert it to a primary token with the access level required.
try
{
- userToken = TokenUtil.DuplicateToken(hToken, TokenAccessLevels.MaximumAllowed,
+ return TokenUtil.DuplicateToken(hToken, TokenAccessLevels.MaximumAllowed,
SecurityImpersonationLevel.Anonymous, TokenType.Primary);
- privilegeCount = actualPrivileges.Count;
}
catch (Process.Win32Exception)
{
continue;
}
-
- // If we don't care about getting the token with the most privileges, escape the loop as we already
- // have a token.
- if (!mostPrivileges)
- break;
}
- return userToken;
+ return null;
}
private static SafeNativeHandle GetS4UTokenForUser(SecurityIdentifier sid, LogonType logonType)
diff --git a/lib/ansible/module_utils/csharp/Ansible.Process.cs b/lib/ansible/module_utils/csharp/Ansible.Process.cs
index fc156b7a20e..a351dcd0493 100644
--- a/lib/ansible/module_utils/csharp/Ansible.Process.cs
+++ b/lib/ansible/module_utils/csharp/Ansible.Process.cs
@@ -397,7 +397,7 @@ namespace Ansible.Process
internal static Result WaitProcess(SafeFileHandle stdoutRead, SafeFileHandle stdoutWrite, SafeFileHandle stderrRead,
SafeFileHandle stderrWrite, FileStream stdinStream, byte[] stdin, IntPtr hProcess, string outputEncoding = null)
{
- // Default to using UTF-8 as the output encoding, this should be a sane default for most scenarios.
+ // Default to using UTF-8 as the output encoding, this should be a logical default for most scenarios.
outputEncoding = String.IsNullOrEmpty(outputEncoding) ? "utf-8" : outputEncoding;
Encoding encodingInstance = Encoding.GetEncoding(outputEncoding);
diff --git a/lib/ansible/module_utils/distro/__init__.py b/lib/ansible/module_utils/distro/__init__.py
index a8c29a6ce0a..8af439005fc 100644
--- a/lib/ansible/module_utils/distro/__init__.py
+++ b/lib/ansible/module_utils/distro/__init__.py
@@ -22,7 +22,7 @@ Compat distro library.
from __future__ import annotations
# The following makes it easier for us to script updates of the bundled code
-_BUNDLED_METADATA = {"pypi_name": "distro", "version": "1.6.0"}
+_BUNDLED_METADATA = {"pypi_name": "distro", "version": "1.9.0"}
# The following additional changes have been made:
# * Remove optparse since it is not needed for our use.
diff --git a/lib/ansible/module_utils/distro/_distro.py b/lib/ansible/module_utils/distro/_distro.py
index 74c0b99408a..a67edae735c 100644
--- a/lib/ansible/module_utils/distro/_distro.py
+++ b/lib/ansible/module_utils/distro/_distro.py
@@ -1,4 +1,4 @@
-# Copyright 2015,2016,2017 Nir Cohen
+# Copyright 2015-2021 Nir Cohen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -41,40 +41,39 @@ import shlex
import subprocess
import sys
import warnings
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Iterable,
+ Optional,
+ Sequence,
+ TextIO,
+ Tuple,
+ Type,
+)
-__version__ = "1.6.0"
-
-# Use `if False` to avoid an ImportError on Python 2. After dropping Python 2
-# support, can use typing.TYPE_CHECKING instead. See:
-# https://docs.python.org/3/library/typing.html#typing.TYPE_CHECKING
-if False: # pragma: nocover
- from typing import (
- Any,
- Callable,
- Dict,
- Iterable,
- Optional,
- Sequence,
- TextIO,
- Tuple,
- Type,
- TypedDict,
- Union,
- )
+try:
+ from typing import TypedDict
+except ImportError:
+ # Python 3.7
+ TypedDict = dict
- VersionDict = TypedDict(
- "VersionDict", {"major": str, "minor": str, "build_number": str}
- )
- InfoDict = TypedDict(
- "InfoDict",
- {
- "id": str,
- "version": str,
- "version_parts": VersionDict,
- "like": str,
- "codename": str,
- },
- )
+__version__ = "1.9.0"
+
+
+class VersionDict(TypedDict):
+ major: str
+ minor: str
+ build_number: str
+
+
+class InfoDict(TypedDict):
+ id: str
+ version: str
+ version_parts: VersionDict
+ like: str
+ codename: str
_UNIXCONFDIR = os.environ.get("UNIXCONFDIR", "/etc")
@@ -127,6 +126,27 @@ _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
# Pattern for base file name of distro release file
_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$")
+# Base file names to be looked up for if _UNIXCONFDIR is not readable.
+_DISTRO_RELEASE_BASENAMES = [
+ "SuSE-release",
+ "altlinux-release",
+ "arch-release",
+ "base-release",
+ "centos-release",
+ "fedora-release",
+ "gentoo-release",
+ "mageia-release",
+ "mandrake-release",
+ "mandriva-release",
+ "mandrivalinux-release",
+ "manjaro-release",
+ "oracle-release",
+ "redhat-release",
+ "rocky-release",
+ "sl-release",
+ "slackware-version",
+]
+
# Base file names to be ignored when searching for distro release file
_DISTRO_RELEASE_IGNORE_BASENAMES = (
"debian_version",
@@ -136,11 +156,12 @@ _DISTRO_RELEASE_IGNORE_BASENAMES = (
"system-release",
"plesk-release",
"iredmail-release",
+ "board-release",
+ "ec2_version",
)
-def linux_distribution(full_distribution_name=True):
- # type: (bool) -> Tuple[str, str, str]
+def linux_distribution(full_distribution_name: bool = True) -> Tuple[str, str, str]:
"""
.. deprecated:: 1.6.0
@@ -183,8 +204,7 @@ def linux_distribution(full_distribution_name=True):
return _distro.linux_distribution(full_distribution_name)
-def id():
- # type: () -> str
+def id() -> str:
"""
Return the distro ID of the current distribution, as a
machine-readable string.
@@ -228,7 +248,9 @@ def id():
"freebsd" FreeBSD
"midnightbsd" MidnightBSD
"rocky" Rocky Linux
+ "aix" AIX
"guix" Guix System
+ "altlinux" ALT Linux
============== =========================================
If you have a need to get distros for reliable IDs added into this set,
@@ -266,8 +288,7 @@ def id():
return _distro.id()
-def name(pretty=False):
- # type: (bool) -> str
+def name(pretty: bool = False) -> str:
"""
Return the name of the current OS distribution, as a human-readable
string.
@@ -306,8 +327,7 @@ def name(pretty=False):
return _distro.name(pretty)
-def version(pretty=False, best=False):
- # type: (bool, bool) -> str
+def version(pretty: bool = False, best: bool = False) -> str:
"""
Return the version of the current OS distribution, as a human-readable
string.
@@ -355,8 +375,7 @@ def version(pretty=False, best=False):
return _distro.version(pretty, best)
-def version_parts(best=False):
- # type: (bool) -> Tuple[str, str, str]
+def version_parts(best: bool = False) -> Tuple[str, str, str]:
"""
Return the version of the current OS distribution as a tuple
``(major, minor, build_number)`` with items as follows:
@@ -373,8 +392,7 @@ def version_parts(best=False):
return _distro.version_parts(best)
-def major_version(best=False):
- # type: (bool) -> str
+def major_version(best: bool = False) -> str:
"""
Return the major version of the current OS distribution, as a string,
if provided.
@@ -387,8 +405,7 @@ def major_version(best=False):
return _distro.major_version(best)
-def minor_version(best=False):
- # type: (bool) -> str
+def minor_version(best: bool = False) -> str:
"""
Return the minor version of the current OS distribution, as a string,
if provided.
@@ -401,8 +418,7 @@ def minor_version(best=False):
return _distro.minor_version(best)
-def build_number(best=False):
- # type: (bool) -> str
+def build_number(best: bool = False) -> str:
"""
Return the build number of the current OS distribution, as a string,
if provided.
@@ -415,8 +431,7 @@ def build_number(best=False):
return _distro.build_number(best)
-def like():
- # type: () -> str
+def like() -> str:
"""
Return a space-separated list of distro IDs of distributions that are
closely related to the current OS distribution in regards to packaging
@@ -433,8 +448,7 @@ def like():
return _distro.like()
-def codename():
- # type: () -> str
+def codename() -> str:
"""
Return the codename for the release of the current OS distribution,
as a string.
@@ -458,8 +472,7 @@ def codename():
return _distro.codename()
-def info(pretty=False, best=False):
- # type: (bool, bool) -> InfoDict
+def info(pretty: bool = False, best: bool = False) -> InfoDict:
"""
Return certain machine-readable information items about the current OS
distribution in a dictionary, as shown in the following example:
@@ -503,8 +516,7 @@ def info(pretty=False, best=False):
return _distro.info(pretty, best)
-def os_release_info():
- # type: () -> Dict[str, str]
+def os_release_info() -> Dict[str, str]:
"""
Return a dictionary containing key-value pairs for the information items
from the os-release file data source of the current OS distribution.
@@ -514,8 +526,7 @@ def os_release_info():
return _distro.os_release_info()
-def lsb_release_info():
- # type: () -> Dict[str, str]
+def lsb_release_info() -> Dict[str, str]:
"""
Return a dictionary containing key-value pairs for the information items
from the lsb_release command data source of the current OS distribution.
@@ -526,8 +537,7 @@ def lsb_release_info():
return _distro.lsb_release_info()
-def distro_release_info():
- # type: () -> Dict[str, str]
+def distro_release_info() -> Dict[str, str]:
"""
Return a dictionary containing key-value pairs for the information items
from the distro release file data source of the current OS distribution.
@@ -537,8 +547,7 @@ def distro_release_info():
return _distro.distro_release_info()
-def uname_info():
- # type: () -> Dict[str, str]
+def uname_info() -> Dict[str, str]:
"""
Return a dictionary containing key-value pairs for the information items
from the distro release file data source of the current OS distribution.
@@ -546,8 +555,7 @@ def uname_info():
return _distro.uname_info()
-def os_release_attr(attribute):
- # type: (str) -> str
+def os_release_attr(attribute: str) -> str:
"""
Return a single named information item from the os-release file data source
of the current OS distribution.
@@ -566,8 +574,7 @@ def os_release_attr(attribute):
return _distro.os_release_attr(attribute)
-def lsb_release_attr(attribute):
- # type: (str) -> str
+def lsb_release_attr(attribute: str) -> str:
"""
Return a single named information item from the lsb_release command output
data source of the current OS distribution.
@@ -587,8 +594,7 @@ def lsb_release_attr(attribute):
return _distro.lsb_release_attr(attribute)
-def distro_release_attr(attribute):
- # type: (str) -> str
+def distro_release_attr(attribute: str) -> str:
"""
Return a single named information item from the distro release file
data source of the current OS distribution.
@@ -607,8 +613,7 @@ def distro_release_attr(attribute):
return _distro.distro_release_attr(attribute)
-def uname_attr(attribute):
- # type: (str) -> str
+def uname_attr(attribute: str) -> str:
"""
Return a single named information item from the distro release file
data source of the current OS distribution.
@@ -629,25 +634,23 @@ try:
from functools import cached_property
except ImportError:
# Python < 3.8
- class cached_property(object): # type: ignore
+ class cached_property: # type: ignore
"""A version of @property which caches the value. On access, it calls the
underlying function and sets the value in `__dict__` so future accesses
will not re-call the property.
"""
- def __init__(self, f):
- # type: (Callable[[Any], Any]) -> None
+ def __init__(self, f: Callable[[Any], Any]) -> None:
self._fname = f.__name__
self._f = f
- def __get__(self, obj, owner):
- # type: (Any, Type[Any]) -> Any
- assert obj is not None, "call {} on an instance".format(self._fname)
+ def __get__(self, obj: Any, owner: Type[Any]) -> Any:
+ assert obj is not None, f"call {self._fname} on an instance"
ret = obj.__dict__[self._fname] = self._f(obj)
return ret
-class LinuxDistribution(object):
+class LinuxDistribution:
"""
Provides information about a OS distribution.
@@ -667,13 +670,13 @@ class LinuxDistribution(object):
def __init__(
self,
- include_lsb=True,
- os_release_file="",
- distro_release_file="",
- include_uname=True,
- root_dir=None,
- ):
- # type: (bool, str, str, bool, Optional[str]) -> None
+ include_lsb: Optional[bool] = None,
+ os_release_file: str = "",
+ distro_release_file: str = "",
+ include_uname: Optional[bool] = None,
+ root_dir: Optional[str] = None,
+ include_oslevel: Optional[bool] = None,
+ ) -> None:
"""
The initialization method of this class gathers information from the
available data sources, and stores that in private instance attributes.
@@ -713,7 +716,13 @@ class LinuxDistribution(object):
be empty.
* ``root_dir`` (string): The absolute path to the root directory to use
- to find distro-related information files.
+ to find distro-related information files. Note that ``include_*``
+ parameters must not be enabled in combination with ``root_dir``.
+
+ * ``include_oslevel`` (bool): Controls whether (AIX) oslevel command
+ output is included as a data source. If the oslevel command is not
+ available in the program execution path the data source will be
+ empty.
Public instance attributes:
@@ -732,9 +741,20 @@ class LinuxDistribution(object):
parameter. This controls whether the uname information will
be loaded.
+ * ``include_oslevel`` (bool): The result of the ``include_oslevel``
+ parameter. This controls whether (AIX) oslevel information will be
+ loaded.
+
+ * ``root_dir`` (string): The result of the ``root_dir`` parameter.
+ The absolute path to the root directory to use to find distro-related
+ information files.
+
Raises:
- * :py:exc:`IOError`: Some I/O issue with an os-release file or distro
+ * :py:exc:`ValueError`: Initialization parameters combination is not
+ supported.
+
+ * :py:exc:`OSError`: Some I/O issue with an os-release file or distro
release file.
* :py:exc:`UnicodeError`: A data source has unexpected characters or
@@ -764,11 +784,24 @@ class LinuxDistribution(object):
self.os_release_file = usr_lib_os_release_file
self.distro_release_file = distro_release_file or "" # updated later
- self.include_lsb = include_lsb
- self.include_uname = include_uname
- def __repr__(self):
- # type: () -> str
+ is_root_dir_defined = root_dir is not None
+ if is_root_dir_defined and (include_lsb or include_uname or include_oslevel):
+ raise ValueError(
+ "Including subprocess data sources from specific root_dir is disallowed"
+ " to prevent false information"
+ )
+ self.include_lsb = (
+ include_lsb if include_lsb is not None else not is_root_dir_defined
+ )
+ self.include_uname = (
+ include_uname if include_uname is not None else not is_root_dir_defined
+ )
+ self.include_oslevel = (
+ include_oslevel if include_oslevel is not None else not is_root_dir_defined
+ )
+
+ def __repr__(self) -> str:
"""Return repr of all info"""
return (
"LinuxDistribution("
@@ -776,14 +809,18 @@ class LinuxDistribution(object):
"distro_release_file={self.distro_release_file!r}, "
"include_lsb={self.include_lsb!r}, "
"include_uname={self.include_uname!r}, "
+ "include_oslevel={self.include_oslevel!r}, "
+ "root_dir={self.root_dir!r}, "
"_os_release_info={self._os_release_info!r}, "
"_lsb_release_info={self._lsb_release_info!r}, "
"_distro_release_info={self._distro_release_info!r}, "
- "_uname_info={self._uname_info!r})".format(self=self)
+ "_uname_info={self._uname_info!r}, "
+ "_oslevel_info={self._oslevel_info!r})".format(self=self)
)
- def linux_distribution(self, full_distribution_name=True):
- # type: (bool) -> Tuple[str, str, str]
+ def linux_distribution(
+ self, full_distribution_name: bool = True
+ ) -> Tuple[str, str, str]:
"""
Return information about the OS distribution that is compatible
with Python's :func:`platform.linux_distribution`, supporting a subset
@@ -797,15 +834,13 @@ class LinuxDistribution(object):
self._os_release_info.get("release_codename") or self.codename(),
)
- def id(self):
- # type: () -> str
+ def id(self) -> str:
"""Return the distro ID of the OS distribution, as a string.
For details, see :func:`distro.id`.
"""
- def normalize(distro_id, table):
- # type: (str, Dict[str, str]) -> str
+ def normalize(distro_id: str, table: Dict[str, str]) -> str:
distro_id = distro_id.lower().replace(" ", "_")
return table.get(distro_id, distro_id)
@@ -827,8 +862,7 @@ class LinuxDistribution(object):
return ""
- def name(self, pretty=False):
- # type: (bool) -> str
+ def name(self, pretty: bool = False) -> str:
"""
Return the name of the OS distribution, as a string.
@@ -848,11 +882,10 @@ class LinuxDistribution(object):
name = self.distro_release_attr("name") or self.uname_attr("name")
version = self.version(pretty=True)
if version:
- name = name + " " + version
+ name = f"{name} {version}"
return name or ""
- def version(self, pretty=False, best=False):
- # type: (bool, bool) -> str
+ def version(self, pretty: bool = False, best: bool = False) -> str:
"""
Return the version of the OS distribution, as a string.
@@ -870,7 +903,10 @@ class LinuxDistribution(object):
).get("version_id", ""),
self.uname_attr("release"),
]
- if self.id() == "debian" or "debian" in self.like().split():
+ if self.uname_attr("id").startswith("aix"):
+ # On AIX platforms, prefer oslevel command output.
+ versions.insert(0, self.oslevel_info())
+ elif self.id() == "debian" or "debian" in self.like().split():
# On Debian-like, add debian_version file content to candidates list.
versions.append(self._debian_version)
version = ""
@@ -888,11 +924,10 @@ class LinuxDistribution(object):
version = v
break
if pretty and version and self.codename():
- version = "{0} ({1})".format(version, self.codename())
+ version = f"{version} ({self.codename()})"
return version
- def version_parts(self, best=False):
- # type: (bool) -> Tuple[str, str, str]
+ def version_parts(self, best: bool = False) -> Tuple[str, str, str]:
"""
Return the version of the OS distribution, as a tuple of version
numbers.
@@ -908,8 +943,7 @@ class LinuxDistribution(object):
return major, minor or "", build_number or ""
return "", "", ""
- def major_version(self, best=False):
- # type: (bool) -> str
+ def major_version(self, best: bool = False) -> str:
"""
Return the major version number of the current distribution.
@@ -917,8 +951,7 @@ class LinuxDistribution(object):
"""
return self.version_parts(best)[0]
- def minor_version(self, best=False):
- # type: (bool) -> str
+ def minor_version(self, best: bool = False) -> str:
"""
Return the minor version number of the current distribution.
@@ -926,8 +959,7 @@ class LinuxDistribution(object):
"""
return self.version_parts(best)[1]
- def build_number(self, best=False):
- # type: (bool) -> str
+ def build_number(self, best: bool = False) -> str:
"""
Return the build number of the current distribution.
@@ -935,8 +967,7 @@ class LinuxDistribution(object):
"""
return self.version_parts(best)[2]
- def like(self):
- # type: () -> str
+ def like(self) -> str:
"""
Return the IDs of distributions that are like the OS distribution.
@@ -944,8 +975,7 @@ class LinuxDistribution(object):
"""
return self.os_release_attr("id_like") or ""
- def codename(self):
- # type: () -> str
+ def codename(self) -> str:
"""
Return the codename of the OS distribution.
@@ -962,18 +992,17 @@ class LinuxDistribution(object):
or ""
)
- def info(self, pretty=False, best=False):
- # type: (bool, bool) -> InfoDict
+ def info(self, pretty: bool = False, best: bool = False) -> InfoDict:
"""
Return certain machine-readable information about the OS
distribution.
For details, see :func:`distro.info`.
"""
- return dict(
+ return InfoDict(
id=self.id(),
version=self.version(pretty, best),
- version_parts=dict(
+ version_parts=VersionDict(
major=self.major_version(best),
minor=self.minor_version(best),
build_number=self.build_number(best),
@@ -982,8 +1011,7 @@ class LinuxDistribution(object):
codename=self.codename(),
)
- def os_release_info(self):
- # type: () -> Dict[str, str]
+ def os_release_info(self) -> Dict[str, str]:
"""
Return a dictionary containing key-value pairs for the information
items from the os-release file data source of the OS distribution.
@@ -992,8 +1020,7 @@ class LinuxDistribution(object):
"""
return self._os_release_info
- def lsb_release_info(self):
- # type: () -> Dict[str, str]
+ def lsb_release_info(self) -> Dict[str, str]:
"""
Return a dictionary containing key-value pairs for the information
items from the lsb_release command data source of the OS
@@ -1003,8 +1030,7 @@ class LinuxDistribution(object):
"""
return self._lsb_release_info
- def distro_release_info(self):
- # type: () -> Dict[str, str]
+ def distro_release_info(self) -> Dict[str, str]:
"""
Return a dictionary containing key-value pairs for the information
items from the distro release file data source of the OS
@@ -1014,8 +1040,7 @@ class LinuxDistribution(object):
"""
return self._distro_release_info
- def uname_info(self):
- # type: () -> Dict[str, str]
+ def uname_info(self) -> Dict[str, str]:
"""
Return a dictionary containing key-value pairs for the information
items from the uname command data source of the OS distribution.
@@ -1024,8 +1049,13 @@ class LinuxDistribution(object):
"""
return self._uname_info
- def os_release_attr(self, attribute):
- # type: (str) -> str
+ def oslevel_info(self) -> str:
+ """
+ Return AIX' oslevel command output.
+ """
+ return self._oslevel_info
+
+ def os_release_attr(self, attribute: str) -> str:
"""
Return a single named information item from the os-release file data
source of the OS distribution.
@@ -1034,8 +1064,7 @@ class LinuxDistribution(object):
"""
return self._os_release_info.get(attribute, "")
- def lsb_release_attr(self, attribute):
- # type: (str) -> str
+ def lsb_release_attr(self, attribute: str) -> str:
"""
Return a single named information item from the lsb_release command
output data source of the OS distribution.
@@ -1044,8 +1073,7 @@ class LinuxDistribution(object):
"""
return self._lsb_release_info.get(attribute, "")
- def distro_release_attr(self, attribute):
- # type: (str) -> str
+ def distro_release_attr(self, attribute: str) -> str:
"""
Return a single named information item from the distro release file
data source of the OS distribution.
@@ -1054,8 +1082,7 @@ class LinuxDistribution(object):
"""
return self._distro_release_info.get(attribute, "")
- def uname_attr(self, attribute):
- # type: (str) -> str
+ def uname_attr(self, attribute: str) -> str:
"""
Return a single named information item from the uname command
output data source of the OS distribution.
@@ -1065,8 +1092,7 @@ class LinuxDistribution(object):
return self._uname_info.get(attribute, "")
@cached_property
- def _os_release_info(self):
- # type: () -> Dict[str, str]
+ def _os_release_info(self) -> Dict[str, str]:
"""
Get the information items from the specified os-release file.
@@ -1074,13 +1100,12 @@ class LinuxDistribution(object):
A dictionary containing all information items.
"""
if os.path.isfile(self.os_release_file):
- with open(self.os_release_file) as release_file:
+ with open(self.os_release_file, encoding="utf-8") as release_file:
return self._parse_os_release_content(release_file)
return {}
@staticmethod
- def _parse_os_release_content(lines):
- # type: (TextIO) -> Dict[str, str]
+ def _parse_os_release_content(lines: TextIO) -> Dict[str, str]:
"""
Parse the lines of an os-release file.
@@ -1097,16 +1122,6 @@ class LinuxDistribution(object):
lexer = shlex.shlex(lines, posix=True)
lexer.whitespace_split = True
- # The shlex module defines its `wordchars` variable using literals,
- # making it dependent on the encoding of the Python source file.
- # In Python 2.6 and 2.7, the shlex source file is encoded in
- # 'iso-8859-1', and the `wordchars` variable is defined as a byte
- # string. This causes a UnicodeDecodeError to be raised when the
- # parsed content is a unicode object. The following fix resolves that
- # (... but it should be fixed in shlex...):
- if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes):
- lexer.wordchars = lexer.wordchars.decode("iso-8859-1")
-
tokens = list(lexer)
for token in tokens:
# At this point, all shell-like parsing has been done (i.e.
@@ -1140,8 +1155,7 @@ class LinuxDistribution(object):
return props
@cached_property
- def _lsb_release_info(self):
- # type: () -> Dict[str, str]
+ def _lsb_release_info(self) -> Dict[str, str]:
"""
Get the information items from the lsb_release command output.
@@ -1150,19 +1164,17 @@ class LinuxDistribution(object):
"""
if not self.include_lsb:
return {}
- with open(os.devnull, "wb") as devnull:
- try:
- cmd = ("lsb_release", "-a")
- stdout = subprocess.check_output(cmd, stderr=devnull)
- # Command not found or lsb_release returned error
- except (OSError, subprocess.CalledProcessError):
- return {}
+ try:
+ cmd = ("lsb_release", "-a")
+ stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
+ # Command not found or lsb_release returned error
+ except (OSError, subprocess.CalledProcessError):
+ return {}
content = self._to_str(stdout).splitlines()
return self._parse_lsb_release_content(content)
@staticmethod
- def _parse_lsb_release_content(lines):
- # type: (Iterable[str]) -> Dict[str, str]
+ def _parse_lsb_release_content(lines: Iterable[str]) -> Dict[str, str]:
"""
Parse the output of the lsb_release command.
@@ -1186,31 +1198,39 @@ class LinuxDistribution(object):
return props
@cached_property
- def _uname_info(self):
- # type: () -> Dict[str, str]
+ def _uname_info(self) -> Dict[str, str]:
if not self.include_uname:
return {}
- with open(os.devnull, "wb") as devnull:
- try:
- cmd = ("uname", "-rs")
- stdout = subprocess.check_output(cmd, stderr=devnull)
- except OSError:
- return {}
+ try:
+ cmd = ("uname", "-rs")
+ stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
+ except OSError:
+ return {}
content = self._to_str(stdout).splitlines()
return self._parse_uname_content(content)
@cached_property
- def _debian_version(self):
- # type: () -> str
+ def _oslevel_info(self) -> str:
+ if not self.include_oslevel:
+ return ""
+ try:
+ stdout = subprocess.check_output("oslevel", stderr=subprocess.DEVNULL)
+ except (OSError, subprocess.CalledProcessError):
+ return ""
+ return self._to_str(stdout).strip()
+
+ @cached_property
+ def _debian_version(self) -> str:
try:
- with open(os.path.join(self.etc_dir, "debian_version")) as fp:
+ with open(
+ os.path.join(self.etc_dir, "debian_version"), encoding="ascii"
+ ) as fp:
return fp.readline().rstrip()
- except (OSError, IOError):
+ except FileNotFoundError:
return ""
@staticmethod
- def _parse_uname_content(lines):
- # type: (Sequence[str]) -> Dict[str, str]
+ def _parse_uname_content(lines: Sequence[str]) -> Dict[str, str]:
if not lines:
return {}
props = {}
@@ -1229,23 +1249,12 @@ class LinuxDistribution(object):
return props
@staticmethod
- def _to_str(text):
- # type: (Union[bytes, str]) -> str
+ def _to_str(bytestring: bytes) -> str:
encoding = sys.getfilesystemencoding()
- encoding = "utf-8" if encoding == "ascii" else encoding
-
- if sys.version_info[0] >= 3:
- if isinstance(text, bytes):
- return text.decode(encoding)
- else:
- if isinstance(text, unicode): # noqa
- return text.encode(encoding)
-
- return text
+ return bytestring.decode(encoding)
@cached_property
- def _distro_release_info(self):
- # type: () -> Dict[str, str]
+ def _distro_release_info(self) -> Dict[str, str]:
"""
Get the information items from the specified distro release file.
@@ -1262,14 +1271,14 @@ class LinuxDistribution(object):
# file), because we want to use what was specified as best as
# possible.
match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
- if "name" in distro_info and "cloudlinux" in distro_info["name"].lower():
- distro_info["id"] = "cloudlinux"
- elif match:
- distro_info["id"] = match.group(1)
- return distro_info
else:
try:
- basenames = os.listdir(self.etc_dir)
+ basenames = [
+ basename
+ for basename in os.listdir(self.etc_dir)
+ if basename not in _DISTRO_RELEASE_IGNORE_BASENAMES
+ and os.path.isfile(os.path.join(self.etc_dir, basename))
+ ]
# We sort for repeatability in cases where there are multiple
# distro specific files; e.g. CentOS, Oracle, Enterprise all
# containing `redhat-release` on top of their own.
@@ -1279,42 +1288,31 @@ class LinuxDistribution(object):
# sure about the *-release files. Check common entries of
# /etc for information. If they turn out to not be there the
# error is handled in `_parse_distro_release_file()`.
- basenames = [
- "SuSE-release",
- "arch-release",
- "base-release",
- "centos-release",
- "fedora-release",
- "gentoo-release",
- "mageia-release",
- "mandrake-release",
- "mandriva-release",
- "mandrivalinux-release",
- "manjaro-release",
- "oracle-release",
- "redhat-release",
- "rocky-release",
- "sl-release",
- "slackware-version",
- ]
+ basenames = _DISTRO_RELEASE_BASENAMES
for basename in basenames:
- if basename in _DISTRO_RELEASE_IGNORE_BASENAMES:
- continue
match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
- if match:
- filepath = os.path.join(self.etc_dir, basename)
- distro_info = self._parse_distro_release_file(filepath)
- if "name" in distro_info:
- # The name is always present if the pattern matches
- self.distro_release_file = filepath
- distro_info["id"] = match.group(1)
- if "cloudlinux" in distro_info["name"].lower():
- distro_info["id"] = "cloudlinux"
- return distro_info
- return {}
+ if match is None:
+ continue
+ filepath = os.path.join(self.etc_dir, basename)
+ distro_info = self._parse_distro_release_file(filepath)
+ # The name is always present if the pattern matches.
+ if "name" not in distro_info:
+ continue
+ self.distro_release_file = filepath
+ break
+ else: # the loop didn't "break": no candidate.
+ return {}
+
+ if match is not None:
+ distro_info["id"] = match.group(1)
+
+ # CloudLinux < 7: manually enrich info with proper id.
+ if "cloudlinux" in distro_info.get("name", "").lower():
+ distro_info["id"] = "cloudlinux"
+
+ return distro_info
- def _parse_distro_release_file(self, filepath):
- # type: (str) -> Dict[str, str]
+ def _parse_distro_release_file(self, filepath: str) -> Dict[str, str]:
"""
Parse a distro release file.
@@ -1326,19 +1324,18 @@ class LinuxDistribution(object):
A dictionary containing all information items.
"""
try:
- with open(filepath) as fp:
+ with open(filepath, encoding="utf-8") as fp:
# Only parse the first line. For instance, on SLES there
# are multiple lines. We don't want them...
return self._parse_distro_release_content(fp.readline())
- except (OSError, IOError):
+ except OSError:
# Ignore not being able to read a specific, seemingly version
# related file.
# See https://github.com/python-distro/distro/issues/162
return {}
@staticmethod
- def _parse_distro_release_content(line):
- # type: (str) -> Dict[str, str]
+ def _parse_distro_release_content(line: str) -> Dict[str, str]:
"""
Parse a line from a distro release file.
@@ -1366,8 +1363,7 @@ class LinuxDistribution(object):
_distro = LinuxDistribution()
-def main():
- # type: () -> None
+def main() -> None:
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
@@ -1389,7 +1385,10 @@ def main():
if args.root_dir:
dist = LinuxDistribution(
- include_lsb=False, include_uname=False, root_dir=args.root_dir
+ include_lsb=False,
+ include_uname=False,
+ include_oslevel=False,
+ root_dir=args.root_dir,
)
else:
dist = _distro
diff --git a/lib/ansible/module_utils/facts/collector.py b/lib/ansible/module_utils/facts/collector.py
index 616188b3db9..0983df7aad4 100644
--- a/lib/ansible/module_utils/facts/collector.py
+++ b/lib/ansible/module_utils/facts/collector.py
@@ -90,6 +90,8 @@ class BaseFactCollector:
def _transform_dict_keys(self, fact_dict):
'''update a dicts keys to use new names as transformed by self._transform_name'''
+ if fact_dict is None:
+ return {}
for old_key in list(fact_dict.keys()):
new_key = self._transform_name(old_key)
# pop the item by old_key and replace it using new_key
diff --git a/lib/ansible/module_utils/facts/default_collectors.py b/lib/ansible/module_utils/facts/default_collectors.py
index 1dcbd7c52a1..af4391576c0 100644
--- a/lib/ansible/module_utils/facts/default_collectors.py
+++ b/lib/ansible/module_utils/facts/default_collectors.py
@@ -53,6 +53,7 @@ from ansible.module_utils.facts.system.python import PythonFactCollector
from ansible.module_utils.facts.system.selinux import SelinuxFactCollector
from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
from ansible.module_utils.facts.system.ssh_pub_keys import SshPubKeyFactCollector
+from ansible.module_utils.facts.system.systemd import SystemdFactCollector
from ansible.module_utils.facts.system.user import UserFactCollector
from ansible.module_utils.facts.hardware.base import HardwareCollector
@@ -118,7 +119,8 @@ _general = [
EnvFactCollector,
LoadAvgFactCollector,
SshPubKeyFactCollector,
- UserFactCollector
+ UserFactCollector,
+ SystemdFactCollector
] # type: t.List[t.Type[BaseFactCollector]]
# virtual, this might also limit hardware/networking
diff --git a/lib/ansible/module_utils/facts/hardware/aix.py b/lib/ansible/module_utils/facts/hardware/aix.py
index db34fe147a6..c2a074bf8ea 100644
--- a/lib/ansible/module_utils/facts/hardware/aix.py
+++ b/lib/ansible/module_utils/facts/hardware/aix.py
@@ -195,34 +195,35 @@ class AIXHardware(Hardware):
# AIX does not have mtab but mount command is only source of info (or to use
# api calls to get same info)
mount_path = self.module.get_bin_path('mount')
- rc, mount_out, err = self.module.run_command(mount_path)
- if mount_out:
- for line in mount_out.split('\n'):
- fields = line.split()
- if len(fields) != 0 and fields[0] != 'node' and fields[0][0] != '-' and re.match('^/.*|^[a-zA-Z].*|^[0-9].*', fields[0]):
- if re.match('^/', fields[0]):
- # normal mount
- mount = fields[1]
- mount_info = {'mount': mount,
- 'device': fields[0],
- 'fstype': fields[2],
- 'options': fields[6],
- 'time': '%s %s %s' % (fields[3], fields[4], fields[5])}
- mount_info.update(get_mount_size(mount))
- else:
- # nfs or cifs based mount
- # in case of nfs if no mount options are provided on command line
- # add into fields empty string...
- if len(fields) < 8:
- fields.append("")
-
- mount_info = {'mount': fields[2],
- 'device': '%s:%s' % (fields[0], fields[1]),
- 'fstype': fields[3],
- 'options': fields[7],
- 'time': '%s %s %s' % (fields[4], fields[5], fields[6])}
-
- mounts.append(mount_info)
+ if mount_path:
+ rc, mount_out, err = self.module.run_command(mount_path)
+ if mount_out:
+ for line in mount_out.split('\n'):
+ fields = line.split()
+ if len(fields) != 0 and fields[0] != 'node' and fields[0][0] != '-' and re.match('^/.*|^[a-zA-Z].*|^[0-9].*', fields[0]):
+ if re.match('^/', fields[0]):
+ # normal mount
+ mount = fields[1]
+ mount_info = {'mount': mount,
+ 'device': fields[0],
+ 'fstype': fields[2],
+ 'options': fields[6],
+ 'time': '%s %s %s' % (fields[3], fields[4], fields[5])}
+ mount_info.update(get_mount_size(mount))
+ else:
+ # nfs or cifs based mount
+ # in case of nfs if no mount options are provided on command line
+ # add into fields empty string...
+ if len(fields) < 8:
+ fields.append("")
+
+ mount_info = {'mount': fields[2],
+ 'device': '%s:%s' % (fields[0], fields[1]),
+ 'fstype': fields[3],
+ 'options': fields[7],
+ 'time': '%s %s %s' % (fields[4], fields[5], fields[6])}
+
+ mounts.append(mount_info)
mount_facts['mounts'] = mounts
@@ -232,30 +233,31 @@ class AIXHardware(Hardware):
device_facts = {}
device_facts['devices'] = {}
- lsdev_cmd = self.module.get_bin_path('lsdev', True)
- lsattr_cmd = self.module.get_bin_path('lsattr', True)
- rc, out_lsdev, err = self.module.run_command(lsdev_cmd)
-
- for line in out_lsdev.splitlines():
- field = line.split()
-
- device_attrs = {}
- device_name = field[0]
- device_state = field[1]
- device_type = field[2:]
- lsattr_cmd_args = [lsattr_cmd, '-E', '-l', device_name]
- rc, out_lsattr, err = self.module.run_command(lsattr_cmd_args)
- for attr in out_lsattr.splitlines():
- attr_fields = attr.split()
- attr_name = attr_fields[0]
- attr_parameter = attr_fields[1]
- device_attrs[attr_name] = attr_parameter
-
- device_facts['devices'][device_name] = {
- 'state': device_state,
- 'type': ' '.join(device_type),
- 'attributes': device_attrs
- }
+ lsdev_cmd = self.module.get_bin_path('lsdev')
+ lsattr_cmd = self.module.get_bin_path('lsattr')
+ if lsdev_cmd and lsattr_cmd:
+ rc, out_lsdev, err = self.module.run_command(lsdev_cmd)
+
+ for line in out_lsdev.splitlines():
+ field = line.split()
+
+ device_attrs = {}
+ device_name = field[0]
+ device_state = field[1]
+ device_type = field[2:]
+ lsattr_cmd_args = [lsattr_cmd, '-E', '-l', device_name]
+ rc, out_lsattr, err = self.module.run_command(lsattr_cmd_args)
+ for attr in out_lsattr.splitlines():
+ attr_fields = attr.split()
+ attr_name = attr_fields[0]
+ attr_parameter = attr_fields[1]
+ device_attrs[attr_name] = attr_parameter
+
+ device_facts['devices'][device_name] = {
+ 'state': device_state,
+ 'type': ' '.join(device_type),
+ 'attributes': device_attrs
+ }
return device_facts
diff --git a/lib/ansible/module_utils/facts/hardware/darwin.py b/lib/ansible/module_utils/facts/hardware/darwin.py
index 74e4ce4e2af..ac159d5fd2b 100644
--- a/lib/ansible/module_utils/facts/hardware/darwin.py
+++ b/lib/ansible/module_utils/facts/hardware/darwin.py
@@ -19,7 +19,6 @@ from __future__ import annotations
import struct
import time
-from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts.sysctl import get_sysctl
@@ -41,7 +40,7 @@ class DarwinHardware(Hardware):
def populate(self, collected_facts=None):
hardware_facts = {}
- self.sysctl = get_sysctl(self.module, ['hw', 'machdep', 'kern'])
+ self.sysctl = get_sysctl(self.module, ['hw', 'machdep', 'kern', 'hw.model'])
mac_facts = self.get_mac_facts()
cpu_facts = self.get_cpu_facts()
memory_facts = self.get_memory_facts()
@@ -67,9 +66,8 @@ class DarwinHardware(Hardware):
def get_mac_facts(self):
mac_facts = {}
- rc, out, err = self.module.run_command("sysctl hw.model")
- if rc == 0:
- mac_facts['model'] = mac_facts['product_name'] = out.splitlines()[-1].split()[1]
+ if 'hw.model' in self.sysctl:
+ mac_facts['model'] = mac_facts['product_name'] = self.sysctl['hw.model']
mac_facts['osversion'] = self.sysctl['kern.osversion']
mac_facts['osrevision'] = self.sysctl['kern.osrevision']
@@ -96,44 +94,49 @@ class DarwinHardware(Hardware):
total_used = 0
page_size = 4096
- try:
- vm_stat_command = get_bin_path('vm_stat')
- except ValueError:
+
+ vm_stat_command = self.module.get_bin_path('vm_stat')
+ if vm_stat_command is None:
return memory_facts
- rc, out, err = self.module.run_command(vm_stat_command)
- if rc == 0:
- # Free = Total - (Wired + active + inactive)
- # Get a generator of tuples from the command output so we can later
- # turn it into a dictionary
- memory_stats = (line.rstrip('.').split(':', 1) for line in out.splitlines())
-
- # Strip extra left spaces from the value
- memory_stats = dict((k, v.lstrip()) for k, v in memory_stats)
-
- for k, v in memory_stats.items():
- try:
- memory_stats[k] = int(v)
- except ValueError:
- # Most values convert cleanly to integer values but if the field does
- # not convert to an integer, just leave it alone.
- pass
-
- if memory_stats.get('Pages wired down'):
- total_used += memory_stats['Pages wired down'] * page_size
- if memory_stats.get('Pages active'):
- total_used += memory_stats['Pages active'] * page_size
- if memory_stats.get('Pages inactive'):
- total_used += memory_stats['Pages inactive'] * page_size
-
- memory_facts['memfree_mb'] = memory_facts['memtotal_mb'] - (total_used // 1024 // 1024)
+ if vm_stat_command:
+ rc, out, err = self.module.run_command(vm_stat_command)
+ if rc == 0:
+ # Free = Total - (Wired + active + inactive)
+ # Get a generator of tuples from the command output so we can later
+ # turn it into a dictionary
+ memory_stats = (line.rstrip('.').split(':', 1) for line in out.splitlines())
+
+ # Strip extra left spaces from the value
+ memory_stats = dict((k, v.lstrip()) for k, v in memory_stats)
+
+ for k, v in memory_stats.items():
+ try:
+ memory_stats[k] = int(v)
+ except ValueError:
+ # Most values convert cleanly to integer values but if the field does
+ # not convert to an integer, just leave it alone.
+ pass
+
+ if memory_stats.get('Pages wired down'):
+ total_used += memory_stats['Pages wired down'] * page_size
+ if memory_stats.get('Pages active'):
+ total_used += memory_stats['Pages active'] * page_size
+ if memory_stats.get('Pages inactive'):
+ total_used += memory_stats['Pages inactive'] * page_size
+
+ memory_facts['memfree_mb'] = memory_facts['memtotal_mb'] - (total_used // 1024 // 1024)
return memory_facts
def get_uptime_facts(self):
+
# On Darwin, the default format is annoying to parse.
# Use -b to get the raw value and decode it.
sysctl_cmd = self.module.get_bin_path('sysctl')
+ if not sysctl_cmd:
+ return {}
+
cmd = [sysctl_cmd, '-b', 'kern.boottime']
# We need to get raw bytes, not UTF-8.
diff --git a/lib/ansible/module_utils/facts/hardware/freebsd.py b/lib/ansible/module_utils/facts/hardware/freebsd.py
index e44da3aaacc..c7f6c6c48b6 100644
--- a/lib/ansible/module_utils/facts/hardware/freebsd.py
+++ b/lib/ansible/module_utils/facts/hardware/freebsd.py
@@ -23,7 +23,6 @@ import time
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts.timeout import TimeoutError, timeout
-
from ansible.module_utils.facts.utils import get_file_content, get_mount_size
@@ -173,13 +172,50 @@ class FreeBSDHardware(Hardware):
sysdir = '/dev'
device_facts['devices'] = {}
- drives = re.compile(r'(ada?\d+|da\d+|a?cd\d+)') # TODO: rc, disks, err = self.module.run_command("/sbin/sysctl kern.disks")
- slices = re.compile(r'(ada?\d+s\d+\w*|da\d+s\d+\w*)')
+ # TODO: rc, disks, err = self.module.run_command("/sbin/sysctl kern.disks")
+ drives = re.compile(
+ r"""(?x)(
+ (?:
+ ada? # ATA/SATA disk device
+ |da # SCSI disk device
+ |a?cd # SCSI CDROM drive
+ |amrd # AMI MegaRAID drive
+ |idad # Compaq RAID array
+ |ipsd # IBM ServeRAID RAID array
+ |md # md(4) disk device
+ |mfid # LSI MegaRAID SAS array
+ |mlxd # Mylex RAID disk
+ |twed # 3ware ATA RAID array
+ |vtbd # VirtIO Block Device
+ )\d+
+ )
+ """
+ )
+
+ slices = re.compile(
+ r"""(?x)(
+ (?:
+ ada? # ATA/SATA disk device
+ |a?cd # SCSI CDROM drive
+ |amrd # AMI MegaRAID drive
+ |da # SCSI disk device
+ |idad # Compaq RAID array
+ |ipsd # IBM ServeRAID RAID array
+ |md # md(4) disk device
+ |mfid # LSI MegaRAID SAS array
+ |mlxd # Mylex RAID disk
+ |twed # 3ware ATA RAID array
+ |vtbd # VirtIO Block Device
+ )\d+[ps]\d+\w*
+ )
+ """
+ )
+
if os.path.isdir(sysdir):
dirlist = sorted(os.listdir(sysdir))
for device in dirlist:
d = drives.match(device)
- if d:
+ if d and d.group(1) not in device_facts['devices']:
device_facts['devices'][d.group(1)] = []
s = slices.match(device)
if s:
@@ -216,18 +252,22 @@ class FreeBSDHardware(Hardware):
'product_version': 'system-version',
'system_vendor': 'system-manufacturer',
}
+ if dmi_bin is None:
+ dmi_facts = dict.fromkeys(
+ DMI_DICT.keys(),
+ 'NA'
+ )
+ return dmi_facts
+
for (k, v) in DMI_DICT.items():
- if dmi_bin is not None:
- (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
- if rc == 0:
- # Strip out commented lines (specific dmidecode output)
- # FIXME: why add the fact and then test if it is json?
- dmi_facts[k] = ''.join([line for line in out.splitlines() if not line.startswith('#')])
- try:
- json.dumps(dmi_facts[k])
- except UnicodeDecodeError:
- dmi_facts[k] = 'NA'
- else:
+ (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
+ if rc == 0:
+ # Strip out commented lines (specific dmidecode output)
+ # FIXME: why add the fact and then test if it is json?
+ dmi_facts[k] = ''.join([line for line in out.splitlines() if not line.startswith('#')])
+ try:
+ json.dumps(dmi_facts[k])
+ except UnicodeDecodeError:
dmi_facts[k] = 'NA'
else:
dmi_facts[k] = 'NA'
diff --git a/lib/ansible/module_utils/facts/hardware/hpux.py b/lib/ansible/module_utils/facts/hardware/hpux.py
index abb9dada663..efb63a98c2e 100644
--- a/lib/ansible/module_utils/facts/hardware/hpux.py
+++ b/lib/ansible/module_utils/facts/hardware/hpux.py
@@ -40,6 +40,9 @@ class HPUXHardware(Hardware):
def populate(self, collected_facts=None):
hardware_facts = {}
+ # TODO: very inefficient calls to machinfo,
+ # should just make one and then deal with finding the data (see facts/sysctl)
+ # but not going to change unless there is hp/ux for testing
cpu_facts = self.get_cpu_facts(collected_facts=collected_facts)
memory_facts = self.get_memory_facts()
hw_facts = self.get_hw_facts()
diff --git a/lib/ansible/module_utils/facts/hardware/linux.py b/lib/ansible/module_utils/facts/hardware/linux.py
index 605dbe6add7..293c75a2509 100644
--- a/lib/ansible/module_utils/facts/hardware/linux.py
+++ b/lib/ansible/module_utils/facts/hardware/linux.py
@@ -24,12 +24,9 @@ import re
import sys
import time
-from multiprocessing import cpu_count
-from multiprocessing.pool import ThreadPool
-
-from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils._internal._concurrent import _futures
from ansible.module_utils.common.locale import get_best_parsable_locale
-from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.common.text.formatters import bytes_to_human
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts.utils import get_file_content, get_file_lines, get_mount_size
@@ -91,6 +88,7 @@ class LinuxHardware(Hardware):
cpu_facts = self.get_cpu_facts(collected_facts=collected_facts)
memory_facts = self.get_memory_facts()
dmi_facts = self.get_dmi_facts()
+ sysinfo_facts = self.get_sysinfo_facts()
device_facts = self.get_device_facts()
uptime_facts = self.get_uptime_facts()
lvm_facts = self.get_lvm_facts()
@@ -104,6 +102,7 @@ class LinuxHardware(Hardware):
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
hardware_facts.update(dmi_facts)
+ hardware_facts.update(sysinfo_facts)
hardware_facts.update(device_facts)
hardware_facts.update(uptime_facts)
hardware_facts.update(lvm_facts)
@@ -208,6 +207,9 @@ class LinuxHardware(Hardware):
if 'vme' not in val:
xen_paravirt = True
+ if key == "flags":
+ cpu_facts['flags'] = val.split()
+
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
if key in ['model name', 'Processor', 'vendor_id', 'cpu', 'Vendor', 'processor']:
@@ -300,12 +302,9 @@ class LinuxHardware(Hardware):
)
except AttributeError:
# In Python < 3.3, os.sched_getaffinity() is not available
- try:
- cmd = get_bin_path('nproc')
- except ValueError:
- pass
- else:
- rc, out, _err = self.module.run_command(cmd)
+ nproc_cmd = self.module.get_bin_path('nproc')
+ if nproc_cmd is not None:
+ rc, out, _err = self.module.run_command(nproc_cmd)
if rc == 0:
cpu_facts['processor_nproc'] = int(out)
@@ -370,7 +369,6 @@ class LinuxHardware(Hardware):
else:
# Fall back to using dmidecode, if available
- dmi_bin = self.module.get_bin_path('dmidecode')
DMI_DICT = {
'bios_date': 'bios-release-date',
'bios_vendor': 'bios-vendor',
@@ -391,25 +389,54 @@ class LinuxHardware(Hardware):
'product_version': 'system-version',
'system_vendor': 'system-manufacturer',
}
+ dmi_bin = self.module.get_bin_path('dmidecode')
+ if dmi_bin is None:
+ dmi_facts = dict.fromkeys(
+ DMI_DICT.keys(),
+ 'NA'
+ )
+ return dmi_facts
+
for (k, v) in DMI_DICT.items():
- if dmi_bin is not None:
- (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
- if rc == 0:
- # Strip out commented lines (specific dmidecode output)
- thisvalue = ''.join([line for line in out.splitlines() if not line.startswith('#')])
- try:
- json.dumps(thisvalue)
- except UnicodeDecodeError:
- thisvalue = "NA"
+ (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
+ if rc == 0:
+ # Strip out commented lines (specific dmidecode output)
+ thisvalue = ''.join([line for line in out.splitlines() if not line.startswith('#')])
+ try:
+ json.dumps(thisvalue)
+ except UnicodeDecodeError:
+ thisvalue = "NA"
- dmi_facts[k] = thisvalue
- else:
- dmi_facts[k] = 'NA'
+ dmi_facts[k] = thisvalue
else:
dmi_facts[k] = 'NA'
return dmi_facts
+ def get_sysinfo_facts(self):
+ """Fetch /proc/sysinfo facts from s390 Linux on IBM Z"""
+ if not os.path.exists('/proc/sysinfo'):
+ return {}
+
+ sysinfo_facts = dict.fromkeys(
+ ('system_vendor', 'product_version', 'product_serial', 'product_name', 'product_uuid'),
+ 'NA'
+ )
+ sysinfo_re = re.compile(
+ r'''
+ ^
+ (?:Manufacturer:\s+(?P<system_vendor>.+))|
+ (?:Type:\s+(?P<product_name>.+))|
+ (?:Sequence\ Code:\s+0+(?P<product_serial>.+))
+ $
+ ''',
+ re.VERBOSE | re.MULTILINE
+ )
+ data = get_file_content('/proc/sysinfo')
+ for match in sysinfo_re.finditer(data):
+ sysinfo_facts.update({k: v for k, v in match.groupdict().items() if v is not None})
+ return sysinfo_facts
+
def _run_lsblk(self, lsblk_path):
# call lsblk and collect all uuids
# --exclude 2 makes lsblk ignore floppy disks, which are slower to answer than typical timeouts
@@ -548,7 +575,7 @@ class LinuxHardware(Hardware):
# start threads to query each mount
results = {}
- pool = ThreadPool(processes=min(len(mtab_entries), cpu_count()))
+ executor = _futures.DaemonThreadPoolExecutor()
maxtime = timeout.GATHER_TIMEOUT or timeout.DEFAULT_GATHER_TIMEOUT
for fields in mtab_entries:
# Transform octal escape sequences
@@ -572,30 +599,29 @@ class LinuxHardware(Hardware):
if not self.MTAB_BIND_MOUNT_RE.match(options):
mount_info['options'] += ",bind"
- results[mount] = {'info': mount_info,
- 'extra': pool.apply_async(self.get_mount_info, (mount, device, uuids)),
- 'timelimit': time.time() + maxtime}
+ results[mount] = {'info': mount_info, 'timelimit': time.monotonic() + maxtime}
+ results[mount]['extra'] = executor.submit(self.get_mount_info, mount, device, uuids)
- pool.close() # done with new workers, start gc
+ # done with spawning new workers, start gc
+ executor.shutdown()
- # wait for workers and get results
- while results:
+ while results: # wait for workers and get results
for mount in list(results):
done = False
res = results[mount]['extra']
try:
- if res.ready():
+ if res.done():
done = True
- if res.successful():
- mount_size, uuid = res.get()
+ if res.exception() is None:
+ mount_size, uuid = res.result()
if mount_size:
results[mount]['info'].update(mount_size)
results[mount]['info']['uuid'] = uuid or 'N/A'
else:
# failed, try to find out why, if 'res.successful' we know there are no exceptions
- results[mount]['info']['note'] = 'Could not get extra information: %s.' % (to_text(res.get()))
+ results[mount]['info']['note'] = f'Could not get extra information: {res.exception()}'
- elif time.time() > results[mount]['timelimit']:
+ elif time.monotonic() > results[mount]['timelimit']:
done = True
self.module.warn("Timeout exceeded when getting mount info for %s" % mount)
results[mount]['info']['note'] = 'Could not get extra information due to timeout'
@@ -744,10 +770,24 @@ class LinuxHardware(Hardware):
if serial:
d['serial'] = serial
- for key, test in [('removable', '/removable'),
- ('support_discard', '/queue/discard_granularity'),
- ]:
- d[key] = get_file_content(sysdir + test)
+ d['removable'] = get_file_content(sysdir + '/removable')
+
+ # Historically, `support_discard` simply returned the value of
+ # `/sys/block/{device}/queue/discard_granularity`. When its value
+ # is `0`, then the block device doesn't support discards;
+ # _however_, it being greater than zero doesn't necessarily mean
+ # that the block device _does_ support discards.
+ #
+ # Another indication that a block device doesn't support discards
+ # is `/sys/block/{device}/queue/discard_max_hw_bytes` being equal
+ # to `0` (with the same caveat as above). So if either of those are
+ # `0`, set `support_discard` to zero, otherwise set it to the value
+ # of `discard_granularity` for backwards compatibility.
+ d['support_discard'] = (
+ '0'
+ if get_file_content(sysdir + '/queue/discard_max_hw_bytes') == '0'
+ else get_file_content(sysdir + '/queue/discard_granularity')
+ )
if diskname in devs_wwn:
d['wwn'] = devs_wwn[diskname]
@@ -765,12 +805,12 @@ class LinuxHardware(Hardware):
part['links'][link_type] = link_values.get(partname, [])
part['start'] = get_file_content(part_sysdir + "/start", 0)
- part['sectors'] = get_file_content(part_sysdir + "/size", 0)
-
part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size")
if not part['sectorsize']:
part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size", 512)
- part['size'] = bytes_to_human((float(part['sectors']) * 512.0))
+ # sysfs sectorcount assumes 512 blocksize. Convert using the correct sectorsize
+ part['sectors'] = int(get_file_content(part_sysdir + "/size", 0)) * 512 // int(part['sectorsize'])
+ part['size'] = bytes_to_human(float(part['sectors']) * float(part['sectorsize']))
part['uuid'] = get_partition_uuid(partname)
self.get_holders(part, part_sysdir)
@@ -784,13 +824,14 @@ class LinuxHardware(Hardware):
if m:
d['scheduler_mode'] = m.group(2)
- d['sectors'] = get_file_content(sysdir + "/size")
- if not d['sectors']:
- d['sectors'] = 0
d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size")
if not d['sectorsize']:
d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size", 512)
- d['size'] = bytes_to_human(float(d['sectors']) * 512.0)
+ # sysfs sectorcount assumes 512 blocksize. Convert using the correct sectorsize
+ d['sectors'] = int(get_file_content(sysdir + "/size")) * 512 // int(d['sectorsize'])
+ if not d['sectors']:
+ d['sectors'] = 0
+ d['size'] = bytes_to_human(float(d['sectors']) * float(d['sectorsize']))
d['host'] = ""
@@ -833,21 +874,24 @@ class LinuxHardware(Hardware):
""" Get LVM Facts if running as root and lvm utils are available """
lvm_facts = {'lvm': 'N/A'}
+ vgs_cmd = self.module.get_bin_path('vgs')
+ if vgs_cmd is None:
+ return lvm_facts
- if os.getuid() == 0 and self.module.get_bin_path('vgs'):
+ if os.getuid() == 0:
lvm_util_options = '--noheadings --nosuffix --units g --separator ,'
- vgs_path = self.module.get_bin_path('vgs')
# vgs fields: VG #PV #LV #SN Attr VSize VFree
vgs = {}
- if vgs_path:
- rc, vg_lines, err = self.module.run_command('%s %s' % (vgs_path, lvm_util_options))
- for vg_line in vg_lines.splitlines():
- items = vg_line.strip().split(',')
- vgs[items[0]] = {'size_g': items[-2],
- 'free_g': items[-1],
- 'num_lvs': items[2],
- 'num_pvs': items[1]}
+ rc, vg_lines, err = self.module.run_command('%s %s' % (vgs_cmd, lvm_util_options))
+ for vg_line in vg_lines.splitlines():
+ items = vg_line.strip().split(',')
+ vgs[items[0]] = {
+ 'size_g': items[-2],
+ 'free_g': items[-1],
+ 'num_lvs': items[2],
+ 'num_pvs': items[1]
+ }
lvs_path = self.module.get_bin_path('lvs')
# lvs fields:
diff --git a/lib/ansible/module_utils/facts/hardware/netbsd.py b/lib/ansible/module_utils/facts/hardware/netbsd.py
index 7d024198392..69ac583df64 100644
--- a/lib/ansible/module_utils/facts/hardware/netbsd.py
+++ b/lib/ansible/module_utils/facts/hardware/netbsd.py
@@ -162,6 +162,9 @@ class NetBSDHardware(Hardware):
def get_uptime_facts(self):
# On NetBSD, we need to call sysctl with -n to get this value as an int.
sysctl_cmd = self.module.get_bin_path('sysctl')
+ if sysctl_cmd is None:
+ return {}
+
cmd = [sysctl_cmd, '-n', 'kern.boottime']
rc, out, err = self.module.run_command(cmd)
diff --git a/lib/ansible/module_utils/facts/hardware/openbsd.py b/lib/ansible/module_utils/facts/hardware/openbsd.py
index 751ee6165dd..b5f08c0092b 100644
--- a/lib/ansible/module_utils/facts/hardware/openbsd.py
+++ b/lib/ansible/module_utils/facts/hardware/openbsd.py
@@ -54,7 +54,7 @@ class OpenBSDHardware(Hardware):
hardware_facts.update(self.get_dmi_facts())
hardware_facts.update(self.get_uptime_facts())
- # storage devices notorioslly prone to hang/block so they are under a timeout
+ # storage devices notoriously prone to hang/block so they are under a timeout
try:
hardware_facts.update(self.get_mount_facts())
except timeout.TimeoutError:
@@ -113,6 +113,9 @@ class OpenBSDHardware(Hardware):
def get_uptime_facts(self):
# On openbsd, we need to call it with -n to get this value as an int.
sysctl_cmd = self.module.get_bin_path('sysctl')
+ if sysctl_cmd is None:
+ return {}
+
cmd = [sysctl_cmd, '-n', 'kern.boottime']
rc, out, err = self.module.run_command(cmd)
diff --git a/lib/ansible/module_utils/facts/hardware/sunos.py b/lib/ansible/module_utils/facts/hardware/sunos.py
index 62eeafc3c98..134e59a8c2c 100644
--- a/lib/ansible/module_utils/facts/hardware/sunos.py
+++ b/lib/ansible/module_utils/facts/hardware/sunos.py
@@ -172,7 +172,13 @@ class SunOSHardware(Hardware):
rc, platform, err = self.module.run_command('/usr/bin/uname -i')
platform_sbin = '/usr/platform/' + platform.rstrip() + '/sbin'
- prtdiag_path = self.module.get_bin_path("prtdiag", opt_dirs=[platform_sbin])
+ prtdiag_path = self.module.get_bin_path(
+ "prtdiag",
+ opt_dirs=[platform_sbin]
+ )
+ if prtdiag_path is None:
+ return dmi_facts
+
rc, out, err = self.module.run_command(prtdiag_path)
# rc returns 1
if out:
diff --git a/lib/ansible/module_utils/facts/network/aix.py b/lib/ansible/module_utils/facts/network/aix.py
index 29a679d84b1..17516d927d8 100644
--- a/lib/ansible/module_utils/facts/network/aix.py
+++ b/lib/ansible/module_utils/facts/network/aix.py
@@ -32,20 +32,21 @@ class AIXNetwork(GenericBsdIfconfigNetwork):
interface = dict(v4={}, v6={})
netstat_path = self.module.get_bin_path('netstat')
-
- if netstat_path:
- rc, out, err = self.module.run_command([netstat_path, '-nr'])
-
- lines = out.splitlines()
- for line in lines:
- words = line.split()
- if len(words) > 1 and words[0] == 'default':
- if '.' in words[1]:
- interface['v4']['gateway'] = words[1]
- interface['v4']['interface'] = words[5]
- elif ':' in words[1]:
- interface['v6']['gateway'] = words[1]
- interface['v6']['interface'] = words[5]
+ if netstat_path is None:
+ return interface['v4'], interface['v6']
+
+ rc, out, err = self.module.run_command([netstat_path, '-nr'])
+
+ lines = out.splitlines()
+ for line in lines:
+ words = line.split()
+ if len(words) > 1 and words[0] == 'default':
+ if '.' in words[1]:
+ interface['v4']['gateway'] = words[1]
+ interface['v4']['interface'] = words[5]
+ elif ':' in words[1]:
+ interface['v6']['gateway'] = words[1]
+ interface['v6']['interface'] = words[5]
return interface['v4'], interface['v6']
@@ -58,9 +59,7 @@ class AIXNetwork(GenericBsdIfconfigNetwork):
all_ipv6_addresses=[],
)
- uname_rc = None
- uname_out = None
- uname_err = None
+ uname_rc = uname_out = uname_err = None
uname_path = self.module.get_bin_path('uname')
if uname_path:
uname_rc, uname_out, uname_err = self.module.run_command([uname_path, '-W'])
diff --git a/lib/ansible/module_utils/facts/network/fc_wwn.py b/lib/ansible/module_utils/facts/network/fc_wwn.py
index f53cc53927d..fb846cc08a8 100644
--- a/lib/ansible/module_utils/facts/network/fc_wwn.py
+++ b/lib/ansible/module_utils/facts/network/fc_wwn.py
@@ -82,7 +82,10 @@ class FcWwnInitiatorFactCollector(BaseFactCollector):
fc_facts['fibre_channel_wwn'].append(data[-1].rstrip())
elif sys.platform.startswith('hp-ux'):
cmd = module.get_bin_path('ioscan')
- fcmsu_cmd = module.get_bin_path('fcmsutil', opt_dirs=['/opt/fcms/bin'])
+ fcmsu_cmd = module.get_bin_path(
+ 'fcmsutil',
+ opt_dirs=['/opt/fcms/bin'],
+ )
# go ahead if we have both commands available
if cmd and fcmsu_cmd:
# ioscan / get list of available fibre-channel devices (fcd)
diff --git a/lib/ansible/module_utils/facts/network/hpux.py b/lib/ansible/module_utils/facts/network/hpux.py
index 61e1bdc644f..2f01825bb24 100644
--- a/lib/ansible/module_utils/facts/network/hpux.py
+++ b/lib/ansible/module_utils/facts/network/hpux.py
@@ -20,7 +20,7 @@ from ansible.module_utils.facts.network.base import Network, NetworkCollector
class HPUXNetwork(Network):
"""
- HP-UX-specifig subclass of Network. Defines networking facts:
+ HP-UX-specific subclass of Network. Defines networking facts:
- default_interface
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4 address information.
@@ -29,7 +29,10 @@ class HPUXNetwork(Network):
def populate(self, collected_facts=None):
network_facts = {}
- netstat_path = self.module.get_bin_path('netstat')
+ netstat_path = self.module.get_bin_path(
+ 'netstat',
+ opt_dirs=['/usr/bin']
+ )
if netstat_path is None:
return network_facts
@@ -46,7 +49,14 @@ class HPUXNetwork(Network):
def get_default_interfaces(self):
default_interfaces = {}
- rc, out, err = self.module.run_command("/usr/bin/netstat -nr")
+ netstat_path = self.module.get_bin_path(
+ 'netstat',
+ opt_dirs=['/usr/bin']
+ )
+
+ if netstat_path is None:
+ return default_interfaces
+ rc, out, err = self.module.run_command("%s -nr" % netstat_path)
lines = out.splitlines()
for line in lines:
words = line.split()
@@ -59,7 +69,14 @@ class HPUXNetwork(Network):
def get_interfaces_info(self):
interfaces = {}
- rc, out, err = self.module.run_command("/usr/bin/netstat -niw")
+ netstat_path = self.module.get_bin_path(
+ 'netstat',
+ opt_dirs=['/usr/bin']
+ )
+
+ if netstat_path is None:
+ return interfaces
+ rc, out, err = self.module.run_command("%s -niw" % netstat_path)
lines = out.splitlines()
for line in lines:
words = line.split()
diff --git a/lib/ansible/module_utils/facts/network/iscsi.py b/lib/ansible/module_utils/facts/network/iscsi.py
index 8f7a61590fb..48f98a682bd 100644
--- a/lib/ansible/module_utils/facts/network/iscsi.py
+++ b/lib/ansible/module_utils/facts/network/iscsi.py
@@ -21,7 +21,6 @@ import sys
import ansible.module_utils.compat.typing as t
-from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.network.base import NetworkCollector
@@ -80,9 +79,8 @@ class IscsiInitiatorNetworkCollector(NetworkCollector):
iscsi_facts['iscsi_iqn'] = line.split('=', 1)[1]
break
elif sys.platform.startswith('aix'):
- try:
- cmd = get_bin_path('lsattr')
- except ValueError:
+ cmd = module.get_bin_path('lsattr')
+ if cmd is None:
return iscsi_facts
cmd += " -E -l iscsi0"
@@ -92,10 +90,11 @@ class IscsiInitiatorNetworkCollector(NetworkCollector):
iscsi_facts['iscsi_iqn'] = line.split()[1].rstrip()
elif sys.platform.startswith('hp-ux'):
- # try to find it in the default PATH and opt_dirs
- try:
- cmd = get_bin_path('iscsiutil', opt_dirs=['/opt/iscsi/bin'])
- except ValueError:
+ cmd = module.get_bin_path(
+ 'iscsiutil',
+ opt_dirs=['/opt/iscsi/bin']
+ )
+ if cmd is None:
return iscsi_facts
cmd += " -l"
diff --git a/lib/ansible/module_utils/facts/network/linux.py b/lib/ansible/module_utils/facts/network/linux.py
index 560cd255f37..d199d5a6ae3 100644
--- a/lib/ansible/module_utils/facts/network/linux.py
+++ b/lib/ansible/module_utils/facts/network/linux.py
@@ -295,8 +295,6 @@ class LinuxNetwork(Network):
if not address == '::1':
ips['all_ipv6_addresses'].append(address)
- ip_path = self.module.get_bin_path("ip")
-
args = [ip_path, 'addr', 'show', 'primary', 'dev', device]
rc, primary_data, stderr = self.module.run_command(args, errors='surrogate_then_replace')
if rc == 0:
diff --git a/lib/ansible/module_utils/facts/other/facter.py b/lib/ansible/module_utils/facts/other/facter.py
index ec1771ecfac..41b3cea7c92 100644
--- a/lib/ansible/module_utils/facts/other/facter.py
+++ b/lib/ansible/module_utils/facts/other/facter.py
@@ -22,8 +22,14 @@ class FacterFactCollector(BaseFactCollector):
namespace=namespace)
def find_facter(self, module):
- facter_path = module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
- cfacter_path = module.get_bin_path('cfacter', opt_dirs=['/opt/puppetlabs/bin'])
+ facter_path = module.get_bin_path(
+ 'facter',
+ opt_dirs=['/opt/puppetlabs/bin']
+ )
+ cfacter_path = module.get_bin_path(
+ 'cfacter',
+ opt_dirs=['/opt/puppetlabs/bin']
+ )
# Prefer to use cfacter if available
if cfacter_path is not None:
@@ -73,7 +79,6 @@ class FacterFactCollector(BaseFactCollector):
try:
facter_dict = json.loads(facter_output)
except Exception:
- # FIXME: maybe raise a FactCollectorError with some info attrs?
- pass
+ module.warn("Failed to parse facter facts")
return facter_dict
diff --git a/lib/ansible/module_utils/facts/other/ohai.py b/lib/ansible/module_utils/facts/other/ohai.py
index 75968ef29f1..8f0e4dcaecb 100644
--- a/lib/ansible/module_utils/facts/other/ohai.py
+++ b/lib/ansible/module_utils/facts/other/ohai.py
@@ -36,10 +36,11 @@ class OhaiFactCollector(BaseFactCollector):
namespace=namespace)
def find_ohai(self, module):
- ohai_path = module.get_bin_path('ohai')
- return ohai_path
+ return module.get_bin_path(
+ 'ohai'
+ )
- def run_ohai(self, module, ohai_path,):
+ def run_ohai(self, module, ohai_path):
rc, out, err = module.run_command(ohai_path)
return rc, out, err
@@ -67,7 +68,6 @@ class OhaiFactCollector(BaseFactCollector):
try:
ohai_facts = json.loads(ohai_output)
except Exception:
- # FIXME: useful error, logging, something...
- pass
+ module.warn("Failed to gather ohai facts")
return ohai_facts
diff --git a/lib/ansible/module_utils/facts/packages.py b/lib/ansible/module_utils/facts/packages.py
index 21be56fab26..b5b9bcb35ef 100644
--- a/lib/ansible/module_utils/facts/packages.py
+++ b/lib/ansible/module_utils/facts/packages.py
@@ -3,24 +3,29 @@
from __future__ import annotations
+import ansible.module_utils.compat.typing as t
+
from abc import ABCMeta, abstractmethod
from ansible.module_utils.six import with_metaclass
+from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
from ansible.module_utils.common._utils import get_all_subclasses
def get_all_pkg_managers():
- return {obj.__name__.lower(): obj for obj in get_all_subclasses(PkgMgr) if obj not in (CLIMgr, LibMgr)}
+ return {obj.__name__.lower(): obj for obj in get_all_subclasses(PkgMgr) if obj not in (CLIMgr, LibMgr, RespawningLibMgr)}
class PkgMgr(with_metaclass(ABCMeta, object)): # type: ignore[misc]
@abstractmethod
- def is_available(self):
+ def is_available(self, handle_exceptions):
# This method is supposed to return True/False if the package manager is currently installed/usable
# It can also 'prep' the required systems in the process of detecting availability
+ # If handle_exceptions is false it should raise exceptions related to manager discovery instead of handling them.
pass
@abstractmethod
@@ -58,16 +63,50 @@ class LibMgr(PkgMgr):
self._lib = None
super(LibMgr, self).__init__()
- def is_available(self):
+ def is_available(self, handle_exceptions=True):
found = False
try:
self._lib = __import__(self.LIB)
found = True
except ImportError:
- pass
+ if not handle_exceptions:
+ raise Exception(missing_required_lib(self.LIB))
return found
+class RespawningLibMgr(LibMgr):
+
+ CLI_BINARIES = [] # type: t.List[str]
+ INTERPRETERS = ['/usr/bin/python3']
+
+ def is_available(self, handle_exceptions=True):
+ if super(RespawningLibMgr, self).is_available():
+ return True
+
+ for binary in self.CLI_BINARIES:
+ try:
+ bin_path = get_bin_path(binary)
+ except ValueError:
+ # Not an interesting exception to raise, just a speculative probe
+ continue
+ else:
+ # It looks like this package manager is installed
+ if not has_respawned():
+ # See if respawning will help
+ interpreter_path = probe_interpreters_for_module(self.INTERPRETERS, self.LIB)
+ if interpreter_path:
+ respawn_module(interpreter_path)
+ # The module will exit when the respawned copy completes
+
+ if not handle_exceptions:
+ raise Exception(f'Found executable at {bin_path}. {missing_required_lib(self.LIB)}')
+
+ if not handle_exceptions:
+ raise Exception(missing_required_lib(self.LIB))
+
+ return False
+
+
class CLIMgr(PkgMgr):
CLI = None # type: str | None
@@ -77,9 +116,12 @@ class CLIMgr(PkgMgr):
self._cli = None
super(CLIMgr, self).__init__()
- def is_available(self):
+ def is_available(self, handle_exceptions=True):
+ found = False
try:
self._cli = get_bin_path(self.CLI)
+ found = True
except ValueError:
- return False
- return True
+ if not handle_exceptions:
+ raise
+ return found
diff --git a/lib/ansible/module_utils/facts/sysctl.py b/lib/ansible/module_utils/facts/sysctl.py
index 1f94091200b..639e77c41f0 100644
--- a/lib/ansible/module_utils/facts/sysctl.py
+++ b/lib/ansible/module_utils/facts/sysctl.py
@@ -21,41 +21,43 @@ from ansible.module_utils.common.text.converters import to_text
def get_sysctl(module, prefixes):
- sysctl_cmd = module.get_bin_path('sysctl')
- cmd = [sysctl_cmd]
- cmd.extend(prefixes)
sysctl = dict()
-
- try:
- rc, out, err = module.run_command(cmd)
- except (IOError, OSError) as e:
- module.warn('Unable to read sysctl: %s' % to_text(e))
- rc = 1
-
- if rc == 0:
- key = ''
- value = ''
- for line in out.splitlines():
- if not line.strip():
- continue
-
- if line.startswith(' '):
- # handle multiline values, they will not have a starting key
- # Add the newline back in so people can split on it to parse
- # lines if they need to.
- value += '\n' + line
- continue
+ sysctl_cmd = module.get_bin_path('sysctl')
+ if sysctl_cmd is not None:
+
+ cmd = [sysctl_cmd]
+ cmd.extend(prefixes)
+
+ try:
+ rc, out, err = module.run_command(cmd)
+ except (IOError, OSError) as e:
+ module.warn('Unable to read sysctl: %s' % to_text(e))
+ rc = 1
+
+ if rc == 0:
+ key = ''
+ value = ''
+ for line in out.splitlines():
+ if not line.strip():
+ continue
+
+ if line.startswith(' '):
+ # handle multiline values, they will not have a starting key
+ # Add the newline back in so people can split on it to parse
+ # lines if they need to.
+ value += '\n' + line
+ continue
+
+ if key:
+ sysctl[key] = value.strip()
+
+ try:
+ (key, value) = re.split(r'\s?=\s?|: ', line, maxsplit=1)
+ except Exception as e:
+ module.warn('Unable to split sysctl line (%s): %s' % (to_text(line), to_text(e)))
if key:
sysctl[key] = value.strip()
- try:
- (key, value) = re.split(r'\s?=\s?|: ', line, maxsplit=1)
- except Exception as e:
- module.warn('Unable to split sysctl line (%s): %s' % (to_text(line), to_text(e)))
-
- if key:
- sysctl[key] = value.strip()
-
return sysctl
diff --git a/lib/ansible/module_utils/facts/system/distribution.py b/lib/ansible/module_utils/facts/system/distribution.py
index b274a85a800..7554ef1ae32 100644
--- a/lib/ansible/module_utils/facts/system/distribution.py
+++ b/lib/ansible/module_utils/facts/system/distribution.py
@@ -30,7 +30,7 @@ def get_uname(module, flags=('-v')):
def _file_exists(path, allow_empty=False):
# not finding the file, exit early
- if not os.path.exists(path):
+ if not os.path.isfile(path):
return False
# if just the path needs to exists (ie, it can be empty) we are done
@@ -512,12 +512,12 @@ class Distribution(object):
'Ascendos', 'CloudLinux', 'PSBM', 'OracleLinux', 'OVS',
'OEL', 'Amazon', 'Amzn', 'Virtuozzo', 'XenServer', 'Alibaba',
'EulerOS', 'openEuler', 'AlmaLinux', 'Rocky', 'TencentOS',
- 'EuroLinux', 'Kylin Linux Advanced Server'],
+ 'EuroLinux', 'Kylin Linux Advanced Server', 'MIRACLE'],
'Debian': ['Debian', 'Ubuntu', 'Raspbian', 'Neon', 'KDE neon',
'Linux Mint', 'SteamOS', 'Devuan', 'Kali', 'Cumulus Linux',
'Pop!_OS', 'Parrot', 'Pardus GNU/Linux', 'Uos', 'Deepin', 'OSMC'],
'Suse': ['SuSE', 'SLES', 'SLED', 'openSUSE', 'openSUSE Tumbleweed',
- 'SLES_SAP', 'SUSE_LINUX', 'openSUSE Leap', 'ALP-Dolomite'],
+ 'SLES_SAP', 'SUSE_LINUX', 'openSUSE Leap', 'ALP-Dolomite', 'SL-Micro'],
'Archlinux': ['Archlinux', 'Antergos', 'Manjaro'],
'Mandrake': ['Mandrake', 'Mandriva'],
'Solaris': ['Solaris', 'Nexenta', 'OmniOS', 'OpenIndiana', 'SmartOS'],
diff --git a/lib/ansible/module_utils/facts/system/local.py b/lib/ansible/module_utils/facts/system/local.py
index 3d656f5a345..66ec58a2e7d 100644
--- a/lib/ansible/module_utils/facts/system/local.py
+++ b/lib/ansible/module_utils/facts/system/local.py
@@ -1,17 +1,5 @@
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
@@ -25,7 +13,6 @@ import ansible.module_utils.compat.typing as t
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector
-from ansible.module_utils.six import PY3
from ansible.module_utils.six.moves import configparser, StringIO
@@ -91,12 +78,9 @@ class LocalFactCollector(BaseFactCollector):
# if that fails read it with ConfigParser
cp = configparser.ConfigParser()
try:
- if PY3:
- cp.read_file(StringIO(out))
- else:
- cp.readfp(StringIO(out))
+ cp.read_file(StringIO(out))
except configparser.Error:
- fact = "error loading facts as JSON or ini - please check content: %s" % fn
+ fact = f"error loading facts as JSON or ini - please check content: {fn}"
module.warn(fact)
else:
fact = {}
@@ -104,8 +88,14 @@ class LocalFactCollector(BaseFactCollector):
if sect not in fact:
fact[sect] = {}
for opt in cp.options(sect):
- val = cp.get(sect, opt)
- fact[sect][opt] = val
+ try:
+ val = cp.get(sect, opt)
+ except configparser.Error as ex:
+ fact = f"error loading facts as ini - please check content: {fn} ({ex})"
+ module.warn(fact)
+ continue
+ else:
+ fact[sect][opt] = val
except Exception as e:
fact = "Failed to convert (%s) to JSON: %s" % (fn, to_text(e))
module.warn(fact)
diff --git a/lib/ansible/module_utils/facts/system/service_mgr.py b/lib/ansible/module_utils/facts/system/service_mgr.py
index 4dfa7e99d44..20257967c1e 100644
--- a/lib/ansible/module_utils/facts/system/service_mgr.py
+++ b/lib/ansible/module_utils/facts/system/service_mgr.py
@@ -106,7 +106,7 @@ class ServiceMgrFactCollector(BaseFactCollector):
proc_1 = proc_1.strip()
if proc_1 is not None and (proc_1 == 'init' or proc_1.endswith('sh')):
- # many systems return init, so this cannot be trusted, if it ends in 'sh' it probalby is a shell in a container
+ # many systems return init, so this cannot be trusted, if it ends in 'sh' it probably is a shell in a container
proc_1 = None
# if not init/None it should be an identifiable or custom init, so we are done!
@@ -144,6 +144,8 @@ class ServiceMgrFactCollector(BaseFactCollector):
service_mgr_name = 'systemd'
elif os.path.exists('/etc/init.d/'):
service_mgr_name = 'sysvinit'
+ elif os.path.exists('/etc/dinit.d/'):
+ service_mgr_name = 'dinit'
if not service_mgr_name:
# if we cannot detect, fallback to generic 'service'
diff --git a/lib/ansible/module_utils/facts/system/systemd.py b/lib/ansible/module_utils/facts/system/systemd.py
new file mode 100644
index 00000000000..3ba2bbfcbdf
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/systemd.py
@@ -0,0 +1,47 @@
+# Get systemd version and features
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import annotations
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
+
+
+class SystemdFactCollector(BaseFactCollector):
+ name = "systemd"
+ _fact_ids = set() # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ systemctl_bin = module.get_bin_path("systemctl")
+ systemd_facts = {}
+ if systemctl_bin and ServiceMgrFactCollector.is_systemd_managed(module):
+ rc, stdout, dummy = module.run_command(
+ [systemctl_bin, "--version"],
+ check_rc=False,
+ )
+
+ if rc != 0:
+ return systemd_facts
+
+ systemd_facts["systemd"] = {
+ "features": str(stdout.split("\n")[1]),
+ "version": int(stdout.split(" ")[1]),
+ }
+
+ return systemd_facts
diff --git a/lib/ansible/module_utils/facts/timeout.py b/lib/ansible/module_utils/facts/timeout.py
index 5fb749fb6b3..3b0476245b8 100644
--- a/lib/ansible/module_utils/facts/timeout.py
+++ b/lib/ansible/module_utils/facts/timeout.py
@@ -48,7 +48,7 @@ def timeout(seconds=None, error_message="Timer expired"):
return res.get(timeout_value)
except multiprocessing.TimeoutError:
# This is an ansible.module_utils.common.facts.timeout.TimeoutError
- raise TimeoutError('Timer expired after %s seconds' % timeout_value)
+ raise TimeoutError(f'{error_message} after {timeout_value} seconds')
finally:
pool.terminate()
diff --git a/lib/ansible/module_utils/facts/virtual/linux.py b/lib/ansible/module_utils/facts/virtual/linux.py
index 05ae1254ca0..57b047b11a1 100644
--- a/lib/ansible/module_utils/facts/virtual/linux.py
+++ b/lib/ansible/module_utils/facts/virtual/linux.py
@@ -175,7 +175,7 @@ class LinuxVirtual(Virtual):
virtual_facts['virtualization_type'] = 'RHEV'
found_virt = True
- if product_name in ('VMware Virtual Platform', 'VMware7,1', 'VMware20,1'):
+ if product_name and product_name.startswith(("VMware",)):
guest_tech.add('VMware')
if not found_virt:
virtual_facts['virtualization_type'] = 'VMware'
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1
index f40c3384cbc..3a1a317ec66 100644
--- a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1
@@ -37,7 +37,7 @@ Function Add-CSharpType {
.PARAMETER CompileSymbols
[String[]] A list of symbols to be defined during compile time. These are
added to the existing symbols, 'CORECLR', 'WINDOWS', 'UNIX' that are set
- conditionalls in this cmdlet.
+    conditionally in this cmdlet.
.NOTES
The following features were added to control the compiling options from the
@@ -75,7 +75,7 @@ Function Add-CSharpType {
[Switch]$IgnoreWarnings,
[Switch]$PassThru,
[Parameter(Mandatory = $true, ParameterSetName = "Module")][Object]$AnsibleModule,
- [Parameter(ParameterSetName = "Manual")][String]$TempPath = $env:TMP,
+ [Parameter(ParameterSetName = "Manual")][String]$TempPath,
[Parameter(ParameterSetName = "Manual")][Switch]$IncludeDebugInfo,
[String[]]$CompileSymbols = @()
)
@@ -280,9 +280,11 @@ Function Add-CSharpType {
$include_debug = $AnsibleModule.Verbosity -ge 3
}
else {
- $temp_path = $TempPath
+ $temp_path = [System.IO.Path]::GetTempPath()
$include_debug = $IncludeDebugInfo.IsPresent
}
+ $temp_path = Join-Path -Path $temp_path -ChildPath ([Guid]::NewGuid().Guid)
+
$compiler_options = [System.Collections.ArrayList]@("/optimize")
if ($defined_symbols.Count -gt 0) {
$compiler_options.Add("/define:" + ([String]::Join(";", $defined_symbols.ToArray()))) > $null
@@ -304,8 +306,12 @@ Function Add-CSharpType {
)
# create a code snippet for each reference and check if we need
- # to reference any extra assemblies
- $ignore_warnings = [System.Collections.ArrayList]@()
+ # to reference any extra assemblies.
+ # CS1610 is a warning when csc.exe failed to delete temporary files.
+ # We use our own temp dir deletion mechanism so this doesn't become a
+ # fatal error.
+ # https://github.com/ansible-collections/ansible.windows/issues/598
+ $ignore_warnings = [System.Collections.ArrayList]@('1610')
$compile_units = [System.Collections.Generic.List`1[System.CodeDom.CodeSnippetCompileUnit]]@()
foreach ($reference in $References) {
# scan through code and add any assemblies that match
@@ -373,7 +379,26 @@ Function Add-CSharpType {
}
}
- $compile = $provider.CompileAssemblyFromDom($compile_parameters, $compile_units)
+ $null = New-Item -Path $temp_path -ItemType Directory -Force
+ try {
+ $compile = $provider.CompileAssemblyFromDom($compile_parameters, $compile_units)
+ }
+ finally {
+ # Try to delete the temp path, if this fails and we are running
+ # with a module object write a warning instead of failing.
+ try {
+ [System.IO.Directory]::Delete($temp_path, $true)
+ }
+ catch {
+ $msg = "Failed to cleanup temporary directory '$temp_path' used for compiling C# code."
+ if ($AnsibleModule) {
+ $AnsibleModule.Warn("$msg Files may still be present after the task is complete. Error: $_")
+ }
+ else {
+ throw "$msg Error: $_"
+ }
+ }
+ }
}
finally {
foreach ($kvp in $originalEnv.GetEnumerator()) {
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1
index 9b86f84188a..fb9fb11c490 100644
--- a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1
@@ -4,7 +4,7 @@
# used by Convert-DictToSnakeCase to convert a string in camelCase
# format to snake_case
Function Convert-StringToSnakeCase($string) {
- # cope with pluralized abbreaviations such as TargetGroupARNs
+ # cope with pluralized abbreviations such as TargetGroupARNs
if ($string -cmatch "[A-Z]{3,}s") {
$replacement_string = $string -creplace $matches[0], "_$($matches[0].ToLower())"
diff --git a/lib/ansible/module_utils/splitter.py b/lib/ansible/module_utils/splitter.py
index 7bddd32dae1..e2a3da50543 100644
--- a/lib/ansible/module_utils/splitter.py
+++ b/lib/ansible/module_utils/splitter.py
@@ -81,7 +81,7 @@ def split_args(args):
'''
# the list of params parsed out of the arg string
- # this is going to be the result value when we are donei
+ # this is going to be the result value when we are done
params = []
# here we encode the args, so we have a uniform charset to
diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py
index c7152311057..b3047f0164f 100644
--- a/lib/ansible/module_utils/urls.py
+++ b/lib/ansible/module_utils/urls.py
@@ -340,13 +340,16 @@ def extract_pem_certs(data):
def get_response_filename(response):
- url = response.geturl()
- path = urlparse(url)[2]
- filename = os.path.basename(path.rstrip('/')) or None
- if filename:
- filename = unquote(filename)
+ if filename := response.headers.get_param('filename', header='content-disposition'):
+ filename = os.path.basename(filename)
+ else:
+ url = response.geturl()
+ path = urlparse(url)[2]
+ filename = os.path.basename(path.rstrip('/')) or None
+ if filename:
+ filename = unquote(filename)
- return response.headers.get_param('filename', header='content-disposition') or filename
+ return filename
def parse_content_type(response):
@@ -633,6 +636,74 @@ def rfc2822_date_string(timetuple, zone='-0000'):
zone)
+def _configure_auth(url, url_username, url_password, use_gssapi, force_basic_auth, use_netrc):
+ headers = {}
+ handlers = []
+
+ parsed = urlparse(url)
+ if parsed.scheme == 'ftp':
+ return url, headers, handlers
+
+ username = url_username
+ password = url_password
+
+ if username:
+ netloc = parsed.netloc
+ elif '@' in parsed.netloc:
+ credentials, netloc = parsed.netloc.split('@', 1)
+ if ':' in credentials:
+ username, password = credentials.split(':', 1)
+ else:
+ username = credentials
+ password = ''
+ username = unquote(username)
+ password = unquote(password)
+
+ # reconstruct url without credentials
+ url = urlunparse(parsed._replace(netloc=netloc))
+
+ if use_gssapi:
+ if HTTPGSSAPIAuthHandler: # type: ignore[truthy-function]
+ handlers.append(HTTPGSSAPIAuthHandler(username, password))
+ else:
+ imp_err_msg = missing_required_lib('gssapi', reason='for use_gssapi=True',
+ url='https://pypi.org/project/gssapi/')
+ raise MissingModuleError(imp_err_msg, import_traceback=GSSAPI_IMP_ERR)
+
+ elif username and not force_basic_auth:
+ passman = urllib.request.HTTPPasswordMgrWithDefaultRealm()
+
+ # this creates a password manager
+ passman.add_password(None, netloc, username, password)
+
+ # because we have put None at the start it will always
+ # use this username/password combination for urls
+ # for which `theurl` is a super-url
+ authhandler = urllib.request.HTTPBasicAuthHandler(passman)
+ digest_authhandler = urllib.request.HTTPDigestAuthHandler(passman)
+
+ # create the AuthHandler
+ handlers.append(authhandler)
+ handlers.append(digest_authhandler)
+
+ elif username and force_basic_auth:
+ headers["Authorization"] = basic_auth_header(username, password)
+
+ elif use_netrc:
+ try:
+ rc = netrc.netrc(os.environ.get('NETRC'))
+ login = rc.authenticators(parsed.hostname)
+ except IOError:
+ login = None
+
+ if login:
+ username, dummy, password = login
+ if username and password:
+ headers["Authorization"] = basic_auth_header(username, password)
+
+ return url, headers, handlers
+
+
class Request:
def __init__(self, headers=None, use_proxy=True, force=False, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=False,
@@ -772,64 +843,9 @@ class Request:
if unix_socket:
handlers.append(UnixHTTPHandler(unix_socket))
- parsed = urlparse(url)
- if parsed.scheme != 'ftp':
- username = url_username
- password = url_password
-
- if username:
- netloc = parsed.netloc
- elif '@' in parsed.netloc:
- credentials, netloc = parsed.netloc.split('@', 1)
- if ':' in credentials:
- username, password = credentials.split(':', 1)
- else:
- username = credentials
- password = ''
- username = unquote(username)
- password = unquote(password)
-
- # reconstruct url without credentials
- url = urlunparse(parsed._replace(netloc=netloc))
-
- if use_gssapi:
- if HTTPGSSAPIAuthHandler: # type: ignore[truthy-function]
- handlers.append(HTTPGSSAPIAuthHandler(username, password))
- else:
- imp_err_msg = missing_required_lib('gssapi', reason='for use_gssapi=True',
- url='https://pypi.org/project/gssapi/')
- raise MissingModuleError(imp_err_msg, import_traceback=GSSAPI_IMP_ERR)
-
- elif username and not force_basic_auth:
- passman = urllib.request.HTTPPasswordMgrWithDefaultRealm()
-
- # this creates a password manager
- passman.add_password(None, netloc, username, password)
-
- # because we have put None at the start it will always
- # use this username/password combination for urls
- # for which `theurl` is a super-url
- authhandler = urllib.request.HTTPBasicAuthHandler(passman)
- digest_authhandler = urllib.request.HTTPDigestAuthHandler(passman)
-
- # create the AuthHandler
- handlers.append(authhandler)
- handlers.append(digest_authhandler)
-
- elif username and force_basic_auth:
- headers["Authorization"] = basic_auth_header(username, password)
-
- elif use_netrc:
- try:
- rc = netrc.netrc(os.environ.get('NETRC'))
- login = rc.authenticators(parsed.hostname)
- except IOError:
- login = None
-
- if login:
- username, dummy, password = login
- if username and password:
- headers["Authorization"] = basic_auth_header(username, password)
+ url, auth_headers, auth_handlers = _configure_auth(url, url_username, url_password, use_gssapi, force_basic_auth, use_netrc)
+ headers.update(auth_headers)
+ handlers.extend(auth_handlers)
if not use_proxy:
proxyhandler = urllib.request.ProxyHandler({})
diff --git a/lib/ansible/module_utils/yumdnf.py b/lib/ansible/module_utils/yumdnf.py
index 4a07c4fac6a..b2cbba3fde2 100644
--- a/lib/ansible/module_utils/yumdnf.py
+++ b/lib/ansible/module_utils/yumdnf.py
@@ -18,6 +18,7 @@ yumdnf_argument_spec = dict(
allow_downgrade=dict(type='bool', default=False),
allowerasing=dict(default=False, type="bool"),
autoremove=dict(type='bool', default=False),
+ best=dict(type="bool"),
bugfix=dict(required=False, type='bool', default=False),
cacheonly=dict(type='bool', default=False),
conf_file=dict(type='str'),
@@ -38,7 +39,7 @@ yumdnf_argument_spec = dict(
install_weak_deps=dict(type='bool', default=True),
list=dict(type='str'),
name=dict(type='list', elements='str', aliases=['pkg'], default=[]),
- nobest=dict(default=False, type="bool"),
+ nobest=dict(type="bool"),
releasever=dict(default=None),
security=dict(type='bool', default=False),
skip_broken=dict(type='bool', default=False),
@@ -51,7 +52,7 @@ yumdnf_argument_spec = dict(
lock_timeout=dict(type='int', default=30),
),
required_one_of=[['name', 'list', 'update_cache']],
- mutually_exclusive=[['name', 'list']],
+ mutually_exclusive=[['name', 'list'], ['best', 'nobest']],
supports_check_mode=True,
)
@@ -70,6 +71,7 @@ class YumDnf(metaclass=ABCMeta):
self.allow_downgrade = self.module.params['allow_downgrade']
self.allowerasing = self.module.params['allowerasing']
self.autoremove = self.module.params['autoremove']
+ self.best = self.module.params['best']
self.bugfix = self.module.params['bugfix']
self.cacheonly = self.module.params['cacheonly']
self.conf_file = self.module.params['conf_file']
diff --git a/lib/ansible/modules/add_host.py b/lib/ansible/modules/add_host.py
index de3c8619135..81930eab7d7 100644
--- a/lib/ansible/modules/add_host.py
+++ b/lib/ansible/modules/add_host.py
@@ -61,7 +61,7 @@ notes:
- The alias O(host) of the parameter O(name) is only available on Ansible 2.4 and newer.
- Since Ansible 2.4, the C(inventory_dir) variable is now set to V(None) instead of the 'global inventory source',
because you can now have multiple sources. An example was added that shows how to partially restore the previous behaviour.
-- Though this module does not change the remote host, we do provide 'changed' status as it can be useful for those trying to track inventory changes.
+- Though this module does not change the remote host, we do provide C(changed) status as it can be useful for those trying to track inventory changes.
- The hosts added will not bypass the C(--limit) from the command line, so both of those need to be in agreement to make them available as play targets.
They are still available from hostvars and for delegation as a normal part of the inventory.
seealso:
diff --git a/lib/ansible/modules/apt.py b/lib/ansible/modules/apt.py
index ad72782b7ec..70a2a07cc07 100644
--- a/lib/ansible/modules/apt.py
+++ b/lib/ansible/modules/apt.py
@@ -21,6 +21,7 @@ options:
description:
- A list of package names, like V(foo), or package specifier with version, like V(foo=1.0) or V(foo>=1.0).
Name wildcards (fnmatch) like V(apt*) and version wildcards like V(foo=1.0*) are also supported.
+ - Do not use single or double quotes around the version when referring to the package name with a specific version, such as V(foo=1.0) or V(foo>=1.0).
aliases: [ package, pkg ]
type: list
elements: str
@@ -62,21 +63,20 @@ options:
default: 'no'
default_release:
description:
- - Corresponds to the C(-t) option for I(apt) and sets pin priorities
+ - Corresponds to the C(-t) option for I(apt) and sets pin priorities.
aliases: [ default-release ]
type: str
install_recommends:
description:
- - Corresponds to the C(--no-install-recommends) option for I(apt). V(true) installs recommended packages. V(false) does not install
+ - Corresponds to the C(--no-install-recommends) option for C(apt). V(true) installs recommended packages. V(false) does not install
recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed.
aliases: [ install-recommends ]
type: bool
force:
description:
- - 'Corresponds to the C(--force-yes) to I(apt-get) and implies O(allow_unauthenticated=yes) and O(allow_downgrade=yes)'
- - "This option will disable checking both the packages' signatures and the certificates of the
- web servers they are downloaded from."
- - 'This option *is not* the equivalent of passing the C(-f) flag to I(apt-get) on the command line'
+ - 'Corresponds to the C(--force-yes) to C(apt-get) and implies O(allow_unauthenticated=yes) and O(allow_downgrade=yes).'
+ - "This option will disable checking both the packages' signatures and the certificates of the web servers they are downloaded from."
+ - 'This option *is not* the equivalent of passing the C(-f) flag to C(apt-get) on the command line.'
- '**This is a destructive operation with the potential to destroy your system, and it should almost never be used.**
Please also see C(man apt-get) for more information.'
type: bool
@@ -84,7 +84,7 @@ options:
clean:
description:
- Run the equivalent of C(apt-get clean) to clear out the local repository of retrieved package files. It removes everything but
- the lock file from /var/cache/apt/archives/ and /var/cache/apt/archives/partial/.
+ the lock file from C(/var/cache/apt/archives/) and C(/var/cache/apt/archives/partial/).
- Can be run as part of the package installation (clean runs before install) or as a separate step.
type: bool
default: 'no'
@@ -92,7 +92,7 @@ options:
allow_unauthenticated:
description:
- Ignore if packages cannot be authenticated. This is useful for bootstrapping environments that manage their own apt-key setup.
- - 'O(allow_unauthenticated) is only supported with O(state): V(install)/V(present)'
+ - 'O(allow_unauthenticated) is only supported with O(state): V(install)/V(present).'
aliases: [ allow-unauthenticated ]
type: bool
default: 'no'
@@ -110,7 +110,7 @@ options:
version_added: "2.12"
allow_change_held_packages:
description:
- - Allows changing the version of a package which is on the apt hold list
+ - Allows changing the version of a package which is on the apt hold list.
type: bool
default: 'no'
version_added: '2.13'
@@ -127,14 +127,14 @@ options:
type: str
dpkg_options:
description:
- - Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'
- - Options should be supplied as comma separated list
+ - Add C(dpkg) options to C(apt) command. Defaults to C(-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold").
+ - Options should be supplied as comma separated list.
default: force-confdef,force-confold
type: str
deb:
description:
- Path to a .deb package on the remote machine.
- - If :// in the path, ansible will attempt to download deb before installing. (Version added 2.1)
+ - If C(://) in the path, ansible will attempt to download deb before installing. (Version added 2.1)
- Requires the C(xz-utils) package to extract the control file of the deb package to install.
type: path
required: false
@@ -142,7 +142,8 @@ options:
autoremove:
description:
- If V(true), remove unused dependency packages for all module states except V(build-dep). It can also be used as the only option.
- - Previous to version 2.4, autoclean was also an alias for autoremove, now it is its own separate command. See documentation for further information.
+ - Previous to version 2.4, O(autoclean) was also an alias for O(autoremove), now it is its own separate command.
+ See documentation for further information.
type: bool
default: 'no'
version_added: "2.1"
@@ -154,10 +155,10 @@ options:
version_added: "2.4"
policy_rc_d:
description:
- - Force the exit code of /usr/sbin/policy-rc.d.
- - For example, if I(policy_rc_d=101) the installed package will not trigger a service start.
- - If /usr/sbin/policy-rc.d already exists, it is backed up and restored after the package installation.
- - If V(null), the /usr/sbin/policy-rc.d isn't created/changed.
+ - Force the exit code of C(/usr/sbin/policy-rc.d).
+ - For example, if O(policy_rc_d=101) the installed package will not trigger a service start.
+ - If C(/usr/sbin/policy-rc.d) already exists, it is backed up and restored after the package installation.
+ - If V(null), the C(/usr/sbin/policy-rc.d) is not created/changed.
type: int
default: null
version_added: "2.8"
@@ -178,7 +179,7 @@ options:
version_added: "2.11"
force_apt_get:
description:
- - Force usage of apt-get instead of aptitude
+ - Force usage of apt-get instead of aptitude.
type: bool
default: 'no'
version_added: "2.4"
@@ -204,22 +205,22 @@ attributes:
platforms: debian
notes:
- Three of the upgrade modes (V(full), V(safe) and its alias V(true)) required C(aptitude) up to 2.3, since 2.4 C(apt-get) is used as a fall-back.
- - In most cases, packages installed with apt will start newly installed services by default. Most distributions have mechanisms to avoid this.
+ - In most cases, packages installed with I(apt) will start newly installed services by default. Most distributions have mechanisms to avoid this.
For example when installing Postgresql-9.5 in Debian 9, creating an executable shell script (/usr/sbin/policy-rc.d) that throws
- a return code of 101 will stop Postgresql 9.5 starting up after install. Remove the file or its execute permission afterward.
- - The apt-get commandline supports implicit regex matches here but we do not because it can let typos through easier
+ a return code of 101 will stop Postgresql 9.5 starting up after install. Remove the file or its execute permission afterward.
+ - The C(apt-get) commandline supports implicit regex matches here but we do not because it can let typos through easier
(If you typo C(foo) as C(fo) apt-get would install packages that have "fo" in their name with a warning and a prompt for the user.
- Since we don't have warnings and prompts before installing, we disallow this.Use an explicit fnmatch pattern if you want wildcarding)
+ Since there are no warnings and prompts before installing, we disallow this. Use an explicit fnmatch pattern if you want wildcarding).
- When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name) option.
- When O(default_release) is used, an implicit priority of 990 is used. This is the same behavior as C(apt-get -t).
- When an exact version is specified, an implicit priority of 1001 is used.
- - If the interpreter can't import ``python-apt``/``python3-apt`` the module will check for it in system-owned interpreters as well.
+ - If the interpreter can't import C(python-apt)/C(python3-apt) the module will check for it in system-owned interpreters as well.
If the dependency can't be found, the module will attempt to install it.
If the dependency is found or installed, the module will be respawned under the correct interpreter.
'''
EXAMPLES = '''
-- name: Install apache httpd (state=present is optional)
+- name: Install apache httpd (state=present is optional)
ansible.builtin.apt:
name: apache2
state: present
@@ -364,14 +365,15 @@ import datetime
import fnmatch
import locale as locale_module
import os
-import random
import re
+import secrets
import shutil
import sys
import tempfile
import time
from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.file import S_IRWXU_RXG_RXO
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
from ansible.module_utils.common.text.converters import to_native, to_text
@@ -379,8 +381,8 @@ from ansible.module_utils.six import string_types
from ansible.module_utils.urls import fetch_file
DPKG_OPTIONS = 'force-confdef,force-confold'
-APT_GET_ZERO = "\n0 upgraded, 0 newly installed"
-APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed"
+APT_GET_ZERO = "\n0 upgraded, 0 newly installed, 0 to remove"
+APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed, 0 to remove"
APT_LISTS_PATH = "/var/lib/apt/lists"
APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp"
APT_MARK_INVALID_OP = 'Invalid operation'
@@ -448,7 +450,7 @@ class PolicyRcD(object):
with open('/usr/sbin/policy-rc.d', 'w') as policy_rc_d:
policy_rc_d.write('#!/bin/sh\nexit %d\n' % self.m.params['policy_rc_d'])
- os.chmod('/usr/sbin/policy-rc.d', 0o0755)
+ os.chmod('/usr/sbin/policy-rc.d', S_IRWXU_RXG_RXO)
except Exception:
self.m.fail_json(msg="Failed to create or chmod /usr/sbin/policy-rc.d")
@@ -505,7 +507,7 @@ def package_best_match(pkgname, version_cmp, version, release, cache):
policy.create_pin('Release', pkgname, release, 990)
if version_cmp == "=":
# Installing a specific version from command line overrides all pinning
- # We don't mimmic this exactly, but instead set a priority which is higher than all APT built-in pin priorities.
+ # We don't mimic this exactly, but instead set a priority which is higher than all APT built-in pin priorities.
policy.create_pin('Version', pkgname, version, 1001)
pkg = cache[pkgname]
pkgver = policy.get_candidate_ver(pkg)
@@ -885,6 +887,11 @@ def install_deb(
except Exception as e:
m.fail_json(msg="Unable to install package: %s" % to_native(e))
+ # Install 'Recommends' of this deb file
+ if install_recommends:
+ pkg_recommends = get_field_of_deb(m, deb_file, "Recommends")
+ deps_to_install.extend([pkg_name.strip() for pkg_name in pkg_recommends.split()])
+
# and add this deb to the list of packages to install
pkgs_to_install.append(deb_file)
@@ -1245,9 +1252,19 @@ def main():
LC_ALL=locale,
LC_MESSAGES=locale,
LC_CTYPE=locale,
+ LANGUAGE=locale,
)
module.run_command_environ_update = APT_ENV_VARS
+ global APTITUDE_CMD
+ APTITUDE_CMD = module.get_bin_path("aptitude", False)
+ global APT_GET_CMD
+ APT_GET_CMD = module.get_bin_path("apt-get")
+
+ p = module.params
+ install_recommends = p['install_recommends']
+ dpkg_options = expand_dpkg_options(p['dpkg_options'])
+
if not HAS_PYTHON_APT:
# This interpreter can't see the apt Python library- we'll do the following to try and fix that:
# 1) look in common locations for system-owned interpreters that can see it; if we find one, respawn under it
@@ -1286,10 +1303,18 @@ def main():
module.warn("Auto-installing missing dependency without updating cache: %s" % apt_pkg_name)
else:
module.warn("Updating cache and auto-installing missing dependency: %s" % apt_pkg_name)
- module.run_command(['apt-get', 'update'], check_rc=True)
+ module.run_command([APT_GET_CMD, 'update'], check_rc=True)
# try to install the apt Python binding
- module.run_command(['apt-get', 'install', '--no-install-recommends', apt_pkg_name, '-y', '-q'], check_rc=True)
+ apt_pkg_cmd = [APT_GET_CMD, 'install', apt_pkg_name, '-y', '-q', dpkg_options]
+
+ if install_recommends is False:
+ apt_pkg_cmd.extend(["-o", "APT::Install-Recommends=no"])
+ elif install_recommends is True:
+ apt_pkg_cmd.extend(["-o", "APT::Install-Recommends=yes"])
+ # install_recommends is None uses the OS default
+
+ module.run_command(apt_pkg_cmd, check_rc=True)
# try again to find the bindings in common places
interpreter = probe_interpreters_for_module(interpreters, 'apt')
@@ -1303,18 +1328,11 @@ def main():
# we've done all we can do; just tell the user it's busted and get out
module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
- global APTITUDE_CMD
- APTITUDE_CMD = module.get_bin_path("aptitude", False)
- global APT_GET_CMD
- APT_GET_CMD = module.get_bin_path("apt-get")
-
- p = module.params
-
if p['clean'] is True:
aptclean_stdout, aptclean_stderr, aptclean_diff = aptclean(module)
# If there is nothing else to do exit. This will set state as
# changed based on if the cache was updated.
- if not p['package'] and not p['upgrade'] and not p['deb']:
+ if not p['package'] and p['upgrade'] == 'no' and not p['deb']:
module.exit_json(
changed=True,
msg=aptclean_stdout,
@@ -1333,11 +1351,9 @@ def main():
updated_cache = False
updated_cache_time = 0
- install_recommends = p['install_recommends']
allow_unauthenticated = p['allow_unauthenticated']
allow_downgrade = p['allow_downgrade']
allow_change_held_packages = p['allow_change_held_packages']
- dpkg_options = expand_dpkg_options(p['dpkg_options'])
autoremove = p['autoremove']
fail_on_autoremove = p['fail_on_autoremove']
autoclean = p['autoclean']
@@ -1372,23 +1388,32 @@ def main():
err = ''
update_cache_retries = module.params.get('update_cache_retries')
update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
- randomize = random.randint(0, 1000) / 1000.0
+ randomize = secrets.randbelow(1000) / 1000.0
for retry in range(update_cache_retries):
try:
if not module.check_mode:
cache.update()
break
- except apt.cache.FetchFailedException as e:
- err = to_native(e)
+ except apt.cache.FetchFailedException as fetch_failed_exc:
+ err = fetch_failed_exc
+ module.warn(
+ f"Failed to update cache after {retry + 1} retries due "
+ f"to {to_native(fetch_failed_exc)}, retrying"
+ )
# Use exponential backoff plus a little bit of randomness
delay = 2 ** retry + randomize
if delay > update_cache_retry_max_delay:
delay = update_cache_retry_max_delay + randomize
time.sleep(delay)
+ module.warn(f"Sleeping for {int(round(delay))} seconds, before attempting to refresh the cache again")
else:
- module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason'))
+ msg = (
+ f"Failed to update apt cache after {update_cache_retries} retries: "
+ f"{err if err else 'unknown reason'}"
+ )
+ module.fail_json(msg=msg)
cache.open(progress=None)
mtimestamp, post_cache_update_time = get_updated_cache_time()
diff --git a/lib/ansible/modules/apt_key.py b/lib/ansible/modules/apt_key.py
index 669bad20c6f..ec86e829baa 100644
--- a/lib/ansible/modules/apt_key.py
+++ b/lib/ansible/modules/apt_key.py
@@ -26,13 +26,13 @@ attributes:
platform:
platforms: debian
notes:
- - The apt-key command used by this module has been deprecated. See the L(Debian wiki,https://wiki.debian.org/DebianRepository/UseThirdParty) for details.
- This module is kept for backwards compatibility for systems that still use apt-key as the main way to manage apt repository keys.
+ - The C(apt-key) command used by this module has been deprecated. See the L(Debian wiki,https://wiki.debian.org/DebianRepository/UseThirdParty) for details.
+ This module is kept for backwards compatibility for systems that still use C(apt-key) as the main way to manage apt repository keys.
- As a sanity check, downloaded key id must match the one specified.
- "Use full fingerprint (40 characters) key ids to avoid key collisions.
To generate a full-fingerprint imported key: C(apt-key adv --list-public-keys --with-fingerprint --with-colons)."
- - If you specify both the key id and the URL with O(state=present), the task can verify or add the key as needed.
- - Adding a new key requires an apt cache update (e.g. using the M(ansible.builtin.apt) module's update_cache option).
+ - If you specify both the key O(id) and the O(url) with O(state=present), the task can verify or add the key as needed.
+ - Adding a new key requires an apt cache update (e.g. using the M(ansible.builtin.apt) module's C(update_cache) option).
requirements:
- gpg
seealso:
@@ -42,7 +42,7 @@ options:
description:
- The identifier of the key.
- Including this allows check mode to correctly report the changed state.
- - If specifying a subkey's id be aware that apt-key does not understand how to remove keys via a subkey id. Specify the primary key's id instead.
+ - If specifying a subkey's id be aware that apt-key does not understand how to remove keys via a subkey id. Specify the primary key's id instead.
- This parameter is required when O(state) is set to V(absent).
type: str
data:
@@ -188,7 +188,7 @@ def lang_env(module):
if not hasattr(lang_env, 'result'):
locale = get_best_parsable_locale(module)
- lang_env.result = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
+ lang_env.result = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LANGUAGE=locale)
return lang_env.result
diff --git a/lib/ansible/modules/apt_repository.py b/lib/ansible/modules/apt_repository.py
index bea93340c6f..3fb027c0c32 100644
--- a/lib/ansible/modules/apt_repository.py
+++ b/lib/ansible/modules/apt_repository.py
@@ -41,13 +41,13 @@ options:
default: "present"
mode:
description:
- - The octal mode for newly created files in sources.list.d.
+ - The octal mode for newly created files in C(sources.list.d).
- Default is what system uses (probably 0644).
type: raw
version_added: "1.6"
update_cache:
description:
- - Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes.
+ - Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes.
type: bool
default: "yes"
aliases: [ update-cache ]
@@ -72,9 +72,9 @@ options:
version_added: '1.8'
filename:
description:
- - Sets the name of the source list file in sources.list.d.
+ - Sets the name of the source list file in C(sources.list.d).
Defaults to a file name based on the repository source url.
- The .list extension will be automatically added.
+ The C(.list) extension will be automatically added.
type: str
version_added: '2.1'
codename:
@@ -90,8 +90,8 @@ options:
Without this library, the module does not work.
- Runs C(apt-get install python-apt) for Python 2, and C(apt-get install python3-apt) for Python 3.
- Only works with the system Python 2 or Python 3. If you are using a Python on the remote that is not
- the system Python, set O(install_python_apt=false) and ensure that the Python apt library
- for your Python version is installed some other way.
+ the system Python, set O(install_python_apt=false) and ensure that the Python apt library
+ for your Python version is installed some other way.
type: bool
default: true
author:
@@ -174,12 +174,13 @@ import glob
import json
import os
import re
+import secrets
import sys
import tempfile
-import random
import time
from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.file import S_IRWU_RG_RO as DEFAULT_SOURCES_PERM
from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.urls import fetch_url
@@ -200,7 +201,6 @@ except ImportError:
HAVE_PYTHON_APT = False
APT_KEY_DIRS = ['/etc/apt/keyrings', '/etc/apt/trusted.gpg.d', '/usr/share/keyrings']
-DEFAULT_SOURCES_PERM = 0o0644
VALID_SOURCE_TYPES = ('deb', 'deb-src')
@@ -465,6 +465,7 @@ class UbuntuSourcesList(SourcesList):
# prefer api.launchpad.net over launchpad.net/api
# see: https://github.com/ansible/ansible/pull/81978#issuecomment-1767062178
LP_API = 'https://api.launchpad.net/1.0/~%s/+archive/%s'
+ PPA_URI = 'https://ppa.launchpadcontent.net'
def __init__(self, module):
self.module = module
@@ -496,14 +497,14 @@ class UbuntuSourcesList(SourcesList):
except IndexError:
ppa_name = 'ppa'
- line = 'deb http://ppa.launchpad.net/%s/%s/ubuntu %s main' % (ppa_owner, ppa_name, self.codename)
+ line = 'deb %s/%s/%s/ubuntu %s main' % (self.PPA_URI, ppa_owner, ppa_name, self.codename)
return line, ppa_owner, ppa_name
def _key_already_exists(self, key_fingerprint):
if self.apt_key_bin:
locale = get_best_parsable_locale(self.module)
- APT_ENV = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale)
+ APT_ENV = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale, LANGUAGE=locale)
self.module.run_command_environ_update = APT_ENV
rc, out, err = self.module.run_command([self.apt_key_bin, 'export', key_fingerprint], check_rc=True)
found = bool(not err or 'nothing exported' not in err)
@@ -737,29 +738,38 @@ def main():
if changed and not module.check_mode:
try:
+ err = ''
sourceslist.save()
if update_cache:
- err = ''
update_cache_retries = module.params.get('update_cache_retries')
update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
- randomize = random.randint(0, 1000) / 1000.0
+ randomize = secrets.randbelow(1000) / 1000.0
+ cache = apt.Cache()
for retry in range(update_cache_retries):
try:
- cache = apt.Cache()
cache.update()
break
- except apt.cache.FetchFailedException as e:
- err = to_native(e)
+ except apt.cache.FetchFailedException as fetch_failed_exc:
+ err = fetch_failed_exc
+ module.warn(
+ f"Failed to update cache after {retry + 1} retries due "
+ f"to {to_native(fetch_failed_exc)}, retrying"
+ )
# Use exponential backoff with a max fail count, plus a little bit of randomness
delay = 2 ** retry + randomize
if delay > update_cache_retry_max_delay:
delay = update_cache_retry_max_delay + randomize
time.sleep(delay)
+ module.warn(f"Sleeping for {int(round(delay))} seconds, before attempting to update the cache again")
else:
revert_sources_list(sources_before, sources_after, sourceslist_before)
- module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason'))
+ msg = (
+ f"Failed to update apt cache after {update_cache_retries} retries: "
+ f"{err if err else 'unknown reason'}"
+ )
+ module.fail_json(msg=msg)
except (OSError, IOError) as ex:
revert_sources_list(sources_before, sources_after, sourceslist_before)
diff --git a/lib/ansible/modules/assemble.py b/lib/ansible/modules/assemble.py
index 77c33bef104..bd8ddf6cfff 100644
--- a/lib/ansible/modules/assemble.py
+++ b/lib/ansible/modules/assemble.py
@@ -61,14 +61,14 @@ options:
type: str
ignore_hidden:
description:
- - A boolean that controls if files that start with a '.' will be included or not.
+ - A boolean that controls if files that start with a C(.) will be included or not.
type: bool
default: no
version_added: '2.0'
validate:
description:
- The validation command to run before copying into place.
- - The path to the file to validate is passed in via '%s' which must be present as in the sshd example below.
+ - The path to the file to validate is passed in by C(%s) which must be present as in the sshd example below.
- The command is passed securely so shell features like expansion and pipes won't work.
type: str
version_added: '2.0'
@@ -205,6 +205,11 @@ def main():
regexp=dict(type='str'),
ignore_hidden=dict(type='bool', default=False),
validate=dict(type='str'),
+
+ # Options that are for the action plugin, but ignored by the module itself.
+ # We have them here so that the tests pass without ignores, which
+ # reduces the likelihood of further bugs added.
+ decrypt=dict(type='bool', default=True),
),
add_file_common_args=True,
)
diff --git a/lib/ansible/modules/assert.py b/lib/ansible/modules/assert.py
index 420044203cf..af758a53c51 100644
--- a/lib/ansible/modules/assert.py
+++ b/lib/ansible/modules/assert.py
@@ -17,14 +17,14 @@ version_added: "1.5"
options:
that:
description:
- - A list of string expressions of the same form that can be passed to the 'when' statement.
+ - A list of string expressions of the same form that can be passed to the C(when) statement.
type: list
elements: str
required: true
fail_msg:
description:
- The customized message used for a failing assertion.
- - This argument was called 'msg' before Ansible 2.7, now it is renamed to 'fail_msg' with alias 'msg'.
+ - This argument was called O(msg) before Ansible 2.7, now it is renamed to O(fail_msg) with alias O(msg).
type: str
aliases: [ msg ]
version_added: "2.7"
@@ -85,7 +85,7 @@ EXAMPLES = r'''
- >
"reject" not in some_command_result.stderr
-- name: After version 2.7 both 'msg' and 'fail_msg' can customize failing assertion message
+- name: After version 2.7 both O(msg) and O(fail_msg) can customize failing assertion message
ansible.builtin.assert:
that:
- my_param <= 100
@@ -93,7 +93,7 @@ EXAMPLES = r'''
fail_msg: "'my_param' must be between 0 and 100"
success_msg: "'my_param' is between 0 and 100"
-- name: Please use 'msg' when ansible version is smaller than 2.7
+- name: Please use O(msg) when ansible version is smaller than 2.7
ansible.builtin.assert:
that:
- my_param <= 100
diff --git a/lib/ansible/modules/async_status.py b/lib/ansible/modules/async_status.py
index 9bcc3ef1255..e07143adb55 100644
--- a/lib/ansible/modules/async_status.py
+++ b/lib/ansible/modules/async_status.py
@@ -36,7 +36,8 @@ attributes:
async:
support: none
check_mode:
- support: none
+ support: full
+ version_added: '2.17'
diff_mode:
support: none
bypass_host_loop:
@@ -116,12 +117,15 @@ from ansible.module_utils.common.text.converters import to_native
def main():
- module = AnsibleModule(argument_spec=dict(
- jid=dict(type='str', required=True),
- mode=dict(type='str', default='status', choices=['cleanup', 'status']),
- # passed in from the async_status action plugin
- _async_dir=dict(type='path', required=True),
- ))
+ module = AnsibleModule(
+ argument_spec=dict(
+ jid=dict(type="str", required=True),
+ mode=dict(type="str", default="status", choices=["cleanup", "status"]),
+ # passed in from the async_status action plugin
+ _async_dir=dict(type="path", required=True),
+ ),
+ supports_check_mode=True,
+ )
mode = module.params['mode']
jid = module.params['jid']
diff --git a/lib/ansible/modules/blockinfile.py b/lib/ansible/modules/blockinfile.py
index 4da042bbd4a..602f0f0684d 100644
--- a/lib/ansible/modules/blockinfile.py
+++ b/lib/ansible/modules/blockinfile.py
@@ -33,7 +33,7 @@ options:
marker:
description:
- The marker line template.
- - C({mark}) will be replaced with the values in O(marker_begin) (default="BEGIN") and O(marker_end) (default="END").
+ - C({mark}) will be replaced with the values in O(marker_begin) (default=C(BEGIN)) and O(marker_end) (default=C(END)).
- Using a custom marker without the C({mark}) variable may result in the block being repeatedly inserted on subsequent playbook runs.
- Multi-line markers are not supported and will result in the block being repeatedly inserted on subsequent playbook runs.
- A newline is automatically appended by the module to O(marker_begin) and O(marker_end).
@@ -50,12 +50,10 @@ options:
description:
- If specified and no begin/ending O(marker) lines are found, the block will be inserted after the last match of specified regular expression.
- A special value is available; V(EOF) for inserting the block at the end of the file.
- - If specified regular expression has no matches, V(EOF) will be used instead.
+ - If specified regular expression has no matches or no value is passed, V(EOF) will be used instead.
- The presence of the multiline flag (?m) in the regular expression controls whether the match is done line by line or with multiple lines.
This behaviour was added in ansible-core 2.14.
type: str
- choices: [ EOF, '*regex*' ]
- default: EOF
insertbefore:
description:
- If specified and no begin/ending O(marker) lines are found, the block will be inserted before the last match of specified regular expression.
@@ -64,7 +62,6 @@ options:
- The presence of the multiline flag (?m) in the regular expression controls whether the match is done line by line or with multiple lines.
This behaviour was added in ansible-core 2.14.
type: str
- choices: [ BOF, '*regex*' ]
create:
description:
- Create a new file if it does not exist.
@@ -106,7 +103,7 @@ options:
default: no
version_added: '2.16'
notes:
- - When using 'with_*' loops be aware that if you do not set a unique mark the block will be overwritten on each iteration.
+ - When using C(with_*) loops be aware that if you do not set a unique mark the block will be overwritten on each iteration.
- As of Ansible 2.3, the O(dest) option has been changed to O(path) as default, but O(dest) still works as well.
- Option O(ignore:follow) has been removed in Ansible 2.5, because this module modifies the contents of the file
so O(ignore:follow=no) does not make sense.
@@ -268,7 +265,7 @@ def main():
module.fail_json(rc=257,
msg='Path %s does not exist !' % path)
destpath = os.path.dirname(path)
- if not os.path.exists(destpath) and not module.check_mode:
+ if destpath and not os.path.exists(destpath) and not module.check_mode:
try:
os.makedirs(destpath)
except OSError as e:
diff --git a/lib/ansible/modules/command.py b/lib/ansible/modules/command.py
index 4a3b8e133a0..42d9beeff4b 100644
--- a/lib/ansible/modules/command.py
+++ b/lib/ansible/modules/command.py
@@ -50,7 +50,7 @@ options:
free_form:
description:
- The command module takes a free form string as a command to run.
- - There is no actual parameter named 'free form'.
+ - There is no actual parameter named C(free_form).
cmd:
type: str
description:
diff --git a/lib/ansible/modules/copy.py b/lib/ansible/modules/copy.py
index 67558f076b6..bb9ea0493d3 100644
--- a/lib/ansible/modules/copy.py
+++ b/lib/ansible/modules/copy.py
@@ -28,8 +28,8 @@ options:
- Local path to a file to copy to the remote server.
- This can be absolute or relative.
- If path is a directory, it is copied recursively. In this case, if path ends
- with "/", only inside contents of that directory are copied to destination.
- Otherwise, if it does not end with "/", the directory itself with all contents
+ with C(/), only inside contents of that directory are copied to destination.
+ Otherwise, if it does not end with C(/), the directory itself with all contents
is copied. This behavior is similar to the C(rsync) command line tool.
type: path
content:
@@ -44,7 +44,7 @@ options:
description:
- Remote absolute path where the file should be copied to.
- If O(src) is a directory, this must be a directory too.
- - If O(dest) is a non-existent path and if either O(dest) ends with "/" or O(src) is a directory, O(dest) is created.
+ - If O(dest) is a non-existent path and if either O(dest) ends with C(/) or O(src) is a directory, O(dest) is created.
- If O(dest) is a relative path, the starting directory is determined by the remote host.
- If O(src) and O(dest) are files, the parent directory of O(dest) is not created and the task fails if it does not already exist.
type: path
@@ -92,7 +92,7 @@ options:
description:
- Influence whether O(src) needs to be transferred or already is present remotely.
- If V(false), it will search for O(src) on the controller node.
- - If V(true) it will search for O(src) on the managed (remote) node.
+ - If V(true), it will search for O(src) on the managed (remote) node.
- O(remote_src) supports recursive copying as of version 2.8.
- O(remote_src) only works with O(mode=preserve) as of version 2.6.
- Auto-decryption of files does not work when O(remote_src=yes).
@@ -109,7 +109,6 @@ options:
description:
- This flag indicates that filesystem links in the source tree, if they exist, should be followed.
type: bool
- default: yes
version_added: '2.4'
checksum:
description:
@@ -290,7 +289,6 @@ import filecmp
import grp
import os
import os.path
-import platform
import pwd
import shutil
import stat
@@ -299,12 +297,6 @@ import traceback
from ansible.module_utils.common.text.converters import to_bytes, to_native
from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.process import get_bin_path
-from ansible.module_utils.common.locale import get_best_parsable_locale
-
-
-# The AnsibleModule object
-module = None
class AnsibleModuleError(Exception):
@@ -312,21 +304,6 @@ class AnsibleModuleError(Exception):
self.results = results
-# Once we get run_command moved into common, we can move this into a common/files module. We can't
-# until then because of the module.run_command() method. We may need to move it into
-# basic::AnsibleModule() until then but if so, make it a private function so that we don't have to
-# keep it for backwards compatibility later.
-def clear_facls(path):
- setfacl = get_bin_path('setfacl')
- # FIXME "setfacl -b" is available on Linux and FreeBSD. There is "setfacl -D e" on z/OS. Others?
- acl_command = [setfacl, '-b', path]
- b_acl_command = [to_bytes(x) for x in acl_command]
- locale = get_best_parsable_locale(module)
- rc, out, err = module.run_command(b_acl_command, environ_update=dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale))
- if rc != 0:
- raise RuntimeError('Error running "{0}": stdout: "{1}"; stderr: "{2}"'.format(' '.join(b_acl_command), out, err))
-
-
def split_pre_existing_dir(dirname):
'''
Return the first pre-existing directory and a list of the new directories that will be created.
@@ -527,8 +504,6 @@ def copy_common_dirs(src, dest, module):
def main():
- global module
-
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec=dict(
@@ -540,7 +515,7 @@ def main():
force=dict(type='bool', default=True),
validate=dict(type='str'),
directory_mode=dict(type='raw'),
- remote_src=dict(type='bool'),
+ remote_src=dict(type='bool', default=False),
local_follow=dict(type='bool'),
checksum=dict(type='str'),
follow=dict(type='bool', default=False),
@@ -703,54 +678,8 @@ def main():
else:
raise
- # might be needed below
- if hasattr(os, 'listxattr'):
- try:
- src_has_acls = 'system.posix_acl_access' in os.listxattr(src)
- except Exception as e:
- # assume unwanted ACLs by default
- src_has_acls = True
-
# at this point we should always have tmp file
- module.atomic_move(b_mysrc, dest, unsafe_writes=module.params['unsafe_writes'])
-
- if hasattr(os, 'listxattr') and platform.system() == 'Linux' and not remote_src:
- # atomic_move used above to copy src into dest might, in some cases,
- # use shutil.copy2 which in turn uses shutil.copystat.
- # Since Python 3.3, shutil.copystat copies file extended attributes:
- # https://docs.python.org/3/library/shutil.html#shutil.copystat
- # os.listxattr (along with others) was added to handle the operation.
-
- # This means that on Python 3 we are copying the extended attributes which includes
- # the ACLs on some systems - further limited to Linux as the documentation above claims
- # that the extended attributes are copied only on Linux. Also, os.listxattr is only
- # available on Linux.
-
- # If not remote_src, then the file was copied from the controller. In that
- # case, any filesystem ACLs are artifacts of the copy rather than preservation
- # of existing attributes. Get rid of them:
-
- if src_has_acls:
- # FIXME If dest has any default ACLs, there are not applied to src now because
- # they were overridden by copystat. Should/can we do anything about this?
- # 'system.posix_acl_default' in os.listxattr(os.path.dirname(b_dest))
-
- try:
- clear_facls(dest)
- except ValueError as e:
- if 'setfacl' in to_native(e):
- # No setfacl so we're okay. The controller couldn't have set a facl
- # without the setfacl command
- pass
- else:
- raise
- except RuntimeError as e:
- # setfacl failed.
- if 'Operation not supported' in to_native(e):
- # The file system does not support ACLs.
- pass
- else:
- raise
+ module.atomic_move(b_mysrc, dest, unsafe_writes=module.params['unsafe_writes'], keep_dest_attrs=not remote_src)
except (IOError, OSError):
module.fail_json(msg="failed to copy: %s to %s" % (src, dest), traceback=traceback.format_exc())
diff --git a/lib/ansible/modules/cron.py b/lib/ansible/modules/cron.py
index 0aa497968be..dcd350159cc 100644
--- a/lib/ansible/modules/cron.py
+++ b/lib/ansible/modules/cron.py
@@ -18,19 +18,19 @@ description:
- Use this module to manage crontab and environment variables entries. This module allows
you to create environment variables and named crontab entries, update, or delete them.
- 'When crontab jobs are managed: the module includes one line with the description of the
- crontab entry C("#Ansible: ") corresponding to the "name" passed to the module,
- which is used by future ansible/module calls to find/check the state. The "name"
- parameter should be unique, and changing the "name" value will result in a new cron
+ crontab entry C("#Ansible: ") corresponding to the O(name) passed to the module,
+ which is used by future ansible/module calls to find/check the state. The O(name)
+ parameter should be unique, and changing the O(name) value will result in a new cron
task being created (or a different one being removed).'
- When environment variables are managed, no comment line is added, but, when the module
- needs to find/check the state, it uses the "name" parameter to find the environment
+ needs to find/check the state, it uses the O(name) parameter to find the environment
variable definition line.
- - When using symbols such as %, they must be properly escaped.
+ - When using symbols such as C(%), they must be properly escaped.
version_added: "0.9"
options:
name:
description:
- - Description of a crontab entry or, if env is set, the name of environment variable.
+ - Description of a crontab entry or, if O(env) is set, the name of environment variable.
- This parameter is always required as of ansible-core 2.12.
type: str
required: yes
@@ -41,7 +41,7 @@ options:
type: str
job:
description:
- - The command to execute or, if env is set, the value of environment variable.
+ - The command to execute or, if O(env) is set, the value of environment variable.
- The command should not contain line breaks.
- Required if O(state=present).
type: str
@@ -58,10 +58,10 @@ options:
The assumption is that this file is exclusively managed by the module,
do not use if the file contains multiple entries, NEVER use for /etc/crontab.
- If this is a relative path, it is interpreted with respect to C(/etc/cron.d).
- - Many linux distros expect (and some require) the filename portion to consist solely
+ - Many Linux distros expect (and some require) the filename portion to consist solely
of upper- and lower-case letters, digits, underscores, and hyphens.
- - Using this parameter requires you to specify the O(user) as well, unless O(state) is not V(present).
- - Either this parameter or O(name) is required
+ - Using this parameter requires you to specify the O(user) as well, unless O(state=absent).
+ - Either this parameter or O(name) is required.
type: path
backup:
description:
@@ -131,6 +131,9 @@ options:
version_added: "2.1"
requirements:
- cron (any 'vixie cron' conformant variant, like cronie)
+notes:
+ - If you are experiencing permissions issues with cron and macOS,
+ you should see the official macOS documentation for further information.
author:
- Dane Summers (@dsummersl)
- Mike Grozak (@rhaido)
@@ -214,6 +217,7 @@ import sys
import tempfile
from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.file import S_IRWU_RWG_RWO
from ansible.module_utils.common.text.converters import to_bytes, to_native
from ansible.module_utils.six.moves import shlex_quote
@@ -307,7 +311,7 @@ class CronTab(object):
fileh = open(self.b_cron_file, 'wb')
else:
filed, path = tempfile.mkstemp(prefix='crontab')
- os.chmod(path, int('0644', 8))
+ os.chmod(path, S_IRWU_RWG_RWO)
fileh = os.fdopen(filed, 'wb')
fileh.write(to_bytes(self.render()))
@@ -324,7 +328,7 @@ class CronTab(object):
os.unlink(path)
if rc != 0:
- self.module.fail_json(msg=err)
+ self.module.fail_json(msg=f"Failed to install new cronfile: {path}", stderr=err, stdout=out, rc=rc)
# set SELinux permissions
if self.module.selinux_enabled() and self.cron_file:
diff --git a/lib/ansible/modules/deb822_repository.py b/lib/ansible/modules/deb822_repository.py
index 9334c451bd3..0fa33c73d70 100644
--- a/lib/ansible/modules/deb822_repository.py
+++ b/lib/ansible/modules/deb822_repository.py
@@ -8,28 +8,28 @@ DOCUMENTATION = '''
author: 'Ansible Core Team (@ansible)'
short_description: 'Add and remove deb822 formatted repositories'
description:
-- 'Add and remove deb822 formatted repositories in Debian based distributions'
+- 'Add and remove deb822 formatted repositories in Debian based distributions.'
module: deb822_repository
notes:
-- This module will not automatically update caches, call the apt module based
+- This module will not automatically update caches, call the M(ansible.builtin.apt) module based
on the changed state.
options:
allow_downgrade_to_insecure:
description:
- Allow downgrading a package that was previously authenticated but
- is no longer authenticated
+ is no longer authenticated.
type: bool
allow_insecure:
description:
- - Allow insecure repositories
+ - Allow insecure repositories.
type: bool
allow_weak:
description:
- - Allow repositories signed with a key using a weak digest algorithm
+ - Allow repositories signed with a key using a weak digest algorithm.
type: bool
architectures:
description:
- - 'Architectures to search within repository'
+ - Architectures to search within repository.
type: list
elements: str
by_hash:
@@ -51,7 +51,7 @@ options:
components:
description:
- Components specify different sections of one distribution version
- present in a Suite.
+ present in a C(Suite).
type: list
elements: str
date_max_future:
@@ -64,8 +64,8 @@ options:
type: bool
inrelease_path:
description:
- - Determines the path to the InRelease file, relative to the normal
- position of an InRelease file.
+ - Determines the path to the C(InRelease) file, relative to the normal
+ position of an C(InRelease) file.
type: str
languages:
description:
@@ -81,8 +81,8 @@ options:
type: str
pdiffs:
description:
- - Controls if APT should try to use PDiffs to update old indexes
- instead of downloading the new indexes entirely
+ - Controls if APT should try to use C(PDiffs) to update old indexes
+ instead of downloading the new indexes entirely.
type: bool
signed_by:
description:
@@ -97,21 +97,20 @@ options:
Suite can specify an exact path in relation to the URI(s) provided,
in which case the Components: must be omitted and suite must end
with a slash (C(/)). Alternatively, it may take the form of a
- distribution version (e.g. a version codename like disco or artful).
+ distribution version (for example a version codename like C(disco) or C(artful)).
If the suite does not specify a path, at least one component must
be present.
type: list
elements: str
targets:
description:
- - Defines which download targets apt will try to acquire from this
- source.
+ - Defines which download targets apt will try to acquire from this source.
type: list
elements: str
trusted:
description:
- Decides if a source is considered trusted or if warnings should be
- raised before e.g. packages are installed from this source.
+ raised before, for example packages are installed from this source.
type: bool
types:
choices:
@@ -123,7 +122,7 @@ options:
elements: str
description:
- Which types of packages to look for from a given source; either
- binary V(deb) or source code V(deb-src)
+ binary V(deb) or source code V(deb-src).
uris:
description:
- The URIs must specify the base of the Debian distribution archive,
@@ -132,7 +131,7 @@ options:
elements: str
mode:
description:
- - The octal mode for newly created files in sources.list.d.
+ - The octal mode for newly created files in C(sources.list.d).
type: raw
default: '0644'
state:
@@ -236,6 +235,7 @@ import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.common.collections import is_sequence
+from ansible.module_utils.common.file import S_IRWXU_RXG_RXO, S_IRWU_RG_RO
from ansible.module_utils.common.text.converters import to_bytes
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six import raise_from # type: ignore[attr-defined]
@@ -259,7 +259,7 @@ def ensure_keyrings_dir(module):
changed = False
if not os.path.isdir(KEYRINGS_DIR):
if not module.check_mode:
- os.mkdir(KEYRINGS_DIR, 0o755)
+ os.mkdir(KEYRINGS_DIR, S_IRWXU_RXG_RXO)
changed |= True
changed |= module.set_fs_attributes_if_different(
@@ -353,7 +353,7 @@ def write_signed_by_key(module, v, slug):
module.atomic_move(tmpfile, filename)
changed |= True
- changed |= module.set_mode_if_different(filename, 0o0644, False)
+ changed |= module.set_mode_if_different(filename, S_IRWU_RG_RO, False)
return changed, filename, None
diff --git a/lib/ansible/modules/debconf.py b/lib/ansible/modules/debconf.py
index 994bcda1638..7a32c4b0e86 100644
--- a/lib/ansible/modules/debconf.py
+++ b/lib/ansible/modules/debconf.py
@@ -29,24 +29,24 @@ notes:
- Several questions have to be answered (depending on the package).
Use 'debconf-show ' on any Debian or derivative with the package
installed to see questions/settings available.
- - Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
+ - Some distros will always record tasks involving the setting of passwords as changed. This is due to C(debconf-get-selections) masking passwords.
- It is highly recommended to add C(no_log=True) to the task while handling sensitive information using this module.
- - The debconf module does not reconfigure packages, it just updates the debconf database.
+ - The M(ansible.builtin.debconf) module does not reconfigure packages, it just updates the debconf database.
An additional step is needed (typically with C(notify) if debconf makes a change)
to reconfigure the package and apply the changes.
- debconf is extensively used for pre-seeding configuration prior to installation
+ C(debconf) is extensively used for pre-seeding configuration prior to installation
rather than modifying configurations.
- So, while dpkg-reconfigure does use debconf data, it is not always authoritative
+ So, while C(dpkg-reconfigure) does use debconf data, it is not always authoritative
and you may need to check how your package is handled.
- - Also note dpkg-reconfigure is a 3-phase process. It invokes the
+ - Also note C(dpkg-reconfigure) is a 3-phase process. It invokes the
control scripts from the C(/var/lib/dpkg/info) directory with the
C(.prerm reconfigure ),
C(.config reconfigure ) and C(.postinst control ) arguments.
- The main issue is that the C(.config reconfigure) step for many packages
will first reset the debconf database (overriding changes made by this module) by
checking the on-disk configuration. If this is the case for your package then
- dpkg-reconfigure will effectively ignore changes made by debconf.
- - However as dpkg-reconfigure only executes the C(.config) step if the file
+ C(dpkg-reconfigure) will effectively ignore changes made by debconf.
+ - However as C(dpkg-reconfigure) only executes the C(.config) step if the file
exists, it is possible to rename it to C(/var/lib/dpkg/info/.config.ignore)
before executing C(dpkg-reconfigure -f noninteractive ) and then restore it.
This seems to be compliant with Debian policy for the .config file.
@@ -70,16 +70,18 @@ options:
- The type of the value supplied.
- It is highly recommended to add C(no_log=True) to task while specifying O(vtype=password).
- V(seen) was added in Ansible 2.2.
+ - After Ansible 2.17, the user can specify C(value) as a list if C(vtype) is set to V(multiselect).
type: str
choices: [ boolean, error, multiselect, note, password, seen, select, string, text, title ]
value:
description:
- - Value to set the configuration to.
- type: str
+ - Value to set the configuration to.
+ - After Ansible 2.17, C(value) is of type C(raw).
+ type: raw
aliases: [ answer ]
unseen:
description:
- - Do not set 'seen' flag when pre-seeding.
+ - Do not set C(seen) flag when pre-seeding.
type: bool
default: false
author:
@@ -123,7 +125,7 @@ EXAMPLES = r'''
RETURN = r'''#'''
-from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.common.text.converters import to_text, to_native
from ansible.module_utils.basic import AnsibleModule
@@ -132,21 +134,24 @@ def get_password_value(module, pkg, question, vtype):
cmd = [getsel]
rc, out, err = module.run_command(cmd)
if rc != 0:
- module.fail_json(msg="Failed to get the value '%s' from '%s'" % (question, pkg))
+ module.fail_json(msg=f"Failed to get the value '{question}' from '{pkg}': {err}")
- desired_line = None
for line in out.split("\n"):
- if line.startswith(pkg):
- desired_line = line
- break
-
- if not desired_line:
- module.fail_json(msg="Failed to find the value '%s' from '%s'" % (question, pkg))
-
- (dpkg, dquestion, dvtype, dvalue) = desired_line.split()
- if dquestion == question and dvtype == vtype:
- return dvalue
- return ''
+ if not line.startswith(pkg):
+ continue
+
+ # line is a collection of tab separated values
+ fields = line.split('\t')
+ if len(fields) <= 3:
+ # No password found, return a blank password
+ return ''
+ try:
+ if fields[1] == question and fields[2] == vtype:
+ # If correct question and question type found, return password value
+ return fields[3]
+ except IndexError:
+ # Fail safe
+ return ''
def get_selections(module, pkg):
@@ -171,8 +176,6 @@ def set_selection(module, pkg, question, vtype, value, unseen):
if unseen:
cmd.append('-u')
- if vtype == 'boolean':
- value = value.lower()
data = ' '.join([pkg, question, vtype, value])
return module.run_command(cmd, data=data)
@@ -184,7 +187,7 @@ def main():
name=dict(type='str', required=True, aliases=['pkg']),
question=dict(type='str', aliases=['selection', 'setting']),
vtype=dict(type='str', choices=['boolean', 'error', 'multiselect', 'note', 'password', 'seen', 'select', 'string', 'text', 'title']),
- value=dict(type='str', aliases=['answer']),
+ value=dict(type='raw', aliases=['answer']),
unseen=dict(type='bool', default=False),
),
required_together=(['question', 'vtype', 'value'],),
@@ -207,25 +210,37 @@ def main():
if vtype is None or value is None:
module.fail_json(msg="when supplying a question you must supply a valid vtype and value")
+ # ensure we compare booleans supplied to the way debconf sees them (true/false strings)
+ if vtype == 'boolean':
+ value = to_text(value).lower()
+
# if question doesn't exist, value cannot match
if question not in prev:
changed = True
else:
existing = prev[question]
- # ensure we compare booleans supplied to the way debconf sees them (true/false strings)
if vtype == 'boolean':
- value = to_text(value).lower()
existing = to_text(prev[question]).lower()
-
- if vtype == 'password':
+ elif vtype == 'password':
existing = get_password_value(module, pkg, question, vtype)
+ elif vtype == 'multiselect' and isinstance(value, list):
+ try:
+ value = sorted(value)
+ except TypeError as exc:
+ module.fail_json(msg="Invalid value provided for 'multiselect': %s" % to_native(exc))
+ existing = sorted([i.strip() for i in existing.split(",")])
if value != existing:
changed = True
if changed:
if not module.check_mode:
+ if vtype == 'multiselect' and isinstance(value, list):
+ try:
+ value = ", ".join(value)
+ except TypeError as exc:
+ module.fail_json(msg="Invalid value provided for 'multiselect': %s" % to_native(exc))
rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen)
if rc:
module.fail_json(msg=e)
diff --git a/lib/ansible/modules/debug.py b/lib/ansible/modules/debug.py
index cdaf118cd2d..325d2541c2c 100644
--- a/lib/ansible/modules/debug.py
+++ b/lib/ansible/modules/debug.py
@@ -14,7 +14,7 @@ description:
- This module prints statements during execution and can be useful
for debugging variables or expressions without necessarily halting
the playbook.
-- Useful for debugging together with the 'when:' directive.
+- Useful for debugging together with the C(when:) directive.
- This module is also supported for Windows targets.
version_added: '0.8'
options:
diff --git a/lib/ansible/modules/dnf.py b/lib/ansible/modules/dnf.py
index aa949879155..c9ddbb5ae5e 100644
--- a/lib/ansible/modules/dnf.py
+++ b/lib/ansible/modules/dnf.py
@@ -19,9 +19,15 @@ description:
options:
use_backend:
description:
- - By default, this module will select the backend based on the C(ansible_pkg_mgr) fact.
+ - Backend module to use.
default: "auto"
- choices: [ auto, yum, yum4, dnf4, dnf5 ]
+ choices:
+ auto: Automatically select the backend based on the C(ansible_facts.pkg_mgr) fact.
+ yum: Alias for V(auto) (see Notes)
+ dnf: M(ansible.builtin.dnf)
+ yum4: Alias for V(dnf)
+ dnf4: Alias for V(dnf)
+ dnf5: M(ansible.builtin.dnf5)
type: str
version_added: 2.15
name:
@@ -49,14 +55,14 @@ options:
state:
description:
- Whether to install (V(present), V(latest)), or remove (V(absent)) a package.
- - Default is V(None), however in effect the default action is V(present) unless the O(autoremove) option is
- enabled for this module, then V(absent) is inferred.
+ - Default is V(None), however in effect the default action is V(present) unless the O(autoremove=true),
+ then V(absent) is inferred.
choices: ['absent', 'present', 'installed', 'removed', 'latest']
type: str
enablerepo:
description:
- - I(Repoid) of repositories to enable for the install/update operation.
+ - C(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
type: list
@@ -65,9 +71,9 @@ options:
disablerepo:
description:
- - I(Repoid) of repositories to disable for the install/update operation.
+ - C(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
- When specifying multiple repos, separate them with a ",".
+ When specifying multiple repos, separate them with a C(,).
type: list
elements: str
default: []
@@ -80,7 +86,7 @@ options:
disable_gpg_check:
description:
- Whether to disable the GPG checking of signatures of packages being
- installed. Has an effect only if O(state) is V(present) or V(latest).
+ installed. Has an effect only if O(state=present) or O(state=latest).
- This setting affects packages installed from a repository as well as
"local" packages installed from the filesystem or a URL.
type: bool
@@ -105,13 +111,13 @@ options:
description:
- If V(true), removes all "leaf" packages from the system that were originally
installed as dependencies of user-installed packages but which are no longer
- required by any such package. Should be used alone or when O(state) is V(absent)
+ required by any such package. Should be used alone or when O(state=absent).
type: bool
default: "no"
version_added: "2.4"
exclude:
description:
- - Package name(s) to exclude when state=present, or latest. This can be a
+ - Package name(s) to exclude when O(state=present), or latest. This can be a
list or a comma separated string.
version_added: "2.7"
type: list
@@ -120,14 +126,14 @@ options:
skip_broken:
description:
- Skip all unavailable packages or packages with broken dependencies
- without raising an error. Equivalent to passing the --skip-broken option.
+ without raising an error. Equivalent to passing the C(--skip-broken) option.
type: bool
default: "no"
version_added: "2.7"
update_cache:
description:
- Force dnf to check if cache is out of date and redownload if needed.
- Has an effect only if O(state) is V(present) or V(latest).
+ Has an effect only if O(state=present) or O(state=latest).
type: bool
default: "no"
aliases: [ expire-cache ]
@@ -135,7 +141,7 @@ options:
update_only:
description:
- When using latest, only update installed packages. Do not install packages.
- - Has an effect only if O(state) is V(latest)
+ - Has an effect only if O(state=latest).
default: "no"
type: bool
version_added: "2.7"
@@ -155,7 +161,7 @@ options:
version_added: "2.7"
enable_plugin:
description:
- - I(Plugin) name to enable for the install/update operation.
+ - C(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
version_added: "2.7"
type: list
@@ -163,7 +169,7 @@ options:
default: []
disable_plugin:
description:
- - I(Plugin) name to disable for the install/update operation.
+ - C(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction.
version_added: "2.7"
type: list
@@ -173,13 +179,14 @@ options:
description:
- Disable the excludes defined in DNF config files.
- If set to V(all), disables all excludes.
- - If set to V(main), disable excludes defined in [main] in dnf.conf.
+ - If set to V(main), disable excludes defined in C([main]) in C(dnf.conf).
- If set to V(repoid), disable excludes defined for given repo id.
version_added: "2.7"
type: str
validate_certs:
description:
- - This only applies if using a https url as the source of the rpm. e.g. for localinstall. If set to V(false), the SSL certificates will not be validated.
+ - This only applies if using a https url as the source of the rpm. For example, for localinstall.
+ If set to V(false), the SSL certificates will not be validated.
- This should only set to V(false) used on personally controlled sites using self-signed certificates as it avoids verifying the source site.
type: bool
default: "yes"
@@ -195,7 +202,7 @@ options:
description:
- Specify if the named package and version is allowed to downgrade
a maybe already installed higher version of that package.
- Note that setting allow_downgrade=True can make this module
+ Note that setting O(allow_downgrade=true) can make this module
behave in a non-idempotent way. The task could end up with a set
of packages that does not match the complete list of specified
packages to install (because dependencies between the downgraded
@@ -238,18 +245,26 @@ options:
version_added: "2.8"
allowerasing:
description:
- - If V(true) it allows erasing of installed packages to resolve dependencies.
+ - If V(true) it allows erasing of installed packages to resolve dependencies.
required: false
type: bool
default: "no"
version_added: "2.10"
nobest:
description:
- - Set best option to False, so that transactions are not limited to best candidates only.
+ - This is the opposite of the O(best) option, kept for backwards compatibility.
+ - Since ansible-core 2.17 the default value is set by the operating system distribution.
required: false
type: bool
- default: "no"
version_added: "2.11"
+ best:
+ description:
+ - When set to V(true), either use a package with the highest version available or fail.
+ - When set to V(false), if the latest version cannot be installed go with the lower version.
+ - Default is set by the operating system distribution.
+ required: false
+ type: bool
+ version_added: "2.17"
cacheonly:
description:
- Tells dnf to run entirely from system cache; does not download or update metadata.
@@ -274,15 +289,18 @@ attributes:
platform:
platforms: rhel
notes:
- - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the I(name) option.
+ - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name) option.
- Group removal doesn't work if the group was installed with Ansible because
upstream dnf's API doesn't properly mark groups as installed, therefore upon
removal the module is unable to detect that the group is installed
- (https://bugzilla.redhat.com/show_bug.cgi?id=1620324)
+ U(https://bugzilla.redhat.com/show_bug.cgi?id=1620324).
+ - While O(use_backend=yum) and the ability to call the action plugin as
+ M(ansible.builtin.yum) are provided for syntax compatibility, the YUM
+ backend was removed in ansible-core 2.17 because the required libraries are
+ not available for any supported version of Python. If you rely on this
+ functionality, use an older version of Ansible.
requirements:
- - "python >= 2.6"
- - python-dnf
- - for the autoremove option you need dnf >= 2.0.1"
+ - python3-dnf
author:
- Igor Gnatenko (@ignatenkobrain)
- Cristian van Ee (@DJMuggs)
@@ -379,12 +397,10 @@ EXAMPLES = '''
'''
import os
-import re
import sys
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.urls import fetch_file
-from ansible.module_utils.compat.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
@@ -410,11 +426,7 @@ class DnfModule(YumDnf):
self._ensure_dnf()
self.pkg_mgr_name = "dnf"
-
- try:
- self.with_modules = dnf.base.WITH_MODULES
- except AttributeError:
- self.with_modules = False
+ self.with_modules = dnf.base.WITH_MODULES
def _sanitize_dnf_error_msg_install(self, spec, error):
"""
@@ -429,22 +441,6 @@ class DnfModule(YumDnf):
return error
- def _sanitize_dnf_error_msg_remove(self, spec, error):
- """
- For unhandled dnf.exceptions.Error scenarios, there are certain error
- messages we want to ignore in a removal scenario as known benign
- failures. Do that here.
- """
- if (
- 'no package matched' in to_native(error) or
- 'No match for argument:' in to_native(error)
- ):
- return (False, "{0} is not installed".format(spec))
-
- # Return value is tuple of:
- # ("Is this actually a failure?", "Error Message")
- return (True, error)
-
def _package_dict(self, package):
"""Return a dictionary of information for the package."""
# NOTE: This no longer contains the 'dnfstate' field because it is
@@ -471,94 +467,6 @@ class DnfModule(YumDnf):
return result
- def _split_package_arch(self, packagename):
- # This list was auto generated on a Fedora 28 system with the following one-liner
- # printf '[ '; for arch in $(ls /usr/lib/rpm/platform); do printf '"%s", ' ${arch%-linux}; done; printf ']\n'
- redhat_rpm_arches = [
- "aarch64", "alphaev56", "alphaev5", "alphaev67", "alphaev6", "alpha",
- "alphapca56", "amd64", "armv3l", "armv4b", "armv4l", "armv5tejl", "armv5tel",
- "armv5tl", "armv6hl", "armv6l", "armv7hl", "armv7hnl", "armv7l", "athlon",
- "geode", "i386", "i486", "i586", "i686", "ia32e", "ia64", "m68k", "mips64el",
- "mips64", "mips64r6el", "mips64r6", "mipsel", "mips", "mipsr6el", "mipsr6",
- "noarch", "pentium3", "pentium4", "ppc32dy4", "ppc64iseries", "ppc64le", "ppc64",
- "ppc64p7", "ppc64pseries", "ppc8260", "ppc8560", "ppciseries", "ppc", "ppcpseries",
- "riscv64", "s390", "s390x", "sh3", "sh4a", "sh4", "sh", "sparc64", "sparc64v",
- "sparc", "sparcv8", "sparcv9", "sparcv9v", "x86_64"
- ]
-
- name, delimiter, arch = packagename.rpartition('.')
- if name and arch and arch in redhat_rpm_arches:
- return name, arch
- return packagename, None
-
- def _packagename_dict(self, packagename):
- """
- Return a dictionary of information for a package name string or None
- if the package name doesn't contain at least all NVR elements
- """
-
- if packagename[-4:] == '.rpm':
- packagename = packagename[:-4]
-
- rpm_nevr_re = re.compile(r'(\S+)-(?:(\d*):)?(.*)-(~?\w+[\w.+]*)')
- try:
- arch = None
- nevr, arch = self._split_package_arch(packagename)
- if arch:
- packagename = nevr
- rpm_nevr_match = rpm_nevr_re.match(packagename)
- if rpm_nevr_match:
- name, epoch, version, release = rpm_nevr_re.match(packagename).groups()
- if not version or not version.split('.')[0].isdigit():
- return None
- else:
- return None
- except AttributeError as e:
- self.module.fail_json(
- msg='Error attempting to parse package: %s, %s' % (packagename, to_native(e)),
- rc=1,
- results=[]
- )
-
- if not epoch:
- epoch = "0"
-
- if ':' in name:
- epoch_name = name.split(":")
-
- epoch = epoch_name[0]
- name = ''.join(epoch_name[1:])
-
- result = {
- 'name': name,
- 'epoch': epoch,
- 'release': release,
- 'version': version,
- }
-
- return result
-
- # Original implementation from yum.rpmUtils.miscutils (GPLv2+)
- # http://yum.baseurl.org/gitweb?p=yum.git;a=blob;f=rpmUtils/miscutils.py
- def _compare_evr(self, e1, v1, r1, e2, v2, r2):
- # return 1: a is newer than b
- # 0: a and b are the same version
- # -1: b is newer than a
- if e1 is None:
- e1 = '0'
- else:
- e1 = str(e1)
- v1 = str(v1)
- r1 = str(r1)
- if e2 is None:
- e2 = '0'
- else:
- e2 = str(e2)
- v2 = str(v2)
- r2 = str(r2)
- rc = dnf.rpm.rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
- return rc
-
def _ensure_dnf(self):
locale = get_best_parsable_locale(self.module)
os.environ['LC_ALL'] = os.environ['LC_MESSAGES'] = locale
@@ -567,7 +475,6 @@ class DnfModule(YumDnf):
global dnf
try:
import dnf
- import dnf.cli
import dnf.const
import dnf.exceptions
import dnf.package
@@ -582,7 +489,6 @@ class DnfModule(YumDnf):
system_interpreters = ['/usr/libexec/platform-python',
'/usr/bin/python3',
- '/usr/bin/python2',
'/usr/bin/python']
if not has_respawned():
@@ -597,7 +503,7 @@ class DnfModule(YumDnf):
# done all we can do, something is just broken (auto-install isn't useful anymore with respawn, so it was removed)
self.module.fail_json(
msg="Could not import the dnf python module using {0} ({1}). "
- "Please install `python3-dnf` or `python2-dnf` package or ensure you have specified the "
+ "Please install `python3-dnf` package or ensure you have specified the "
"correct ansible_python_interpreter. (attempted {2})"
.format(sys.executable, sys.version.replace('\n', ''), system_interpreters),
results=[]
@@ -674,13 +580,20 @@ class DnfModule(YumDnf):
# setting this to an empty string instead of None appears to mimic the DNF CLI behavior
conf.substitutions['releasever'] = ''
+ # Honor installroot for dnf directories
+ # This will also perform variable substitutions in the paths
+ for opt in ('cachedir', 'logdir', 'persistdir'):
+ conf.prepend_installroot(opt)
+
# Set skip_broken (in dnf this is strict=0)
if self.skip_broken:
conf.strict = 0
- # Set best
- if self.nobest:
- conf.best = 0
+ # best and nobest are mutually exclusive
+ if self.nobest is not None:
+ conf.best = not self.nobest
+ elif self.best is not None:
+ conf.best = self.best
if self.download_only:
conf.downloadonly = True
@@ -722,22 +635,14 @@ class DnfModule(YumDnf):
"""Return a fully configured dnf Base object."""
base = dnf.Base()
self._configure_base(base, conf_file, disable_gpg_check, installroot, sslverify)
- try:
- # this method has been supported in dnf-4.2.17-6 or later
- # https://bugzilla.redhat.com/show_bug.cgi?id=1788212
- base.setup_loggers()
- except AttributeError:
- pass
- try:
- base.init_plugins(set(self.disable_plugin), set(self.enable_plugin))
- base.pre_configure_plugins()
- except AttributeError:
- pass # older versions of dnf didn't require this and don't have these methods
+
+ base.setup_loggers()
+ base.init_plugins(set(self.disable_plugin), set(self.enable_plugin))
+ base.pre_configure_plugins()
+
self._specify_repositories(base, disablerepo, enablerepo)
- try:
- base.configure_plugins()
- except AttributeError:
- pass # older versions of dnf didn't require this and don't have these methods
+
+ base.configure_plugins()
try:
if self.update_cache:
@@ -803,42 +708,33 @@ class DnfModule(YumDnf):
self.module.exit_json(msg="", results=results)
def _is_installed(self, pkg):
- installed = self.base.sack.query().installed()
-
- package_spec = {}
- name, arch = self._split_package_arch(pkg)
- if arch:
- package_spec['arch'] = arch
-
- package_details = self._packagename_dict(pkg)
- if package_details:
- package_details['epoch'] = int(package_details['epoch'])
- package_spec.update(package_details)
- else:
- package_spec['name'] = name
-
- return bool(installed.filter(**package_spec))
-
- def _is_newer_version_installed(self, pkg_name):
- candidate_pkg = self._packagename_dict(pkg_name)
- if not candidate_pkg:
- # The user didn't provide a versioned rpm, so version checking is
- # not required
- return False
-
- installed = self.base.sack.query().installed()
- installed_pkg = installed.filter(name=candidate_pkg['name']).run()
- if installed_pkg:
- installed_pkg = installed_pkg[0]
-
- # this looks weird but one is a dict and the other is a dnf.Package
- evr_cmp = self._compare_evr(
- installed_pkg.epoch, installed_pkg.version, installed_pkg.release,
- candidate_pkg['epoch'], candidate_pkg['version'], candidate_pkg['release'],
+ installed_query = dnf.subject.Subject(pkg).get_best_query(sack=self.base.sack).installed()
+ if dnf.util.is_glob_pattern(pkg):
+ available_query = dnf.subject.Subject(pkg).get_best_query(sack=self.base.sack).available()
+ return not (
+ {p.name for p in available_query} - {p.name for p in installed_query}
)
-
- return evr_cmp == 1
else:
+ return bool(installed_query)
+
+ def _is_newer_version_installed(self, pkg_spec):
+ try:
+ if isinstance(pkg_spec, dnf.package.Package):
+ installed = sorted(self.base.sack.query().installed().filter(name=pkg_spec.name, arch=pkg_spec.arch))[-1]
+ return installed.evr_gt(pkg_spec)
+ else:
+ available = dnf.subject.Subject(pkg_spec).get_best_query(sack=self.base.sack).available()
+ installed = self.base.sack.query().installed().filter(name=available[0].name)
+ for arch in sorted(set(p.arch for p in installed)): # select only from already-installed arches for this case
+ installed_pkg = sorted(installed.filter(arch=arch))[-1]
+ try:
+ available_pkg = sorted(available.filter(arch=arch))[-1]
+ except IndexError:
+ continue # nothing currently available for this arch; keep going
+ if installed_pkg.evr_gt(available_pkg):
+ return True
+ return False
+ except IndexError:
return False
def _mark_package_install(self, pkg_spec, upgrade=False):
@@ -898,30 +794,16 @@ class DnfModule(YumDnf):
"results": []
}
except dnf.exceptions.Error as e:
- if to_text("already installed") in to_text(e):
- return {'failed': False, 'msg': '', 'failure': ''}
- else:
- return {
- 'failed': True,
- 'msg': "Unknown Error occurred for package {0}.".format(pkg_spec),
- 'failure': " ".join((pkg_spec, to_native(e))),
- 'rc': 1,
- "results": []
- }
+ return {
+ 'failed': True,
+ 'msg': "Unknown Error occurred for package {0}.".format(pkg_spec),
+ 'failure': " ".join((pkg_spec, to_native(e))),
+ 'rc': 1,
+ "results": []
+ }
return {'failed': False, 'msg': msg, 'failure': '', 'rc': 0}
- def _whatprovides(self, filepath):
- self.base.read_all_repos()
- available = self.base.sack.query().available()
- # Search in file
- files_filter = available.filter(file=filepath)
- # And Search in provides
- pkg_spec = files_filter.union(available.filter(provides=filepath)).run()
-
- if pkg_spec:
- return pkg_spec[0].name
-
def _parse_spec_group_file(self):
pkg_specs, grp_specs, module_specs, filenames = [], [], [], []
already_loaded_comps = False # Only load this if necessary, it's slow
@@ -933,11 +815,13 @@ class DnfModule(YumDnf):
elif name.endswith(".rpm"):
filenames.append(name)
elif name.startswith('/'):
- # like "dnf install /usr/bin/vi"
- pkg_spec = self._whatprovides(name)
- if pkg_spec:
- pkg_specs.append(pkg_spec)
- continue
+ # dnf install /usr/bin/vi
+ installed = self.base.sack.query().filter(provides=name, file=name).installed().run()
+ if installed:
+ pkg_specs.append(installed[0].name) # should be only one?
+ elif not self.update_only:
+ # not installed, pass the filename for dnf to process
+ pkg_specs.append(name)
elif name.startswith("@") or ('/' in name):
if not already_loaded_comps:
self.base.read_comps()
@@ -980,36 +864,20 @@ class DnfModule(YumDnf):
return not_installed
def _install_remote_rpms(self, filenames):
- if int(dnf.__version__.split(".")[0]) >= 2:
- pkgs = list(sorted(self.base.add_remote_rpms(list(filenames)), reverse=True))
- else:
- pkgs = []
- try:
- for filename in filenames:
- pkgs.append(self.base.add_remote_rpm(filename))
- except IOError as e:
- if to_text("Can not load RPM file") in to_text(e):
- self.module.fail_json(
- msg="Error occurred attempting remote rpm install of package: {0}. {1}".format(filename, to_native(e)),
- results=[],
- rc=1,
- )
- if self.update_only:
- self._update_only(pkgs)
- else:
- for pkg in pkgs:
- try:
- if self._is_newer_version_installed(self._package_dict(pkg)['nevra']):
- if self.allow_downgrade:
- self.base.package_install(pkg, strict=self.base.conf.strict)
- else:
+ try:
+ pkgs = self.base.add_remote_rpms(filenames)
+ if self.update_only:
+ self._update_only(pkgs)
+ else:
+ for pkg in pkgs:
+ if not (self._is_newer_version_installed(pkg) and not self.allow_downgrade):
self.base.package_install(pkg, strict=self.base.conf.strict)
- except Exception as e:
- self.module.fail_json(
- msg="Error occurred attempting remote rpm operation: {0}".format(to_native(e)),
- results=[],
- rc=1,
- )
+ except Exception as e:
+ self.module.fail_json(
+ msg="Error occurred attempting remote rpm operation: {0}".format(to_native(e)),
+ results=[],
+ rc=1,
+ )
def _is_module_installed(self, module_spec):
if self.with_modules:
@@ -1026,7 +894,7 @@ class DnfModule(YumDnf):
else:
return True # No stream provided, but module found
- return False # seems like a sane default
+ return False # seems like a logical default
def ensure(self):
@@ -1195,13 +1063,6 @@ class DnfModule(YumDnf):
response['results'].append("Packages providing %s not installed due to update_only specified" % spec)
else:
for pkg_spec in pkg_specs:
- # Previously we forced base.conf.best=True here.
- # However in 2.11+ there is a self.nobest option, so defer to that.
- # Note, however, that just because nobest isn't set, doesn't mean that
- # base.conf.best is actually true. We only force it false in
- # _configure_base(), we never set it to true, and it can default to false.
- # Thus, we still need to explicitly set it here.
- self.base.conf.best = not self.nobest
install_result = self._mark_package_install(pkg_spec, upgrade=True)
if install_result['failed']:
if install_result['msg']:
@@ -1237,14 +1098,6 @@ class DnfModule(YumDnf):
except dnf.exceptions.CompsError:
# Group is already uninstalled.
pass
- except AttributeError:
- # Group either isn't installed or wasn't marked installed at install time
- # because of DNF bug
- #
- # This is necessary until the upstream dnf API bug is fixed where installing
- # a group via the dnf API doesn't actually mark the group as installed
- # https://bugzilla.redhat.com/show_bug.cgi?id=1620324
- pass
for environment in environments:
try:
@@ -1253,25 +1106,11 @@ class DnfModule(YumDnf):
# Environment is already uninstalled.
pass
- installed = self.base.sack.query().installed()
for pkg_spec in pkg_specs:
- # short-circuit installed check for wildcard matching
- if '*' in pkg_spec:
- try:
- self.base.remove(pkg_spec)
- except dnf.exceptions.MarkingError as e:
- is_failure, handled_remove_error = self._sanitize_dnf_error_msg_remove(pkg_spec, to_native(e))
- if is_failure:
- failure_response['failures'].append('{0} - {1}'.format(pkg_spec, to_native(e)))
- else:
- response['results'].append(handled_remove_error)
- continue
-
- installed_pkg = dnf.subject.Subject(pkg_spec).get_best_query(
- sack=self.base.sack).installed().run()
-
- for pkg in installed_pkg:
- self.base.remove(str(pkg))
+ try:
+ self.base.remove(pkg_spec)
+ except dnf.exceptions.MarkingError as e:
+ response['results'].append(f"{e.value}: {pkg_spec}")
# Like the dnf CLI we want to allow recursive removal of dependent
# packages
@@ -1325,10 +1164,8 @@ class DnfModule(YumDnf):
self.base.download_packages(self.base.transaction.install_set)
except dnf.exceptions.DownloadError as e:
- self.module.fail_json(
- msg="Failed to download packages: {0}".format(to_text(e)),
- results=[],
- )
+ failure_response['msg'] = "Failed to download packages: {0}".format(to_native(e))
+ self.module.fail_json(**failure_response)
# Validate GPG. This is NOT done in dnf.Base (it's done in the
# upstream CLI subclass of dnf.Base)
@@ -1369,33 +1206,10 @@ class DnfModule(YumDnf):
failure_response['msg'] = "Depsolve Error occurred: {0}".format(to_native(e))
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
- if to_text("already installed") in to_text(e):
- response['changed'] = False
- response['results'].append("Package already installed: {0}".format(to_native(e)))
- self.module.exit_json(**response)
- else:
- failure_response['msg'] = "Unknown Error occurred: {0}".format(to_native(e))
- self.module.fail_json(**failure_response)
+ failure_response['msg'] = "Unknown Error occurred: {0}".format(to_native(e))
+ self.module.fail_json(**failure_response)
def run(self):
- """The main function."""
-
- # Check if autoremove is called correctly
- if self.autoremove:
- if LooseVersion(dnf.__version__) < LooseVersion('2.0.1'):
- self.module.fail_json(
- msg="Autoremove requires dnf>=2.0.1. Current dnf version is %s" % dnf.__version__,
- results=[],
- )
-
- # Check if download_dir is called correctly
- if self.download_dir:
- if LooseVersion(dnf.__version__) < LooseVersion('2.6.2'):
- self.module.fail_json(
- msg="download_dir requires dnf>=2.6.2. Current dnf version is %s" % dnf.__version__,
- results=[],
- )
-
if self.update_cache and not self.names and not self.list:
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
@@ -1435,8 +1249,10 @@ class DnfModule(YumDnf):
if self.with_modules:
self.module_base = dnf.module.module_base.ModuleBase(self.base)
-
- self.ensure()
+ try:
+ self.ensure()
+ finally:
+ self.base.close()
def main():
@@ -1451,7 +1267,7 @@ def main():
# list=repos
# list=pkgspec
- yumdnf_argument_spec['argument_spec']['use_backend'] = dict(default='auto', choices=['auto', 'yum', 'yum4', 'dnf4', 'dnf5'])
+ yumdnf_argument_spec['argument_spec']['use_backend'] = dict(default='auto', choices=['auto', 'dnf', 'yum', 'yum4', 'dnf4', 'dnf5'])
module = AnsibleModule(
**yumdnf_argument_spec
diff --git a/lib/ansible/modules/dnf5.py b/lib/ansible/modules/dnf5.py
index 2a4ed660deb..df4ee206748 100644
--- a/lib/ansible/modules/dnf5.py
+++ b/lib/ansible/modules/dnf5.py
@@ -17,7 +17,7 @@ options:
name:
description:
- "A package name or package specifier with version, like C(name-1.0).
- When using state=latest, this can be '*' which means run: dnf -y update.
+ When using O(state=latest), this can be C(*) which means run: C(dnf -y update).
You can also pass a url or a local path to an rpm file.
To operate on several packages this can accept a comma separated string of packages or a list of packages."
- Comparison operators for package version are valid here C(>), C(<), C(>=), C(<=). Example - C(name >= 1.0).
@@ -37,15 +37,15 @@ options:
state:
description:
- Whether to install (V(present), V(latest)), or remove (V(absent)) a package.
- - Default is V(None), however in effect the default action is V(present) unless the V(autoremove) option is
- enabled for this module, then V(absent) is inferred.
+ - Default is V(None), however in effect the default action is V(present) unless O(autoremove=true),
+ in which case V(absent) is inferred.
choices: ['absent', 'present', 'installed', 'removed', 'latest']
type: str
enablerepo:
description:
- I(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
- When specifying multiple repos, separate them with a ",".
+ When specifying multiple repos, separate them with a C(,).
type: list
elements: str
default: []
@@ -53,7 +53,7 @@ options:
description:
- I(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
- When specifying multiple repos, separate them with a ",".
+ When specifying multiple repos, separate them with a C(,).
type: list
elements: str
default: []
@@ -84,12 +84,12 @@ options:
description:
- If V(true), removes all "leaf" packages from the system that were originally
installed as dependencies of user-installed packages but which are no longer
- required by any such package. Should be used alone or when O(state) is V(absent)
+ required by any such package. Should be used alone or when O(state=absent).
type: bool
default: "no"
exclude:
description:
- - Package name(s) to exclude when state=present, or latest. This can be a
+ - Package name(s) to exclude when O(state=present) or O(state=latest). This can be a
list or a comma separated string.
type: list
elements: str
@@ -97,20 +97,20 @@ options:
skip_broken:
description:
- Skip all unavailable packages or packages with broken dependencies
- without raising an error. Equivalent to passing the --skip-broken option.
+ without raising an error. Equivalent to passing the C(--skip-broken) option.
type: bool
default: "no"
update_cache:
description:
- Force dnf to check if cache is out of date and redownload if needed.
- Has an effect only if O(state) is V(present) or V(latest).
+ Has an effect only if O(state=present) or O(state=latest).
type: bool
default: "no"
aliases: [ expire-cache ]
update_only:
description:
- When using latest, only update installed packages. Do not install packages.
- - Has an effect only if O(state) is V(latest)
+ - Has an effect only if O(state=latest).
default: "no"
type: bool
security:
@@ -127,17 +127,19 @@ options:
type: bool
enable_plugin:
description:
- - This is currently a no-op as dnf5 itself does not implement this feature.
- I(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
+ - O(disable_plugin) takes precedence in case a plugin is listed in both O(enable_plugin) and O(disable_plugin).
+ - Requires python3-libdnf5 5.2.0.0+.
type: list
elements: str
default: []
disable_plugin:
description:
- - This is currently a no-op as dnf5 itself does not implement this feature.
- I(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction.
+ - O(disable_plugin) takes precedence in case a plugin is listed in both O(enable_plugin) and O(disable_plugin).
+ - Requires python3-libdnf5 5.2.0.0+.
type: list
default: []
elements: str
@@ -145,7 +147,7 @@ options:
description:
- Disable the excludes defined in DNF config files.
- If set to V(all), disables all excludes.
- - If set to V(main), disable excludes defined in [main] in dnf.conf.
+ - If set to V(main), disable excludes defined in C([main]) in C(dnf.conf).
- If set to V(repoid), disable excludes defined for given repo id.
type: str
validate_certs:
@@ -164,7 +166,7 @@ options:
description:
- Specify if the named package and version is allowed to downgrade
a maybe already installed higher version of that package.
- Note that setting allow_downgrade=True can make this module
+ Note that setting O(allow_downgrade=true) can make this module
behave in a non-idempotent way. The task could end up with a set
of packages that does not match the complete list of specified
packages to install (because dependencies between the downgraded
@@ -208,10 +210,18 @@ options:
default: "no"
nobest:
description:
- - Set best option to False, so that transactions are not limited to best candidates only.
+ - This is the opposite of the O(best) option kept for backwards compatibility.
+ - Since ansible-core 2.17 the default value is set by the operating system distribution.
required: false
type: bool
- default: "no"
+ best:
+ description:
+ - When set to V(true), either use a package with the highest version available or fail.
+ - When set to V(false), if the latest version cannot be installed go with the lower version.
+ - Default is set by the operating system distribution.
+ required: false
+ type: bool
+ version_added: "2.17"
cacheonly:
description:
- Tells dnf to run entirely from system cache; does not download or update metadata.
@@ -235,7 +245,6 @@ attributes:
platform:
platforms: rhel
requirements:
- - "python3"
- "python3-libdnf5"
version_added: 2.15
"""
@@ -349,10 +358,23 @@ libdnf5 = None
def is_installed(base, spec):
settings = libdnf5.base.ResolveSpecSettings()
- query = libdnf5.rpm.PackageQuery(base)
- query.filter_installed()
- match, nevra = query.resolve_pkg_spec(spec, settings, True)
- return match
+ installed_query = libdnf5.rpm.PackageQuery(base)
+ installed_query.filter_installed()
+ match, nevra = installed_query.resolve_pkg_spec(spec, settings, True)
+
+ # FIXME use `is_glob_pattern` function when available:
+ # https://github.com/rpm-software-management/dnf5/issues/1563
+ glob_patterns = set("*[?")
+ if any(set(char) & glob_patterns for char in spec):
+ available_query = libdnf5.rpm.PackageQuery(base)
+ available_query.filter_available()
+ available_query.resolve_pkg_spec(spec, settings, True)
+
+ return not (
+ {p.get_name() for p in available_query} - {p.get_name() for p in installed_query}
+ )
+ else:
+ return match
def is_newer_version_installed(base, spec):
@@ -366,19 +388,37 @@ def is_newer_version_installed(base, spec):
spec_nevra = next(iter(libdnf5.rpm.Nevra.parse(spec)))
except (RuntimeError, StopIteration):
return False
- spec_name = spec_nevra.get_name()
- v = spec_nevra.get_version()
- r = spec_nevra.get_release()
- if not v or not r:
+
+ spec_version = spec_nevra.get_version()
+ if not spec_version:
return False
- spec_evr = "{}:{}-{}".format(spec_nevra.get_epoch() or "0", v, r)
- query = libdnf5.rpm.PackageQuery(base)
- query.filter_installed()
- query.filter_name([spec_name])
- query.filter_evr([spec_evr], libdnf5.common.QueryCmp_GT)
+ installed = libdnf5.rpm.PackageQuery(base)
+ installed.filter_installed()
+ installed.filter_name([spec_nevra.get_name()])
+ installed.filter_latest_evr()
+ try:
+ installed_package = list(installed)[-1]
+ except IndexError:
+ return False
- return query.size() > 0
+ target = libdnf5.rpm.PackageQuery(base)
+ target.filter_name([spec_nevra.get_name()])
+ target.filter_version([spec_version])
+ spec_release = spec_nevra.get_release()
+ if spec_release:
+ target.filter_release([spec_release])
+ spec_epoch = spec_nevra.get_epoch()
+ if spec_epoch:
+ target.filter_epoch([spec_epoch])
+ target.filter_latest_evr()
+ try:
+ target_package = list(target)[-1]
+ except IndexError:
+ return False
+
+ # FIXME https://github.com/rpm-software-management/dnf5/issues/1104
+ return libdnf5.rpm.rpmvercmp(installed_package.get_evr(), target_package.get_evr()) == 1
def package_to_dict(package):
@@ -399,8 +439,7 @@ def get_unneeded_pkgs(base):
query = libdnf5.rpm.PackageQuery(base)
query.filter_installed()
query.filter_unneeded()
- for pkg in query:
- yield pkg
+ yield from query
class Dnf5Module(YumDnf):
@@ -410,6 +449,29 @@ class Dnf5Module(YumDnf):
self.pkg_mgr_name = "dnf5"
+ def fail_on_non_existing_plugins(self, base):
+ # https://github.com/rpm-software-management/dnf5/issues/1460
+ try:
+ plugin_names = [p.get_name() for p in base.get_plugins_info()]
+ except AttributeError:
+ # plugins functionality requires python3-libdnf5 5.2.0.0+
+ # silently ignore here, the module will fail later when
+ # base.enable_disable_plugins is attempted to be used if
+ # user specifies enable_plugin/disable_plugin
+ return
+
+ msg = []
+ if enable_unmatched := set(self.enable_plugin).difference(plugin_names):
+ msg.append(
+ f"No matches were found for the following plugin name patterns while enabling libdnf5 plugins: {', '.join(enable_unmatched)}."
+ )
+ if disable_unmatched := set(self.disable_plugin).difference(plugin_names):
+ msg.append(
+ f"No matches were found for the following plugin name patterns while disabling libdnf5 plugins: {', '.join(disable_unmatched)}."
+ )
+ if msg:
+ self.module.fail_json(msg=" ".join(msg))
+
def _ensure_dnf(self):
locale = get_best_parsable_locale(self.module)
os.environ["LC_ALL"] = os.environ["LC_MESSAGES"] = locale
@@ -428,7 +490,6 @@ class Dnf5Module(YumDnf):
system_interpreters = [
"/usr/libexec/platform-python",
"/usr/bin/python3",
- "/usr/bin/python2",
"/usr/bin/python",
]
@@ -452,12 +513,6 @@ class Dnf5Module(YumDnf):
)
def run(self):
- if sys.version_info.major < 3:
- self.module.fail_json(
- msg="The dnf5 module requires Python 3.",
- failures=[],
- rc=1,
- )
if not self.list and not self.download_only and os.geteuid() != 0:
self.module.fail_json(
msg="This command has to be run under the root user.",
@@ -465,13 +520,6 @@ class Dnf5Module(YumDnf):
rc=1,
)
- if self.enable_plugin or self.disable_plugin:
- self.module.fail_json(
- msg="enable_plugin and disable_plugin options are not yet implemented in DNF5",
- failures=[],
- rc=1,
- )
-
base = libdnf5.base.Base()
conf = base.get_config()
@@ -479,7 +527,7 @@ class Dnf5Module(YumDnf):
conf.config_file_path = self.conf_file
try:
- base.load_config_from_file()
+ base.load_config()
except RuntimeError as e:
self.module.fail_json(
msg=str(e),
@@ -498,7 +546,11 @@ class Dnf5Module(YumDnf):
self.disable_excludes = "*"
conf.disable_excludes = self.disable_excludes
conf.skip_broken = self.skip_broken
- conf.best = not self.nobest
+ # best and nobest are mutually exclusive
+ if self.nobest is not None:
+ conf.best = not self.nobest
+ elif self.best is not None:
+ conf.best = self.best
conf.install_weak_deps = self.install_weak_deps
conf.gpgcheck = not self.disable_gpg_check
conf.localpkg_gpgcheck = not self.disable_gpg_check
@@ -510,12 +562,28 @@ class Dnf5Module(YumDnf):
if self.download_dir:
conf.destdir = self.download_dir
+ if self.enable_plugin:
+ try:
+ base.enable_disable_plugins(self.enable_plugin, True)
+ except AttributeError:
+ self.module.fail_json(msg="'enable_plugin' requires python3-libdnf5 5.2.0.0+")
+
+ if self.disable_plugin:
+ try:
+ base.enable_disable_plugins(self.disable_plugin, False)
+ except AttributeError:
+ self.module.fail_json(msg="'disable_plugin' requires python3-libdnf5 5.2.0.0+")
+
base.setup()
+ # https://github.com/rpm-software-management/dnf5/issues/1460
+ self.fail_on_non_existing_plugins(base)
+
log_router = base.get_logger()
global_logger = libdnf5.logger.GlobalLogger()
global_logger.set(log_router.get(), libdnf5.logger.Logger.Level_DEBUG)
- logger = libdnf5.logger.create_file_logger(base)
+ # FIXME hardcoding the filename does not seem right, should libdnf5 expose the default file name?
+ logger = libdnf5.logger.create_file_logger(base, "dnf5.log")
log_router.add_logger(logger)
if self.update_cache:
@@ -540,7 +608,11 @@ class Dnf5Module(YumDnf):
for repo in repo_query:
repo.enable()
- sack.update_and_load_enabled_repos(True)
+ try:
+ sack.load_repos()
+ except AttributeError:
+ # dnf5 < 5.2.0.0
+ sack.update_and_load_enabled_repos(True)
if self.update_cache and not self.names and not self.list:
self.module.exit_json(
@@ -572,7 +644,11 @@ class Dnf5Module(YumDnf):
self.module.exit_json(msg="", results=results, rc=0)
settings = libdnf5.base.GoalJobSettings()
- settings.group_with_name = True
+ try:
+ settings.set_group_with_name(True)
+ except AttributeError:
+ # dnf5 < 5.2.0.0
+ settings.group_with_name = True
if self.bugfix or self.security:
advisory_query = libdnf5.advisory.AdvisoryQuery(base)
types = []
@@ -587,18 +663,12 @@ class Dnf5Module(YumDnf):
results = []
if self.names == ["*"] and self.state == "latest":
goal.add_rpm_upgrade(settings)
- elif self.state in {"install", "present", "latest"}:
+ elif self.state in {"installed", "present", "latest"}:
upgrade = self.state == "latest"
for spec in self.names:
if is_newer_version_installed(base, spec):
if self.allow_downgrade:
- if upgrade:
- if is_installed(base, spec):
- goal.add_upgrade(spec, settings)
- else:
- goal.add_install(spec, settings)
- else:
- goal.add_install(spec, settings)
+ goal.add_install(spec, settings)
elif is_installed(base, spec):
if upgrade:
goal.add_upgrade(spec, settings)
@@ -626,7 +696,7 @@ class Dnf5Module(YumDnf):
if transaction.get_problems():
failures = []
for log_event in transaction.get_resolve_logs():
- if log_event.get_problem() == libdnf5.base.GoalProblem_NOT_FOUND and self.state in {"install", "present", "latest"}:
+ if log_event.get_problem() == libdnf5.base.GoalProblem_NOT_FOUND and self.state in {"installed", "present", "latest"}:
# NOTE dnf module compat
failures.append("No package {} available.".format(log_event.get_spec()))
else:
diff --git a/lib/ansible/modules/dpkg_selections.py b/lib/ansible/modules/dpkg_selections.py
index b591636802d..a1fa672732d 100644
--- a/lib/ansible/modules/dpkg_selections.py
+++ b/lib/ansible/modules/dpkg_selections.py
@@ -11,7 +11,7 @@ DOCUMENTATION = '''
module: dpkg_selections
short_description: Dpkg package selection selections
description:
- - Change dpkg package selection state via --get-selections and --set-selections.
+ - Change dpkg package selection state via C(--get-selections) and C(--set-selections).
version_added: "2.0"
author:
- Brian Brazil (@brian-brazil)
@@ -68,7 +68,7 @@ def main():
dpkg = module.get_bin_path('dpkg', True)
locale = get_best_parsable_locale(module)
- DPKG_ENV = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale)
+ DPKG_ENV = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale, LANGUAGE=locale)
module.run_command_environ_update = DPKG_ENV
name = module.params['name']
diff --git a/lib/ansible/modules/expect.py b/lib/ansible/modules/expect.py
index 614476332b6..760d7148d60 100644
--- a/lib/ansible/modules/expect.py
+++ b/lib/ansible/modules/expect.py
@@ -70,10 +70,10 @@ notes:
- If you want to run a command through the shell (say you are using C(<),
C(>), C(|), and so on), you must specify a shell in the command such as
C(/bin/bash -c "/path/to/something | grep else").
- - Case insensitive searches are indicated with a prefix of C(?i).
+ - Case insensitive searches are indicated with a prefix of C((?i)).
- The C(pexpect) library used by this module operates with a search window
of 2000 bytes, and does not use a multiline regex match. To perform a
- start of line bound match, use a pattern like ``(?m)^pattern``
+ start of line bound match, use a pattern like C((?m)^pattern).
- The M(ansible.builtin.expect) module is designed for simple scenarios.
For more complex needs, consider the use of expect code with the M(ansible.builtin.shell)
or M(ansible.builtin.script) modules. (An example is part of the M(ansible.builtin.shell) module documentation).
diff --git a/lib/ansible/modules/fetch.py b/lib/ansible/modules/fetch.py
index 66726e3ae39..a5edb767df0 100644
--- a/lib/ansible/modules/fetch.py
+++ b/lib/ansible/modules/fetch.py
@@ -28,8 +28,8 @@ options:
dest:
description:
- A directory to save the file into.
- - For example, if the O(dest) directory is C(/backup) a O(src) file named C(/etc/profile) on host
- C(host.example.com), would be saved into C(/backup/host.example.com/etc/profile).
+ - For example, if O(dest=/backup), then O(src=/etc/profile) on host
+ C(host.example.com) would save the file into C(/backup/host.example.com/etc/profile).
The host name is based on the inventory name.
required: yes
fail_on_missing:
diff --git a/lib/ansible/modules/file.py b/lib/ansible/modules/file.py
index 564d7f6cdbe..38d2fe77e81 100644
--- a/lib/ansible/modules/file.py
+++ b/lib/ansible/modules/file.py
@@ -63,9 +63,9 @@ options:
force:
description:
- >
- Force the creation of the symlinks in two cases: the source file does
+ Force the creation of the links in two cases: if the link type is symbolic and the source file does
not exist (but will appear later); the destination exists and is a file (so, we need to unlink the
- O(path) file and create a symlink to the O(src) file in place of it).
+ O(path) file and create a link to the O(src) file in place of it).
type: bool
default: no
follow:
@@ -73,7 +73,7 @@ options:
- This flag indicates that filesystem links, if they exist, should be followed.
- O(follow=yes) and O(state=link) can modify O(src) when combined with parameters such as O(mode).
- Previous to Ansible 2.5, this was V(false) by default.
- - While creating a symlink with a non-existent destination, set O(follow) to V(false) to avoid a warning message related to permission issues.
+ - While creating a symlink with a non-existent destination, set O(follow=false) to avoid a warning message related to permission issues.
The warning message is added to notify the user that we can not set permissions to the non-existent destination.
type: bool
default: yes
@@ -231,7 +231,6 @@ path:
import errno
import os
import shutil
-import sys
import time
from pwd import getpwnam, getpwuid
@@ -239,38 +238,13 @@ from grp import getgrnam, getgrgid
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes, to_native
-
+from ansible.module_utils.common.sentinel import Sentinel
# There will only be a single AnsibleModule object per module
module = None
-class AnsibleModuleError(Exception):
- def __init__(self, results):
- self.results = results
-
- def __repr__(self):
- return 'AnsibleModuleError(results={0})'.format(self.results)
-
-
-class ParameterError(AnsibleModuleError):
- pass
-
-
-class Sentinel(object):
- def __new__(cls, *args, **kwargs):
- return cls
-
-
-def _ansible_excepthook(exc_type, exc_value, tb):
- # Using an exception allows us to catch it if the calling code knows it can recover
- if issubclass(exc_type, AnsibleModuleError):
- module.fail_json(**exc_value.results)
- else:
- sys.__excepthook__(exc_type, exc_value, tb)
-
-
-def additional_parameter_handling(params):
+def additional_parameter_handling(module):
"""Additional parameter validation and reformatting"""
# When path is a directory, rewrite the pathname to be the file inside of the directory
# TODO: Why do we exclude link? Why don't we exclude directory? Should we exclude touch?
@@ -282,6 +256,7 @@ def additional_parameter_handling(params):
# if state == file: place inside of the directory (use _original_basename)
# if state == link: place inside of the directory (use _original_basename. Fallback to src?)
# if state == hard: place inside of the directory (use _original_basename. Fallback to src?)
+ params = module.params
if (params['state'] not in ("link", "absent") and os.path.isdir(to_bytes(params['path'], errors='surrogate_or_strict'))):
basename = None
@@ -307,13 +282,17 @@ def additional_parameter_handling(params):
# make sure the target path is a directory when we're doing a recursive operation
if params['recurse'] and params['state'] != 'directory':
- raise ParameterError(results={"msg": "recurse option requires state to be 'directory'",
- "path": params["path"]})
+ module.fail_json(
+ msg="recurse option requires state to be 'directory'",
+ path=params["path"]
+ )
# Fail if 'src' but no 'state' is specified
if params['src'] and params['state'] not in ('link', 'hard'):
- raise ParameterError(results={'msg': "src option requires state to be 'link' or 'hard'",
- 'path': params['path']})
+ module.fail_json(
+ msg="src option requires state to be 'link' or 'hard'",
+ path=params['path']
+ )
def get_state(path):
@@ -377,8 +356,8 @@ def recursive_set_attributes(b_path, follow, file_args, mtime, atime):
except RuntimeError as e:
# on Python3 "RecursionError" is raised which is derived from "RuntimeError"
# TODO once this function is moved into the common file utilities, this should probably raise more general exception
- raise AnsibleModuleError(
- results={'msg': "Could not recursively set attributes on %s. Original error was: '%s'" % (to_native(b_path), to_native(e))}
+ module.fail_json(
+ msg=f"Could not recursively set attributes on {to_native(b_path)}. Original error was: '{to_native(e)}'"
)
return changed
@@ -419,17 +398,17 @@ def initial_diff(path, state, prev_state):
def get_timestamp_for_time(formatted_time, time_format):
if formatted_time == 'preserve':
return None
- elif formatted_time == 'now':
+ if formatted_time == 'now':
return Sentinel
- else:
- try:
- struct = time.strptime(formatted_time, time_format)
- struct_time = time.mktime(struct)
- except (ValueError, OverflowError) as e:
- raise AnsibleModuleError(results={'msg': 'Error while obtaining timestamp for time %s using format %s: %s'
- % (formatted_time, time_format, to_native(e, nonstring='simplerepr'))})
+ try:
+ struct = time.strptime(formatted_time, time_format)
+ struct_time = time.mktime(struct)
+ except (ValueError, OverflowError) as e:
+ module.fail_json(
+ msg=f"Error while obtaining timestamp for time {formatted_time} using format {time_format}: {to_native(e, nonstring='simplerepr')}",
+ )
- return struct_time
+ return struct_time
def update_timestamp_for_file(path, mtime, atime, diff=None):
@@ -486,18 +465,19 @@ def update_timestamp_for_file(path, mtime, atime, diff=None):
diff['before']['atime'] = previous_atime
diff['after']['atime'] = atime
except OSError as e:
- raise AnsibleModuleError(results={'msg': 'Error while updating modification or access time: %s'
- % to_native(e, nonstring='simplerepr'), 'path': path})
+ module.fail_json(
+ msg=f"Error while updating modification or access time: {to_native(e, nonstring='simplerepr')}",
+ path=path
+ )
return True
def keep_backward_compatibility_on_timestamps(parameter, state):
if state in ['file', 'hard', 'directory', 'link'] and parameter is None:
return 'preserve'
- elif state == 'touch' and parameter is None:
+ if state == 'touch' and parameter is None:
return 'now'
- else:
- return parameter
+ return parameter
def execute_diff_peek(path):
@@ -530,14 +510,18 @@ def ensure_absent(path):
try:
shutil.rmtree(b_path, ignore_errors=False)
except Exception as e:
- raise AnsibleModuleError(results={'msg': "rmtree failed: %s" % to_native(e)})
+ module.fail_json(
+ msg=f"rmtree failed: {to_native(e)}"
+ )
else:
try:
os.unlink(b_path)
except OSError as e:
if e.errno != errno.ENOENT: # It may already have been removed
- raise AnsibleModuleError(results={'msg': "unlinking failed: %s " % to_native(e),
- 'path': path})
+ module.fail_json(
+ msg=f"unlinking failed: {to_native(e)}",
+ path=path
+ )
result.update({'path': path, 'changed': True, 'diff': diff, 'state': 'absent'})
else:
@@ -566,9 +550,10 @@ def execute_touch(path, follow, timestamps):
open(b_path, 'wb').close()
changed = True
except (OSError, IOError) as e:
- raise AnsibleModuleError(results={'msg': 'Error, could not touch target: %s'
- % to_native(e, nonstring='simplerepr'),
- 'path': path})
+ module.fail_json(
+ msg=f"Error, could not touch target: {to_native(e, nonstring='simplerepr')}",
+ path=path
+ )
# Update the attributes on the file
diff = initial_diff(path, 'touch', prev_state)
file_args = module.load_file_common_arguments(module.params)
@@ -606,8 +591,11 @@ def ensure_file_attributes(path, follow, timestamps):
if prev_state not in ('file', 'hard'):
# file is not absent and any other state is a conflict
- raise AnsibleModuleError(results={'msg': 'file (%s) is %s, cannot continue' % (path, prev_state),
- 'path': path, 'state': prev_state})
+ module.fail_json(
+ msg=f"file ({path}) is {prev_state}, cannot continue",
+ path=path,
+ state=prev_state
+ )
diff = initial_diff(path, 'file', prev_state)
changed = module.set_fs_attributes_if_different(file_args, False, diff, expand=False)
@@ -664,15 +652,18 @@ def ensure_directory(path, follow, recurse, timestamps):
changed = module.set_fs_attributes_if_different(tmp_file_args, changed, diff, expand=False)
changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
except Exception as e:
- raise AnsibleModuleError(results={'msg': 'There was an issue creating %s as requested:'
- ' %s' % (curpath, to_native(e)),
- 'path': path})
+ module.fail_json(
+ msg=f"There was an issue creating {curpath} as requested: {to_native(e)}",
+ path=path
+ )
return {'path': path, 'changed': changed, 'diff': diff}
elif prev_state != 'directory':
# We already know prev_state is not 'absent', therefore it exists in some form.
- raise AnsibleModuleError(results={'msg': '%s already exists as a %s' % (path, prev_state),
- 'path': path})
+ module.fail_json(
+ msg=f"{path} already exists as a {prev_state}",
+ path=path
+ )
#
# previous state == directory
@@ -714,31 +705,39 @@ def ensure_symlink(path, src, follow, force, timestamps):
b_absrc = to_bytes(absrc, errors='surrogate_or_strict')
if not force and src is not None and not os.path.exists(b_absrc):
- raise AnsibleModuleError(results={'msg': 'src file does not exist, use "force=yes" if you'
- ' really want to create the link: %s' % absrc,
- 'path': path, 'src': src})
+ module.fail_json(
+ msg="src file does not exist, use 'force=yes' if you"
+ f" really want to create the link: {absrc}",
+ path=path,
+ src=src
+ )
if prev_state == 'directory':
if not force:
- raise AnsibleModuleError(results={'msg': 'refusing to convert from %s to symlink for %s'
- % (prev_state, path),
- 'path': path})
+ module.fail_json(
+ msg=f'refusing to convert from {prev_state} to symlink for {path}',
+ path=path
+ )
elif os.listdir(b_path):
# refuse to replace a directory that has files in it
- raise AnsibleModuleError(results={'msg': 'the directory %s is not empty, refusing to'
- ' convert it' % path,
- 'path': path})
+ module.fail_json(
+ msg=f'the directory {path} is not empty, refusing to convert it',
+ path=path
+ )
elif prev_state in ('file', 'hard') and not force:
- raise AnsibleModuleError(results={'msg': 'refusing to convert from %s to symlink for %s'
- % (prev_state, path),
- 'path': path})
+ module.fail_json(
+ msg=f'refusing to convert from {prev_state} to symlink for {path}',
+ path=path
+ )
diff = initial_diff(path, 'link', prev_state)
changed = False
if prev_state in ('hard', 'file', 'directory', 'absent'):
if src is None:
- raise AnsibleModuleError(results={'msg': 'src is required for creating new symlinks'})
+ module.fail_json(
+ msg='src is required for creating new symlinks',
+ )
changed = True
elif prev_state == 'link':
if src is not None:
@@ -748,7 +747,11 @@ def ensure_symlink(path, src, follow, force, timestamps):
diff['after']['src'] = src
changed = True
else:
- raise AnsibleModuleError(results={'msg': 'unexpected position reached', 'dest': path, 'src': src})
+ module.fail_json(
+ msg='unexpected position reached',
+ dest=path,
+ src=src
+ )
if changed and not module.check_mode:
if prev_state != 'absent':
@@ -764,16 +767,18 @@ def ensure_symlink(path, src, follow, force, timestamps):
except OSError as e:
if os.path.exists(b_tmppath):
os.unlink(b_tmppath)
- raise AnsibleModuleError(results={'msg': 'Error while replacing: %s'
- % to_native(e, nonstring='simplerepr'),
- 'path': path})
+ module.fail_json(
+ msg=f"Error while replacing: {to_native(e, nonstring='simplerepr')}",
+ path=path
+ )
else:
try:
os.symlink(b_src, b_path)
except OSError as e:
- raise AnsibleModuleError(results={'msg': 'Error while linking: %s'
- % to_native(e, nonstring='simplerepr'),
- 'path': path})
+ module.fail_json(
+ msg=f"Error while linking: {to_native(e, nonstring='simplerepr')}",
+ path=path
+ )
if module.check_mode and not os.path.exists(b_path):
return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
@@ -808,12 +813,18 @@ def ensure_hardlink(path, src, follow, force, timestamps):
# src is the source of a hardlink. We require it if we are creating a new hardlink.
# We require path in the argument_spec so we know it is present at this point.
if prev_state != 'hard' and src is None:
- raise AnsibleModuleError(results={'msg': 'src is required for creating new hardlinks'})
+ module.fail_json(
+ msg='src is required for creating new hardlinks'
+ )
# Even if the link already exists, if src was specified it needs to exist.
# The inode number will be compared to ensure the link has the correct target.
if src is not None and not os.path.exists(b_src):
- raise AnsibleModuleError(results={'msg': 'src does not exist', 'dest': path, 'src': src})
+ module.fail_json(
+ msg='src does not exist',
+ dest=path,
+ src=src
+ )
diff = initial_diff(path, 'hard', prev_state)
changed = False
@@ -827,26 +838,39 @@ def ensure_hardlink(path, src, follow, force, timestamps):
diff['after']['src'] = src
changed = True
elif prev_state == 'hard':
- if src is not None and not os.stat(b_path).st_ino == os.stat(b_src).st_ino:
+ if src is not None and os.stat(b_path).st_ino != os.stat(b_src).st_ino:
changed = True
if not force:
- raise AnsibleModuleError(results={'msg': 'Cannot link, different hard link exists at destination',
- 'dest': path, 'src': src})
+ module.fail_json(
+ msg='Cannot link, different hard link exists at destination',
+ dest=path,
+ src=src
+ )
elif prev_state == 'file':
changed = True
if not force:
- raise AnsibleModuleError(results={'msg': 'Cannot link, %s exists at destination' % prev_state,
- 'dest': path, 'src': src})
+ module.fail_json(
+ msg=f'Cannot link, {prev_state} exists at destination',
+ dest=path,
+ src=src
+ )
elif prev_state == 'directory':
changed = True
if os.path.exists(b_path):
if os.stat(b_path).st_ino == os.stat(b_src).st_ino:
return {'path': path, 'changed': False}
elif not force:
- raise AnsibleModuleError(results={'msg': 'Cannot link: different hard link exists at destination',
- 'dest': path, 'src': src})
+ module.fail_json(
+ msg='Cannot link: different hard link exists at destination',
+ dest=path,
+ src=src
+ )
else:
- raise AnsibleModuleError(results={'msg': 'unexpected position reached', 'dest': path, 'src': src})
+ module.fail_json(
+ msg='unexpected position reached',
+ dest=path,
+ src=src
+ )
if changed and not module.check_mode:
if prev_state != 'absent':
@@ -867,16 +891,20 @@ def ensure_hardlink(path, src, follow, force, timestamps):
except OSError as e:
if os.path.exists(b_tmppath):
os.unlink(b_tmppath)
- raise AnsibleModuleError(results={'msg': 'Error while replacing: %s'
- % to_native(e, nonstring='simplerepr'),
- 'path': path})
+ module.fail_json(
+ msg=f"Error while replacing: {to_native(e, nonstring='simplerepr')}",
+ path=path
+ )
else:
try:
+ if follow and os.path.islink(b_src):
+ b_src = os.readlink(b_src)
os.link(b_src, b_path)
except OSError as e:
- raise AnsibleModuleError(results={'msg': 'Error while linking: %s'
- % to_native(e, nonstring='simplerepr'),
- 'path': path})
+ module.fail_json(
+ msg=f"Error while linking: {to_native(e, nonstring='simplerepr')}",
+ path=path
+ )
if module.check_mode and not os.path.exists(b_path):
return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
@@ -938,9 +966,7 @@ def main():
supports_check_mode=True,
)
- # When we rewrite basic.py, we will do something similar to this on instantiating an AnsibleModule
- sys.excepthook = _ansible_excepthook
- additional_parameter_handling(module.params)
+ additional_parameter_handling(module)
params = module.params
state = params['state']
diff --git a/lib/ansible/modules/find.py b/lib/ansible/modules/find.py
index 5e8e36a2327..a516b354bc9 100644
--- a/lib/ansible/modules/find.py
+++ b/lib/ansible/modules/find.py
@@ -58,8 +58,8 @@ options:
contains:
description:
- A regular expression or pattern which should be matched against the file content.
- - If O(read_whole_file) is V(false) it matches against the beginning of the line (uses
- V(re.match(\))). If O(read_whole_file) is V(true), it searches anywhere for that pattern
+ - If O(read_whole_file=false) it matches against the beginning of the line (uses
+ V(re.match(\))). If O(read_whole_file=true), it searches anywhere for that pattern
(uses V(re.search(\))).
- Works only when O(file_type) is V(file).
type: str
@@ -75,14 +75,15 @@ options:
paths:
description:
- List of paths of directories to search. All paths must be fully qualified.
+ - From ansible-core 2.18 and onwards, the data type has changed from C(str) to C(path).
type: list
required: true
aliases: [ name, path ]
- elements: str
+ elements: path
file_type:
description:
- Type of file to select.
- - The 'link' and 'any' choices were added in Ansible 2.3.
+ - The V(link) and V(any) choices were added in Ansible 2.3.
type: str
choices: [ any, directory, file, link ]
default: file
@@ -116,7 +117,7 @@ options:
restricted to modes that can be applied using the python
C(os.chmod) function.
- The mode can be provided as an octal such as V("0644") or
- as symbolic such as V(u=rw,g=r,o=r)
+ as symbolic such as V(u=rw,g=r,o=r).
type: raw
version_added: '2.16'
exact_mode:
@@ -145,15 +146,23 @@ options:
depth:
description:
- Set the maximum number of levels to descend into.
- - Setting recurse to V(false) will override this value, which is effectively depth 1.
+ - Setting O(recurse=false) will override this value, which is effectively depth 1.
- Default is unlimited depth.
type: int
version_added: "2.6"
encoding:
description:
- - When doing a C(contains) search, determine the encoding of the files to be searched.
+ - When doing a O(contains) search, determine the encoding of the files to be searched.
type: str
version_added: "2.17"
+ limit:
+ description:
+ - Limit the maximum number of matching paths returned. After finding this many, the find action will stop looking.
+ - Matches are made from the top, down (i.e. shallowest directory first).
+      - If not set, or set to V(null), it will do unlimited matches.
+ - Default is unlimited matches.
+ type: int
+ version_added: "2.18"
extends_documentation_fragment: action_common_attributes
attributes:
check_mode:
@@ -227,6 +236,16 @@ EXAMPLES = r'''
- '^_[0-9]{2,4}_.*.log$'
- '^[a-z]{1,5}_.*log$'
+- name: Find file containing "wally" without necessarily reading all files
+ ansible.builtin.find:
+ paths: /var/log
+ file_type: file
+ contains: wally
+ read_whole_file: true
+ patterns: "^.*\\.log$"
+ use_regex: true
+ recurse: true
+ limit: 1
'''
RETURN = r'''
@@ -450,7 +469,7 @@ def statinfo(st):
def main():
module = AnsibleModule(
argument_spec=dict(
- paths=dict(type='list', required=True, aliases=['name', 'path'], elements='str'),
+ paths=dict(type='list', required=True, aliases=['name', 'path'], elements='path'),
patterns=dict(type='list', default=[], aliases=['pattern'], elements='str'),
excludes=dict(type='list', aliases=['exclude'], elements='str'),
contains=dict(type='str'),
@@ -467,7 +486,8 @@ def main():
depth=dict(type='int'),
mode=dict(type='raw'),
exact_mode=dict(type='bool', default=True),
- encoding=dict(type='str')
+ encoding=dict(type='str'),
+ limit=dict(type='int')
),
supports_check_mode=True,
)
@@ -520,17 +540,20 @@ def main():
else:
module.fail_json(size=params['size'], msg="failed to process size")
+ if params['limit'] is not None and params['limit'] <= 0:
+ module.fail_json(msg="limit cannot be %d (use None for unlimited)" % params['limit'])
+
now = time.time()
msg = 'All paths examined'
looked = 0
has_warnings = False
for npath in params['paths']:
- npath = os.path.expanduser(os.path.expandvars(npath))
try:
if not os.path.isdir(npath):
raise Exception("'%s' is not a directory" % to_native(npath))
- for root, dirs, files in os.walk(npath, onerror=handle_walk_errors, followlinks=params['follow']):
+ # Setting `topdown=True` to explicitly guarantee matches are made from the shallowest directory first
+ for root, dirs, files in os.walk(npath, onerror=handle_walk_errors, followlinks=params['follow'], topdown=True):
looked = looked + len(files) + len(dirs)
for fsobj in (files + dirs):
fsname = os.path.normpath(os.path.join(root, fsobj))
@@ -596,7 +619,12 @@ def main():
r.update(statinfo(st))
filelist.append(r)
- if not params['recurse']:
+ if len(filelist) == params["limit"]:
+ # Breaks out of directory files loop only
+ msg = "Limit of matches reached"
+ break
+
+ if not params['recurse'] or len(filelist) == params["limit"]:
break
except Exception as e:
skipped[npath] = to_text(e)
diff --git a/lib/ansible/modules/gather_facts.py b/lib/ansible/modules/gather_facts.py
index 561275f2844..5787bbb3f83 100644
--- a/lib/ansible/modules/gather_facts.py
+++ b/lib/ansible/modules/gather_facts.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2017 Ansible Project
+# Copyright: Contributors to the Ansible project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
@@ -27,6 +27,8 @@ options:
- By default it will be true if more than one fact module is used.
- For low cost/delay fact modules parallelism overhead might end up meaning the whole process takes longer.
Test your specific case to see if it is a speed improvement or not.
+ - The C(ansible_facts_parallel) variable can be used to set this option,
+ overriding the default, but not the direct assignment of the option in the task.
type: bool
attributes:
action:
@@ -49,8 +51,8 @@ attributes:
notes:
- This is mostly a wrapper around other fact gathering modules.
- Options passed into this action must be supported by all the underlying fact modules configured.
- - If using C(gather_timeout) and parallel execution, it will limit the total execution time of
- modules that do not accept C(gather_timeout) themselves.
+ - If using O(ignore:gather_timeout) and parallel execution, it will limit the total execution time of
+ modules that do not accept O(ignore:gather_timeout) themselves.
- Facts returned by each module will be merged, conflicts will favor 'last merged'.
Order is not guaranteed, when doing parallel gathering on multiple modules.
author:
diff --git a/lib/ansible/modules/get_url.py b/lib/ansible/modules/get_url.py
index 920b986b580..965e5f6196e 100644
--- a/lib/ansible/modules/get_url.py
+++ b/lib/ansible/modules/get_url.py
@@ -13,7 +13,7 @@ short_description: Downloads files from HTTP, HTTPS, or FTP to node
description:
- Downloads files from HTTP, HTTPS, or FTP to the remote server. The remote
server I(must) have direct access to the remote resource.
- - By default, if an environment variable C(_proxy) is set on
+ - By default, if an environment variable E(_proxy) is set on
the target host, requests will be sent through that proxy. This
behaviour can be overridden by setting a variable for this task
(see R(setting the environment,playbooks_environment)),
@@ -27,23 +27,23 @@ version_added: '0.6'
options:
ciphers:
description:
- - SSL/TLS Ciphers to use for the request
- - 'When a list is provided, all ciphers are joined in order with V(:)'
+ - SSL/TLS Ciphers to use for the request.
+ - 'When a list is provided, all ciphers are joined in order with C(:).'
- See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT)
for more details.
- - The available ciphers is dependent on the Python and OpenSSL/LibreSSL versions
+      - The available ciphers are dependent on the Python and OpenSSL/LibreSSL versions.
type: list
elements: str
version_added: '2.14'
decompress:
description:
- - Whether to attempt to decompress gzip content-encoded responses
+ - Whether to attempt to decompress gzip content-encoded responses.
type: bool
default: true
version_added: '2.14'
url:
description:
- - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
+ - HTTP, HTTPS, or FTP URL in the form C((http|https|ftp)://[user[:pass]]@host.domain[:port]/path).
type: str
required: true
dest:
@@ -60,9 +60,9 @@ options:
tmp_dest:
description:
- Absolute path of where temporary file is downloaded to.
- - When run on Ansible 2.5 or greater, path defaults to ansible's remote_tmp setting
+ - When run on Ansible 2.5 or greater, path defaults to ansible's C(remote_tmp) setting.
- When run on Ansible prior to 2.5, it defaults to E(TMPDIR), E(TEMP) or E(TMP) env variables or a platform specific value.
- - U(https://docs.python.org/3/library/tempfile.html#tempfile.tempdir)
+ - U(https://docs.python.org/3/library/tempfile.html#tempfile.tempdir).
type: path
version_added: '2.1'
force:
@@ -87,18 +87,20 @@ options:
- 'If a checksum is passed to this parameter, the digest of the
destination file will be calculated after it is downloaded to ensure
its integrity and verify that the transfer completed successfully.
- Format: :, e.g. checksum="sha256:D98291AC[...]B6DC7B97",
- checksum="sha256:http://example.com/path/sha256sum.txt"'
+      Format: <algorithm>:<checksum|url>, for example C(checksum="sha256:D98291AC[...]B6DC7B97"),
+      C(checksum="sha256:http://example.com/path/sha256sum.txt").'
- If you worry about portability, only the sha1 algorithm is available
on all platforms and python versions.
- - The Python ``hashlib`` module is responsible for providing the available algorithms.
+ - The Python C(hashlib) module is responsible for providing the available algorithms.
The choices vary based on Python version and OpenSSL version.
- - On systems running in FIPS compliant mode, the ``md5`` algorithm may be unavailable.
+ - On systems running in FIPS compliant mode, the C(md5) algorithm may be unavailable.
- Additionally, if a checksum is passed to this parameter, and the file exist under
the O(dest) location, the C(destination_checksum) would be calculated, and if
checksum equals C(destination_checksum), the file download would be skipped
- (unless O(force) is V(true)). If the checksum does not equal C(destination_checksum),
+ (unless O(force=true)). If the checksum does not equal C(destination_checksum),
the destination file is deleted.
+ - If the checksum URL requires username and password, O(url_username) and O(url_password) are used
+ to download the checksum file.
type: str
default: ''
version_added: "2.0"
@@ -185,16 +187,16 @@ options:
authentication.
- Requires the Python library L(gssapi,https://github.com/pythongssapi/python-gssapi) to be installed.
- Credentials for GSSAPI can be specified with O(url_username)/O(url_password) or with the GSSAPI env var
- C(KRB5CCNAME) that specified a custom Kerberos credential cache.
+ E(KRB5CCNAME) that specified a custom Kerberos credential cache.
- NTLM authentication is I(not) supported even if the GSSAPI mech for NTLM has been installed.
type: bool
default: no
version_added: '2.11'
use_netrc:
description:
- - Determining whether to use credentials from ``~/.netrc`` file
- - By default .netrc is used with Basic authentication headers
- - When set to False, .netrc credentials are ignored
+ - Determining whether to use credentials from C(~/.netrc) file.
+ - By default C(.netrc) is used with Basic authentication headers.
+ - When V(false), C(.netrc) credentials are ignored.
type: bool
default: true
version_added: '2.14'
@@ -365,6 +367,7 @@ url:
sample: https://www.ansible.com/
'''
+import email.message
import os
import re
import shutil
@@ -437,23 +440,16 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, head
def extract_filename_from_headers(headers):
- """
- Extracts a filename from the given dict of HTTP headers.
-
- Looks for the content-disposition header and applies a regex.
- Returns the filename if successful, else None."""
- cont_disp_regex = 'attachment; ?filename="?([^"]+)'
- res = None
-
- if 'content-disposition' in headers:
- cont_disp = headers['content-disposition']
- match = re.match(cont_disp_regex, cont_disp)
- if match:
- res = match.group(1)
- # Try preventing any funny business.
- res = os.path.basename(res)
+ """Extracts a filename from the given dict of HTTP headers.
- return res
+ Returns the filename if successful, else None.
+ """
+ msg = email.message.Message()
+ msg['content-disposition'] = headers.get('content-disposition', '')
+ if filename := msg.get_param('filename', header='content-disposition'):
+ # Avoid directory traversal
+ filename = os.path.basename(filename)
+ return filename
def is_url(checksum):
@@ -661,6 +657,16 @@ def main():
result['checksum_src'] != result['checksum_dest'])
module.exit_json(msg=info.get('msg', ''), **result)
+ # If a checksum was provided, ensure that the temporary file matches this checksum
+ # before moving it to the destination.
+ if checksum != '':
+ tmpsrc_checksum = module.digest_from_file(tmpsrc, algorithm)
+
+ if checksum != tmpsrc_checksum:
+ os.remove(tmpsrc)
+ module.fail_json(msg=f"The checksum for {tmpsrc} did not match {checksum}; it was {tmpsrc_checksum}.", **result)
+
+ # Copy temporary file to destination if necessary
backup_file = None
if result['checksum_src'] != result['checksum_dest']:
try:
@@ -679,13 +685,6 @@ def main():
if os.path.exists(tmpsrc):
os.remove(tmpsrc)
- if checksum != '':
- destination_checksum = module.digest_from_file(dest, algorithm)
-
- if checksum != destination_checksum:
- os.remove(dest)
- module.fail_json(msg="The checksum for %s did not match %s; it was %s." % (dest, checksum, destination_checksum), **result)
-
# allow file attribute changes
file_args = module.load_file_common_arguments(module.params, path=dest)
result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'])
diff --git a/lib/ansible/modules/git.py b/lib/ansible/modules/git.py
index 5460a24a269..c9ccff4bfc8 100644
--- a/lib/ansible/modules/git.py
+++ b/lib/ansible/modules/git.py
@@ -42,19 +42,19 @@ options:
default: "HEAD"
accept_hostkey:
description:
- - Will ensure or not that "-o StrictHostKeyChecking=no" is present as an ssh option.
+ - Will ensure or not that C(-o StrictHostKeyChecking=no) is present as an ssh option.
- Be aware that this disables a protection against MITM attacks.
- - Those using OpenSSH >= 7.5 might want to set O(ssh_opts) to V(StrictHostKeyChecking=accept-new)
+ - Those using OpenSSH >= 7.5 might want to use O(accept_newhostkey) or set O(ssh_opts) to V(StrictHostKeyChecking=accept-new)
instead, it does not remove the MITM issue but it does restrict it to the first attempt.
type: bool
default: 'no'
version_added: "1.5"
accept_newhostkey:
description:
- - As of OpenSSH 7.5, "-o StrictHostKeyChecking=accept-new" can be
+ - As of OpenSSH 7.5, C(-o StrictHostKeyChecking=accept-new) can be
used which is safer and will only accepts host keys which are
- not present or are the same. if V(true), ensure that
- "-o StrictHostKeyChecking=accept-new" is present as an ssh option.
+ not present or are the same. If V(true), ensure that
+ C(-o StrictHostKeyChecking=accept-new) is present as an ssh option.
type: bool
default: 'no'
version_added: "2.12"
@@ -65,21 +65,21 @@ options:
- For older versions it appends E(GIT_SSH_OPTS) (specific to this module) to the
variables above or via a wrapper script.
- Other options can add to this list, like O(key_file) and O(accept_hostkey).
- - An example value could be "-o StrictHostKeyChecking=no" (although this particular
+ - An example value could be C(-o StrictHostKeyChecking=no) (although this particular
option is better set by O(accept_hostkey)).
- - The module ensures that 'BatchMode=yes' is always present to avoid prompts.
+ - The module ensures that C(BatchMode=yes) is always present to avoid prompts.
type: str
version_added: "1.5"
key_file:
description:
- Specify an optional private key file path, on the target host, to use for the checkout.
- - This ensures 'IdentitiesOnly=yes' is present in O(ssh_opts).
+ - This ensures C(IdentitiesOnly=yes) is present in O(ssh_opts).
type: path
version_added: "1.5"
reference:
description:
- - Reference repository (see "git clone --reference ...").
+ - Reference repository (see C(git clone --reference ...)).
type: str
version_added: "1.4"
remote:
@@ -165,7 +165,7 @@ options:
track_submodules:
description:
- If V(true), submodules will track the latest commit on their
- master branch (or other branch specified in .gitmodules). If
+ master branch (or other branch specified in C(.gitmodules)). If
V(false), submodules will be kept at the revision specified by the
main project. This is equivalent to specifying the C(--remote) flag
to git submodule update.
@@ -207,15 +207,18 @@ options:
type: path
version_added: "2.7"
- gpg_whitelist:
+ gpg_allowlist:
description:
- A list of trusted GPG fingerprints to compare to the fingerprint of the
GPG-signed commit.
- Only used when O(verify_commit=yes).
- Use of this feature requires Git 2.6+ due to its reliance on git's C(--raw) flag to C(verify-commit) and C(verify-tag).
+ - Alias O(gpg_allowlist) is added in version 2.17.
+ - Alias O(gpg_whitelist) is deprecated and will be removed in version 2.21.
type: list
elements: str
default: []
+ aliases: [ gpg_whitelist ]
version_added: "2.9"
requirements:
@@ -232,63 +235,63 @@ notes:
SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
one solution is to use the option accept_hostkey. Another solution is to
add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
- the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts."
+ the git module, with the following command: C(ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts)."
'''
EXAMPLES = '''
- name: Git checkout
ansible.builtin.git:
- repo: 'https://foosball.example.org/path/to/repo.git'
- dest: /srv/checkout
+ repo: 'https://github.com/ansible/ansible.git'
+ dest: /tmp/checkout
version: release-0.22
- name: Read-write git checkout from github
ansible.builtin.git:
- repo: git@github.com:mylogin/hello.git
- dest: /home/mylogin/hello
+ repo: git@github.com:ansible/ansible.git
+ dest: /tmp/checkout
- name: Just ensuring the repo checkout exists
ansible.builtin.git:
- repo: 'https://foosball.example.org/path/to/repo.git'
- dest: /srv/checkout
+ repo: 'https://github.com/ansible/ansible.git'
+ dest: /tmp/checkout
update: no
- name: Just get information about the repository whether or not it has already been cloned locally
ansible.builtin.git:
- repo: 'https://foosball.example.org/path/to/repo.git'
- dest: /srv/checkout
+ repo: git@github.com:ansible/ansible.git
+ dest: /tmp/checkout
clone: no
update: no
- name: Checkout a github repo and use refspec to fetch all pull requests
ansible.builtin.git:
- repo: https://github.com/ansible/ansible-examples.git
- dest: /src/ansible-examples
+ repo: 'https://github.com/ansible/ansible.git'
+ dest: /tmp/checkout
refspec: '+refs/pull/*:refs/heads/*'
- name: Create git archive from repo
ansible.builtin.git:
- repo: https://github.com/ansible/ansible-examples.git
- dest: /src/ansible-examples
- archive: /tmp/ansible-examples.zip
+ repo: git@github.com:ansible/ansible.git
+ dest: /tmp/checkout
+ archive: /tmp/ansible.zip
- name: Clone a repo with separate git directory
ansible.builtin.git:
- repo: https://github.com/ansible/ansible-examples.git
- dest: /src/ansible-examples
- separate_git_dir: /src/ansible-examples.git
+ repo: 'https://github.com/ansible/ansible.git'
+ dest: /tmp/checkout
+ separate_git_dir: /tmp/repo
- name: Example clone of a single branch
ansible.builtin.git:
- repo: https://github.com/ansible/ansible-examples.git
- dest: /src/ansible-examples
+ repo: git@github.com:ansible/ansible.git
+ dest: /tmp/checkout
single_branch: yes
version: master
- name: Avoid hanging when http(s) password is missing
ansible.builtin.git:
- repo: https://github.com/ansible/could-be-a-private-repo
- dest: /src/from-private-repo
+ repo: 'https://github.com/ansible/ansible.git'
+ dest: /tmp/checkout
environment:
GIT_TERMINAL_PROMPT: 0 # reports "terminal prompts disabled" on missing password
# or GIT_ASKPASS: /bin/true # for git before version 2.3.0, reports "Authentication failed" on missing password
@@ -465,7 +468,7 @@ def write_ssh_wrapper(module):
def set_git_ssh_env(key_file, ssh_opts, git_version, module):
'''
use environment variables to configure git's ssh execution,
- which varies by version but this functino should handle all.
+ which varies by version but this function should handle all.
'''
# initialise to existing ssh opts and/or append user provided
@@ -497,7 +500,7 @@ def set_git_ssh_env(key_file, ssh_opts, git_version, module):
# older than 2.3 does not know how to use git_ssh_command,
# so we force it into get_ssh var
# https://github.com/gitster/git/commit/09d60d785c68c8fa65094ecbe46fbc2a38d0fc1f
- if git_version < LooseVersion('2.3.0'):
+ if git_version is not None and git_version < LooseVersion('2.3.0'):
# for use in wrapper
os.environ["GIT_SSH_OPTS"] = ssh_opts
@@ -567,7 +570,7 @@ def get_submodule_versions(git_path, module, dest, version='HEAD'):
def clone(git_path, module, repo, dest, remote, depth, version, bare,
- reference, refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_whitelist, single_branch):
+ reference, refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_allowlist, single_branch):
''' makes a new git repo if it does not already exist '''
dest_dirname = os.path.dirname(dest)
try:
@@ -634,7 +637,7 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare,
module.run_command(cmd, check_rc=True, cwd=dest)
if verify_commit:
- verify_commit_sign(git_path, module, dest, version, gpg_whitelist)
+ verify_commit_sign(git_path, module, dest, version, gpg_allowlist)
def has_local_mods(module, git_path, dest, bare):
@@ -910,7 +913,7 @@ def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, g
refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*']
else:
# ensure all tags are fetched
- if git_version_used >= LooseVersion('1.9'):
+ if git_version_used is not None and git_version_used >= LooseVersion('1.9'):
fetch_cmd.append('--tags')
else:
# old git versions have a bug in --tags that prevents updating existing tags
@@ -1015,7 +1018,7 @@ def set_remote_branch(git_path, module, dest, remote, version, depth):
module.fail_json(msg="Failed to fetch branch from remote: %s" % version, stdout=out, stderr=err, rc=rc)
-def switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_whitelist):
+def switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_allowlist):
cmd = ''
if version == 'HEAD':
branch = get_head_branch(git_path, module, dest, remote)
@@ -1051,26 +1054,26 @@ def switch_version(git_path, module, dest, remote, version, verify_commit, depth
stdout=out1, stderr=err1, rc=rc, cmd=cmd)
if verify_commit:
- verify_commit_sign(git_path, module, dest, version, gpg_whitelist)
+ verify_commit_sign(git_path, module, dest, version, gpg_allowlist)
return (rc, out1, err1)
-def verify_commit_sign(git_path, module, dest, version, gpg_whitelist):
+def verify_commit_sign(git_path, module, dest, version, gpg_allowlist):
if version in get_annotated_tags(git_path, module, dest):
git_sub = "verify-tag"
else:
git_sub = "verify-commit"
cmd = "%s %s %s" % (git_path, git_sub, version)
- if gpg_whitelist:
+ if gpg_allowlist:
cmd += " --raw"
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg='Failed to verify GPG signature of commit/tag "%s"' % version, stdout=out, stderr=err, rc=rc)
- if gpg_whitelist:
+ if gpg_allowlist:
fingerprint = get_gpg_fingerprint(err)
- if fingerprint not in gpg_whitelist:
- module.fail_json(msg='The gpg_whitelist does not include the public key "%s" for this commit' % fingerprint, stdout=out, stderr=err, rc=rc)
+ if fingerprint not in gpg_allowlist:
+ module.fail_json(msg='The gpg_allowlist does not include the public key "%s" for this commit' % fingerprint, stdout=out, stderr=err, rc=rc)
return (rc, out, err)
@@ -1183,7 +1186,16 @@ def main():
clone=dict(default='yes', type='bool'),
update=dict(default='yes', type='bool'),
verify_commit=dict(default='no', type='bool'),
- gpg_whitelist=dict(default=[], type='list', elements='str'),
+ gpg_allowlist=dict(
+ default=[], type='list', aliases=['gpg_whitelist'], elements='str',
+ deprecated_aliases=[
+ dict(
+ name='gpg_whitelist',
+ version='2.21',
+ collection_name='ansible.builtin',
+ )
+ ],
+ ),
accept_hostkey=dict(default='no', type='bool'),
accept_newhostkey=dict(default='no', type='bool'),
key_file=dict(default=None, type='path', required=False),
@@ -1214,7 +1226,7 @@ def main():
allow_clone = module.params['clone']
bare = module.params['bare']
verify_commit = module.params['verify_commit']
- gpg_whitelist = module.params['gpg_whitelist']
+ gpg_allowlist = module.params['gpg_allowlist']
reference = module.params['reference']
single_branch = module.params['single_branch']
git_path = module.params['executable'] or module.get_bin_path('git', True)
@@ -1296,7 +1308,7 @@ def main():
# GIT_SSH= as an environment variable, might create sh wrapper script for older versions.
set_git_ssh_env(key_file, ssh_opts, git_version_used, module)
- if depth is not None and git_version_used < LooseVersion('1.9.1'):
+ if depth is not None and git_version_used is not None and git_version_used < LooseVersion('1.9.1'):
module.warn("git version is too old to fully support the depth argument. Falling back to full checkouts.")
depth = None
@@ -1321,7 +1333,7 @@ def main():
module.exit_json(**result)
# there's no git config, so clone
clone(git_path, module, repo, dest, remote, depth, version, bare, reference,
- refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_whitelist, single_branch)
+ refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_allowlist, single_branch)
elif not update:
# Just return having found a repo already in the dest path
# this does no checking that the repo is the actual repo
@@ -1376,7 +1388,7 @@ def main():
# switch to version specified regardless of whether
# we got new revisions from the repository
if not bare:
- switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_whitelist)
+ switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_allowlist)
# Deal with submodules
submodules_updated = False
diff --git a/lib/ansible/modules/group.py b/lib/ansible/modules/group.py
index 100d211584c..716e7e0a515 100644
--- a/lib/ansible/modules/group.py
+++ b/lib/ansible/modules/group.py
@@ -37,7 +37,7 @@ options:
force:
description:
- Whether to delete a group even if it is the primary group of a user.
- - Only applicable on platforms which implement a --force flag on the group deletion command.
+ - Only applicable on platforms which implement a C(--force) flag on the group deletion command.
type: bool
default: false
version_added: "2.15"
@@ -62,6 +62,22 @@ options:
type: bool
default: no
version_added: "2.8"
+ gid_min:
+ description:
+ - Sets the GID_MIN value for group creation.
+ - Overwrites /etc/login.defs default value.
+ - Currently supported on Linux. Does nothing when used with other platforms.
+ - Requires O(local) is omitted or V(False).
+ type: int
+ version_added: "2.18"
+ gid_max:
+ description:
+ - Sets the GID_MAX value for group creation.
+ - Overwrites /etc/login.defs default value.
+ - Currently supported on Linux. Does nothing when used with other platforms.
+ - Requires O(local) is omitted or V(False).
+ type: int
+ version_added: "2.18"
extends_documentation_fragment: action_common_attributes
attributes:
check_mode:
@@ -151,6 +167,14 @@ class Group(object):
self.system = module.params['system']
self.local = module.params['local']
self.non_unique = module.params['non_unique']
+ self.gid_min = module.params['gid_min']
+ self.gid_max = module.params['gid_max']
+
+ if self.local:
+ if self.gid_min is not None:
+ module.fail_json(msg="'gid_min' can not be used with 'local'")
+ if self.gid_max is not None:
+ module.fail_json(msg="'gid_max' can not be used with 'local'")
def execute_command(self, cmd):
return self.module.run_command(cmd)
@@ -184,6 +208,12 @@ class Group(object):
cmd.append('-o')
elif key == 'system' and kwargs[key] is True:
cmd.append('-r')
+ if self.gid_min is not None:
+ cmd.append('-K')
+ cmd.append('GID_MIN=' + str(self.gid_min))
+ if self.gid_max is not None:
+ cmd.append('-K')
+ cmd.append('GID_MAX=' + str(self.gid_max))
cmd.append(self.name)
return self.execute_command(cmd)
@@ -292,6 +322,12 @@ class SunOS(Group):
cmd.append(str(kwargs[key]))
if self.non_unique:
cmd.append('-o')
+ if self.gid_min is not None:
+ cmd.append('-K')
+ cmd.append('GID_MIN=' + str(self.gid_min))
+ if self.gid_max is not None:
+ cmd.append('-K')
+ cmd.append('GID_MAX=' + str(self.gid_max))
cmd.append(self.name)
return self.execute_command(cmd)
@@ -323,6 +359,12 @@ class AIX(Group):
cmd.append('id=' + str(kwargs[key]))
elif key == 'system' and kwargs[key] is True:
cmd.append('-a')
+ if self.gid_min is not None:
+ cmd.append('-K')
+ cmd.append('GID_MIN=' + str(self.gid_min))
+ if self.gid_max is not None:
+ cmd.append('-K')
+ cmd.append('GID_MAX=' + str(self.gid_max))
cmd.append(self.name)
return self.execute_command(cmd)
@@ -368,6 +410,12 @@ class FreeBsdGroup(Group):
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
+ if self.gid_min is not None:
+ cmd.append('-K')
+ cmd.append('GID_MIN=' + str(self.gid_min))
+ if self.gid_max is not None:
+ cmd.append('-K')
+ cmd.append('GID_MAX=' + str(self.gid_max))
return self.execute_command(cmd)
def group_mod(self, **kwargs):
@@ -492,6 +540,12 @@ class OpenBsdGroup(Group):
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
+ if self.gid_min is not None:
+ cmd.append('-K')
+ cmd.append('GID_MIN=' + str(self.gid_min))
+ if self.gid_max is not None:
+ cmd.append('-K')
+ cmd.append('GID_MAX=' + str(self.gid_max))
cmd.append(self.name)
return self.execute_command(cmd)
@@ -538,6 +592,12 @@ class NetBsdGroup(Group):
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
+ if self.gid_min is not None:
+ cmd.append('-K')
+ cmd.append('GID_MIN=' + str(self.gid_min))
+ if self.gid_max is not None:
+ cmd.append('-K')
+ cmd.append('GID_MAX=' + str(self.gid_max))
cmd.append(self.name)
return self.execute_command(cmd)
@@ -578,6 +638,14 @@ class BusyBoxGroup(Group):
if self.system:
cmd.append('-S')
+ if self.gid_min is not None:
+ cmd.append('-K')
+ cmd.append('GID_MIN=' + str(self.gid_min))
+
+ if self.gid_max is not None:
+ cmd.append('-K')
+ cmd.append('GID_MAX=' + str(self.gid_max))
+
cmd.append(self.name)
return self.execute_command(cmd)
@@ -626,6 +694,8 @@ def main():
system=dict(type='bool', default=False),
local=dict(type='bool', default=False),
non_unique=dict(type='bool', default=False),
+ gid_min=dict(type='int'),
+ gid_max=dict(type='int'),
),
supports_check_mode=True,
required_if=[
diff --git a/lib/ansible/modules/hostname.py b/lib/ansible/modules/hostname.py
index 1f0bfa03c23..4b2ee682f2e 100644
--- a/lib/ansible/modules/hostname.py
+++ b/lib/ansible/modules/hostname.py
@@ -36,7 +36,7 @@ options:
description:
- Which strategy to use to update the hostname.
- If not set we try to autodetect, but this can be problematic, particularly with containers as they can present misleading information.
- - Note that 'systemd' should be specified for RHEL/EL/CentOS 7+. Older distributions should use 'redhat'.
+ - Note that V(systemd) should be specified for RHEL/EL/CentOS 7+. Older distributions should use V(redhat).
choices: ['alpine', 'debian', 'freebsd', 'generic', 'macos', 'macosx', 'darwin', 'openbsd', 'openrc', 'redhat', 'sles', 'solaris', 'systemd']
type: str
version_added: '2.9'
@@ -516,7 +516,7 @@ class DarwinStrategy(BaseStrategy):
However, macOS also has LocalHostName and ComputerName settings.
LocalHostName controls the Bonjour/ZeroConf name, used by services
like AirDrop. This class implements a method, _scrub_hostname(), that mimics
- the transformations macOS makes on hostnames when enterened in the Sharing
+ the transformations macOS makes on hostnames when entered in the Sharing
preference pane. It replaces spaces with dashes and removes all special
characters.
@@ -886,8 +886,6 @@ def main():
if name != current_hostname:
name_before = current_hostname
- elif name != permanent_hostname:
- name_before = permanent_hostname
else:
name_before = permanent_hostname
diff --git a/lib/ansible/modules/include_vars.py b/lib/ansible/modules/include_vars.py
index 99e77cb5faa..9238682dead 100644
--- a/lib/ansible/modules/include_vars.py
+++ b/lib/ansible/modules/include_vars.py
@@ -21,20 +21,20 @@ options:
file:
description:
- The file name from which variables should be loaded.
- - If the path is relative, it will look for the file in vars/ subdirectory of a role or relative to playbook.
+ - If the path is relative, it will look for the file in C(vars/) subdirectory of a role or relative to playbook.
type: path
version_added: "2.2"
dir:
description:
- The directory name from which the variables should be loaded.
- - If the path is relative and the task is inside a role, it will look inside the role's vars/ subdirectory.
+ - If the path is relative and the task is inside a role, it will look inside the role's C(vars/) subdirectory.
- If the path is relative and not inside a role, it will be parsed relative to the playbook.
type: path
version_added: "2.2"
name:
description:
- The name of a variable into which assign the included vars.
- - If omitted (null) they will be made top level vars.
+ - If omitted (V(null)) they will be made top level vars.
type: str
version_added: "2.2"
depth:
@@ -81,8 +81,8 @@ options:
version_added: "2.12"
free-form:
description:
- - This module allows you to specify the 'file' option directly without any other options.
- - There is no 'free-form' option, this is just an indicator, see example below.
+ - This module allows you to specify the O(file) option directly without any other options.
+ - There is no O(ignore:free-form) option, this is just an indicator, see example below.
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.conn
diff --git a/lib/ansible/modules/iptables.py b/lib/ansible/modules/iptables.py
index 36ce1bc995b..fad9b405ae4 100644
--- a/lib/ansible/modules/iptables.py
+++ b/lib/ansible/modules/iptables.py
@@ -39,7 +39,7 @@ options:
description:
- This option specifies the packet matching table on which the command should operate.
- If the kernel is configured with automatic module loading, an attempt will be made
- to load the appropriate module for that table if it is not already there.
+ to load the appropriate module for that table if it is not already there.
type: str
choices: [ filter, nat, mangle, raw, security ]
default: filter
@@ -135,7 +135,7 @@ options:
a specific property.
- The set of matches makes up the condition under which a target is invoked.
- Matches are evaluated first to last if specified as an array and work in short-circuit
- fashion, i.e. if one extension yields false, the evaluation will stop.
+ fashion, in other words if one extension yields false, the evaluation will stop.
type: list
elements: str
default: []
@@ -153,7 +153,7 @@ options:
gateway:
description:
- This specifies the IP address of the host to send the cloned packets.
- - This option is only valid when O(jump) is set to V(TEE).
+ - This option is only valid when O(jump=TEE).
type: str
version_added: "2.8"
log_prefix:
@@ -165,7 +165,7 @@ options:
description:
- Logging level according to the syslogd-defined priorities.
- The value can be strings or numbers from 1-8.
- - This parameter is only applicable if O(jump) is set to V(LOG).
+ - This parameter is only applicable if O(jump=LOG).
type: str
version_added: "2.8"
choices: [ '0', '1', '2', '3', '4', '5', '6', '7', 'emerg', 'alert', 'crit', 'error', 'warning', 'notice', 'info', 'debug' ]
@@ -242,13 +242,13 @@ options:
type: str
to_destination:
description:
- - This specifies a destination address to use with C(DNAT).
+ - This specifies a destination address to use with O(ctstate=DNAT).
- Without this, the destination address is never altered.
type: str
version_added: "2.1"
to_source:
description:
- - This specifies a source address to use with C(SNAT).
+ - This specifies a source address to use with O(ctstate=SNAT).
- Without this, the source address is never altered.
type: str
version_added: "2.2"
@@ -301,7 +301,7 @@ options:
match_set:
description:
- Specifies a set name that can be defined by ipset.
- - Must be used together with the match_set_flags parameter.
+ - Must be used together with the O(match_set_flags) parameter.
- When the V(!) argument is prepended then it inverts the rule.
- Uses the iptables set extension.
type: str
@@ -309,10 +309,11 @@ options:
match_set_flags:
description:
- Specifies the necessary flags for the match_set parameter.
- - Must be used together with the match_set parameter.
+ - Must be used together with the O(match_set) parameter.
- Uses the iptables set extension.
+ - Choices V(dst,dst) and V(src,src) added in version 2.17.
type: str
- choices: [ "src", "dst", "src,dst", "dst,src" ]
+ choices: [ "src", "dst", "src,dst", "dst,src", "dst,dst", "src,src" ]
version_added: "2.11"
limit:
description:
@@ -341,14 +342,14 @@ options:
reject_with:
description:
- 'Specifies the error packet type to return while rejecting. It implies
- "jump: REJECT".'
+ C(jump=REJECT).'
type: str
version_added: "2.1"
icmp_type:
description:
- This allows specification of the ICMP type, which can be a numeric
ICMP type, type/code pair, or one of the ICMP type names shown by the
- command 'iptables -p icmp -h'
+ command C(iptables -p icmp -h).
type: str
version_added: "2.2"
flush:
@@ -386,10 +387,10 @@ options:
version_added: "2.13"
numeric:
description:
- - This parameter controls the running of the list -action of iptables, which is used internally by the module
- - Does not affect the actual functionality. Use this if iptables hang when creating a chain or altering policy
- - If V(true), then iptables skips the DNS-lookup of the IP addresses in a chain when it uses the list -action
- - Listing is used internally for example when setting a policy or creating a chain
+ - This parameter controls the running of the list -action of iptables, which is used internally by the module.
+ - Does not affect the actual functionality. Use this if iptables hang when creating a chain or altering policy.
+ - If V(true), then iptables skips the DNS-lookup of the IP addresses in a chain when it uses the list -action.
+ - Listing is used internally for example when setting a policy or creating a chain.
type: bool
default: false
version_added: "2.15"
@@ -820,7 +821,10 @@ def main():
src_range=dict(type='str'),
dst_range=dict(type='str'),
match_set=dict(type='str'),
- match_set_flags=dict(type='str', choices=['src', 'dst', 'src,dst', 'dst,src']),
+ match_set_flags=dict(
+ type='str',
+ choices=['src', 'dst', 'src,dst', 'dst,src', 'src,src', 'dst,dst']
+ ),
limit=dict(type='str'),
limit_burst=dict(type='str'),
uid_owner=dict(type='str'),
@@ -844,6 +848,7 @@ def main():
required_if=[
['jump', 'TEE', ['gateway']],
['jump', 'tee', ['gateway']],
+ ['flush', False, ['chain']],
]
)
args = dict(
@@ -861,10 +866,6 @@ def main():
ip_version = module.params['ip_version']
iptables_path = module.get_bin_path(BINS[ip_version], True)
- # Check if chain option is required
- if args['flush'] is False and args['chain'] is None:
- module.fail_json(msg="Either chain or flush parameter must be specified.")
-
if module.params.get('log_prefix', None) or module.params.get('log_level', None):
if module.params['jump'] is None:
module.params['jump'] = 'LOG'
diff --git a/lib/ansible/modules/known_hosts.py b/lib/ansible/modules/known_hosts.py
index 8235258c664..b4155660e1b 100644
--- a/lib/ansible/modules/known_hosts.py
+++ b/lib/ansible/modules/known_hosts.py
@@ -10,7 +10,7 @@ DOCUMENTATION = r'''
module: known_hosts
short_description: Add or remove a host from the C(known_hosts) file
description:
- - The M(ansible.builtin.known_hosts) module lets you add or remove a host keys from the C(known_hosts) file.
+ - The M(ansible.builtin.known_hosts) module lets you add or remove host keys from the C(known_hosts) file.
- Starting at Ansible 2.2, multiple entries per host are allowed, but only one for each key type supported by ssh.
This is useful if you're going to want to use the M(ansible.builtin.git) module over ssh, for example.
- If you have a very large number of host keys to manage, you will find the M(ansible.builtin.template) module more useful.
@@ -19,7 +19,7 @@ options:
name:
aliases: [ 'host' ]
description:
- - The host to add or remove (must match a host specified in key). It will be converted to lowercase so that ssh-keygen can find it.
+ - The host to add or remove (must match a host specified in key). It will be converted to lowercase so that C(ssh-keygen) can find it.
- Must match with or present in key attribute.
- For custom SSH port, O(name) needs to specify port as well. See example section.
type: str
@@ -49,8 +49,8 @@ options:
version_added: "2.3"
state:
description:
- - V(present) to add the host key.
- - V(absent) to remove it.
+ - V(present) to add host keys.
+ - V(absent) to remove host keys.
choices: [ "absent", "present" ]
default: "present"
type: str
@@ -101,6 +101,7 @@ EXAMPLES = r'''
# state = absent|present (default: present)
import base64
+import copy
import errno
import hashlib
import hmac
@@ -118,6 +119,7 @@ def enforce_state(module, params):
Add or remove key.
"""
+ results = dict(changed=False)
host = params["name"].lower()
key = params.get("key", None)
path = params.get("path")
@@ -140,13 +142,12 @@ def enforce_state(module, params):
found, replace_or_add, found_line = search_for_host_key(module, host, key, path, sshkeygen)
- params['diff'] = compute_diff(path, found_line, replace_or_add, state, key)
+ results['diff'] = compute_diff(path, found_line, replace_or_add, state, key)
# check if we are trying to remove a non matching key,
# in that case return with no change to the host
if state == 'absent' and not found_line and key:
- params['changed'] = False
- return params
+ return results
# We will change state if found==True & state!="present"
# or found==False & state=="present"
@@ -154,15 +155,15 @@ def enforce_state(module, params):
# Alternatively, if replace is true (i.e. key present, and we must change
# it)
if module.check_mode:
- module.exit_json(changed=replace_or_add or (state == "present") != found,
- diff=params['diff'])
+ results['changed'] = replace_or_add or (state == "present") != found
+ module.exit_json(**results)
# Now do the work.
# Only remove whole host if found and no key provided
if found and not key and state == "absent":
module.run_command([sshkeygen, '-R', host, '-f', path], check_rc=True)
- params['changed'] = True
+ results['changed'] = True
# Next, add a new (or replacing) entry
if replace_or_add or found != (state == "present"):
@@ -188,9 +189,9 @@ def enforce_state(module, params):
else:
module.atomic_move(outf.name, path)
- params['changed'] = True
+ results['changed'] = True
- return params
+ return results
def sanity_check(module, host, key, sshkeygen):
@@ -364,7 +365,9 @@ def main():
supports_check_mode=True
)
- results = enforce_state(module, module.params)
+ # TODO: deprecate returning everything that was passed in
+ results = copy.copy(module.params)
+ results.update(enforce_state(module, module.params))
module.exit_json(**results)
diff --git a/lib/ansible/modules/lineinfile.py b/lib/ansible/modules/lineinfile.py
index 9e9fdd9b8ef..105fcc86604 100644
--- a/lib/ansible/modules/lineinfile.py
+++ b/lib/ansible/modules/lineinfile.py
@@ -87,13 +87,11 @@ options:
- If specified, the line will be inserted after the last match of specified regular expression.
- If the first match is required, use(firstmatch=yes).
- A special value is available; V(EOF) for inserting the line at the end of the file.
- - If specified regular expression has no matches, EOF will be used instead.
+ - If specified regular expression has no matches or no value is passed, V(EOF) will be used instead.
- If O(insertbefore) is set, default value V(EOF) will be ignored.
- If regular expressions are passed to both O(regexp) and O(insertafter), O(insertafter) is only honored if no match for O(regexp) is found.
- May not be used with O(backrefs) or O(insertbefore).
type: str
- choices: [ EOF, '*regex*' ]
- default: EOF
insertbefore:
description:
- Used with O(state=present).
@@ -104,7 +102,6 @@ options:
- If regular expressions are passed to both O(regexp) and O(insertbefore), O(insertbefore) is only honored if no match for O(regexp) is found.
- May not be used with O(backrefs) or O(insertafter).
type: str
- choices: [ BOF, '*regex*' ]
version_added: "1.1"
create:
description:
diff --git a/lib/ansible/modules/meta.py b/lib/ansible/modules/meta.py
index d17b70008a6..91b3f0403f9 100644
--- a/lib/ansible/modules/meta.py
+++ b/lib/ansible/modules/meta.py
@@ -33,7 +33,12 @@ options:
- V(end_host) (added in Ansible 2.8) is a per-host variation of V(end_play). Causes the play to end for the current host without failing it.
- V(end_batch) (added in Ansible 2.12) causes the current batch (see C(serial)) to end without failing the host(s).
Note that with C(serial=0) or undefined this behaves the same as V(end_play).
- choices: [ clear_facts, clear_host_errors, end_host, end_play, flush_handlers, noop, refresh_inventory, reset_connection, end_batch ]
+ - V(end_role) (added in Ansible 2.18) causes the currently executing role to end without failing the host(s).
+ Effectively all tasks from within a role after V(end_role) is executed are ignored. Since handlers live in a global,
+ play scope, all handlers added via the role are unaffected and are still executed if notified. It is an error
+ to call V(end_role) from outside of a role or from a handler. Note that V(end_role) does not have an effect on
+ the parent roles or roles that depend (via dependencies in meta/main.yml) on a role executing V(end_role).
+ choices: [ clear_facts, clear_host_errors, end_host, end_play, flush_handlers, noop, refresh_inventory, reset_connection, end_batch, end_role ]
required: true
extends_documentation_fragment:
- action_common_attributes
@@ -62,6 +67,8 @@ attributes:
connection:
details: Most options in this action do not use a connection, except V(reset_connection) which still does not connect to the remote
support: partial
+ until:
+ support: none
notes:
- V(clear_facts) will remove the persistent facts from M(ansible.builtin.set_fact) using O(ansible.builtin.set_fact#module:cacheable=True),
but not the current host variable it creates for the current run.
diff --git a/lib/ansible/modules/mount_facts.py b/lib/ansible/modules/mount_facts.py
new file mode 100644
index 00000000000..f5d2bf47f3a
--- /dev/null
+++ b/lib/ansible/modules/mount_facts.py
@@ -0,0 +1,651 @@
+# -*- coding: utf-8 -*-
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import annotations
+
+
+DOCUMENTATION = """
+---
+module: mount_facts
+version_added: 2.18
+short_description: Retrieve mount information.
+description:
+ - Retrieve information about mounts from preferred sources and filter the results based on the filesystem type and device.
+options:
+ devices:
+ description: A list of fnmatch patterns to filter mounts by the special device or remote file system.
+ default: ~
+ type: list
+ elements: str
+ fstypes:
+ description: A list of fnmatch patterns to filter mounts by the type of the file system.
+ default: ~
+ type: list
+ elements: str
+ sources:
+ description:
+ - A list of sources used to determine the mounts. Missing file sources (or empty files) are skipped. Repeat sources, including symlinks, are skipped.
+ - The C(mount_points) return value contains the first definition found for a mount point.
+ - Additional mounts to the same mount point are available from C(aggregate_mounts) (if enabled).
+ - By default, mounts are retrieved from all of the standard locations, which have the predefined aliases V(all)/V(static)/V(dynamic).
+ - V(all) contains V(dynamic) and V(static).
+ - V(dynamic) contains V(/etc/mtab), V(/proc/mounts), V(/etc/mnttab), and the value of O(mount_binary) if it is not None.
+ This allows platforms like BSD or AIX, which don't have an equivalent to V(/proc/mounts), to collect the current mounts by default.
+ See the O(mount_binary) option to disable the fall back or configure a different executable.
+ - V(static) contains V(/etc/fstab), V(/etc/vfstab), and V(/etc/filesystems).
+ Note that V(/etc/filesystems) is specific to AIX. The Linux file by this name has a different format/purpose and is ignored.
+ - The value of O(mount_binary) can be configured as a source, which will cause it to always execute.
+ Depending on the other sources configured, this could be inefficient/redundant.
+ For example, if V(/proc/mounts) and V(mount) are listed as O(sources), Linux hosts will retrieve the same mounts twice.
+ default: ~
+ type: list
+ elements: str
+ mount_binary:
+ description:
+ - The O(mount_binary) is used if O(sources) contains the value "mount", or if O(sources) contains a dynamic
+ source, and none were found (as can be expected on BSD or AIX hosts).
+ - Set to V(null) to stop after no dynamic file source is found instead.
+ type: raw
+ default: mount
+ timeout:
+ description:
+ - This is the maximum number of seconds to wait for each mount to complete. When this is V(null), wait indefinitely.
+ - Configure in conjunction with O(on_timeout) to skip unresponsive mounts.
+ - This timeout also applies to the O(mount_binary) command to list mounts.
+ - If the module is configured to run during the play's fact gathering stage, set a timeout using module_defaults to prevent a hang (see example).
+ type: float
+ on_timeout:
+ description:
+ - The action to take when gathering mount information exceeds O(timeout).
+ type: str
+ default: error
+ choices:
+ - error
+ - warn
+ - ignore
+ include_aggregate_mounts:
+ description:
+ - Whether or not the module should return the C(aggregate_mounts) list in C(ansible_facts).
+ - When this is V(null), a warning will be emitted if multiple mounts for the same mount point are found.
+ default: ~
+ type: bool
+extends_documentation_fragment:
+ - action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+author:
+ - Ansible Core Team
+ - Sloane Hertel (@s-hertel)
+"""
+
+EXAMPLES = """
+- name: Get non-local devices
+ mount_facts:
+ devices: "[!/]*"
+
+- name: Get FUSE subtype mounts
+ mount_facts:
+ fstypes:
+ - "fuse.*"
+
+- name: Get NFS mounts during gather_facts with timeout
+ hosts: all
+ gather_facts: true
+ vars:
+ ansible_facts_modules:
+ - ansible.builtin.mount_facts
+ module_defaults:
+ ansible.builtin.mount_facts:
+ timeout: 10
+ fstypes:
+ - nfs
+ - nfs4
+
+- name: Get mounts from a non-default location
+ mount_facts:
+ sources:
+ - /usr/etc/fstab
+
+- name: Get mounts from the mount binary
+ mount_facts:
+ sources:
+ - mount
+ mount_binary: /sbin/mount
+"""
+
+RETURN = """
+ansible_facts:
+ description:
+ - An ansible_facts dictionary containing a dictionary of C(mount_points) and list of C(aggregate_mounts) when enabled.
+ - Each key in C(mount_points) is a mount point, and the value contains mount information (similar to C(ansible_facts["mounts"])).
+ Each value also contains the key C(ansible_context), with details about the source and line(s) corresponding to the parsed mount point.
+ - When C(aggregate_mounts) are included, the containing dictionaries are the same format as the C(mount_point) values.
+ returned: on success
+ type: dict
+ sample:
+ mount_points:
+ /proc/sys/fs/binfmt_misc:
+ ansible_context:
+ source: /proc/mounts
+ source_data: "systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=33850 0 0"
+ block_available: 0
+ block_size: 4096
+ block_total: 0
+ block_used: 0
+ device: "systemd-1"
+ dump: 0
+ fstype: "autofs"
+ inode_available: 0
+ inode_total: 0
+ inode_used: 0
+ mount: "/proc/sys/fs/binfmt_misc"
+ options: "rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=33850"
+ passno: 0
+ size_available: 0
+ size_total: 0
+ uuid: null
+ aggregate_mounts:
+ - ansible_context:
+ source: /proc/mounts
+ source_data: "systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=33850 0 0"
+ block_available: 0
+ block_size: 4096
+ block_total: 0
+ block_used: 0
+ device: "systemd-1"
+ dump: 0
+ fstype: "autofs"
+ inode_available: 0
+ inode_total: 0
+ inode_used: 0
+ mount: "/proc/sys/fs/binfmt_misc"
+ options: "rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=33850"
+ passno: 0
+ size_available: 0
+ size_total: 0
+ uuid: null
+ - ansible_context:
+ source: /proc/mounts
+ source_data: "binfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,nosuid,nodev,noexec,relatime 0 0"
+ block_available: 0
+ block_size: 4096
+ block_total: 0
+ block_used: 0
+ device: binfmt_misc
+ dump: 0
+ fstype: binfmt_misc
+ inode_available: 0
+ inode_total: 0
+ inode_used: 0
+ mount: "/proc/sys/fs/binfmt_misc"
+ options: "rw,nosuid,nodev,noexec,relatime"
+ passno: 0
+ size_available: 0
+ size_total: 0
+ uuid: null
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.facts import timeout as _timeout
+from ansible.module_utils.facts.utils import get_mount_size, get_file_content
+
+from contextlib import suppress
+from dataclasses import astuple, dataclass
+from fnmatch import fnmatch
+
+import codecs
+import datetime
+import functools
+import os
+import re
+import subprocess
+import typing as t
+
+STATIC_SOURCES = ["/etc/fstab", "/etc/vfstab", "/etc/filesystems"]
+DYNAMIC_SOURCES = ["/etc/mtab", "/proc/mounts", "/etc/mnttab"]
+
+# AIX and BSD don't have a file-based dynamic source, so the module also supports running a mount binary to collect these.
+# Pattern for Linux, including OpenBSD and NetBSD
+LINUX_MOUNT_RE = re.compile(r"^(?P<device>\S+) on (?P<mount>\S+) type (?P<fstype>\S+) \((?P<options>.+)\)$")
+# Pattern for other BSD including FreeBSD, DragonFlyBSD, and MacOS
+BSD_MOUNT_RE = re.compile(r"^(?P<device>\S+) on (?P<mount>\S+) \((?P<fstype>.+)\)$")
+# Pattern for AIX, example in https://www.ibm.com/docs/en/aix/7.2?topic=m-mount-command
+AIX_MOUNT_RE = re.compile(r"^(?P\S*)\s+(?P\S+)\s+(?P\S+)\s+(?P\S+)\s+(?P