diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 5d4dbf257d1..c3619bc0349 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -31,7 +31,7 @@ variables: - name: fetchDepth value: 500 - name: defaultContainer - value: quay.io/ansible/azure-pipelines-test-container:4.0.1 + value: quay.io/ansible/azure-pipelines-test-container:6.0.0 pool: Standard @@ -54,14 +54,12 @@ stages: nameFormat: Python {0} testFormat: units/{0} targets: - - test: 2.7 - - test: 3.6 - - test: 3.7 - test: 3.8 - test: 3.9 - test: '3.10' - test: 3.11 - test: 3.12 + - test: 3.13 - stage: Windows dependsOn: [] jobs: @@ -70,45 +68,50 @@ stages: nameFormat: Server {0} testFormat: windows/{0}/1 targets: - - test: 2016 - - test: 2019 - - test: 2022 + - name: 2016 WinRM HTTP + test: 2016/winrm/http + - name: 2019 WinRM HTTPS + test: 2019/winrm/https + - name: 2022 WinRM HTTPS + test: 2022/winrm/https + - name: 2022 PSRP HTTP + test: 2022/psrp/http + - name: 2022 SSH Key + test: 2022/ssh/key + - name: 2025 PSRP HTTP + test: 2025/psrp/http + - name: 2025 SSH Key + test: 2025/ssh/key - stage: Remote dependsOn: [] jobs: - template: templates/matrix.yml # context/target parameters: targets: - - name: macOS 13.2 - test: macos/13.2 - - name: RHEL 7.9 - test: rhel/7.9 - - name: RHEL 8.8 py36 - test: rhel/8.8@3.6 - - name: RHEL 8.8 py311 - test: rhel/8.8@3.11 - - name: RHEL 9.2 py39 - test: rhel/9.2@3.9 - - name: RHEL 9.2 py311 - test: rhel/9.2@3.11 - - name: FreeBSD 12.4 - test: freebsd/12.4 - - name: FreeBSD 13.2 - test: freebsd/13.2 + - name: macOS 14.3 + test: macos/14.3 + - name: RHEL 9.4 py39 + test: rhel/9.4@3.9 + - name: RHEL 9.4 py312 + test: rhel/9.4@3.12 + - name: FreeBSD 13.4 + test: freebsd/13.4 + - name: FreeBSD 14.1 + test: freebsd/14.1 groups: - 1 - 2 - template: templates/matrix.yml # context/controller parameters: targets: - - name: macOS 13.2 - test: macos/13.2 - - name: RHEL 8.8 - test: rhel/8.8 - - name: RHEL 9.2 - test: rhel/9.2 - - name: FreeBSD 13.2 - test: freebsd/13.2 + - name: macOS 14.3 + test: macos/14.3 + - name: RHEL 9.4 + test: rhel/9.4 + - name: FreeBSD 13.4 + test: freebsd/13.4 + - name: FreeBSD 14.1 + test: freebsd/14.1 groups: - 3 - 4 @@ -116,52 +119,44 @@ stages: - template: templates/matrix.yml # context/controller (ansible-test container management) parameters: targets: - - name: Alpine 3.17 - test: alpine/3.17 - - name: Alpine 3.18 - test: alpine/3.18 - - name: Fedora 38 - test: fedora/38 - - name: RHEL 8.8 - test: rhel/8.8 - - name: RHEL 9.2 - test: rhel/9.2 - - name: Ubuntu 22.04 - test: ubuntu/22.04 + - name: Alpine 3.20 + test: alpine/3.20 + - name: Fedora 40 + test: fedora/40 + - name: RHEL 9.4 + test: rhel/9.4 + - name: Ubuntu 24.04 + test: ubuntu/24.04 groups: - 6 - stage: Docker dependsOn: [] jobs: - - template: templates/matrix.yml + - template: templates/matrix.yml # context/target parameters: testFormat: linux/{0} targets: - - name: Alpine 3 - test: alpine3 - - name: CentOS 7 - test: centos7 - - name: Fedora 38 - test: fedora38 - - name: openSUSE 15 - test: opensuse15 - - name: Ubuntu 20.04 - test: ubuntu2004 + - name: Alpine 3.20 + test: alpine320 + - name: Fedora 40 + test: fedora40 - name: Ubuntu 22.04 test: ubuntu2204 + - name: Ubuntu 24.04 + test: ubuntu2404 groups: - 1 - 2 - - template: templates/matrix.yml + - template: templates/matrix.yml # context/controller parameters: testFormat: linux/{0} targets: - - name: Alpine 3 - test: alpine3 - - name: Fedora 38 - test: fedora38 - - name: Ubuntu 
22.04 - test: ubuntu2204 + - name: Alpine 3.20 + test: alpine320 + - name: Fedora 40 + test: fedora40 + - name: Ubuntu 24.04 + test: ubuntu2404 groups: - 3 - 4 @@ -174,9 +169,9 @@ stages: nameFormat: Python {0} testFormat: galaxy/{0}/1 targets: - - test: '3.10' - test: 3.11 - test: 3.12 + - test: 3.13 - stage: Generic dependsOn: [] jobs: @@ -185,9 +180,9 @@ stages: nameFormat: Python {0} testFormat: generic/{0}/1 targets: - - test: '3.10' - test: 3.11 - test: 3.12 + - test: 3.13 - stage: Incidental_Windows displayName: Incidental Windows dependsOn: [] @@ -197,9 +192,20 @@ stages: nameFormat: Server {0} testFormat: i/windows/{0} targets: - - test: 2016 - - test: 2019 - - test: 2022 + - name: 2016 WinRM HTTP + test: 2016/winrm/http + - name: 2019 WinRM HTTPS + test: 2019/winrm/https + - name: 2022 WinRM HTTPS + test: 2022/winrm/https + - name: 2022 PSRP HTTP + test: 2022/psrp/http + - name: 2022 SSH Key + test: 2022/ssh/key + - name: 2025 PSRP HTTP + test: 2025/psrp/http + - name: 2025 SSH Key + test: 2025/ssh/key - stage: Incidental dependsOn: [] jobs: @@ -209,8 +215,6 @@ stages: targets: - name: IOS Python test: ios/csr1000v/ - - name: VyOS Python - test: vyos/1.1.8/ - stage: Summary condition: succeededOrFailed() dependsOn: diff --git a/.azure-pipelines/commands/incidental/vyos.sh b/.azure-pipelines/commands/incidental/vyos.sh deleted file mode 120000 index cad3e41b707..00000000000 --- a/.azure-pipelines/commands/incidental/vyos.sh +++ /dev/null @@ -1 +0,0 @@ -network.sh \ No newline at end of file diff --git a/.azure-pipelines/commands/incidental/windows.sh b/.azure-pipelines/commands/incidental/windows.sh index 24272f62baf..f5a3070c457 100755 --- a/.azure-pipelines/commands/incidental/windows.sh +++ b/.azure-pipelines/commands/incidental/windows.sh @@ -6,6 +6,8 @@ declare -a args IFS='/:' read -ra args <<< "$1" version="${args[1]}" +connection="${args[2]}" +connection_setting="${args[3]}" target="shippable/windows/incidental/" @@ -26,11 +28,7 @@ if [ -s /tmp/windows.txt ] || [ "${CHANGED:+$CHANGED}" == "" ]; then echo "Detected changes requiring integration tests specific to Windows:" cat /tmp/windows.txt - echo "Running Windows integration tests for multiple versions concurrently." - - platforms=( - --windows "${version}" - ) + echo "Running Windows integration tests for the version ${version}." else echo "No changes requiring integration tests specific to Windows were detected." 
echo "Running Windows integration tests for a single version only: ${single_version}" @@ -39,14 +37,10 @@ else echo "Skipping this job since it is for: ${version}" exit 0 fi - - platforms=( - --windows "${version}" - ) fi # shellcheck disable=SC2086 ansible-test windows-integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \ - "${platforms[@]}" \ - --docker default --python "${python_default}" \ - --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}" + --controller "docker:default,python=${python_default}" \ + --target "remote:windows/${version},connection=${connection}+${connection_setting},provider=${provider}" \ + --remote-terminate always --remote-stage "${stage}" diff --git a/.azure-pipelines/commands/windows.sh b/.azure-pipelines/commands/windows.sh index 693d4f24bdc..622eb9e2d5e 100755 --- a/.azure-pipelines/commands/windows.sh +++ b/.azure-pipelines/commands/windows.sh @@ -6,7 +6,9 @@ declare -a args IFS='/:' read -ra args <<< "$1" version="${args[1]}" -group="${args[2]}" +connection="${args[2]}" +connection_setting="${args[3]}" +group="${args[4]}" target="shippable/windows/group${group}/" @@ -31,11 +33,7 @@ if [ -s /tmp/windows.txt ] || [ "${CHANGED:+$CHANGED}" == "" ]; then echo "Detected changes requiring integration tests specific to Windows:" cat /tmp/windows.txt - echo "Running Windows integration tests for multiple versions concurrently." - - platforms=( - --windows "${version}" - ) + echo "Running Windows integration tests for the version ${version}." else echo "No changes requiring integration tests specific to Windows were detected." echo "Running Windows integration tests for a single version only: ${single_version}" @@ -44,17 +42,13 @@ else echo "Skipping this job since it is for: ${version}" exit 0 fi - - platforms=( - --windows "${version}" - ) fi -for version in "${python_versions[@]}"; do +for py_version in "${python_versions[@]}"; do changed_all_target="all" changed_all_mode="default" - if [ "${version}" == "${python_default}" ]; then + if [ "${py_version}" == "${python_default}" ]; then # smoketest tests if [ "${CHANGED}" ]; then # with change detection enabled run tests for anything changed @@ -80,7 +74,7 @@ for version in "${python_versions[@]}"; do fi # terminate remote instances on the final python version tested - if [ "${version}" = "${python_versions[-1]}" ]; then + if [ "${py_version}" = "${python_versions[-1]}" ]; then terminate="always" else terminate="never" @@ -88,7 +82,8 @@ for version in "${python_versions[@]}"; do # shellcheck disable=SC2086 ansible-test windows-integration --color -v --retry-on-error "${ci}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \ - "${platforms[@]}" --changed-all-target "${changed_all_target}" --changed-all-mode "${changed_all_mode}" \ - --docker default --python "${version}" \ - --remote-terminate "${terminate}" --remote-stage "${stage}" --remote-provider "${provider}" + --changed-all-target "${changed_all_target}" --changed-all-mode "${changed_all_mode}" \ + --controller "docker:default,python=${py_version}" \ + --target "remote:windows/${version},connection=${connection}+${connection_setting},provider=${provider}" \ + --remote-terminate "${terminate}" --remote-stage "${stage}" done diff --git a/.azure-pipelines/scripts/combine-coverage.py b/.azure-pipelines/scripts/combine-coverage.py index 506ade6460c..10d83580c5e 100755 --- a/.azure-pipelines/scripts/combine-coverage.py +++ 
b/.azure-pipelines/scripts/combine-coverage.py @@ -7,8 +7,7 @@ Keep in mind that Azure Pipelines does not enforce unique job display names (onl It is up to pipeline authors to avoid name collisions when deviating from the recommended format. """ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import re diff --git a/.azure-pipelines/scripts/publish-codecov.py b/.azure-pipelines/scripts/publish-codecov.py index f2bc4b84b38..41f30af76d4 100755 --- a/.azure-pipelines/scripts/publish-codecov.py +++ b/.azure-pipelines/scripts/publish-codecov.py @@ -4,6 +4,7 @@ Upload code coverage reports to codecov.io. Multiple coverage files from multiple languages are accepted and aggregated after upload. Python coverage, as well as PowerShell and Python stubs can all be uploaded. """ +from __future__ import annotations import argparse import dataclasses diff --git a/.azure-pipelines/scripts/time-command.py b/.azure-pipelines/scripts/time-command.py index 5e8eb8d4c8f..c6b505006ec 100755 --- a/.azure-pipelines/scripts/time-command.py +++ b/.azure-pipelines/scripts/time-command.py @@ -1,8 +1,7 @@ #!/usr/bin/env python """Prepends a relative timestamp to each input line from stdin and writes it to stdout.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import sys import time diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 24f4438fbb3..72ee0f901d7 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -2,3 +2,5 @@ # Bulk PowerShell sanity fixes 6def4a3180fe03981ba64c6d8db28fed3bb39c0c 716631189cb5a3f66b3add98f39e64e98bc17bf7 +# Bulk update of strings from triple single quotes to triple double quotes +a0495fc31497798a7a833ba7406a9729e1528dd8 diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index efba4893a14..fc15ea5dfc2 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -4,22 +4,75 @@ Hi! Nice to see you here! ## QUESTIONS ? -Please see the [Community Guide](https://docs.ansible.com/ansible/latest/community/index.html) for information on how to ask questions on the [mailing lists](https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information) and IRC. +If you have questions about anything related to Ansible, get in touch with us! +See [Communicating with the Ansible community](https://docs.ansible.com/ansible/devel/community/communication.html) to find out how. -The GitHub issue tracker is not the best place for questions for various reasons, but both IRC and the mailing list are very helpful places for those things, as the community page explains best. +The [Community Guide](https://docs.ansible.com/ansible/devel/community/index.html) also explains how to contribute +and interact with the project, including how to submit bug reports and code to Ansible. + +Please note that the GitHub issue tracker is not the best place to ask questions for several reasons. +You'll get more helpful, and quicker, responses in the forum. ## CONTRIBUTING ? -By contributing you agree that these contributions are your own (or approved by your employer) and you grant a full, complete, irrevocable copyright license to all users and developers of the project, present and future, pursuant to the license of the project. You can also read the same [CLA](https://docs.ansible.com/ansible/latest/community/contributor_license_agreement.html) on the Ansible docsite. 
+By contributing to this project you agree to the [Developer Certificate of Origin (DCO)](#developer-certificate-of-origin-dco). + +The Ansible project is licensed under the [GPL-3.0](COPYING) or later. Some portions of the code fall under other licenses as noted in individual files. -Please review the [Community Guide](https://docs.ansible.com/ansible/latest/community/index.html) for more information on contributing to Ansible. +The Ansible project accepts contributions through GitHub pull requests. +Please review the [Community Guide](https://docs.ansible.com/ansible/devel/community/index.html) for more information on contributing to Ansible. ## BUG TO REPORT ? -First and foremost, also check the [Community Guide](https://docs.ansible.com/ansible/latest/community/index.html). +First and foremost, also check the [Community Guide](https://docs.ansible.com/ansible/devel/community/index.html). -You can report bugs or make enhancement requests at the [Ansible GitHub issue page](http://github.com/ansible/ansible/issues/new/choose) by filling out the issue template that will be presented. +You can report bugs or make enhancement requests at +the [Ansible GitHub issue page](http://github.com/ansible/ansible/issues/new/choose) by filling out the issue template that will be presented. -Also please make sure you are testing on the latest released version of Ansible or the development branch; see the [Installation Guide](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) for details. +Also please make sure you are testing on the latest released version of Ansible or the development branch. +See the [Installation Guide](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) for details. Thanks! + +## DEVELOPER CERTIFICATE OF ORIGIN (DCO) + +This document was created by the Linux Kernel community and is a simple statement that you, as a contributor, have the legal right to make the contribution. + +```text +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. 
+``` diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 0aba372b8c3..8f4944c43c0 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -19,13 +19,14 @@ body: Also test if the latest release and devel branch are affected too. - **Tip:** If you are seeking community support, please consider - [starting a mailing list thread or chatting in IRC][ML||IRC]. + **Tip:** If you are seeking community support, please see + [Communicating with the Ansible community][communication] to + get in touch and ask questions. - [ML||IRC]: - https://docs.ansible.com/ansible-core/devel/community/communication.html?utm_medium=github&utm_source=issue_form--bug_report.yml#mailing-list-information + [communication]: + https://docs.ansible.com/ansible/devel/community/communication.html [issue search]: ../search?q=is%3Aissue&type=issues @@ -54,7 +55,7 @@ body: Why? - We would do it by ourselves but unfortunatelly, the curent + We would do it by ourselves but unfortunately, the current edition of GitHub Issue Forms Alpha does not support this yet 🤷 @@ -87,7 +88,7 @@ body: [collections org]: /ansible-collections - placeholder: dnf, apt, yum, pip, user etc. + placeholder: dnf, apt, pip, user etc. validations: required: true @@ -249,7 +250,7 @@ body: [ansibot help]: - /ansible/ansibullbot/blob/master/ISSUE_HELP.md#for-issue-submitters + /ansible/ansibotmini#ansibotmini - type: checkboxes @@ -258,7 +259,7 @@ body: description: | Read the [Ansible Code of Conduct][CoC] first. - [CoC]: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--bug_report.yml + [CoC]: https://docs.ansible.com/ansible/devel/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--bug_report.yml options: - label: I agree to follow the Ansible Code of Conduct required: true diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 74ec5696fdf..6aa4a2b7647 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -2,7 +2,7 @@ blank_issues_enabled: false # default: true contact_links: - name: 🔐 Security bug report 🔥 - url: https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser + url: https://docs.ansible.com/ansible/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser about: | Please learn how to report security vulnerabilities here. @@ -11,12 +11,12 @@ contact_links: a prompt response. For more information, see - https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html + https://docs.ansible.com/ansible/devel/community/reporting_bugs_and_features.html - name: 📝 Ansible Code of Conduct - url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser + url: https://docs.ansible.com/ansible/devel/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser about: ❤ Be nice to other members of the community. ☮ Behave. 
-- name: 💬 Talks to the community - url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information +- name: 💬 Talk to the community + url: https://docs.ansible.com/ansible/devel/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information about: Please ask and answer usage questions here - name: ⚡ Working groups url: https://github.com/ansible/community/wiki diff --git a/.github/ISSUE_TEMPLATE/documentation_report.yml b/.github/ISSUE_TEMPLATE/documentation_report.yml index f8b81430a22..efe8d1c2035 100644 --- a/.github/ISSUE_TEMPLATE/documentation_report.yml +++ b/.github/ISSUE_TEMPLATE/documentation_report.yml @@ -22,12 +22,14 @@ body: Also test if the latest release and devel branch are affected too. - **Tip:** If you are seeking community support, please consider - [starting a mailing list thread or chatting in IRC][ML||IRC]. + **Tip:** If you are seeking community support, please see + [Communicating with the Ansible community][communication] to + get in touch and ask questions. - [ML||IRC]: - https://docs.ansible.com/ansible-core/devel/community/communication.html?utm_medium=github&utm_source=issue_form--documentation_report.yml#mailing-list-information + + [communication]: + https://docs.ansible.com/ansible/devel/community/communication.html [issue search]: ../search?q=is%3Aissue&type=issues @@ -196,7 +198,7 @@ body: [ansibot help]: - /ansible/ansibullbot/blob/master/ISSUE_HELP.md#for-issue-submitters + /ansible/ansibotmini#ansibotmini - type: checkboxes @@ -205,7 +207,7 @@ body: description: | Read the [Ansible Code of Conduct][CoC] first. - [CoC]: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--documentation_report.yml + [CoC]: https://docs.ansible.com/ansible/devel/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--documentation_report.yml options: - label: I agree to follow the Ansible Code of Conduct required: true diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index f4c6034ff7b..2fce680fe64 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -21,8 +21,7 @@ body: If unsure, consider filing a [new proposal] instead outlining your use-cases, the research and implementation considerations. Then, - start a discussion on one of the public [IRC meetings] we have just - for this. + start a discussion in the [Ansible forum][forum].
@@ -44,21 +43,22 @@ body: Also test if the devel branch does not already implement this. - **Tip:** If you are seeking community support, please consider - [starting a mailing list thread or chatting in IRC][ML||IRC]. + **Tip:** If you are seeking community support, please see + [Communicating with the Ansible community][communication] to + get in touch and ask questions. [contribute to collections]: https://docs.ansible.com/ansible-core/devel/community/contributing_maintained_collections.html?utm_medium=github&utm_source=issue_form--feature_request.yml - [IRC meetings]: - https://docs.ansible.com/ansible-core/devel/community/communication.html?utm_medium=github&utm_source=issue_form--feature_request.yml#irc-meetings + [communication]: + https://docs.ansible.com/ansible/devel/community/communication.html [issue search]: ../search?q=is%3Aissue&type=issues - [ML||IRC]: - https://docs.ansible.com/ansible-core/devel/community/communication.html?utm_medium=github&utm_source=issue_form--feature_request.yml#mailing-list-information + [forum help]: + https://forum.ansible.com/c/help/6 [new proposal]: ../../proposals/issues/new @@ -109,7 +109,7 @@ body: Why? - We would do it by ourselves but unfortunatelly, the curent + We would do it by ourselves but unfortunately, the current edition of GitHub Issue Forms Alpha does not support this yet 🤷 @@ -139,7 +139,7 @@ body: [collections org]: /ansible-collections - placeholder: dnf, apt, yum, pip, user etc. + placeholder: dnf, apt, pip, user etc. validations: required: true @@ -176,7 +176,7 @@ body: [ansibot help]: - /ansible/ansibullbot/blob/master/ISSUE_HELP.md#for-issue-submitters + /ansible/ansibotmini#ansibotmini - type: checkboxes @@ -185,7 +185,7 @@ body: description: | Read the [Ansible Code of Conduct][CoC] first. 
- [CoC]: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--feature_request.yml + [CoC]: https://docs.ansible.com/ansible/devel/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--feature_request.yml options: - label: I agree to follow the Ansible Code of Conduct required: true diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 62566fa6e1e..00000000000 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,29 +0,0 @@ -##### SUMMARY - - - - - -##### ISSUE TYPE - - - -- Bugfix Pull Request -- Docs Pull Request -- Feature Pull Request -- Test Pull Request - -##### COMPONENT NAME - - - -##### ADDITIONAL INFORMATION - - - - - - -```paste below - -``` diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 120000 index 00000000000..c8ecb720058 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1 @@ +PULL_REQUEST_TEMPLATE/Unclear purpose or motivation.md \ No newline at end of file diff --git a/.github/PULL_REQUEST_TEMPLATE/Bug fix.md b/.github/PULL_REQUEST_TEMPLATE/Bug fix.md new file mode 100644 index 00000000000..b400b336dff --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/Bug fix.md @@ -0,0 +1,20 @@ +##### SUMMARY + + + + + +##### ISSUE TYPE + +- Bugfix Pull Request + +##### ADDITIONAL INFORMATION + + + + + + +```paste below + +``` diff --git a/.github/PULL_REQUEST_TEMPLATE/Documentation change.md b/.github/PULL_REQUEST_TEMPLATE/Documentation change.md new file mode 100644 index 00000000000..c62ff7bfc55 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/Documentation change.md @@ -0,0 +1,19 @@ +##### SUMMARY + + + + + +##### ISSUE TYPE + +- Docs Pull Request + +##### ADDITIONAL INFORMATION + + + + + +```paste below + +``` diff --git a/.github/PULL_REQUEST_TEMPLATE/New feature.md b/.github/PULL_REQUEST_TEMPLATE/New feature.md new file mode 100644 index 00000000000..9e10c45d5d4 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/New feature.md @@ -0,0 +1,19 @@ +##### SUMMARY + + + + + +##### ISSUE TYPE + +- Feature Pull Request + +##### ADDITIONAL INFORMATION + + + + + +```paste below + +``` diff --git a/.github/PULL_REQUEST_TEMPLATE/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE/PULL_REQUEST_TEMPLATE.md new file mode 120000 index 00000000000..3df4f489ad7 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1 @@ +Unclear purpose or motivation.md \ No newline at end of file diff --git a/.github/PULL_REQUEST_TEMPLATE/Tests.md b/.github/PULL_REQUEST_TEMPLATE/Tests.md new file mode 100644 index 00000000000..b059793b49a --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/Tests.md @@ -0,0 +1,20 @@ +##### SUMMARY + + + + + +##### ISSUE TYPE + +- Test Pull Request + +##### ADDITIONAL INFORMATION + + + + + + +```paste below + +``` diff --git a/.github/PULL_REQUEST_TEMPLATE/Unclear purpose or motivation.md b/.github/PULL_REQUEST_TEMPLATE/Unclear purpose or motivation.md new file mode 100644 index 00000000000..33504c1d708 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/Unclear purpose or motivation.md @@ -0,0 +1,25 @@ +##### SUMMARY + + + + + +##### ISSUE TYPE + + + +- Bugfix Pull Request +- Docs Pull Request +- Feature Pull Request +- Test Pull Request + +##### ADDITIONAL INFORMATION + + + + + + +```paste below + +``` diff --git a/.github/RELEASE_NAMES.txt b/.github/RELEASE_NAMES.txt index ed2c3eeb98d..17d96a6897e 100644 --- a/.github/RELEASE_NAMES.txt +++ 
b/.github/RELEASE_NAMES.txt @@ -1,3 +1,6 @@ +2.19.0 What Is and What Should Never Be +2.18.0 Fool in the Rain +2.17.0 Gallows Pole 2.16.0 All My Love 2.15.0 Ten Years Gone 2.14.0 C'mon Everybody diff --git a/.gitignore b/.gitignore index 8b244f60ee7..57019fd1ab6 100644 --- a/.gitignore +++ b/.gitignore @@ -92,6 +92,8 @@ Vagrantfile /lib/ansible_base.egg-info/ # First used in the `devel` branch during Ansible 2.11 development. /lib/ansible_core.egg-info/ +# First used in the `devel` branch during Ansible 2.18 development. +/ansible_core.egg-info/ # vendored lib dir lib/ansible/_vendor/* !lib/ansible/_vendor/__init__.py diff --git a/MANIFEST.in b/MANIFEST.in index 24adcc01610..fa609f52e9a 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,12 +1,10 @@ include COPYING -include bin/* include changelogs/CHANGELOG*.rst include changelogs/changelog.yaml include licenses/*.txt include requirements.txt -recursive-include docs * recursive-include packaging *.py *.j2 recursive-include test/integration * -recursive-include test/sanity *.in *.json *.py *.txt +recursive-include test/sanity *.in *.json *.py *.txt *.ini recursive-include test/support *.py *.ps1 *.psm1 *.cs *.md recursive-include test/units * diff --git a/README.md b/README.md index baf22c0e052..9685e77748d 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,9 @@ [![PyPI version](https://img.shields.io/pypi/v/ansible-core.svg)](https://pypi.org/project/ansible-core) [![Docs badge](https://img.shields.io/badge/docs-latest-brightgreen.svg)](https://docs.ansible.com/ansible/latest/) -[![Chat badge](https://img.shields.io/badge/chat-IRC-brightgreen.svg)](https://docs.ansible.com/ansible/latest/community/communication.html) +[![Chat badge](https://img.shields.io/badge/chat-IRC-brightgreen.svg)](https://docs.ansible.com/ansible/devel/community/communication.html) [![Build Status](https://dev.azure.com/ansible/ansible/_apis/build/status/CI?branchName=devel)](https://dev.azure.com/ansible/ansible/_build/latest?definitionId=20&branchName=devel) -[![Ansible Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-Ansible-silver.svg)](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) -[![Ansible mailing lists](https://img.shields.io/badge/mailing%20lists-Ansible-orange.svg)](https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information) +[![Ansible Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-Ansible-silver.svg)](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html) +[![Ansible mailing lists](https://img.shields.io/badge/mailing%20lists-Ansible-orange.svg)](https://docs.ansible.com/ansible/devel/community/communication.html#mailing-list-information) [![Repository License](https://img.shields.io/badge/license-GPL%20v3.0-brightgreen.svg)](COPYING) [![Ansible CII Best Practices certification](https://bestpractices.coreinfrastructure.org/projects/2372/badge)](https://bestpractices.coreinfrastructure.org/projects/2372) @@ -40,21 +40,33 @@ features and fixes, directly. Although it is reasonably stable, you are more lik breaking changes when running the `devel` branch. We recommend getting involved in the Ansible community if you want to run the `devel` branch. -## Get Involved +## Communication -* Read [Community Information](https://docs.ansible.com/ansible/latest/community) for all +Join the Ansible forum to ask questions, get help, and interact with the +community. 
+ +* [Get Help](https://forum.ansible.com/c/help/6): Find help or share your Ansible knowledge to help others. + Use tags to filter and subscribe to posts, such as the following: + * Posts tagged with [ansible](https://forum.ansible.com/tag/ansible) + * Posts tagged with [ansible-core](https://forum.ansible.com/tag/ansible-core) + * Posts tagged with [playbook](https://forum.ansible.com/tag/playbook) +* [Social Spaces](https://forum.ansible.com/c/chat/4): Meet and interact with fellow enthusiasts. +* [News & Announcements](https://forum.ansible.com/c/news/5): Track project-wide announcements including social events. +* [Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn): Get release announcements and important changes. + +For more ways to get in touch, see [Communicating with the Ansible community](https://docs.ansible.com/ansible/devel/community/communication.html). + +## Contribute to Ansible + +* Check out the [Contributor's Guide](./.github/CONTRIBUTING.md). +* Read [Community Information](https://docs.ansible.com/ansible/devel/community) for all kinds of ways to contribute to and interact with the project, - including mailing list information and how to submit bug reports and - code to Ansible. -* Join a [Working Group](https://github.com/ansible/community/wiki), - an organized community devoted to a specific technology domain or platform. + including how to submit bug reports and code to Ansible. * Submit a proposed code update through a pull request to the `devel` branch. * Talk to us before making larger changes to avoid duplicate efforts. This not only helps everyone know what is going on, but it also helps save time and effort if we decide some changes are needed. -* For a list of email lists, IRC channels and Working Groups, see the - [Communication page](https://docs.ansible.com/ansible/latest/community/communication.html) ## Coding Guidelines @@ -67,7 +79,7 @@ We document our Coding Guidelines in the [Developer Guide](https://docs.ansible. * The `devel` branch corresponds to the release actively under development. * The `stable-2.X` branches correspond to stable releases. -* Create a branch based on `devel` and set up a [dev environment](https://docs.ansible.com/ansible/latest/dev_guide/developing_modules_general.html#common-environment-setup) if you want to open a PR. +* Create a branch based on `devel` and set up a [dev environment](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_general.html#common-environment-setup) if you want to open a PR. * See the [Ansible release and maintenance](https://docs.ansible.com/ansible/devel/reference_appendices/release_and_maintenance.html) page for information about active branches. 
## Roadmap diff --git a/bin/ansible-connection b/bin/ansible-connection deleted file mode 120000 index a20affdbe6a..00000000000 --- a/bin/ansible-connection +++ /dev/null @@ -1 +0,0 @@ -../lib/ansible/cli/scripts/ansible_connection_cli_stub.py \ No newline at end of file diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index 01cfedcd781..231ace8c768 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -1,2 +1,2 @@ -ancestor: 2.15.0 +ancestor: 2.18.0 releases: {} diff --git a/changelogs/fragments/22396-indicate-which-args-are-multi.yml b/changelogs/fragments/22396-indicate-which-args-are-multi.yml deleted file mode 100644 index eed874c23d7..00000000000 --- a/changelogs/fragments/22396-indicate-which-args-are-multi.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: -- CLI argument parsing - Automatically prepend to the help of CLI arguments that support being specified multiple times. - (https://github.com/ansible/ansible/issues/22396) diff --git a/changelogs/fragments/27816-fetch-unreachable.yml b/changelogs/fragments/27816-fetch-unreachable.yml deleted file mode 100644 index ac19539b9e0..00000000000 --- a/changelogs/fragments/27816-fetch-unreachable.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- fetch - Handle unreachable errors properly (https://github.com/ansible/ansible/issues/27816) diff --git a/changelogs/fragments/50603-tty-check.yaml b/changelogs/fragments/50603-tty-check.yaml deleted file mode 100644 index 9acc5351fbf..00000000000 --- a/changelogs/fragments/50603-tty-check.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -minor_changes: - - "ansible-vault create: Now raises an error when opening the editor without - tty. The flag --skip-tty-check restores previous behaviour." diff --git a/changelogs/fragments/73643-handlers-prevent-multiple-runs.yml b/changelogs/fragments/73643-handlers-prevent-multiple-runs.yml deleted file mode 100644 index 2cb132ddf96..00000000000 --- a/changelogs/fragments/73643-handlers-prevent-multiple-runs.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - Prevent running same handler multiple times when included via ``include_role`` (https://github.com/ansible/ansible/issues/73643) diff --git a/changelogs/fragments/74723-support-wildcard-win_fetch.yml b/changelogs/fragments/74723-support-wildcard-win_fetch.yml deleted file mode 100644 index e7942132827..00000000000 --- a/changelogs/fragments/74723-support-wildcard-win_fetch.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: -- win_fetch - Add support for using file with wildcards in file name. - (https://github.com/ansible/ansible/issues/73128) diff --git a/changelogs/fragments/75063-allow-dev-nul-as-skeleton-for-new-homedir.yml b/changelogs/fragments/75063-allow-dev-nul-as-skeleton-for-new-homedir.yml deleted file mode 100644 index f7e10b74536..00000000000 --- a/changelogs/fragments/75063-allow-dev-nul-as-skeleton-for-new-homedir.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - modules/user.py - Add check for valid directory when creating new user homedir (allows /dev/null as skeleton) (https://github.com/ansible/ansible/issues/75063) - diff --git a/changelogs/fragments/76372-fix-pip-virtualenv-command-parsing.yml b/changelogs/fragments/76372-fix-pip-virtualenv-command-parsing.yml deleted file mode 100644 index 96cd8b646aa..00000000000 --- a/changelogs/fragments/76372-fix-pip-virtualenv-command-parsing.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -bugfixes: - - >- - Fixed `pip` module failure in case of usage quotes for - `virtualenv_command` option for the venv command. 
- (https://github.com/ansible/ansible/issues/76372) diff --git a/changelogs/fragments/78487-galaxy-collections-path-warnings.yml b/changelogs/fragments/78487-galaxy-collections-path-warnings.yml deleted file mode 100644 index 4702e94f961..00000000000 --- a/changelogs/fragments/78487-galaxy-collections-path-warnings.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -minor_changes: -- >- - Add ``GALAXY_COLLECTIONS_PATH_WARNING`` option to disable the warning - given by ``ansible-galaxy collection install`` when installing a collection - to a path that isn't in the configured collection paths. diff --git a/changelogs/fragments/79364_replace.yml b/changelogs/fragments/79364_replace.yml deleted file mode 100644 index 614ff1c6646..00000000000 --- a/changelogs/fragments/79364_replace.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- replace - handle exception when bad escape character is provided in replace (https://github.com/ansible/ansible/issues/79364). diff --git a/changelogs/fragments/79677-fix-argspec-type-check.yml b/changelogs/fragments/79677-fix-argspec-type-check.yml deleted file mode 100644 index 3fe8b0f4206..00000000000 --- a/changelogs/fragments/79677-fix-argspec-type-check.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - module/role argument spec - validate the type for options that are None when the option is required or has a non-None default (https://github.com/ansible/ansible/issues/79656). diff --git a/changelogs/fragments/79734-ansible-test-change-detection.yml b/changelogs/fragments/79734-ansible-test-change-detection.yml deleted file mode 100644 index 7eb939fd1d2..00000000000 --- a/changelogs/fragments/79734-ansible-test-change-detection.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "ansible-test local change detection - use ``git merge-base HEAD`` instead of ``git merge-base --fork-point `` (https://github.com/ansible/ansible/pull/79734)." diff --git a/changelogs/fragments/79844-fix-timeout-mounts-linux.yml b/changelogs/fragments/79844-fix-timeout-mounts-linux.yml deleted file mode 100644 index 11d59705a67..00000000000 --- a/changelogs/fragments/79844-fix-timeout-mounts-linux.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - setup gather_timeout - Fix timeout in get_mounts_facts for linux. diff --git a/changelogs/fragments/80128-symbolic-modes-X-use-computed.yml b/changelogs/fragments/80128-symbolic-modes-X-use-computed.yml deleted file mode 100644 index c7f2434e8ef..00000000000 --- a/changelogs/fragments/80128-symbolic-modes-X-use-computed.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - file modules - Make symbolic modes with X use the computed permission, not original file (https://github.com/ansible/ansible/issues/80128) - - copy unit tests - Fixing "dir all perms" documentation and formatting for easier reading. diff --git a/changelogs/fragments/80258-defensive-display-non-utf8.yml b/changelogs/fragments/80258-defensive-display-non-utf8.yml deleted file mode 100644 index 5e9ed076a83..00000000000 --- a/changelogs/fragments/80258-defensive-display-non-utf8.yml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: -- Display - Defensively configure writing to stdout and stderr with a custom encoding error handler that will replace invalid characters - while providing a deprecation warning that non-utf8 text will result in an error in a future version. -- module responses - Ensure that module responses are utf-8 adhereing to JSON RFC and expectations of the core code. 
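Note on the ``80258-defensive-display-non-utf8`` fragment above: the behavior it describes (replace undecodable characters on stdout/stderr instead of crashing) can be reproduced with Python's generic ``PYTHONIOENCODING`` error-handler suffix. This is not ansible's Display mechanism, only a quick way to see the same idea:

```
# Without ":replace" this print raises UnicodeEncodeError on the lone
# surrogate; with it, the unencodable character is replaced instead.
PYTHONIOENCODING=utf-8:replace python3 -c 'print("caf\udce9")'
```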
diff --git a/changelogs/fragments/80334-reduce-ansible-galaxy-api-calls.yml b/changelogs/fragments/80334-reduce-ansible-galaxy-api-calls.yml deleted file mode 100644 index c780a109618..00000000000 --- a/changelogs/fragments/80334-reduce-ansible-galaxy-api-calls.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ansible-galaxy - reduce API calls to servers by fetching signatures only for final candidates. diff --git a/changelogs/fragments/80406-validate-modules-semantic-markup.yml b/changelogs/fragments/80406-validate-modules-semantic-markup.yml deleted file mode 100644 index a120f6afc3f..00000000000 --- a/changelogs/fragments/80406-validate-modules-semantic-markup.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "validate-modules sanity test - replace semantic markup parsing and validating code with the code from `antsibull-docs-parser 0.2.0 `__ (https://github.com/ansible/ansible/pull/80406)." diff --git a/changelogs/fragments/80449-fix-symbolic-mode-error-msg.yml b/changelogs/fragments/80449-fix-symbolic-mode-error-msg.yml deleted file mode 100644 index b760774ef44..00000000000 --- a/changelogs/fragments/80449-fix-symbolic-mode-error-msg.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - file modules - fix validating invalid symbolic modes. diff --git a/changelogs/fragments/80460-add-symbolic-links-with-dir.yml b/changelogs/fragments/80460-add-symbolic-links-with-dir.yml deleted file mode 100644 index 97d93d91529..00000000000 --- a/changelogs/fragments/80460-add-symbolic-links-with-dir.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -minor_changes: - - include_vars - os.walk now follows symbolic links when traversing directories (https://github.com/ansible/ansible/pull/80460) diff --git a/changelogs/fragments/80476-fix-loop-task-post-validation.yml b/changelogs/fragments/80476-fix-loop-task-post-validation.yml deleted file mode 100644 index ec2a33b1fc2..00000000000 --- a/changelogs/fragments/80476-fix-loop-task-post-validation.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - Fix post-validating looped task fields so the strategy uses the correct values after task execution. diff --git a/changelogs/fragments/80506-syntax-check-playbook-only.yml b/changelogs/fragments/80506-syntax-check-playbook-only.yml deleted file mode 100644 index ed0f1b0bc75..00000000000 --- a/changelogs/fragments/80506-syntax-check-playbook-only.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: -- syntax check - Limit ``--syntax-check`` to ``ansible-playbook`` only, as that is the only CLI affected by this argument - (https://github.com/ansible/ansible/issues/80506) diff --git a/changelogs/fragments/80520-fix-current-hostname-openbsd.yml b/changelogs/fragments/80520-fix-current-hostname-openbsd.yml deleted file mode 100644 index 382797c2419..00000000000 --- a/changelogs/fragments/80520-fix-current-hostname-openbsd.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - The ``hostname`` module now also updates both current and permanent hostname on OpenBSD. Before it only updated the permanent hostname (https://github.com/ansible/ansible/issues/80520). 
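For context on the ``80520-fix-current-hostname-openbsd`` fragment above: on OpenBSD the permanent hostname lives in ``/etc/myname`` while ``hostname(1)`` sets the running one, so updating both amounts to the sketch below (illustrative, not the module's literal code):

```
# Permanent hostname, read by rc(8) at boot:
echo 'web01.example.com' > /etc/myname
# Current (running) hostname:
hostname 'web01.example.com'
```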
diff --git a/changelogs/fragments/80605-template-overlay-native-jinja.yml b/changelogs/fragments/80605-template-overlay-native-jinja.yml deleted file mode 100644 index 75ed97170ce..00000000000 --- a/changelogs/fragments/80605-template-overlay-native-jinja.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "Properly disable ``jinja2_native`` in the template module when jinja2 override is used in the template (https://github.com/ansible/ansible/issues/80605)" diff --git a/changelogs/fragments/80648-fix-ansible-galaxy-cache-signatures-bug.yml b/changelogs/fragments/80648-fix-ansible-galaxy-cache-signatures-bug.yml deleted file mode 100644 index eda4eb62f93..00000000000 --- a/changelogs/fragments/80648-fix-ansible-galaxy-cache-signatures-bug.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - ansible-galaxy - fix installing signed collections (https://github.com/ansible/ansible/issues/80648). - - ansible-galaxy collection verify - fix verifying signed collections when the keyring is not configured. diff --git a/changelogs/fragments/80721-ansible-galaxy.yml b/changelogs/fragments/80721-ansible-galaxy.yml deleted file mode 100644 index d71d8f707bb..00000000000 --- a/changelogs/fragments/80721-ansible-galaxy.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- ansible-galaxy - Remove internal configuration argument ``v3`` (https://github.com/ansible/ansible/pull/80721) diff --git a/changelogs/fragments/80738-abs-unarachive-src.yml b/changelogs/fragments/80738-abs-unarachive-src.yml deleted file mode 100644 index f90c0356738..00000000000 --- a/changelogs/fragments/80738-abs-unarachive-src.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - unarchive - fix unarchiving sources that are copied to the remote node using a relative temporory directory path (https://github.com/ansible/ansible/issues/80710). diff --git a/changelogs/fragments/80880-register-handlers-immediately-if-iterating-handlers.yml b/changelogs/fragments/80880-register-handlers-immediately-if-iterating-handlers.yml deleted file mode 100644 index bc8d9de8bbe..00000000000 --- a/changelogs/fragments/80880-register-handlers-immediately-if-iterating-handlers.yml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: -- "From issue https://github.com/ansible/ansible/issues/80880, when notifying a - handler from another handler, handler notifications must be registered - immediately as the flush_handler call is not recursive." 
diff --git a/changelogs/fragments/80887-dnf5-api-change.yml b/changelogs/fragments/80887-dnf5-api-change.yml deleted file mode 100644 index c27d79d252e..00000000000 --- a/changelogs/fragments/80887-dnf5-api-change.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: -- dnf5 - Update dnf5 module to handle API change for setting the download directory - (https://github.com/ansible/ansible/issues/80887) diff --git a/changelogs/fragments/80943-ansible-galaxy-collection-subdir-install.yml b/changelogs/fragments/80943-ansible-galaxy-collection-subdir-install.yml deleted file mode 100644 index b1ffb02b201..00000000000 --- a/changelogs/fragments/80943-ansible-galaxy-collection-subdir-install.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: -- ansible-galaxy - Fix variable type error when installing subdir collections - (https://github.com/ansible/ansible/issues/80943) diff --git a/changelogs/fragments/80968-replace-deprecated-ast-attr.yml b/changelogs/fragments/80968-replace-deprecated-ast-attr.yml deleted file mode 100644 index 13100ded3d1..00000000000 --- a/changelogs/fragments/80968-replace-deprecated-ast-attr.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - Fix ``ast`` deprecation warnings for ``Str`` and ``value.s`` when using Python 3.12. diff --git a/changelogs/fragments/80985-fix-smgl-family-mapping.yml b/changelogs/fragments/80985-fix-smgl-family-mapping.yml deleted file mode 100644 index 9cf12c471df..00000000000 --- a/changelogs/fragments/80985-fix-smgl-family-mapping.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - distribution facts - fix Source Mage family mapping diff --git a/changelogs/fragments/81005-use-overlay-overrides.yml b/changelogs/fragments/81005-use-overlay-overrides.yml deleted file mode 100644 index 149abf2f9d3..00000000000 --- a/changelogs/fragments/81005-use-overlay-overrides.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- templating - In the template action and lookup, use local jinja2 environment overlay overrides instead of mutating the templars environment diff --git a/changelogs/fragments/81029-connection-types.yml b/changelogs/fragments/81029-connection-types.yml deleted file mode 100644 index 14466ba3729..00000000000 --- a/changelogs/fragments/81029-connection-types.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "paramiko_ssh, psrp, and ssh connection plugins - ensure that all values for options that should be strings are actually converted to strings (https://github.com/ansible/ansible/pull/81029)." diff --git a/changelogs/fragments/81064-daemonize-fixes.yml b/changelogs/fragments/81064-daemonize-fixes.yml deleted file mode 100644 index 06f2af3d1e6..00000000000 --- a/changelogs/fragments/81064-daemonize-fixes.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "``ansible.module_utils.service`` - fix inter-process communication in ``daemonize()``" diff --git a/changelogs/fragments/81082-deprecated-importlib-abc.yml b/changelogs/fragments/81082-deprecated-importlib-abc.yml deleted file mode 100644 index 6dfd90a16be..00000000000 --- a/changelogs/fragments/81082-deprecated-importlib-abc.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -minor_changes: - - Use ``importlib.resources.abc.TraversableResources`` instead of deprecated - ``importlib.abc.TraversableResources`` where available - (https:/github.com/ansible/ansible/pull/81082). 
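The ``81082-deprecated-importlib-abc`` fragment above describes a standard fallback import. A minimal runnable version, assuming only stdlib locations (``importlib.resources.abc`` exists on Python 3.11+, the deprecated ``importlib.abc`` spelling on 3.9+):

```
python3 -c 'try:
    from importlib.resources.abc import TraversableResources
except ImportError:
    from importlib.abc import TraversableResources
print(TraversableResources)'
```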
diff --git a/changelogs/fragments/81104-inventory-script-plugin-raise-execution-error.yml b/changelogs/fragments/81104-inventory-script-plugin-raise-execution-error.yml deleted file mode 100644 index 924d314fae8..00000000000 --- a/changelogs/fragments/81104-inventory-script-plugin-raise-execution-error.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- Inventory scripts parser not treat exception when getting hostsvar (https://github.com/ansible/ansible/issues/81103) diff --git a/changelogs/fragments/81319-cloudstack-test-container-bump-version.yml b/changelogs/fragments/81319-cloudstack-test-container-bump-version.yml deleted file mode 100644 index 564b7d44513..00000000000 --- a/changelogs/fragments/81319-cloudstack-test-container-bump-version.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ansible-test - Updated the CloudStack test container to version 1.6.1. diff --git a/changelogs/fragments/83642-fix-sanity-ignore-for-uri.yml b/changelogs/fragments/83642-fix-sanity-ignore-for-uri.yml new file mode 100644 index 00000000000..14ff7a0723e --- /dev/null +++ b/changelogs/fragments/83642-fix-sanity-ignore-for-uri.yml @@ -0,0 +1,2 @@ +bugfixes: + - uri - mark ``url`` as required (https://github.com/ansible/ansible/pull/83642). diff --git a/changelogs/fragments/83643-fix-sanity-ignore-for-copy.yml b/changelogs/fragments/83643-fix-sanity-ignore-for-copy.yml new file mode 100644 index 00000000000..07d6312cb4d --- /dev/null +++ b/changelogs/fragments/83643-fix-sanity-ignore-for-copy.yml @@ -0,0 +1,3 @@ +minor_changes: + - copy - parameter ``local_follow`` was incorrectly documented as having default value ``True`` (https://github.com/ansible/ansible/pull/83643). + - copy - fix sanity test failures (https://github.com/ansible/ansible/pull/83643). diff --git a/changelogs/fragments/83690-get_url-content-disposition-filename.yml b/changelogs/fragments/83690-get_url-content-disposition-filename.yml new file mode 100644 index 00000000000..47f9734c35e --- /dev/null +++ b/changelogs/fragments/83690-get_url-content-disposition-filename.yml @@ -0,0 +1,2 @@ +bugfixes: + - get_url - fix honoring ``filename`` from the ``content-disposition`` header even when the type is ``inline`` (https://github.com/ansible/ansible/issues/83690) diff --git a/changelogs/fragments/83700-enable-file-disable-diff.yml b/changelogs/fragments/83700-enable-file-disable-diff.yml new file mode 100644 index 00000000000..4fdc9feb4c7 --- /dev/null +++ b/changelogs/fragments/83700-enable-file-disable-diff.yml @@ -0,0 +1,2 @@ +minor_changes: + - file - enable file module to disable diff_mode (https://github.com/ansible/ansible/issues/80817). diff --git a/changelogs/fragments/83965-action-groups-schema.yml b/changelogs/fragments/83965-action-groups-schema.yml new file mode 100644 index 00000000000..cd4a439044d --- /dev/null +++ b/changelogs/fragments/83965-action-groups-schema.yml @@ -0,0 +1,2 @@ +minor_changes: + - "runtime-metadata sanity test - improve validation of ``action_groups`` (https://github.com/ansible/ansible/pull/83965)." diff --git a/changelogs/fragments/84008-additional-logging.yml b/changelogs/fragments/84008-additional-logging.yml new file mode 100644 index 00000000000..80bd3a7ddd9 --- /dev/null +++ b/changelogs/fragments/84008-additional-logging.yml @@ -0,0 +1,3 @@ +minor_changes: + - Added a -vvvvv log message indicating when a host fails to produce output within the timeout period. + - SSH Escalation-related -vvv log messages now include the associated host information. 
diff --git a/changelogs/fragments/84019-ignore_unreachable-loop.yml b/changelogs/fragments/84019-ignore_unreachable-loop.yml new file mode 100644 index 00000000000..da85af7e4b5 --- /dev/null +++ b/changelogs/fragments/84019-ignore_unreachable-loop.yml @@ -0,0 +1,2 @@ +bugfixes: + - Fix returning 'unreachable' for the overall task result. This prevents false positives when a looped task has unignored unreachable items (https://github.com/ansible/ansible/issues/84019). diff --git a/changelogs/fragments/84149-add-flush-cache-for-adhoc-commands.yml b/changelogs/fragments/84149-add-flush-cache-for-adhoc-commands.yml new file mode 100644 index 00000000000..854d2628b64 --- /dev/null +++ b/changelogs/fragments/84149-add-flush-cache-for-adhoc-commands.yml @@ -0,0 +1,3 @@ +minor_changes: +- > + ansible, ansible-console, ansible-pull - add --flush-cache option (https://github.com/ansible/ansible/issues/83749). diff --git a/changelogs/fragments/84229-windows-server-2025.yml b/changelogs/fragments/84229-windows-server-2025.yml new file mode 100644 index 00000000000..82c16371a34 --- /dev/null +++ b/changelogs/fragments/84229-windows-server-2025.yml @@ -0,0 +1,4 @@ +minor_changes: + - >- + Windows - Add support for Windows Server 2025 to Ansible and as an ``ansible-test`` + remote target - https://github.com/ansible/ansible/issues/84229 diff --git a/changelogs/fragments/CleansingNodeVisitor-removal.yml b/changelogs/fragments/CleansingNodeVisitor-removal.yml deleted file mode 100644 index 5214e572e97..00000000000 --- a/changelogs/fragments/CleansingNodeVisitor-removal.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - Remove the ``CleansingNodeVisitor`` class and its usage due to the templating changes that made it superfluous. Also simplify the ``Conditional`` class. diff --git a/changelogs/fragments/a-g-col-install-directory-with-trailing-sep.yml b/changelogs/fragments/a-g-col-install-directory-with-trailing-sep.yml deleted file mode 100644 index be766414ed0..00000000000 --- a/changelogs/fragments/a-g-col-install-directory-with-trailing-sep.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ansible-galaxy - fix installing collections from directories that have a trailing path separator (https://github.com/ansible/ansible/issues/77803). diff --git a/changelogs/fragments/a-g-col-prevent-reinstalling-satisfied-req.yml b/changelogs/fragments/a-g-col-prevent-reinstalling-satisfied-req.yml deleted file mode 100644 index 9ac32c5d970..00000000000 --- a/changelogs/fragments/a-g-col-prevent-reinstalling-satisfied-req.yml +++ /dev/null @@ -1,7 +0,0 @@ -bugfixes: -- >- - ``ansible-galaxy`` now considers all collection paths when identifying which collection requirements are already installed. - Use the ``COLLECTIONS_PATHS`` and ``COLLECTIONS_SCAN_SYS_PATHS`` config options to modify these. - Previously only the install path was considered when resolving the candidates. - The install path will remain the only one potentially modified. - (https://github.com/ansible/ansible/issues/79767, https://github.com/ansible/ansible/issues/81163) diff --git a/changelogs/fragments/ansible-galaxy-server-timeout.yml b/changelogs/fragments/ansible-galaxy-server-timeout.yml deleted file mode 100644 index 77b19ada990..00000000000 --- a/changelogs/fragments/ansible-galaxy-server-timeout.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - Add a general ``GALAXY_SERVER_TIMEOUT`` config option for distribution servers (https://github.com/ansible/ansible/issues/79833). 
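If you want to try the ``GALAXY_SERVER_TIMEOUT`` option mentioned in the fragment above, the usual ansible-core mapping would give it an ``ANSIBLE_``-prefixed environment variable; the exact name is inferred here, not verified:

```
# Assumed environment variable for the GALAXY_SERVER_TIMEOUT config option:
export ANSIBLE_GALAXY_SERVER_TIMEOUT=120
ansible-galaxy collection install community.general
```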
diff --git a/changelogs/fragments/ansible-runtime-metadata-removal-date.yml b/changelogs/fragments/ansible-runtime-metadata-removal-date.yml deleted file mode 100644 index d60608e9543..00000000000 --- a/changelogs/fragments/ansible-runtime-metadata-removal-date.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - Record ``removal_date`` in runtime metadata as a string instead of a date. diff --git a/changelogs/fragments/ansible-test-added-fedora-38.yml b/changelogs/fragments/ansible-test-added-fedora-38.yml deleted file mode 100644 index 9bb094be4d6..00000000000 --- a/changelogs/fragments/ansible-test-added-fedora-38.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - ansible-test - Add Fedora 38 remote. - - ansible-test - Add Fedora 38 container. diff --git a/changelogs/fragments/ansible-test-argcomplete-3.yml b/changelogs/fragments/ansible-test-argcomplete-3.yml deleted file mode 100644 index 0c0b01b5d06..00000000000 --- a/changelogs/fragments/ansible-test-argcomplete-3.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ansible-test - Add support for ``argcomplete`` version 3. diff --git a/changelogs/fragments/ansible-test-atexit.yml b/changelogs/fragments/ansible-test-atexit.yml deleted file mode 100644 index e531da47503..00000000000 --- a/changelogs/fragments/ansible-test-atexit.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ansible-test - Use a context manager to perform cleanup at exit instead of using the built-in ``atexit`` module. diff --git a/changelogs/fragments/ansible-test-default-containers.yml b/changelogs/fragments/ansible-test-default-containers.yml deleted file mode 100644 index e093c5a129a..00000000000 --- a/changelogs/fragments/ansible-test-default-containers.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ansible-test - Update the ``default`` containers. diff --git a/changelogs/fragments/ansible-test-deprecated-cleanup.yml b/changelogs/fragments/ansible-test-deprecated-cleanup.yml deleted file mode 100644 index 4f118b8cb8d..00000000000 --- a/changelogs/fragments/ansible-test-deprecated-cleanup.yml +++ /dev/null @@ -1,5 +0,0 @@ -minor_changes: - - ansible-test - Removed the deprecated ``--docker-no-pull`` option. - - ansible-test - Removed the deprecated ``--no-pip-check`` option. - - ansible-test - Removed the deprecated ``foreman`` test plugin. - - ansible-test - Removed the deprecated ``govcsim`` support from the ``vcenter`` test plugin. diff --git a/changelogs/fragments/ansible-test-distro-containers.yml b/changelogs/fragments/ansible-test-distro-containers.yml deleted file mode 100644 index b60fdab541c..00000000000 --- a/changelogs/fragments/ansible-test-distro-containers.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ansible-test - The openSUSE test container has been updated to openSUSE Leap 15.5. diff --git a/changelogs/fragments/ansible-test-explain-traceback.yml b/changelogs/fragments/ansible-test-explain-traceback.yml deleted file mode 100644 index 09938fa6968..00000000000 --- a/changelogs/fragments/ansible-test-explain-traceback.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - ansible-test - Fix several possible tracebacks when using the ``-e`` option with sanity tests. - - ansible-test - Remove redundant warning about missing programs before attempting to execute them. 
diff --git a/changelogs/fragments/ansible-test-fedora-37.yml b/changelogs/fragments/ansible-test-fedora-37.yml deleted file mode 100644 index 1f35a8c9164..00000000000 --- a/changelogs/fragments/ansible-test-fedora-37.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - ansible-test - Remove Fedora 37 remote support. - - ansible-test - Remove Fedora 37 test container. diff --git a/changelogs/fragments/ansible-test-fix-command-traceback.yml b/changelogs/fragments/ansible-test-fix-command-traceback.yml new file mode 100644 index 00000000000..d43294006f9 --- /dev/null +++ b/changelogs/fragments/ansible-test-fix-command-traceback.yml @@ -0,0 +1,2 @@ +bugfixes: + - ansible-test - Fix traceback that occurs after an interactive command fails. diff --git a/changelogs/fragments/ansible-test-freebsd-bootstrap-setuptools.yml b/changelogs/fragments/ansible-test-freebsd-bootstrap-setuptools.yml deleted file mode 100644 index bbf280fd4c1..00000000000 --- a/changelogs/fragments/ansible-test-freebsd-bootstrap-setuptools.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ansible-test - When bootstrapping remote FreeBSD instances, use the OS packaged ``setuptools`` instead of installing the latest version from PyPI. diff --git a/changelogs/fragments/ansible-test-import-sanity-fix.yml b/changelogs/fragments/ansible-test-import-sanity-fix.yml deleted file mode 100644 index bb8c2823d8b..00000000000 --- a/changelogs/fragments/ansible-test-import-sanity-fix.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ansible-test - The ``import`` sanity test now checks the collection loader for remote-only Python support when testing ansible-core. diff --git a/changelogs/fragments/ansible-test-long-timeout-fix.yml b/changelogs/fragments/ansible-test-long-timeout-fix.yml deleted file mode 100644 index 1fdf2c09fe8..00000000000 --- a/changelogs/fragments/ansible-test-long-timeout-fix.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ansible-test - Fix handling of timeouts exceeding one day. diff --git a/changelogs/fragments/ansible-test-minimum-setuptools.yml b/changelogs/fragments/ansible-test-minimum-setuptools.yml deleted file mode 100644 index f989b760724..00000000000 --- a/changelogs/fragments/ansible-test-minimum-setuptools.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - The minimum required ``setuptools`` version is now 66.1.0, as it is the oldest version to support Python 3.12. diff --git a/changelogs/fragments/ansible-test-nios-container.yml b/changelogs/fragments/ansible-test-nios-container.yml index 634e0db3015..f4b2a99acdd 100644 --- a/changelogs/fragments/ansible-test-nios-container.yml +++ b/changelogs/fragments/ansible-test-nios-container.yml @@ -1,2 +1,2 @@ minor_changes: - - ansible-test - Update the ``nios-test-container`` to version 2.0.0, which supports API version 2.9. + - ansible-test - Update ``nios-test-container`` to version 6.0.0. diff --git a/changelogs/fragments/ansible-test-probe-error-handling.yml b/changelogs/fragments/ansible-test-probe-error-handling.yml new file mode 100644 index 00000000000..bf4301cc48b --- /dev/null +++ b/changelogs/fragments/ansible-test-probe-error-handling.yml @@ -0,0 +1,3 @@ +minor_changes: + - ansible-test - Improve container runtime probe error handling. + When unexpected probe output is encountered, an error with more useful debugging information is provided. 
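For the container runtime probe fragment above, the shape of the improvement is to carry the unexpected output into the error. A hypothetical sketch, assuming JSON probe output:

```python
import json

def parse_probe_output(raw: str) -> dict:
    # Keep the raw text in the error so unexpected probe output is debuggable.
    try:
        return json.loads(raw)
    except json.JSONDecodeError as ex:
        raise RuntimeError(f'unexpected container runtime probe output: {raw!r}') from ex
```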
diff --git a/changelogs/fragments/ansible-test-pylint-fix.yml b/changelogs/fragments/ansible-test-pylint-fix.yml new file mode 100644 index 00000000000..877a5944967 --- /dev/null +++ b/changelogs/fragments/ansible-test-pylint-fix.yml @@ -0,0 +1,4 @@ +bugfixes: + - ansible-test - Enable the ``sys.unraisablehook`` work-around for the ``pylint`` sanity test on Python 3.11. + Previously the work-around was only enabled for Python 3.12 and later. + However, the same issue has been discovered on Python 3.11. diff --git a/changelogs/fragments/ansible-test-pylint-update.yml b/changelogs/fragments/ansible-test-pylint-update.yml deleted file mode 100644 index a52dc240470..00000000000 --- a/changelogs/fragments/ansible-test-pylint-update.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - ansible-test - Update ``pylint`` to 2.17.2 to resolve several possible false positives. - - ansible-test - Update ``pylint`` to 2.17.3 to resolve several possible false positives. diff --git a/changelogs/fragments/ansible-test-pytest-forked.yml b/changelogs/fragments/ansible-test-pytest-forked.yml deleted file mode 100644 index f8fae813946..00000000000 --- a/changelogs/fragments/ansible-test-pytest-forked.yml +++ /dev/null @@ -1,5 +0,0 @@ -minor_changes: - - ansible-test - Replace the ``pytest-forked`` pytest plugin with a custom plugin. -bugfixes: - - ansible-test - Unit tests now report warnings generated during test runs. - Previously only warnings generated during test collection were reported. diff --git a/changelogs/fragments/ansible-test-python-3.12.yml b/changelogs/fragments/ansible-test-python-3.12.yml deleted file mode 100644 index 7a8b3592d9e..00000000000 --- a/changelogs/fragments/ansible-test-python-3.12.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - ansible-test - Add support for testing with Python 3.12. - - ansible-test - Update pip to ``23.1.2`` and setuptools to ``67.7.2``. - - Add ``python3.12`` to the default ``INTERPRETER_PYTHON_FALLBACK`` list. diff --git a/changelogs/fragments/ansible-test-pyyaml-build.yml b/changelogs/fragments/ansible-test-pyyaml-build.yml deleted file mode 100644 index 5e971b2a515..00000000000 --- a/changelogs/fragments/ansible-test-pyyaml-build.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ansible-test - Pre-build a PyYAML wheel before installing requirements to avoid a potential Cython build failure. diff --git a/changelogs/fragments/ansible-test-remotes.yml b/changelogs/fragments/ansible-test-remotes.yml new file mode 100644 index 00000000000..cf3c832c8e8 --- /dev/null +++ b/changelogs/fragments/ansible-test-remotes.yml @@ -0,0 +1,2 @@ +minor_changes: + - ansible-test - Replace remote FreeBSD 13.3 with 13.4. diff --git a/changelogs/fragments/ansible-test-remove-old-rhel-remotes.yml b/changelogs/fragments/ansible-test-remove-old-rhel-remotes.yml deleted file mode 100644 index d5f7b64b20a..00000000000 --- a/changelogs/fragments/ansible-test-remove-old-rhel-remotes.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "ansible-test - Removed test remotes: rhel/8.7, rhel/9.1" diff --git a/changelogs/fragments/ansible-test-remove-ubuntu-2004.yml b/changelogs/fragments/ansible-test-remove-ubuntu-2004.yml deleted file mode 100644 index b743db9894f..00000000000 --- a/changelogs/fragments/ansible-test-remove-ubuntu-2004.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ansible-test - Removed Ubuntu 20.04 LTS image from the `--remote` option. 
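The ``sys.unraisablehook`` work-around at the top of this hunk is specific to the ``pylint`` sanity test; the sketch below shows only the general mechanism of installing such a hook, with a made-up filter rather than the real one:

```python
import sys

def filtered_unraisable_hook(unraisable):
    # Hypothetical filter: drop one noisy case and delegate the rest to the
    # default hook. The actual ansible-test work-around differs.
    if isinstance(unraisable.exc_value, ResourceWarning):
        return
    sys.__unraisablehook__(unraisable)

sys.unraisablehook = filtered_unraisable_hook
```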
diff --git a/changelogs/fragments/ansible-test-rhel-9.2-python-3.11.yml b/changelogs/fragments/ansible-test-rhel-9.2-python-3.11.yml deleted file mode 100644 index 717b56d9978..00000000000 --- a/changelogs/fragments/ansible-test-rhel-9.2-python-3.11.yml +++ /dev/null @@ -1,5 +0,0 @@ -minor_changes: - - ansible-test - Add support for RHEL 8.8 remotes. - - ansible-test - RHEL 8.8 provisioning can now be used with the ``--python 3.11`` option. - - ansible-test - RHEL 9.2 provisioning can now be used with the ``--python 3.11`` option. - - ansible-test - Remove Python 3.8 and 3.9 from RHEL 8.8. diff --git a/changelogs/fragments/ansible-test-rhel-9.2.yml b/changelogs/fragments/ansible-test-rhel-9.2.yml deleted file mode 100644 index 5720e3ddd1d..00000000000 --- a/changelogs/fragments/ansible-test-rhel-9.2.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ansible-test - Add support for RHEL 9.2 remotes. diff --git a/changelogs/fragments/ansible-test-sanity-scope.yml b/changelogs/fragments/ansible-test-sanity-scope.yml deleted file mode 100644 index 56e50714d6b..00000000000 --- a/changelogs/fragments/ansible-test-sanity-scope.yml +++ /dev/null @@ -1,7 +0,0 @@ -minor_changes: - - ansible-test - The ``replace-urlopen`` sanity test is now limited to plugins in collections. - Previously any Python file in a collection was checked for ``urlopen`` usage. - - ansible-test - The ``use-compat-six`` sanity test is now limited to plugins in collections. - Previously any Python file in a collection was checked for ``six`` usage. - - ansible-test - The ``no-get-exception`` sanity test is now limited to plugins in collections. - Previously any Python file in a collection was checked for ``get_exception`` usage. diff --git a/changelogs/fragments/ansible-test-source-detection.yml b/changelogs/fragments/ansible-test-source-detection.yml deleted file mode 100644 index 0fd97380d1f..00000000000 --- a/changelogs/fragments/ansible-test-source-detection.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - ansible-test - Fix a traceback that occurs when attempting to test Ansible source using a different ansible-test. - A clear error message is now given when this scenario occurs. diff --git a/changelogs/fragments/ansible-test-timeout-fix.yml b/changelogs/fragments/ansible-test-timeout-fix.yml deleted file mode 100644 index 046d5b46d34..00000000000 --- a/changelogs/fragments/ansible-test-timeout-fix.yml +++ /dev/null @@ -1,5 +0,0 @@ -bugfixes: - - ansible-test - Fix various cases where the test timeout could expire without terminating the tests. -minor_changes: - - ansible-test - Refactored ``env`` command logic and timeout handling. - - ansible-test - Allow float values for the ``--timeout`` option to the ``env`` command. This simplifies testing. diff --git a/changelogs/fragments/ansible-test-unique-container-names.yml b/changelogs/fragments/ansible-test-unique-container-names.yml deleted file mode 100644 index 560090d1aaf..00000000000 --- a/changelogs/fragments/ansible-test-unique-container-names.yml +++ /dev/null @@ -1,6 +0,0 @@ -bugfixes: - - ansible-test - All containers created by ansible-test now include the current test session ID in their name. - This avoids conflicts between concurrent ansible-test invocations using the same container host. -breaking_changes: - - ansible-test - Test plugins that rely on containers no longer support reusing running containers. - The previous behavior was an undocumented, untested feature. 
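A short sketch of the session-scoped container naming described in the ``unique-container-names`` fragment above (the actual ansible-test naming scheme may differ):

```python
import uuid

SESSION_ID = uuid.uuid4().hex[:8]  # one ID per test session

def container_name(base: str) -> str:
    # e.g. 'ansible-test-controller-3f9c1a2b'; concurrent sessions on the
    # same container host no longer collide.
    return f'{base}-{SESSION_ID}'

print(container_name('ansible-test-controller'))
```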
diff --git a/changelogs/fragments/ansible-test-update.yml b/changelogs/fragments/ansible-test-update.yml new file mode 100644 index 00000000000..8431887dedb --- /dev/null +++ b/changelogs/fragments/ansible-test-update.yml @@ -0,0 +1,5 @@ +minor_changes: + - ansible-test - Update ``pylint`` sanity test to use version 3.3.1. + - ansible-test - Default to Python 3.13 in the ``base`` and ``default`` containers. + - ansible-test - Disable the ``deprecated-`` prefixed ``pylint`` rules as their results vary by Python version. + - ansible-test - Update the ``base`` and ``default`` containers. diff --git a/changelogs/fragments/ansible-test-use-raise-from.yml b/changelogs/fragments/ansible-test-use-raise-from.yml deleted file mode 100644 index 85716226e4f..00000000000 --- a/changelogs/fragments/ansible-test-use-raise-from.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ansible-test - Use ``raise ... from ...`` when raising exceptions from within an exception handler. diff --git a/changelogs/fragments/ansible-test-utcnow.yml b/changelogs/fragments/ansible-test-utcnow.yml deleted file mode 100644 index 0781a0cb48a..00000000000 --- a/changelogs/fragments/ansible-test-utcnow.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ansible-test - Use ``datetime.datetime.now`` with ``tz`` specified instead of ``datetime.datetime.utcnow``. diff --git a/changelogs/fragments/ansible-test-winrm-config.yml b/changelogs/fragments/ansible-test-winrm-config.yml deleted file mode 100644 index d974800d631..00000000000 --- a/changelogs/fragments/ansible-test-winrm-config.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ansible-test - Remove obsolete embedded script for configuring WinRM on Windows remotes. diff --git a/changelogs/fragments/ansible_test_alpine_3.18.yml b/changelogs/fragments/ansible_test_alpine_3.18.yml deleted file mode 100644 index b4220baecd9..00000000000 --- a/changelogs/fragments/ansible_test_alpine_3.18.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ansible-test - Add Alpine 3.18 to remotes diff --git a/changelogs/fragments/aptclean_diff.yml b/changelogs/fragments/aptclean_diff.yml deleted file mode 100644 index cf59747cdfd..00000000000 --- a/changelogs/fragments/aptclean_diff.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - apt - return calculated diff while running apt clean operation. diff --git a/changelogs/fragments/become-runas-system-deux.yml b/changelogs/fragments/become-runas-system-deux.yml new file mode 100644 index 00000000000..e8b17f92a4c --- /dev/null +++ b/changelogs/fragments/become-runas-system-deux.yml @@ -0,0 +1,3 @@ +bugfixes: + - >- + runas become - Fix up become logic to still get the SYSTEM token with the most privileges when running as SYSTEM. diff --git a/changelogs/fragments/buildroot.yml b/changelogs/fragments/buildroot.yml new file mode 100644 index 00000000000..18acd5438e0 --- /dev/null +++ b/changelogs/fragments/buildroot.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - user - Create Buildroot subclass as alias to Busybox (https://github.com/ansible/ansible/issues/83665). diff --git a/changelogs/fragments/ci_freebsd_new.yml b/changelogs/fragments/ci_freebsd_new.yml deleted file mode 100644 index fff1a24229b..00000000000 --- a/changelogs/fragments/ci_freebsd_new.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ansible-test - Add FreeBSD 13.2 remote. 
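The ``utcnow`` fragment above boils down to a one-line pattern: ``datetime.datetime.utcnow()`` returns a naive datetime and is deprecated as of Python 3.12, so an aware equivalent is used instead:

```python
from datetime import datetime, timezone

# Timezone-aware replacement for the naive datetime.utcnow().
now = datetime.now(tz=timezone.utc)
print(now.isoformat())
```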
diff --git a/changelogs/fragments/collections_paths-deprecation.yml b/changelogs/fragments/collections_paths-deprecation.yml deleted file mode 100644 index a0336916ad4..00000000000 --- a/changelogs/fragments/collections_paths-deprecation.yml +++ /dev/null @@ -1,3 +0,0 @@ -deprecated_features: -- Deprecated the env var ``ANSIBLE_COLLECTIONS_PATHS``, use the singular form ``ANSIBLE_COLLECTIONS_PATH`` instead -- Deprecated ini config option ``collections_paths``, use the singular form ``collections_path`` instead diff --git a/changelogs/fragments/colors.yml b/changelogs/fragments/colors.yml deleted file mode 100644 index 250a9b1982d..00000000000 --- a/changelogs/fragments/colors.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - ansible terminal color settings were incorrectly limited to 16 options via 'choices', removing so all 256 can be accessed. diff --git a/changelogs/fragments/command-expand-args.yml b/changelogs/fragments/command-expand-args.yml deleted file mode 100644 index 9ecd7048f39..00000000000 --- a/changelogs/fragments/command-expand-args.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- command - Add option ``expand_argument_vars`` to disable argument expansion and use literal values - https://github.com/ansible/ansible/issues/54162 diff --git a/changelogs/fragments/compat_removal.yml b/changelogs/fragments/compat_removal.yml new file mode 100644 index 00000000000..86da5d9933a --- /dev/null +++ b/changelogs/fragments/compat_removal.yml @@ -0,0 +1,3 @@ +--- +removed_features: + - removed deprecated pycompat24 and compat.importlib. diff --git a/changelogs/fragments/config.yml b/changelogs/fragments/config.yml new file mode 100644 index 00000000000..e7b7d6f808a --- /dev/null +++ b/changelogs/fragments/config.yml @@ -0,0 +1,3 @@ +--- +removed_features: + - Remove deprecated plural form of collection path (https://github.com/ansible/ansible/pull/84156). diff --git a/changelogs/fragments/config_origins_option.yml b/changelogs/fragments/config_origins_option.yml deleted file mode 100644 index ac9263ccf6c..00000000000 --- a/changelogs/fragments/config_origins_option.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - config lookup new option show_origin to also return the origin of a configuration value. diff --git a/changelogs/fragments/connection-type-annotation.yml b/changelogs/fragments/connection-type-annotation.yml deleted file mode 100644 index fabd25b5868..00000000000 --- a/changelogs/fragments/connection-type-annotation.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- Added Python type annotation to connection plugins diff --git a/changelogs/fragments/cron_err.yml b/changelogs/fragments/cron_err.yml new file mode 100644 index 00000000000..5e65a7b68ec --- /dev/null +++ b/changelogs/fragments/cron_err.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - cron - Provide additional error information while writing cron file (https://github.com/ansible/ansible/issues/83223). diff --git a/changelogs/fragments/cve-2024-8775.yml b/changelogs/fragments/cve-2024-8775.yml new file mode 100644 index 00000000000..a292c997044 --- /dev/null +++ b/changelogs/fragments/cve-2024-8775.yml @@ -0,0 +1,5 @@ +security_fixes: + - task result processing - Ensure that action-sourced result masking (``_ansible_no_log=True``) + is preserved. (CVE-2024-8775) + - include_vars action - Ensure that result masking is correctly requested when vault-encrypted + files are read. 
(CVE-2024-8775) diff --git a/changelogs/fragments/deb822_open_url.yml b/changelogs/fragments/deb822_open_url.yml deleted file mode 100644 index 222268aad26..00000000000 --- a/changelogs/fragments/deb822_open_url.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- deb822_repository - use http-agent for receiving content (https://github.com/ansible/ansible/issues/80809). diff --git a/changelogs/fragments/debconf_empty_password.yml b/changelogs/fragments/debconf_empty_password.yml new file mode 100644 index 00000000000..473dc53e0d5 --- /dev/null +++ b/changelogs/fragments/debconf_empty_password.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - debconf - set empty password values (https://github.com/ansible/ansible/issues/83214). diff --git a/changelogs/fragments/deprecated.yml b/changelogs/fragments/deprecated.yml new file mode 100644 index 00000000000..aa632c0487d --- /dev/null +++ b/changelogs/fragments/deprecated.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - docs - add collection name in message from which the module is being deprecated (https://github.com/ansible/ansible/issues/84116). diff --git a/changelogs/fragments/display_proxy.yml b/changelogs/fragments/display_proxy.yml deleted file mode 100644 index 9bd9252a9cf..00000000000 --- a/changelogs/fragments/display_proxy.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: - - display methods for warning and deprecation are now proxied to main process when issued from a fork. - This allows for the deduplication of warnings and deprecations to work globally. diff --git a/changelogs/fragments/dnf5-cacheonly.yml b/changelogs/fragments/dnf5-cacheonly.yml deleted file mode 100644 index b7e2d753fb2..00000000000 --- a/changelogs/fragments/dnf5-cacheonly.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - dnf5 - enable now implemented ``cacheonly`` functionality diff --git a/changelogs/fragments/dnf5-fix-interpreter-fail-msg.yml b/changelogs/fragments/dnf5-fix-interpreter-fail-msg.yml deleted file mode 100644 index d6db8c39500..00000000000 --- a/changelogs/fragments/dnf5-fix-interpreter-fail-msg.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - dnf5 - fix module and package names in the message following failed module respawn attempt diff --git a/changelogs/fragments/dnf5-gpg-check-api.yml b/changelogs/fragments/dnf5-gpg-check-api.yml deleted file mode 100644 index c2b2ac6f057..00000000000 --- a/changelogs/fragments/dnf5-gpg-check-api.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - dnf5 - Use ``transaction.check_gpg_signatures`` API call to check package signatures AND possibly to recover from when keys are missing. diff --git a/changelogs/fragments/dnf5-gpg-check-builtin.yml b/changelogs/fragments/dnf5-gpg-check-builtin.yml deleted file mode 100644 index 504f2348668..00000000000 --- a/changelogs/fragments/dnf5-gpg-check-builtin.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - Utilize gpg check provided internally by the ``transaction.run`` method as oppose to calling it manually. 
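For the CVE-2024-8775 fragment above, this is a hedged illustration of what preserving result masking means; it is not the actual callback code, though the censored message mirrors the one Ansible prints:

```python
def censor(result: dict) -> dict:
    # A result carrying _ansible_no_log=True must be censored before display.
    if result.get('_ansible_no_log'):
        return {'censored': "the output has been hidden due to the fact that "
                            "'no_log: true' was specified for this result"}
    return result

print(censor({'_ansible_no_log': True, 'stdout': 'secret-token'}))
```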
diff --git a/changelogs/fragments/dnf5-logs-api.yml b/changelogs/fragments/dnf5-logs-api.yml deleted file mode 100644 index 10a19cc8696..00000000000 --- a/changelogs/fragments/dnf5-logs-api.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - dnf5 - use the logs API to determine transaction problems diff --git a/changelogs/fragments/dnf5-plugins-compat.yml b/changelogs/fragments/dnf5-plugins-compat.yml new file mode 100644 index 00000000000..5d42b0f99f1 --- /dev/null +++ b/changelogs/fragments/dnf5-plugins-compat.yml @@ -0,0 +1,2 @@ +bugfixes: + - "dnf5 - fix traceback when ``enable_plugins``/``disable_plugins`` is used on ``python3-libdnf5`` versions that do not support this functionality" diff --git a/changelogs/fragments/dnf5-test-env-groups.yml b/changelogs/fragments/dnf5-test-env-groups.yml deleted file mode 100644 index c0f9fcadde3..00000000000 --- a/changelogs/fragments/dnf5-test-env-groups.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - dnf5 - enable environment groups installation testing in CI as its support was added. diff --git a/changelogs/fragments/fbsd13_1_remove.yml b/changelogs/fragments/fbsd13_1_remove.yml deleted file mode 100644 index a334c1f8f80..00000000000 --- a/changelogs/fragments/fbsd13_1_remove.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ansible-test - Removed `freebsd/13.1` remote. diff --git a/changelogs/fragments/fetch_url-remove-auto-disable-decompress.yml b/changelogs/fragments/fetch_url-remove-auto-disable-decompress.yml deleted file mode 100644 index 9588483317d..00000000000 --- a/changelogs/fragments/fetch_url-remove-auto-disable-decompress.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: - - "``fetch_url`` - remove auto disabling ``decompress`` when gzip is not available" diff --git a/changelogs/fragments/file_simplify.yml b/changelogs/fragments/file_simplify.yml new file mode 100644 index 00000000000..63e48fbdb9a --- /dev/null +++ b/changelogs/fragments/file_simplify.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - file - make code more readable and simple. diff --git a/changelogs/fragments/find-checksum.yml b/changelogs/fragments/find-checksum.yml new file mode 100644 index 00000000000..c713beabd68 --- /dev/null +++ b/changelogs/fragments/find-checksum.yml @@ -0,0 +1,2 @@ +minor_changes: + - find - add a checksum_algorithm parameter to specify which type of checksum the module will return diff --git a/changelogs/fragments/first_found_fixes.yml b/changelogs/fragments/first_found_fixes.yml deleted file mode 100644 index edf2ef3674c..00000000000 --- a/changelogs/fragments/first_found_fixes.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - first found lookup has been updated to use the normalized argument parsing (pythonic) matching the documented examples. - - first found lookup, fixed an issue with subsequent items clobbering information from previous ones. diff --git a/changelogs/fragments/first_found_template_fix.yml b/changelogs/fragments/first_found_template_fix.yml deleted file mode 100644 index 70fe6b58628..00000000000 --- a/changelogs/fragments/first_found_template_fix.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - first_found lookup now gets 'untemplated' loop entries and handles templating itself as task_executor was removing even 'templatable' entries and breaking functionality. 
https://github.com/ansible/ansible/issues/70772 diff --git a/changelogs/fragments/fix-ansible-galaxy-ignore-certs.yml b/changelogs/fragments/fix-ansible-galaxy-ignore-certs.yml new file mode 100644 index 00000000000..aba789bdadd --- /dev/null +++ b/changelogs/fragments/fix-ansible-galaxy-ignore-certs.yml @@ -0,0 +1,2 @@ +bugfixes: + - Fix disabling SSL verification when installing collections and roles from git repositories. If ``--ignore-certs`` isn't provided, the value for the ``GALAXY_IGNORE_CERTS`` configuration option will be used (https://github.com/ansible/ansible/issues/83326). diff --git a/changelogs/fragments/fix-handlers-callback.yml b/changelogs/fragments/fix-handlers-callback.yml deleted file mode 100644 index b590c208755..00000000000 --- a/changelogs/fragments/fix-handlers-callback.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "handlers - fix ``v2_playbook_on_notify`` callback not being called when notifying handlers" diff --git a/changelogs/fragments/fix-ipv6-pattern.yml b/changelogs/fragments/fix-ipv6-pattern.yml new file mode 100644 index 00000000000..48b18150527 --- /dev/null +++ b/changelogs/fragments/fix-ipv6-pattern.yml @@ -0,0 +1,2 @@ +bugfixes: + - Fix ipv6 pattern bug in lib/ansible/parsing/utils/addresses.py (https://github.com/ansible/ansible/issues/84237) \ No newline at end of file diff --git a/changelogs/fragments/fix-module-utils-facts-timeout.yml b/changelogs/fragments/fix-module-utils-facts-timeout.yml new file mode 100644 index 00000000000..3ecc95dfab3 --- /dev/null +++ b/changelogs/fragments/fix-module-utils-facts-timeout.yml @@ -0,0 +1,2 @@ +bugfixes: + - Use the requested error message in the ansible.module_utils.facts.timeout timeout function instead of hardcoding one. diff --git a/changelogs/fragments/fix-pkg-mgr-in-TencentOS.yml b/changelogs/fragments/fix-pkg-mgr-in-TencentOS.yml deleted file mode 100644 index cd4d2656ce3..00000000000 --- a/changelogs/fragments/fix-pkg-mgr-in-TencentOS.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - pkg_mgr.py - Fix `ansible_pkg_mgr` incorrect in TencentOS Server Linux \ No newline at end of file diff --git a/changelogs/fragments/fix-setuptools-warnings.yml b/changelogs/fragments/fix-setuptools-warnings.yml deleted file mode 100644 index 7be3f528497..00000000000 --- a/changelogs/fragments/fix-setuptools-warnings.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - Use ``package_data`` instead of ``include_package_data`` for ``setup.cfg`` to avoid ``setuptools`` warnings. diff --git a/changelogs/fragments/fix_errors.yml b/changelogs/fragments/fix_errors.yml new file mode 100644 index 00000000000..995cc28ffda --- /dev/null +++ b/changelogs/fragments/fix_errors.yml @@ -0,0 +1,2 @@ +bugfixes: + - Errors now preserve stacked error messages even when YAML is involved. diff --git a/changelogs/fragments/gather_facts_fix_parallel.yml b/changelogs/fragments/gather_facts_fix_parallel.yml deleted file mode 100644 index e33571c1a20..00000000000 --- a/changelogs/fragments/gather_facts_fix_parallel.yml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: - - gather_facts parallel option was doing the reverse of what was stated, now it does run modules in parallel when True and serially when False. -minor_changes: - - gather_facts now will use gather_timeout setting to limit parallel execution of modules that do not themselves use gather_timeout. 
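The ``module_utils.facts.timeout`` fix above concerns the decorator's ``error_message`` parameter. A simplified, Unix-only sketch (not the ansible source) of a timeout decorator that raises the requested message instead of a hardcoded one:

```python
import signal
from functools import wraps

def timeout(seconds=10, error_message='Timer expired'):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            def handler(signum, frame):
                raise TimeoutError(error_message)  # the caller's message, not a fixed one
            old_handler = signal.signal(signal.SIGALRM, handler)
            signal.alarm(seconds)
            try:
                return func(*args, **kwargs)
            finally:
                signal.alarm(0)
                signal.signal(signal.SIGALRM, old_handler)
        return wrapper
    return decorator

@timeout(seconds=5, error_message='fact gathering timed out')
def gather_slow_fact():
    return 'ok'

print(gather_slow_fact())
```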
diff --git a/changelogs/fragments/get_action_args_with_defaults-remove-deprecated-arg.yml b/changelogs/fragments/get_action_args_with_defaults-remove-deprecated-arg.yml deleted file mode 100644 index 1dca468b533..00000000000 --- a/changelogs/fragments/get_action_args_with_defaults-remove-deprecated-arg.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: - - "``get_action_args_with_defaults`` - remove deprecated ``redirected_names`` method parameter" diff --git a/changelogs/fragments/inventory_cache-remove-deprecated-default-section.yml b/changelogs/fragments/inventory_cache-remove-deprecated-default-section.yml deleted file mode 100644 index e4e1cc66a3e..00000000000 --- a/changelogs/fragments/inventory_cache-remove-deprecated-default-section.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: - - inventory_cache - remove deprecated ``default.fact_caching_prefix`` ini configuration option, use ``defaults.fact_caching_prefix`` instead. diff --git a/changelogs/fragments/libvirt_lxc.yml b/changelogs/fragments/libvirt_lxc.yml new file mode 100644 index 00000000000..7d575756983 --- /dev/null +++ b/changelogs/fragments/libvirt_lxc.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - base.yml - deprecated libvirt_lxc_noseclabel config. diff --git a/changelogs/fragments/long-collection-paths-fix.yml b/changelogs/fragments/long-collection-paths-fix.yml deleted file mode 100644 index 47a8c5c25a8..00000000000 --- a/changelogs/fragments/long-collection-paths-fix.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- ansible-galaxy - Fix issue installing collections containing directories with more than 100 characters on python versions before 3.10.6 diff --git a/changelogs/fragments/man-page-build-docs-dependency.yml b/changelogs/fragments/man-page-build-docs-dependency.yml deleted file mode 100644 index 3433785f3ca..00000000000 --- a/changelogs/fragments/man-page-build-docs-dependency.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - man page build - Remove the dependency on the ``docs`` directory for building man pages. diff --git a/changelogs/fragments/manifest-in-cleanup.yml b/changelogs/fragments/manifest-in-cleanup.yml deleted file mode 100644 index 457b17f8e3f..00000000000 --- a/changelogs/fragments/manifest-in-cleanup.yml +++ /dev/null @@ -1,9 +0,0 @@ -minor_changes: - - Removed ``exclude`` and ``recursive-exclude`` commands for generated files from the ``MANIFEST.in`` file. - These excludes were unnecessary since releases are expected to be built with a clean worktree. - - Removed ``exclude`` commands for sanity test files from the ``MANIFEST.in`` file. - These tests were previously excluded because they did not pass when run from an sdist. - However, sanity tests are not expected to pass from an sdist, so excluding some (but not all) of the failing tests makes little sense. - - Removed redundant ``include`` commands from the ``MANIFEST.in`` file. - These includes either duplicated default behavior or another command. - - Use ``include`` where ``recursive-include`` is unnecessary in the ``MANIFEST.in`` file. diff --git a/changelogs/fragments/mc_from_config.yml b/changelogs/fragments/mc_from_config.yml deleted file mode 100644 index df31596f271..00000000000 --- a/changelogs/fragments/mc_from_config.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - module compression is now sourced directly via config, bypassing play_context possibly stale values. 
diff --git a/changelogs/fragments/no-arbitrary-j2-override.yml b/changelogs/fragments/no-arbitrary-j2-override.yml deleted file mode 100644 index c2fcf1c565f..00000000000 --- a/changelogs/fragments/no-arbitrary-j2-override.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - templating - prevent setting arbitrary attributes on Jinja2 environments via Jinja2 overrides in templates diff --git a/changelogs/fragments/no-return.yml b/changelogs/fragments/no-return.yml new file mode 100644 index 00000000000..b55db43eb2f --- /dev/null +++ b/changelogs/fragments/no-return.yml @@ -0,0 +1,2 @@ +minor_changes: + - module_utils - Add ``NoReturn`` type annotations to functions which never return. diff --git a/changelogs/fragments/os_family.yml b/changelogs/fragments/os_family.yml new file mode 100644 index 00000000000..7126a00c27b --- /dev/null +++ b/changelogs/fragments/os_family.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - facts - skip if the distribution file path is a directory, instead of raising an error (https://github.com/ansible/ansible/issues/84006). diff --git a/changelogs/fragments/package-dnf-action-plugins-facts-fail-msg.yml b/changelogs/fragments/package-dnf-action-plugins-facts-fail-msg.yml new file mode 100644 index 00000000000..8dd037a4e02 --- /dev/null +++ b/changelogs/fragments/package-dnf-action-plugins-facts-fail-msg.yml @@ -0,0 +1,2 @@ +bugfixes: + - "``package``/``dnf`` action plugins - provide the reason behind the failure to gather the ``ansible_pkg_mgr`` fact to identify the package backend" diff --git a/changelogs/fragments/package_facts_fix.yml b/changelogs/fragments/package_facts_fix.yml new file mode 100644 index 00000000000..f1ffbf4d641 --- /dev/null +++ b/changelogs/fragments/package_facts_fix.yml @@ -0,0 +1,2 @@ +bugfixes: + - package_facts module - when using 'auto', return the first package manager found that actually provides output, instead of simply the first one detected, as that one can be foreign to the distribution and report no packages. diff --git a/changelogs/fragments/parsing-splitter-fixes.yml b/changelogs/fragments/parsing-splitter-fixes.yml deleted file mode 100644 index 724ba7bfba8..00000000000 --- a/changelogs/fragments/parsing-splitter-fixes.yml +++ /dev/null @@ -1,5 +0,0 @@ -bugfixes: - - Fix exceptions caused by various inputs when performing arg splitting or parsing key/value pairs. Resolves issue https://github.com/ansible/ansible/issues/46379 and issue https://github.com/ansible/ansible/issues/61497 - - Fix incorrect parsing of multi-line Jinja2 blocks when performing arg splitting or parsing key/value pairs. diff --git a/changelogs/fragments/passlib.yml b/changelogs/fragments/passlib.yml new file mode 100644 index 00000000000..b6bf883ae6f --- /dev/null +++ b/changelogs/fragments/passlib.yml @@ -0,0 +1,3 @@ +--- +removed_features: + - encrypt - passing unsupported passlib hashtype now raises AnsibleFilterError. diff --git a/changelogs/fragments/pep517-backend-import-fix.yml b/changelogs/fragments/pep517-backend-import-fix.yml deleted file mode 100644 index e7e2b1d4823..00000000000 --- a/changelogs/fragments/pep517-backend-import-fix.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - pep517 build backend - Use the documented ``import_module`` import from ``importlib``.
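The ``NoReturn`` fragment above is easy to picture with a two-line example on a hypothetical ``fail_json``-style helper:

```python
from typing import NoReturn

def fail_json(msg: str) -> NoReturn:
    # Annotating that this never returns lets type checkers reason about
    # control flow after a call to it.
    raise SystemExit(msg)
```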
diff --git a/changelogs/fragments/pep517-backend-traceback-fix.yml b/changelogs/fragments/pep517-backend-traceback-fix.yml deleted file mode 100644 index cf779f9b465..00000000000 --- a/changelogs/fragments/pep517-backend-traceback-fix.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: - - pep517 build backend - Copy symlinks when copying the source tree. - This avoids tracebacks in various scenarios, such as when a venv is present in the source tree. diff --git a/changelogs/fragments/persist_skip.yml b/changelogs/fragments/persist_skip.yml deleted file mode 100644 index 13164708a05..00000000000 --- a/changelogs/fragments/persist_skip.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - executor now skips persistent connection when it detects an action that does not require a connection. diff --git a/changelogs/fragments/pkg_mgr-default-dnf.yml b/changelogs/fragments/pkg_mgr-default-dnf.yml deleted file mode 100644 index a6269485b7d..00000000000 --- a/changelogs/fragments/pkg_mgr-default-dnf.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - "``pkg_mgr`` - fix the default dnf version detection" diff --git a/changelogs/fragments/pre-release-hint-for-dep-resolution-error.yml b/changelogs/fragments/pre-release-hint-for-dep-resolution-error.yml deleted file mode 100644 index 5a9f2a29e61..00000000000 --- a/changelogs/fragments/pre-release-hint-for-dep-resolution-error.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - ansible-galaxy - add note to the collection dependency resolver error message about pre-releases if ``--pre`` was not provided (https://github.com/ansible/ansible/issues/80048). diff --git a/changelogs/fragments/pylint-deprecated-comment-checker.yml b/changelogs/fragments/pylint-deprecated-comment-checker.yml deleted file mode 100644 index bd315b7191b..00000000000 --- a/changelogs/fragments/pylint-deprecated-comment-checker.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: -- ansible-test - Add new pylint checker for new ``# deprecated:`` comments within code to trigger errors when time to remove code - that has no user facing deprecation message diff --git a/changelogs/fragments/remove-deprecated-actionbase-_remote_checksum.yml b/changelogs/fragments/remove-deprecated-actionbase-_remote_checksum.yml deleted file mode 100644 index 7d38a216b98..00000000000 --- a/changelogs/fragments/remove-deprecated-actionbase-_remote_checksum.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: - - ActionBase - remove deprecated ``_remote_checksum`` method diff --git a/changelogs/fragments/remove-deprecated-filelock-class.yml b/changelogs/fragments/remove-deprecated-filelock-class.yml deleted file mode 100644 index fba516040ed..00000000000 --- a/changelogs/fragments/remove-deprecated-filelock-class.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: - - Remove deprecated ``FileLock`` class diff --git a/changelogs/fragments/remove-include.yml b/changelogs/fragments/remove-include.yml deleted file mode 100644 index 9caddd82976..00000000000 --- a/changelogs/fragments/remove-include.yml +++ /dev/null @@ -1,3 +0,0 @@ -removed_features: -- >- - Removed ``include`` which has been deprecated in Ansible 2.12. Use ``include_tasks`` or ``import_tasks`` instead. 
diff --git a/changelogs/fragments/remove-play_iterator-deprecated-methods.yml b/changelogs/fragments/remove-play_iterator-deprecated-methods.yml deleted file mode 100644 index 792a6b482fa..00000000000 --- a/changelogs/fragments/remove-play_iterator-deprecated-methods.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: - - PlayIterator - remove deprecated ``cache_block_tasks`` and ``get_original_task`` methods diff --git a/changelogs/fragments/remove-python3.5.yml b/changelogs/fragments/remove-python3.5.yml deleted file mode 100644 index 3367f4c1c1d..00000000000 --- a/changelogs/fragments/remove-python3.5.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: - - module_utils/basic.py - Removed Python 3.5 as a supported remote version. Python 2.7 or Python 3.6+ is now required. diff --git a/changelogs/fragments/remove-python3.9-controller-support.yml b/changelogs/fragments/remove-python3.9-controller-support.yml deleted file mode 100644 index 632aa471e82..00000000000 --- a/changelogs/fragments/remove-python3.9-controller-support.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: - - Removed Python 3.9 as a supported version on the controller. Python 3.10 or newer is required. diff --git a/changelogs/fragments/remove-templar-shared_loader_obj-arg.yml b/changelogs/fragments/remove-templar-shared_loader_obj-arg.yml deleted file mode 100644 index f8a2a030d15..00000000000 --- a/changelogs/fragments/remove-templar-shared_loader_obj-arg.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: - - "``Templar`` - remove deprecated ``shared_loader_obj`` parameter of ``__init__``" diff --git a/changelogs/fragments/remove-unreachable-include_role-static-err.yml b/changelogs/fragments/remove-unreachable-include_role-static-err.yml deleted file mode 100644 index 2c1749de317..00000000000 --- a/changelogs/fragments/remove-unreachable-include_role-static-err.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - Remove unreachable parser error for removed ``static`` parameter of ``include_role`` diff --git a/changelogs/fragments/remove_ini_ignored_dir.yml b/changelogs/fragments/remove_ini_ignored_dir.yml new file mode 100644 index 00000000000..10a5a8e61ce --- /dev/null +++ b/changelogs/fragments/remove_ini_ignored_dir.yml @@ -0,0 +1,2 @@ +minor_changes: + - INVENTORY_IGNORE_EXTS config - removed ``ini`` from the default list; inventory scripts using a corresponding .ini configuration are rare now, and inventory.ini files are more common. Those who still need to ignore .ini files for inventory scripts can add the extension back via configuration. diff --git a/changelogs/fragments/remove_md5.yml b/changelogs/fragments/remove_md5.yml deleted file mode 100644 index e007ad268af..00000000000 --- a/changelogs/fragments/remove_md5.yml +++ /dev/null @@ -1,2 +0,0 @@ -removed_features: - - stat - removed unused `get_md5` parameter. diff --git a/changelogs/fragments/run-command-selectors-prompt-only.yml b/changelogs/fragments/run-command-selectors-prompt-only.yml deleted file mode 100644 index c0855bccea6..00000000000 --- a/changelogs/fragments/run-command-selectors-prompt-only.yml +++ /dev/null @@ -1,4 +0,0 @@ -bugfixes: -- AnsibleModule.run_command - Only use selectors when needed, and rely on Python stdlib subprocess for the simple task of collecting stdout/stderr when prompt matching is not required.
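The ``run_command`` fragment above separates the prompt-matching path (selectors) from the simple path; the simple path reduces to plain ``subprocess``, roughly as sketched here (illustrative, not the ``AnsibleModule`` code):

```python
import subprocess

def run_command_simple(args: list[str]) -> tuple[int, str, str]:
    # With no prompt matching, communicate() is enough; selectors are only
    # needed when output must be inspected incrementally.
    proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    stdout, stderr = proc.communicate()
    return proc.returncode, stdout, stderr

print(run_command_simple(['echo', 'hello']))
```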
diff --git a/changelogs/fragments/selector_removal.yml b/changelogs/fragments/selector_removal.yml new file mode 100644 index 00000000000..681686f72e4 --- /dev/null +++ b/changelogs/fragments/selector_removal.yml @@ -0,0 +1,3 @@ +--- +removed_features: + - selector - remove deprecated compat.selector related files (https://github.com/ansible/ansible/pull/84155). diff --git a/changelogs/fragments/server2012-deprecation.yml b/changelogs/fragments/server2012-deprecation.yml deleted file mode 100644 index 5370b13f19b..00000000000 --- a/changelogs/fragments/server2012-deprecation.yml +++ /dev/null @@ -1,8 +0,0 @@ -deprecated_features: -- >- - Support for Windows Server 2012 and 2012 R2 has been removed as the support end of life from Microsoft is October 10th 2023. These versions of Windows will no longer be tested in this Ansible release and it cannot be guaranteed that they will continue to work going forward. - -removed_features: -- ansible-test - Removed support for the remote Windows targets 2012 and 2012-R2 diff --git a/changelogs/fragments/service_facts_fbsd.yml b/changelogs/fragments/service_facts_fbsd.yml new file mode 100644 index 00000000000..6f06ab79f23 --- /dev/null +++ b/changelogs/fragments/service_facts_fbsd.yml @@ -0,0 +1,2 @@ +minor_changes: + - service_facts module - added FreeBSD support. diff --git a/changelogs/fragments/service_facts_rcctl.yml b/changelogs/fragments/service_facts_rcctl.yml deleted file mode 100644 index 9b6dbed429f..00000000000 --- a/changelogs/fragments/service_facts_rcctl.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - service_facts now returns more info for rcctl managed systesm (OpenBSD). diff --git a/changelogs/fragments/service_facts_simpleinit_msb.yml b/changelogs/fragments/service_facts_simpleinit_msb.yml deleted file mode 100644 index 2b8047e2df5..00000000000 --- a/changelogs/fragments/service_facts_simpleinit_msb.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - Update ``ansible_service_mgr`` fact to include init system for SMGL OS family diff --git a/changelogs/fragments/service_fix_obsd.yml b/changelogs/fragments/service_fix_obsd.yml deleted file mode 100644 index ad4b890af72..00000000000 --- a/changelogs/fragments/service_fix_obsd.yml +++ /dev/null @@ -1,7 +0,0 @@ -bugfixes: - - service module, does not permanently configure flags flags on Openbsd when enabling/disabling a service. - - service module, enable/disable is not a exclusive action in checkmode anymore. -breaking_changes: - - service module will not permanently configure variables/flags for openbsd when doing enable/disable operation anymore, this module was never meant to do this type of work, just to manage the service state itself. A rcctl_config or similar module should be created and used instead. diff --git a/changelogs/fragments/setup_facter_fix.yml b/changelogs/fragments/setup_facter_fix.yml deleted file mode 100644 index 78a6b005a4a..00000000000 --- a/changelogs/fragments/setup_facter_fix.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - setup module (fact gathering) will now try to be smarter about different versions of facter emitting error when --puppet flag is used w/o puppet.
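The ``setup``/facter fragment above suggests a retry-without-flag strategy; this is a hypothetical sketch of that idea, not the module's actual logic:

```python
import subprocess

def run_facter() -> str | None:
    # Try with --puppet first; fall back to plain facter when the flag
    # fails (for example, when puppet is not installed).
    for cmd in (['facter', '--puppet', '--json'], ['facter', '--json']):
        try:
            return subprocess.run(cmd, capture_output=True, text=True, check=True).stdout
        except (OSError, subprocess.CalledProcessError):
            continue
    return None
```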
diff --git a/changelogs/fragments/simple-result-queue.yml b/changelogs/fragments/simple-result-queue.yml deleted file mode 100644 index 300e1495cb3..00000000000 --- a/changelogs/fragments/simple-result-queue.yml +++ /dev/null @@ -1,3 +0,0 @@ -bugfixes: -- Switch result queue from a ``multiprocessing.queues.Queue` to ``multiprocessing.queues.SimpleQueue``, primarily to allow properly handling - pickling errors, to prevent an infinite hang waiting for task results diff --git a/changelogs/fragments/skip-handlers-tagged-play.yml b/changelogs/fragments/skip-handlers-tagged-play.yml new file mode 100644 index 00000000000..755308eafbe --- /dev/null +++ b/changelogs/fragments/skip-handlers-tagged-play.yml @@ -0,0 +1,2 @@ +bugfixes: + - "Do not run implicit ``flush_handlers`` meta tasks when the whole play is excluded from the run due to tags specified." diff --git a/changelogs/fragments/skip-implicit-flush_handlers-no-notify.yml b/changelogs/fragments/skip-implicit-flush_handlers-no-notify.yml new file mode 100644 index 00000000000..a4c913791d2 --- /dev/null +++ b/changelogs/fragments/skip-implicit-flush_handlers-no-notify.yml @@ -0,0 +1,2 @@ +bugfixes: + - "Improve performance on large inventories by reducing the number of implicit meta tasks." diff --git a/changelogs/fragments/skip-role-task-iterator.yml b/changelogs/fragments/skip-role-task-iterator.yml new file mode 100644 index 00000000000..1cf6b4cbb84 --- /dev/null +++ b/changelogs/fragments/skip-role-task-iterator.yml @@ -0,0 +1,2 @@ +minor_changes: + - PlayIterator - do not return tasks from already executed roles so specific strategy plugins do not have to do the filtering of such tasks themselves diff --git a/changelogs/fragments/smart_connection_bye.yml b/changelogs/fragments/smart_connection_bye.yml deleted file mode 100644 index b9339960aff..00000000000 --- a/changelogs/fragments/smart_connection_bye.yml +++ /dev/null @@ -1,4 +0,0 @@ -minor_changes: - - DEFAULT_TRANSPORT now defaults to 'ssh', the old 'smart' option is being deprecated as versions of OpenSSH without control persist are basically not present anymore. -deprecated_features: - - the 'smart' option for setting a connection plugin is being removed as it's main purpose (choosing between ssh and paramiko) is now irrelevant. diff --git a/changelogs/fragments/string_conversion.yml b/changelogs/fragments/string_conversion.yml new file mode 100644 index 00000000000..58032896171 --- /dev/null +++ b/changelogs/fragments/string_conversion.yml @@ -0,0 +1,3 @@ +--- +removed_features: + - Removed deprecated STRING_CONVERSION_ACTION (https://github.com/ansible/ansible/issues/84220). diff --git a/changelogs/fragments/templar-globals-dict.yml b/changelogs/fragments/templar-globals-dict.yml deleted file mode 100644 index 4c7f0fa486c..00000000000 --- a/changelogs/fragments/templar-globals-dict.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - "``Templar`` - do not add the ``dict`` constructor to ``globals`` as all required Jinja2 versions already do so" diff --git a/changelogs/fragments/templating_fixes.yml b/changelogs/fragments/templating_fixes.yml deleted file mode 100644 index caab02999c5..00000000000 --- a/changelogs/fragments/templating_fixes.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - templating escape and single var optimization now use correct delimiters when custom ones are provided either via task or template header. 
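The ``simple-result-queue`` fragment above has a concrete rationale worth spelling out: ``multiprocessing.Queue`` serializes objects in a background feeder thread, so a pickling error is raised where nothing can handle it and the reader hangs forever, while ``SimpleQueue.put()`` pickles synchronously in the caller:

```python
from multiprocessing import SimpleQueue

q: SimpleQueue = SimpleQueue()
# put() pickles here, in the calling process, so an unpicklable task result
# raises immediately instead of dying silently in a feeder thread.
q.put({'host': 'web1', 'changed': True})
print(q.get())
```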
diff --git a/changelogs/fragments/text-converters.yml b/changelogs/fragments/text-converters.yml deleted file mode 100644 index 8dafc2fedec..00000000000 --- a/changelogs/fragments/text-converters.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - Use ``ansible.module_utils.common.text.converters`` instead of ``ansible.module_utils._text``. diff --git a/changelogs/fragments/timeout_config_fix.yml b/changelogs/fragments/timeout_config_fix.yml deleted file mode 100644 index 5cebe81bf69..00000000000 --- a/changelogs/fragments/timeout_config_fix.yml +++ /dev/null @@ -1,5 +0,0 @@ -bugfixes: - - connection timeouts defined in ansible.cfg will now be properly used, the --timeout cli option was obscuring them by always being set. -breaking_changes: - - Any plugin using the config system and the `cli` entry to use the `timeout` from the command line, will see the value change if the use had configured it in any of the lower precedence methods. - If relying on this behaviour to consume the global/generic timeout from the DEFAULT_TIMEOUT constant, please consult the documentation on plugin configuration to add the overlaping entries. diff --git a/changelogs/fragments/update-maybe-json-uri.yml b/changelogs/fragments/update-maybe-json-uri.yml deleted file mode 100644 index 7cf693d2ce2..00000000000 --- a/changelogs/fragments/update-maybe-json-uri.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- uri - fix search for JSON type to include complex strings containing '+' diff --git a/changelogs/fragments/update-resolvelib-lt-2_0_0.yml b/changelogs/fragments/update-resolvelib-lt-2_0_0.yml new file mode 100644 index 00000000000..10c4f1a0838 --- /dev/null +++ b/changelogs/fragments/update-resolvelib-lt-2_0_0.yml @@ -0,0 +1,2 @@ +minor_changes: + - ansible-galaxy - support ``resolvelib >= 0.5.3, < 2.0.0`` (https://github.com/ansible/ansible/issues/84217). diff --git a/changelogs/fragments/urls-client-cert-py12.yml b/changelogs/fragments/urls-client-cert-py12.yml deleted file mode 100644 index aab129ed96e..00000000000 --- a/changelogs/fragments/urls-client-cert-py12.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- urls.py - fixed cert_file and key_file parameters when running on Python 3.12 - https://github.com/ansible/ansible/issues/80490 diff --git a/changelogs/fragments/urls-unit-test-latest-cryptography.yml b/changelogs/fragments/urls-unit-test-latest-cryptography.yml deleted file mode 100644 index a3a195f33db..00000000000 --- a/changelogs/fragments/urls-unit-test-latest-cryptography.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - Update module_utils.urls unit test to work with cryptography >= 41.0.0. diff --git a/changelogs/fragments/user-add-password-exp-warning.yml b/changelogs/fragments/user-add-password-exp-warning.yml deleted file mode 100644 index 77acc59e81d..00000000000 --- a/changelogs/fragments/user-add-password-exp-warning.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: - - user - add new option ``password_expire_warn`` (supported on Linux only) to set the number of days of warning before a password change is required (https://github.com/ansible/ansible/issues/79882). 
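The ``password_expire_warn`` fragment above maps to the Linux shadow utilities; a hedged sketch of the underlying operation (the module's real implementation may differ):

```python
import subprocess

def set_password_expire_warn(user: str, days: int) -> None:
    # chage -W sets the number of days of warning before a password
    # change is required for the given account.
    subprocess.run(['chage', '-W', str(days), user], check=True)
```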
diff --git a/changelogs/fragments/user_action_fix.yml b/changelogs/fragments/user_action_fix.yml new file mode 100644 index 00000000000..64ee997d688 --- /dev/null +++ b/changelogs/fragments/user_action_fix.yml @@ -0,0 +1,2 @@ +bugfixes: + - user module now avoids changing ownership of files symlinked in provided home dir skeleton diff --git a/changelogs/fragments/user_ssh_fix.yml b/changelogs/fragments/user_ssh_fix.yml new file mode 100644 index 00000000000..b2c47d60e3a --- /dev/null +++ b/changelogs/fragments/user_ssh_fix.yml @@ -0,0 +1,4 @@ +bugfixes: + - user action will now require O(force) to overwrite the public part of an ssh key when generating ssh keys, as was already the case for the private part. +security_fixes: + - user action won't allow ssh-keygen, chown and chmod to run on existing ssh public key file, avoiding traversal on existing symlinks (CVE-2024-9902). diff --git a/changelogs/fragments/v2.16.0-initial-commit.yaml b/changelogs/fragments/v2.19.0-initial-commit.yaml similarity index 100% rename from changelogs/fragments/v2.16.0-initial-commit.yaml rename to changelogs/fragments/v2.19.0-initial-commit.yaml diff --git a/changelogs/fragments/yum-repository-docs-fixes.yml b/changelogs/fragments/yum-repository-docs-fixes.yml deleted file mode 100644 index 2982ffc4ddd..00000000000 --- a/changelogs/fragments/yum-repository-docs-fixes.yml +++ /dev/null @@ -1,2 +0,0 @@ -minor_changes: -- yum_repository - Align module documentation with parameters diff --git a/hacking/README.md b/hacking/README.md index 51f17202ed5..534a7e4db0e 100644 --- a/hacking/README.md +++ b/hacking/README.md @@ -5,7 +5,7 @@ env-setup --------- The 'env-setup' script modifies your environment to allow you to run -ansible from a git checkout using python >= 3.10. +ansible from a git checkout using python >= 3.11. 
First, set up your environment to run from the checkout: @@ -18,7 +18,7 @@ and do not wish to install them from your operating system package manager, you can install them from pip ```shell -easy_install pip # if pip is not already available +python -Im ensurepip # if pip is not already available pip install -r requirements.txt ``` diff --git a/hacking/ansible-profile b/hacking/ansible-profile.py similarity index 69% rename from hacking/ansible-profile rename to hacking/ansible-profile.py index 9856e7ab5c9..7016ebe098c 100755 --- a/hacking/ansible-profile +++ b/hacking/ansible-profile.py @@ -1,24 +1,22 @@ #!/usr/bin/env python -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import cProfile import sys import traceback -import ansible.constants as C from ansible.module_utils.common.text.converters import to_text target = sys.argv.pop(1) myclass = "%sCLI" % target.capitalize() +module_name = f'ansible.cli.{target}' try: # define cli - mycli = getattr(__import__("ansible.cli.%s" % target, fromlist=[myclass]), myclass) + mycli = getattr(__import__(module_name, fromlist=[myclass]), myclass) except ImportError as e: - msg = getattr(e, 'msg', getattr(e, message, '')) - if msg.endswith(' %s' % target): - raise Exception("Ansible sub-program not implemented: %s" % target) + if module_name in e.msg: + raise Exception("Ansible sub-program not implemented: %s" % target) from None else: raise diff --git a/hacking/azp/README.md b/hacking/azp/README.md index 6e833fefafa..fbb531844f5 100644 --- a/hacking/azp/README.md +++ b/hacking/azp/README.md @@ -15,7 +15,7 @@ This directory contains the following scripts: Incidental testing and code coverage occurs when a test covers one or more portions of code as an unintentional side-effect of testing another portion of code. -For example, the ``yum`` integration test intentionally tests the ``yum`` Ansible module. +For example, the ``dnf`` integration test intentionally tests the ``dnf`` Ansible module. However, in doing so it also uses, and unintentionally tests the ``file`` module as well. As part of the process of migrating modules and plugins into collections, integration tests were identified that provided exclusive incidental code coverage. diff --git a/hacking/azp/download.py b/hacking/azp/download.py index e0de99a27aa..47ebf39b11d 100755 --- a/hacking/azp/download.py +++ b/hacking/azp/download.py @@ -19,8 +19,7 @@ # along with Ansible. If not, see . """CLI tool for downloading results from Azure Pipelines CI runs.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # noinspection PyCompatibility import argparse diff --git a/hacking/azp/get_recent_coverage_runs.py b/hacking/azp/get_recent_coverage_runs.py index 1be867da1e4..b479179e711 100755 --- a/hacking/azp/get_recent_coverage_runs.py +++ b/hacking/azp/get_recent_coverage_runs.py @@ -17,8 +17,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.utils.color import stringc import requests diff --git a/hacking/azp/incidental.py b/hacking/azp/incidental.py index 87d4d213c94..5fc83e3d1ad 100755 --- a/hacking/azp/incidental.py +++ b/hacking/azp/incidental.py @@ -18,8 +18,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. 
If not, see . """CLI tool for reporting on incidental test coverage.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # noinspection PyCompatibility import argparse diff --git a/hacking/azp/run.py b/hacking/azp/run.py index 00a177944f8..c5e248beb3b 100755 --- a/hacking/azp/run.py +++ b/hacking/azp/run.py @@ -20,8 +20,7 @@ """CLI tool for starting new CI runs.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # noinspection PyCompatibility import argparse diff --git a/hacking/backport/README.md b/hacking/backport/README.md index 3fb212b33e0..ce7112b22f9 100644 --- a/hacking/backport/README.md +++ b/hacking/backport/README.md @@ -4,7 +4,7 @@ This directory contains scripts useful for dealing with and maintaining backports. Scripts in it depend on pygithub, and expect a valid environment variable called `GITHUB_TOKEN`. -To generate a Github token, go to https://github.com/settings/tokens/new +To generate a Github token, go to ## `backport_of_line_adder.py` diff --git a/hacking/backport/backport_of_line_adder.py b/hacking/backport/backport_of_line_adder.py index ef77ddcf400..70d03efd2c6 100755 --- a/hacking/backport/backport_of_line_adder.py +++ b/hacking/backport/backport_of_line_adder.py @@ -16,9 +16,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from github.PullRequest import PullRequest from github import Github @@ -34,14 +32,14 @@ TICKET_NUMBER = re.compile(r'(?:^|\s)#(\d+)') def normalize_pr_url(pr, allow_non_ansible_ansible=False, only_number=False): - ''' + """ Given a PullRequest, or a string containing a PR number, PR URL, or internal PR URL (e.g. ansible-collections/community.general#1234), return either a full github URL to the PR (if only_number is False), or an int containing the PR number (if only_number is True). Throws if it can't parse the input. - ''' + """ if isinstance(pr, PullRequest): return pr.html_url @@ -73,10 +71,10 @@ def normalize_pr_url(pr, allow_non_ansible_ansible=False, only_number=False): def url_to_org_repo(url): - ''' + """ Given a full Github PR URL, extract the user/org and repo name. Return them in the form: "user/repo" - ''' + """ match = PULL_HTTP_URL_RE.match(url) if not match: return '' @@ -84,7 +82,7 @@ def url_to_org_repo(url): def generate_new_body(pr, source_pr): - ''' + """ Given the new PR (the backport) and the originating (source) PR, construct the new body for the backport PR. @@ -95,7 +93,7 @@ def generate_new_body(pr, source_pr): This function does not side-effect, it simply returns the new body as a string. - ''' + """ backport_text = '\nBackport of {0}\n'.format(source_pr) body_lines = pr.body.split('\n') new_body_lines = [] @@ -117,10 +115,10 @@ def generate_new_body(pr, source_pr): def get_prs_for_commit(g, commit): - ''' + """ Given a commit hash, attempt to find the hash in any repo in the ansible orgs, and then use it to determine what, if any, PR it appeared in. - ''' + """ commits = g.search_commits( 'hash:{0} org:ansible org:ansible-collections is:public'.format(commit) @@ -134,7 +132,7 @@ def get_prs_for_commit(g, commit): def search_backport(pr, g, ansible_ansible): - ''' + """ Do magic. This is basically the "brain" of 'auto'. 
It will search the PR (the newest PR - the backport) and try to find where it originated. @@ -150,7 +148,7 @@ def search_backport(pr, g, ansible_ansible): It will take all of the above, and return a list of "possibilities", which is a list of PullRequest objects. - ''' + """ possibilities = [] @@ -200,20 +198,20 @@ def search_backport(pr, g, ansible_ansible): def prompt_add(): - ''' + """ Prompt the user and return whether or not they agree. - ''' + """ res = input('Shall I add the reference? [Y/n]: ') return res.lower() in ('', 'y', 'yes') def commit_edit(new_pr, pr): - ''' + """ Given the new PR (the backport), and the "possibility" that we have decided on, prompt the user and then add the reference to the body of the new PR. This method does the actual "destructive" work of editing the PR body. - ''' + """ print('I think this PR might have come from:') print(pr.title) print('-' * 50) diff --git a/hacking/create-bulk-issues.py b/hacking/create-bulk-issues.py index d2651415df1..09c79590e22 100755 --- a/hacking/create-bulk-issues.py +++ b/hacking/create-bulk-issues.py @@ -35,6 +35,7 @@ class Issue: body: str project: str labels: list[str] | None = None + assignee: str | None = None def create(self) -> str: cmd = ['gh', 'issue', 'create', '--title', self.title, '--body', self.body, '--project', self.project] @@ -43,8 +44,18 @@ class Issue: for label in self.labels: cmd.extend(('--label', label)) - process = subprocess.run(cmd, capture_output=True, check=True) - url = process.stdout.decode().strip() + if self.assignee: + cmd.extend(('--assignee', self.assignee)) + + try: + process = subprocess.run(cmd, capture_output=True, check=True, text=True) + except subprocess.CalledProcessError as ex: + print('>>> Note') + print(f"You may need to run 'gh auth refresh -s project' if 'gh' reports it cannot find the project {self.project!r} when it exists.") + print(f'>>> Standard Output\n{ex.stdout.strip()}\n>>> Standard Error\n{ex.stderr.strip()}\n>>> Exception') + raise + + url = process.stdout.strip() return url @@ -54,6 +65,7 @@ class Feature: summary: str component: str labels: list[str] | None = None + assignee: str | None = None @staticmethod def from_dict(data: dict[str, t.Any]) -> Feature: @@ -61,6 +73,7 @@ class Feature: summary = data.get('summary') component = data.get('component') labels = data.get('labels') + assignee = data.get('assignee') if not isinstance(title, str): raise RuntimeError(f'`title` is not `str`: {title}') @@ -71,6 +84,9 @@ class Feature: if not isinstance(component, str): raise RuntimeError(f'`component` is not `str`: {component}') + if not isinstance(assignee, (str, type(None))): + raise RuntimeError(f'`assignee` is not `str`: {assignee}') + if not isinstance(labels, list) or not all(isinstance(item, str) for item in labels): raise RuntimeError(f'`labels` is not `list[str]`: {labels}') @@ -79,6 +95,7 @@ class Feature: summary=summary, component=component, labels=labels, + assignee=assignee, ) def create_issue(self, project: str) -> Issue: @@ -102,6 +119,7 @@ Feature Idea body=body.strip(), project=project, labels=self.labels, + assignee=self.assignee, ) @@ -297,7 +315,21 @@ def create_deprecation_parser(subparser) -> None: def create_feature_parser(subparser) -> None: - parser: argparse.ArgumentParser = subparser.add_parser('feature') + epilog = """ +Example source YAML: + +default: + component: ansible-test + labels: + - ansible-test + - feature + assignee: "@me" +features: + - title: Some title goes here + summary: A summary goes here. 
+""" + + parser: argparse.ArgumentParser = subparser.add_parser('feature', epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter) parser.set_defaults(type=FeatureArgs) parser.set_defaults(command=feature_command) diff --git a/hacking/env-setup b/hacking/env-setup index 0a86e0fe4fb..df1ea4020f2 100644 --- a/hacking/env-setup +++ b/hacking/env-setup @@ -57,22 +57,6 @@ expr "$PYTHONPATH" : "${ANSIBLE_TEST_PREFIX_PYTHONPATH}.*" > /dev/null || prepen expr "$PATH" : "${PREFIX_PATH}.*" > /dev/null || prepend_path PATH "$PREFIX_PATH" expr "$MANPATH" : "${PREFIX_MANPATH}.*" > /dev/null || prepend_path MANPATH "$PREFIX_MANPATH" -# -# Generate egg_info so that pkg_resources works -# - -# Do the work in a function so we don't repeat ourselves later -gen_egg_info() -{ - # check for current and past egg-info directory names - if ls "$PREFIX_PYTHONPATH"/ansible*.egg-info >/dev/null 2>&1; then - # bypass shell aliases with leading backslash - # see https://github.com/ansible/ansible/pull/11967 - \rm -rf "$PREFIX_PYTHONPATH"/ansible*.egg-info - fi - "$PYTHON_BIN" setup.py egg_info -} - if [ "$ANSIBLE_DEV_HOME" != "$PWD" ] ; then current_dir="$PWD" else @@ -81,10 +65,8 @@ fi ( cd "$ANSIBLE_DEV_HOME" if [ "$verbosity" = silent ] ; then - gen_egg_info > /dev/null 2>&1 & find . -type f -name "*.pyc" -exec rm -f {} \; > /dev/null 2>&1 else - gen_egg_info find . -type f -name "*.pyc" -exec rm -f {} \; fi cd "$current_dir" diff --git a/hacking/env-setup.fish b/hacking/env-setup.fish index ebc9afcc5dd..fcb739bf0cd 100644 --- a/hacking/env-setup.fish +++ b/hacking/env-setup.fish @@ -3,9 +3,23 @@ # Description: Modifies the environment for running Ansible from a checkout # Usage: . ./hacking/env-setup [-q] +# Set PYTHON_BIN +if not set -q PYTHON_BIN + for exe in python3 python + if command -v $exe > /dev/null + set -gx PYTHON_BIN (command -v $exe) + break + end + end + if not set -q PYTHON_BIN + echo "No valid Python found" + exit 1 + end +end + # Retrieve the path of the current directory where the script resides set HACKING_DIR (dirname (status -f)) -set FULL_PATH (python -c "import os; print(os.path.realpath('$HACKING_DIR'))") +set FULL_PATH ($PYTHON_BIN -c "import os; print(os.path.realpath('$HACKING_DIR'))") set ANSIBLE_HOME (dirname $FULL_PATH) # Set quiet flag @@ -23,16 +37,16 @@ set -gx PREFIX_MANPATH $ANSIBLE_HOME/docs/man # Set PYTHONPATH if not set -q PYTHONPATH set -gx PYTHONPATH $PREFIX_PYTHONPATH -else if not string match -qr "$PREFIX_PYTHONPATH($|:)" $PYTHONPATH - if not $QUIET +else if not string match -qr $PREFIX_PYTHONPATH'($|:)' $PYTHONPATH + if not test -n "$QUIET" echo "Appending PYTHONPATH" end set -gx PYTHONPATH "$PREFIX_PYTHONPATH:$PYTHONPATH" end # Set ansible_test PYTHONPATH -if not string match -qr "$ANSIBLE_TEST_PREFIX_PYTHONPATH($|:)" $PYTHONPATH - if not $QUIET +if not string match -qr $ANSIBLE_TEST_PREFIX_PYTHONPATH'($|:)' $PYTHONPATH + if not test -n "$QUIET" echo "Appending PYTHONPATH" end set -gx PYTHONPATH "$ANSIBLE_TEST_PREFIX_PYTHONPATH:$PYTHONPATH" @@ -46,43 +60,15 @@ end # Set MANPATH if not set -q MANPATH set -gx MANPATH $PREFIX_MANPATH -else if not string match -qr "$PREFIX_MANPATH($|:)" $MANPATH +else if not string match -qr $PREFIX_MANPATH'($|:)' $MANPATH set -gx MANPATH "$PREFIX_MANPATH:$MANPATH" end -# Set PYTHON_BIN -if not set -q PYTHON_BIN - for exe in python3 python - if command -v $exe > /dev/null - set -gx PYTHON_BIN (command -v $exe) - break - end - end - if not set -q PYTHON_BIN - echo "No valid Python found" - exit 1 - end -end - -# Generate 
egg_info so that pkg_resources works -function gen_egg_info - # Check if ansible*.egg-info directory exists and remove if found - if test -d $PREFIX_PYTHONPATH/ansible*.egg-info - rm -rf $PREFIX_PYTHONPATH/ansible*.egg-info - end - # Execute setup.py egg_info using the chosen Python interpreter - (eval $PYTHON_BIN setup.py egg_info) -end - pushd $ANSIBLE_HOME -if $QUIET - # Run gen_egg_info in the background and redirect output to /dev/null - gen_egg_info &> /dev/null +if test -n "$QUIET" # Remove any .pyc files found find . -type f -name "*.pyc" -exec rm -f '{}' ';' &> /dev/null else - # Run gen_egg_info - gen_egg_info # Remove any .pyc files found find . -type f -name "*.pyc" -exec rm -f '{}' ';' # Display setup details diff --git a/hacking/report.py b/hacking/report.py index 58b3a6b915a..f968c41aa11 100755 --- a/hacking/report.py +++ b/hacking/report.py @@ -2,8 +2,7 @@ # PYTHON_ARGCOMPLETE_OK """A tool to aggregate data about Ansible source and testing into a sqlite DB for reporting.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import argparse import json diff --git a/hacking/return_skeleton_generator.py b/hacking/return_skeleton_generator.py index 7002b7899d5..875b5f06501 100755 --- a/hacking/return_skeleton_generator.py +++ b/hacking/return_skeleton_generator.py @@ -26,8 +26,7 @@ # You will likely want to adjust this to remove sensitive data or # ensure the `returns` value is correct, and to write a useful description -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from collections import OrderedDict import json diff --git a/hacking/test-module b/hacking/test-module deleted file mode 120000 index 1deb52b4677..00000000000 --- a/hacking/test-module +++ /dev/null @@ -1 +0,0 @@ -test-module.py \ No newline at end of file diff --git a/hacking/test-module.py b/hacking/test-module.py index 7a329b4b2df..a9df1a79b8f 100755 --- a/hacking/test-module.py +++ b/hacking/test-module.py @@ -28,8 +28,7 @@ # ./hacking/test-module.py -m lib/ansible/modules/lineinfile.py -a "dest=/etc/exports line='/srv/home hostname1(rw,sync)'" --check # ./hacking/test-module.py -m lib/ansible/modules/command.py -a "echo hello" -n -o "test_hello" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import glob import optparse @@ -39,6 +38,8 @@ import sys import traceback import shutil +from pathlib import Path + from ansible.release import __version__ import ansible.utils.vars as utils_vars from ansible.parsing.dataloader import DataLoader @@ -90,13 +91,11 @@ def parse(): def write_argsfile(argstring, json=False): """ Write args to a file for old-style module's use. 
""" - argspath = os.path.expanduser("~/.ansible_test_module_arguments") - argsfile = open(argspath, 'w') + argspath = Path("~/.ansible_test_module_arguments").expanduser() if json: args = parse_kv(argstring) argstring = jsonify(args) - argsfile.write(argstring) - argsfile.close() + argspath.write_text(argstring) return argspath @@ -170,9 +169,8 @@ def boilerplate_module(modfile, args, interpreters, check, destfile): print("* including generated source, if any, saving to: %s" % modfile2_path) if module_style not in ('ansiballz', 'old'): print("* this may offset any line numbers in tracebacks/debuggers!") - modfile2 = open(modfile2_path, 'wb') - modfile2.write(module_data) - modfile2.close() + with open(modfile2_path, 'wb') as modfile2: + modfile2.write(module_data) modfile = modfile2_path return (modfile2_path, modname, module_style) diff --git a/hacking/tests/gen_distribution_version_testcase.py b/hacking/tests/gen_distribution_version_testcase.py index e75c78ad919..57903180c11 100755 --- a/hacking/tests/gen_distribution_version_testcase.py +++ b/hacking/tests/gen_distribution_version_testcase.py @@ -9,8 +9,7 @@ and the current ansible_facts regarding the distribution version. This assumes a working ansible version in the path. """ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json import os.path diff --git a/hacking/ticket_stubs/bug_internal_api.md b/hacking/ticket_stubs/bug_internal_api.md index 76a3bb085ca..89162558ca0 100644 --- a/hacking/ticket_stubs/bug_internal_api.md +++ b/hacking/ticket_stubs/bug_internal_api.md @@ -13,11 +13,11 @@ but this does not seem to match that case. If you really need a stable API target to use Ansible, consider using ansible-runner: -* https://github.com/ansible/ansible-runner +* Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time. If you or anyone else has any further questions, please let us know by using any of the communication methods listed in the page below: -* https://docs.ansible.com/ansible/latest/community/communication.html +* Thank you once again for this and your interest in Ansible! diff --git a/hacking/ticket_stubs/bug_wrong_repo.md b/hacking/ticket_stubs/bug_wrong_repo.md index b711e85f3ae..ed115232a20 100644 --- a/hacking/ticket_stubs/bug_wrong_repo.md +++ b/hacking/ticket_stubs/bug_wrong_repo.md @@ -8,29 +8,28 @@ This appears to be something that should be filed against another project or bug << CHOOSE AS APPROPRIATE >> -* https://github.com/ansible-community/ansible-lint -* https://github.com/ansible/ansible-runner -* https://github.com/ansible/ansible-navigator -* https://github.com/ansible-community/antsibull -* https://github.com/ansible-community/ara -* https://github.com/ansible/awx -* https://github.com/ansible-collections/community.general -* https://github.com/ansible-community/molecule -* For AAP or Tower licensees report issues via your Red Hat representative or https://issues.redhat.com +* +* +* +* +* +* +* +* +* For AAP Customer issues please see If you can stop by the tracker or forum for one of those projects, we'd appreciate it. Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time. 
Should you still wish to discuss things further, or if you disagree with our thought process, please stop by one of our two mailing lists: -* https://groups.google.com/forum/#!forum/ansible-devel +* [ansible-core on the Ansible Forum](https://forum.ansible.com/tag/ansible-core) * Matrix: [#devel:ansible.im](https://matrix.to/#/#devel:ansible.im) -* IRC: #ansible-devel on [irc.libera.chat](https://libera.chat/) We'd be happy to discuss things. See this page for a complete list of communication channels and their purposes: -* https://docs.ansible.com/ansible/latest/community/communication.html +* <https://docs.ansible.com/ansible/latest/community/communication.html> Thank you once again! diff --git a/hacking/ticket_stubs/collections.md b/hacking/ticket_stubs/collections.md index eecd8151f15..3698ea14bd9 100644 --- a/hacking/ticket_stubs/collections.md +++ b/hacking/ticket_stubs/collections.md @@ -2,15 +2,16 @@ Hi! Thank you very much for your submission to Ansible. It means a lot to us that you've taken the time to contribute. -Since Ansible 2.10 we are no longer accepting new modules/plugins into Ansible core. However, we recommend looking into providing this functionality through Ansible Galaxy via Ansible Collections. You can find more information about collections at: +Since Ansible 2.10 we are no longer accepting new modules/plugins into Ansible core. +However, we recommend looking into providing this functionality through Ansible Galaxy via Ansible Collections. You can find more information about collections at: -* https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html. +* <https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html>. Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time. The mailing list and irc are great ways to ask questions, or post if you don't think this particular issue is resolved. See this page for a complete and up to date list of communication channels and their purposes: -* https://docs.ansible.com/ansible/latest/community/communication.html +* <https://docs.ansible.com/ansible/latest/community/communication.html> Thank you once again for this and your interest in Ansible! diff --git a/hacking/ticket_stubs/guide_newbie_about_gh_and_contributing_to_ansible.md b/hacking/ticket_stubs/guide_newbie_about_gh_and_contributing_to_ansible.md index 3f4de70d1ff..708eedc53d1 100644 --- a/hacking/ticket_stubs/guide_newbie_about_gh_and_contributing_to_ansible.md +++ b/hacking/ticket_stubs/guide_newbie_about_gh_and_contributing_to_ansible.md @@ -9,13 +9,13 @@ Assuming that you wanted to create actual contribution, I think that you may want to learn and read through the following articles I've gathered for you: -• https://opensource.guide/how-to-contribute/ -• https://docs.ansible.com/ansible/devel/community/ +• <https://opensource.guide/how-to-contribute/> +• <https://docs.ansible.com/ansible/devel/community/> Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time. If you or anyone else has any further questions, please let us know by using any of the communication methods listed in the page below: - https://docs.ansible.com/ansible/latest/community/communication.html + <https://docs.ansible.com/ansible/latest/community/communication.html> Have a nice day! diff --git a/hacking/ticket_stubs/no_thanks.md b/hacking/ticket_stubs/no_thanks.md index 2e2143fe619..8c32b6bc4f9 100644 --- a/hacking/ticket_stubs/no_thanks.md +++ b/hacking/ticket_stubs/no_thanks.md @@ -11,8 +11,9 @@ However, we're absolutely always up for discussion. Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time.
If you or anyone else has any further questions, please let us know by using any of the communication methods listed in the page below: -* https://docs.ansible.com/ansible/latest/community/communication.html +* <https://docs.ansible.com/ansible/latest/community/communication.html> -In the future, sometimes starting a discussion on the development list prior to implementing a feature can make getting things included a little easier, but it's not always necessary. +In the future, sometimes starting a discussion on the development list prior to implementing +a feature can make getting things included a little easier, but it's not always necessary. Thank you once again for this and your interest in Ansible! diff --git a/hacking/ticket_stubs/pr_duplicate.md b/hacking/ticket_stubs/pr_duplicate.md index 01a2a72809f..080e4e4abf1 100644 --- a/hacking/ticket_stubs/pr_duplicate.md +++ b/hacking/ticket_stubs/pr_duplicate.md @@ -15,6 +15,6 @@ In the future, sometimes starting a discussion on the development list prior to Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time. If you or anyone else has any further questions, please let us know by using any of the communication methods listed in the page below: -* https://docs.ansible.com/ansible/latest/community/communication.html +* <https://docs.ansible.com/ansible/latest/community/communication.html> Thank you once again for this and your interest in Ansible! diff --git a/hacking/ticket_stubs/pr_merged.md b/hacking/ticket_stubs/pr_merged.md index 0183ee90630..5d354e3586f 100644 --- a/hacking/ticket_stubs/pr_merged.md +++ b/hacking/ticket_stubs/pr_merged.md @@ -1,7 +1,7 @@ Hi! This has been merged in, and will also be included in the next major release. -For more info on our process see https://docs.ansible.com/ansible/devel/reference_appendices/release_and_maintenance.html#ansible-core-workflow +For more info on our process see <https://docs.ansible.com/ansible/devel/reference_appendices/release_and_maintenance.html#ansible-core-workflow> If you or anyone else has any further questions, please let us know by stopping by one of the mailing lists or chat channels, as appropriate. @@ -10,6 +10,6 @@ The mailing list and irc are great ways to ask questions, or post if you don't t See this page for a complete and up to date list of communication channels and their purposes: -* https://docs.ansible.com/ansible/latest/community/communication.html +* <https://docs.ansible.com/ansible/latest/community/communication.html> Thank you! diff --git a/hacking/ticket_stubs/proposal.md b/hacking/ticket_stubs/proposal.md index 25d4cb403fe..2d8182f12be 100644 --- a/hacking/ticket_stubs/proposal.md +++ b/hacking/ticket_stubs/proposal.md @@ -3,16 +3,15 @@ Hi! Ansible has a Proposal process for large feature ideas or changes in current design and functionality, such as this. If you are still interested in seeing this new feature get into Ansible, please submit a proposal for it using this process. -https://github.com/ansible/proposals/blob/master/proposals_process_proposal.md +<https://github.com/ansible/proposals/blob/master/proposals_process_proposal.md> Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time. -The mailing list and irc are great ways to ask questions, or post if you don't think this particular issue is resolved. +The Forum is the best way to ask questions, or post if you don't think this particular issue is resolved. -* #ansible-devel on [irc.libera.chat](https://libera.chat/) -* https://groups.google.com/forum/#!forum/ansible-devel + Or check this page for a more complete list of communication channels and their purposes: -* https://docs.ansible.com/ansible/latest/community/communication.html +* <https://docs.ansible.com/ansible/latest/community/communication.html> Thank you!
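As an aside on the hacking/ansible-profile.py hunk near the top of this patch: it resolves a CLI class dynamically from `ansible.cli.<target>` and treats an `ImportError` mentioning that module name as "unknown sub-program". A minimal, self-contained sketch of that load-then-profile idiom follows; the `run_profiled` wrapper, the use of `importlib` instead of `__import__`, and the `cProfile.runctx` call are illustrative assumptions, not code from the patch:

```python
import cProfile
import importlib


def run_profiled(target: str, argv: list[str]) -> None:
    """Resolve ansible.cli.<target>.<Target>CLI dynamically and profile its run()."""
    module_name = f'ansible.cli.{target}'
    class_name = '%sCLI' % target.capitalize()
    try:
        cli_class = getattr(importlib.import_module(module_name), class_name)
    except ImportError as e:
        # mirror the hunk's check: only treat the error as "not implemented"
        # when it refers to the module we actually asked for
        if module_name in e.msg:
            raise Exception('Ansible sub-program not implemented: %s' % target) from None
        raise
    cli = cli_class(argv)
    # profile the CLI run in the current context
    cProfile.runctx('cli.run()', globals(), locals(), sort='cumulative')
```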
diff --git a/hacking/ticket_stubs/question_not_bug.md b/hacking/ticket_stubs/question_not_bug.md index f4b143fbb60..dab0d2edba1 100644 --- a/hacking/ticket_stubs/question_not_bug.md +++ b/hacking/ticket_stubs/question_not_bug.md @@ -2,14 +2,13 @@ Hi! Thanks very much for your interest in Ansible. It means a lot to us. -This appears to be a user question, and we'd like to direct these kinds of things to either the mailing list or the IRC channel. +This appears to be a user question, and we'd like to direct these topics to the Ansible Forum. -* IRC: #ansible on [irc.libera.chat](https://libera.chat/) -* mailing list: https://groups.google.com/forum/#!forum/ansible-project +* [Ansible Forum](https://forum.ansible.com) -See this page for a complete and up to date list of communication channels and their purposes: +See this page for a complete and up to date list of communication channels and their purposes: -* https://docs.ansible.com/ansible/latest/community/communication.html +* <https://docs.ansible.com/ansible/latest/community/communication.html> Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time. If you don't think this particular issue is resolved, you should still stop by there first, we'd appreciate it. diff --git a/hacking/ticket_stubs/resolved.md b/hacking/ticket_stubs/resolved.md index 8eedbcfc156..f040d6d05a4 100644 --- a/hacking/ticket_stubs/resolved.md +++ b/hacking/ticket_stubs/resolved.md @@ -11,6 +11,6 @@ The mailing list and irc are great ways to ask questions, or post if you don't t See this page for a complete list of communication channels and their purposes: -* https://docs.ansible.com/ansible/latest/community/communication.html +* <https://docs.ansible.com/ansible/latest/community/communication.html> Thank you! diff --git a/hacking/ticket_stubs/wider_discussion.md b/hacking/ticket_stubs/wider_discussion.md index 74585816fc7..3ab9073f443 100644 --- a/hacking/ticket_stubs/wider_discussion.md +++ b/hacking/ticket_stubs/wider_discussion.md @@ -8,14 +8,13 @@ Reasons for this include: * INSERT REASONS! Because this project is very active, we're unlikely to see comments made on closed tickets and we lock them after some time. -Can you please post on ansible-development list so we can talk about this idea with the wider group? +Can you please post on the Ansible Forum so we can talk about this idea with the wider group? -* https://groups.google.com/forum/#!forum/ansible-devel +* [Ansible Core on the Ansible Forum](https://forum.ansible.com/tag/ansible-core) * Matrix: [#devel:ansible.im](https://matrix.to/#/#devel:ansible.im) -* #ansible-devel on [irc.libera.chat](https://libera.chat/) For other alternatives, check this page for a more complete list of communication channels and their purposes: -* https://docs.ansible.com/ansible/latest/community/communication.html +* <https://docs.ansible.com/ansible/latest/community/communication.html> Thank you once again for this and your interest in Ansible!
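The hacking/update-sanity-requirements.py hunk just below replaces the hard-coded setuptools special case: it parses the declared requirements with the `packaging` library and excludes only the bootstrap packages that were not explicitly requested. A standalone sketch of that computation follows; the file name and the blank-line filter are illustrative assumptions:

```python
import pathlib
import re

import packaging.requirements

source_path = pathlib.Path('sanity.ansible-doc.txt')  # hypothetical requirements input

# strip trailing comments, then parse each pinned requirement
source_requirements = [
    packaging.requirements.Requirement(re.sub(' #.*$', '', line))
    for line in source_path.read_text().splitlines()
    if line.strip()
]

install_packages = {requirement.name for requirement in source_requirements}
# bootstrap tools are dropped from the freeze unless the source file pins them
exclude_packages = {'distribute', 'pip', 'setuptools', 'wheel'} - install_packages

print(sorted(exclude_packages))
```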
diff --git a/hacking/update-sanity-requirements.py b/hacking/update-sanity-requirements.py index 5861590beaf..aaaa803cde8 100755 --- a/hacking/update-sanity-requirements.py +++ b/hacking/update-sanity-requirements.py @@ -15,6 +15,7 @@ import venv import packaging.version import packaging.specifiers +import packaging.requirements try: import argcomplete @@ -34,6 +35,11 @@ class SanityTest: source_path: pathlib.Path def freeze_requirements(self) -> None: + source_requirements = [packaging.requirements.Requirement(re.sub(' #.*$', '', line)) for line in self.source_path.read_text().splitlines()] + + install_packages = {requirement.name for requirement in source_requirements} + exclude_packages = {'distribute', 'pip', 'setuptools', 'wheel'} - install_packages + with tempfile.TemporaryDirectory() as venv_dir: venv.create(venv_dir, with_pip=True) @@ -46,16 +52,8 @@ class SanityTest: if pip_freeze.stdout: raise Exception(f'Initial virtual environment is not empty:\n{pip_freeze.stdout}') - subprocess.run(pip + ['install', 'wheel'], env=env, check=True) # make bdist_wheel available during pip install subprocess.run(pip + ['install', '-r', self.source_path], env=env, check=True) - keep_setuptools = any(line.startswith('setuptools ') for line in self.source_path.read_text().splitlines()) - - exclude_packages = ['pip', 'distribute', 'wheel'] - - if not keep_setuptools: - exclude_packages.append('setuptools') - freeze_options = ['--all'] for exclude_package in exclude_packages: diff --git a/lib/ansible/__init__.py b/lib/ansible/__init__.py index e4905a18532..2ded3913f51 100644 --- a/lib/ansible/__init__.py +++ b/lib/ansible/__init__.py @@ -15,9 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <https://www.gnu.org/licenses/>. -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # make vendored top-level modules accessible EARLY import ansible._vendor diff --git a/lib/ansible/__main__.py b/lib/ansible/__main__.py index 5a753ec05c7..afdd2849739 100644 --- a/lib/ansible/__main__.py +++ b/lib/ansible/__main__.py @@ -1,10 +1,8 @@ # Copyright: (c) 2021, Matt Martz <matt@sivel.net> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import annotations import argparse -import importlib -import os -import sys from importlib.metadata import distribution @@ -18,22 +16,10 @@ def main(): ep_map = {_short_name(ep.name): ep for ep in dist.entry_points if ep.group == 'console_scripts'} parser = argparse.ArgumentParser(prog='python -m ansible', add_help=False) - parser.add_argument('entry_point', choices=list(ep_map) + ['test']) + parser.add_argument('entry_point', choices=list(ep_map)) args, extra = parser.parse_known_args() - if args.entry_point == 'test': - ansible_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - source_root = os.path.join(ansible_root, 'test', 'lib') - - if os.path.exists(os.path.join(source_root, 'ansible_test', '_internal', '__init__.py')): - # running from source, use that version of ansible-test instead of any version that may already be installed - sys.path.insert(0, source_root) - - module = importlib.import_module('ansible_test._util.target.cli.ansible_test_cli_stub') - main = module.main - else: - main = ep_map[args.entry_point].load() - + main = ep_map[args.entry_point].load() main([args.entry_point] + extra) diff --git a/lib/ansible/_vendor/__init__.py b/lib/ansible/_vendor/__init__.py index
a31957b6724..405d8def78e 100644 --- a/lib/ansible/_vendor/__init__.py +++ b/lib/ansible/_vendor/__init__.py @@ -1,8 +1,7 @@ # (c) 2020 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import pkgutil diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index 91d6a969618..03a2b3e854a 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -3,9 +3,7 @@ # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import locale import os @@ -13,9 +11,9 @@ import sys # Used for determining if the system is running a new enough python version # and should only restrict on our documented minimum versions -if sys.version_info < (3, 10): +if sys.version_info < (3, 11): raise SystemExit( - 'ERROR: Ansible requires Python 3.10 or newer on the controller. ' + 'ERROR: Ansible requires Python 3.11 or newer on the controller. ' 'Current version: %s' % ''.join(sys.version.splitlines()) ) @@ -118,7 +116,7 @@ except ImportError: class CLI(ABC): - ''' code behind bin/ansible* programs ''' + """ code behind bin/ansible* programs """ PAGER = C.config.get_config_value('PAGER') @@ -169,19 +167,7 @@ class CLI(ABC): else: display.v(u"No config file found; using defaults") - # warn about deprecated config options - for deprecated in C.config.DEPRECATED: - name = deprecated[0] - why = deprecated[1]['why'] - if 'alternatives' in deprecated[1]: - alt = ', use %s instead' % deprecated[1]['alternatives'] - else: - alt = '' - ver = deprecated[1].get('version') - date = deprecated[1].get('date') - collection_name = deprecated[1].get('collection_name') - display.deprecated("%s option, %s%s" % (name, why, alt), - version=ver, date=date, collection_name=collection_name) + C.handle_config_noise(display) @staticmethod def split_vault_id(vault_id): @@ -196,8 +182,7 @@ class CLI(ABC): @staticmethod def build_vault_ids(vault_ids, vault_password_files=None, - ask_vault_pass=None, create_new_password=None, - auto_prompt=True): + ask_vault_pass=None, auto_prompt=True): vault_password_files = vault_password_files or [] vault_ids = vault_ids or [] @@ -220,7 +205,6 @@ class CLI(ABC): return vault_ids - # TODO: remove the now unused args @staticmethod def setup_vault_secrets(loader, vault_ids, vault_password_files=None, ask_vault_pass=None, create_new_password=False, @@ -254,7 +238,6 @@ class CLI(ABC): vault_ids = CLI.build_vault_ids(vault_ids, vault_password_files, ask_vault_pass, - create_new_password, auto_prompt=auto_prompt) last_exception = found_vault_secret = None @@ -334,7 +317,7 @@ class CLI(ABC): @staticmethod def ask_passwords(): - ''' prompt for connection and become passwords if needed ''' + """ prompt for connection and become passwords if needed """ op = context.CLIARGS sshpass = None @@ -364,7 +347,7 @@ class CLI(ABC): return (sshpass, becomepass) def validate_conflicts(self, op, runas_opts=False, fork_opts=False): - ''' check for conflicting options ''' + """ check for conflicting options """ if fork_opts: if op.forks < 1: @@ -430,6 +413,10 @@ class CLI(ABC): skip_tags.add(tag.strip()) options.skip_tags = list(skip_tags) + # Make sure path argument doesn't have a 
trailing slash + if hasattr(options, 'action') and options.action in ['install', 'download'] and hasattr(options, 'args'): + options.args = [path.rstrip("/") for path in options.args] + # process inventory options except for CLIs that require their own processing if hasattr(options, 'inventory') and not self.SKIP_INVENTORY_DEFAULTS: @@ -472,7 +459,7 @@ class CLI(ABC): @staticmethod def version_info(gitinfo=False): - ''' return full ansible version info ''' + """ return full ansible version info """ if gitinfo: # expensive call, use with care ansible_version_string = opt_help.version() @@ -498,7 +485,7 @@ class CLI(ABC): @staticmethod def pager(text): - ''' find reasonable way to display text ''' + """ find reasonable way to display text """ # this is a much simpler form of what is in pydoc.py if not sys.stdout.isatty(): display.display(text, screen_only=True) @@ -517,7 +504,7 @@ class CLI(ABC): @staticmethod def pager_pipe(text): - ''' pipe text through a pager ''' + """ pipe text through a pager """ if 'less' in CLI.PAGER: os.environ['LESS'] = CLI.LESS_OPTS try: @@ -567,8 +554,19 @@ class CLI(ABC): # the code, ensuring a consistent view of global variables variable_manager = VariableManager(loader=loader, inventory=inventory, version_info=CLI.version_info(gitinfo=False)) + # flush fact cache if requested + if options['flush_cache']: + CLI._flush_cache(inventory, variable_manager) + return loader, inventory, variable_manager + @staticmethod + def _flush_cache(inventory, variable_manager): + variable_manager.clear_facts('localhost') + for host in inventory.list_hosts(): + hostname = host.get_name() + variable_manager.clear_facts(hostname) + @staticmethod def get_host_list(inventory, subset, pattern='all'): @@ -606,7 +604,7 @@ class CLI(ABC): try: p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as e: - raise AnsibleError("Problem occured when trying to run the password script %s (%s)." + raise AnsibleError("Problem occurred when trying to run the password script %s (%s)." " If this is not a script, remove the executable bit from the file." % (pwd_file, e)) stdout, stderr = p.communicate() @@ -616,9 +614,8 @@ class CLI(ABC): else: try: - f = open(b_pwd_file, "rb") - secret = f.read().strip() - f.close() + with open(b_pwd_file, "rb") as f: + secret = f.read().strip() except (OSError, IOError) as e: raise AnsibleError("Could not read password file %s: %s" % (pwd_file, e)) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index a54dacb70c2..830e5823cfd 100755 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -4,8 +4,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # PYTHON_ARGCOMPLETE_OK -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first from ansible.cli import CLI @@ -25,14 +24,14 @@ display = Display() class AdHocCLI(CLI): - ''' is an extra-simple tool/framework/API for doing 'remote things'.
this command allows you to define and run a single task 'playbook' against a set of hosts - ''' + """ name = 'ansible' def init_parser(self): - ''' create an options parser for bin/ansible ''' + """ create an options parser for bin/ansible """ super(AdHocCLI, self).init_parser(usage='%prog [options]', desc="Define and run a single task 'playbook' against a set of hosts", epilog="Some actions do not make sense in Ad-Hoc (include, meta, etc)") @@ -61,7 +60,7 @@ class AdHocCLI(CLI): self.parser.add_argument('args', metavar='pattern', help='host pattern') def post_process_args(self, options): - '''Post process and validate options for bin/ansible ''' + """Post process and validate options for bin/ansible """ options = super(AdHocCLI, self).post_process_args(options) @@ -99,7 +98,7 @@ class AdHocCLI(CLI): tasks=[mytask]) def run(self): - ''' create and execute the single task playbook ''' + """ create and execute the single task playbook """ super(AdHocCLI, self).run() diff --git a/lib/ansible/cli/arguments/__init__.py b/lib/ansible/cli/arguments/__init__.py index 7398e33fa30..47b93f9822b 100644 --- a/lib/ansible/cli/arguments/__init__.py +++ b/lib/ansible/cli/arguments/__init__.py @@ -1,5 +1,4 @@ # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations diff --git a/lib/ansible/cli/arguments/option_helpers.py b/lib/ansible/cli/arguments/option_helpers.py index eef461f5fbf..18adc16455a 100644 --- a/lib/ansible/cli/arguments/option_helpers.py +++ b/lib/ansible/cli/arguments/option_helpers.py @@ -1,8 +1,7 @@ # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import copy import operator @@ -298,14 +297,14 @@ def add_inventory_options(parser): help='outputs a list of matching hosts; does not execute anything else') parser.add_argument('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset', help='further limit selected hosts to an additional pattern') + parser.add_argument('--flush-cache', dest='flush_cache', action='store_true', + help="clear the fact cache for every host in inventory") def add_meta_options(parser): """Add options for commands which can launch meta tasks from the command line""" parser.add_argument('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true', help="run handlers even if a task fails") - parser.add_argument('--flush-cache', dest='flush_cache', action='store_true', - help="clear the fact cache for every host in inventory") def add_module_options(parser): @@ -393,7 +392,7 @@ def add_vault_options(parser): parser.add_argument('--vault-id', default=[], dest='vault_ids', action='append', type=str, help='the vault identity to use') base_group = parser.add_mutually_exclusive_group() - base_group.add_argument('--ask-vault-password', '--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true', + base_group.add_argument('-J', '--ask-vault-password', '--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true', help='ask for vault password') base_group.add_argument('--vault-password-file', '--vault-pass-file', default=[], dest='vault_password_files', help="vault 
password file", type=unfrack_path(follow=False), action='append') diff --git a/lib/ansible/cli/config.py b/lib/ansible/cli/config.py index 5a6865e8b15..cd801212fca 100755 --- a/lib/ansible/cli/config.py +++ b/lib/ansible/cli/config.py @@ -3,16 +3,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # PYTHON_ARGCOMPLETE_OK -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first from ansible.cli import CLI import os -import yaml import shlex import subprocess +import sys +import yaml from collections.abc import Mapping @@ -22,7 +22,7 @@ import ansible.plugins.loader as plugin_loader from ansible import constants as C from ansible.cli.arguments import option_helpers as opt_help from ansible.config.manager import ConfigManager, Setting -from ansible.errors import AnsibleError, AnsibleOptionsError +from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleRequiredOptionError from ansible.module_utils.common.text.converters import to_native, to_text, to_bytes from ansible.module_utils.common.json import json_dump from ansible.module_utils.six import string_types @@ -35,6 +35,9 @@ from ansible.utils.path import unfrackpath display = Display() +_IGNORE_CHANGED = frozenset({'_terms', '_input'}) + + def yaml_dump(data, default_flow_style=False, default_style=None): return yaml.dump(data, Dumper=AnsibleDumper, default_flow_style=default_flow_style, default_style=default_style) @@ -44,12 +47,43 @@ def yaml_short(data): def get_constants(): - ''' helper method to ensure we can template based on existing constants ''' + """ helper method to ensure we can template based on existing constants """ if not hasattr(get_constants, 'cvars'): get_constants.cvars = {k: getattr(C, k) for k in dir(C) if not k.startswith('__')} return get_constants.cvars +def _ansible_env_vars(varname): + """ return true or false depending if variable name is possibly a 'configurable' ansible env variable """ + return all( + [ + varname.startswith("ANSIBLE_"), + not varname.startswith(("ANSIBLE_TEST_", "ANSIBLE_LINT_")), + varname not in ("ANSIBLE_CONFIG", "ANSIBLE_DEV_HOME"), + ] + ) + + +def _get_evar_list(settings): + data = [] + for setting in settings: + if 'env' in settings[setting] and settings[setting]['env']: + for varname in settings[setting]['env']: + data.append(varname.get('name')) + return data + + +def _get_ini_entries(settings): + data = {} + for setting in settings: + if 'ini' in settings[setting] and settings[setting]['ini']: + for kv in settings[setting]['ini']: + if not kv['section'] in data: + data[kv['section']] = set() + data[kv['section']].add(kv['key']) + return data + + class ConfigCLI(CLI): """ Config command line class """ @@ -100,9 +134,13 @@ class ConfigCLI(CLI): init_parser.add_argument('--disabled', dest='commented', action='store_true', default=False, help='Prefixes all entries with a comment character to disable them') - # search_parser = subparsers.add_parser('find', help='Search configuration') - # search_parser.set_defaults(func=self.execute_search) - # search_parser.add_argument('args', help='Search term', metavar='') + validate_parser = subparsers.add_parser('validate', + help='Validate the configuration file and environment variables. 
' 'By default it only checks the base settings without accounting for plugins (see -t).', parents=[common]) + validate_parser.set_defaults(func=self.execute_validate) + validate_parser.add_argument('--format', '-f', dest='format', action='store', choices=['ini', 'env'], default='ini', + help='Output format for validate') def post_process_args(self, options): options = super(ConfigCLI, self).post_process_args(options) @@ -114,6 +152,10 @@ class ConfigCLI(CLI): super(ConfigCLI, self).run() + # initialize each galaxy server's options from known listed servers + self._galaxy_servers = [s for s in C.GALAXY_SERVER_LIST or [] if s] # clean list, reused later here + C.config.load_galaxy_server_defs(self._galaxy_servers) + if context.CLIARGS['config_file']: self.config_file = unfrackpath(context.CLIARGS['config_file'], follow=False) b_config = to_bytes(self.config_file) @@ -146,9 +188,9 @@ class ConfigCLI(CLI): context.CLIARGS['func']() def execute_update(self): - ''' + """ Updates a single setting in the specified ansible.cfg - ''' + """ raise AnsibleError("Option not implemented yet") # pylint: disable=unreachable @@ -170,9 +212,9 @@ class ConfigCLI(CLI): ]) def execute_view(self): - ''' + """ Displays the current config file - ''' + """ try: with open(self.config_file, 'rb') as f: self.pager(to_text(f.read(), errors='surrogate_or_strict')) @@ -180,9 +222,9 @@ class ConfigCLI(CLI): raise AnsibleError("Failed to open config file: %s" % to_native(e)) def execute_edit(self): - ''' + """ Opens ansible.cfg in the default EDITOR - ''' + """ raise AnsibleError("Option not implemented yet") # pylint: disable=unreachable @@ -224,14 +266,20 @@ class ConfigCLI(CLI): return entries def _list_entries_from_args(self): - ''' + """ build a dict with the list requested configs - ''' + """ + config_entries = {} if context.CLIARGS['type'] in ('base', 'all'): # this dumps main/common configs config_entries = self.config.get_configuration_definitions(ignore_private=True) + # for base and all, we include galaxy servers + config_entries['GALAXY_SERVERS'] = {} + for server in self._galaxy_servers: + config_entries['GALAXY_SERVERS'][server] = self.config.get_configuration_definitions('galaxy_server', server) + if context.CLIARGS['type'] != 'base': config_entries['PLUGINS'] = {} @@ -240,14 +288,15 @@ class ConfigCLI(CLI): for ptype in C.CONFIGURABLE_PLUGINS: config_entries['PLUGINS'][ptype.upper()] = self._list_plugin_settings(ptype) elif context.CLIARGS['type'] != 'base': + # only for requested types config_entries['PLUGINS'][context.CLIARGS['type']] = self._list_plugin_settings(context.CLIARGS['type'], context.CLIARGS['args']) return config_entries def execute_list(self): - ''' + """ list and output available configs - ''' + """ config_entries = self._list_entries_from_args() if context.CLIARGS['format'] == 'yaml': @@ -270,7 +319,7 @@ class ConfigCLI(CLI): if not settings[setting].get('description'): continue - default = settings[setting].get('default', '') + default = self.config.template_default(settings[setting].get('default', ''), get_constants()) if subkey == 'env': stype = settings[setting].get('type', '') if stype == 'boolean': @@ -314,7 +363,7 @@ class ConfigCLI(CLI): return data - def _get_settings_ini(self, settings): + def _get_settings_ini(self, settings, seen): sections = {} for o in sorted(settings.keys()): @@ -327,7 +376,7 @@ class ConfigCLI(CLI): if not opt.get('description'): # its a plugin - new_sections = self._get_settings_ini(opt) + new_sections = self._get_settings_ini(opt, seen) for s in
new_sections: if s in sections: sections[s].extend(new_sections[s]) @@ -343,36 +392,45 @@ class ConfigCLI(CLI): if 'ini' in opt and opt['ini']: entry = opt['ini'][-1] + if entry['section'] not in seen: + seen[entry['section']] = [] if entry['section'] not in sections: sections[entry['section']] = [] - default = opt.get('default', '') - if opt.get('type', '') == 'list' and not isinstance(default, string_types): - # python lists are not valid ini ones - default = ', '.join(default) - elif default is None: - default = '' + # avoid dupes + if entry['key'] not in seen[entry['section']]: + seen[entry['section']].append(entry['key']) - if context.CLIARGS['commented']: - entry['key'] = ';%s' % entry['key'] + default = self.config.template_default(opt.get('default', ''), get_constants()) + if opt.get('type', '') == 'list' and not isinstance(default, string_types): + # python lists are not valid ini ones + default = ', '.join(default) + elif default is None: + default = '' - key = desc + '\n%s=%s' % (entry['key'], default) - sections[entry['section']].append(key) + if context.CLIARGS.get('commented', False): + entry['key'] = ';%s' % entry['key'] + + key = desc + '\n%s=%s' % (entry['key'], default) + + sections[entry['section']].append(key) return sections def execute_init(self): + """Create initial configuration""" + seen = {} data = [] config_entries = self._list_entries_from_args() plugin_types = config_entries.pop('PLUGINS', None) if context.CLIARGS['format'] == 'ini': - sections = self._get_settings_ini(config_entries) + sections = self._get_settings_ini(config_entries, seen) if plugin_types: for ptype in plugin_types: - plugin_sections = self._get_settings_ini(plugin_types[ptype]) + plugin_sections = self._get_settings_ini(plugin_types[ptype], seen) for s in plugin_sections: if s in sections: sections[s].extend(plugin_sections[s]) @@ -400,19 +458,21 @@ class ConfigCLI(CLI): entries = [] for setting in sorted(config): - changed = (config[setting].origin not in ('default', 'REQUIRED')) + changed = (config[setting].origin not in ('default', 'REQUIRED') and setting not in _IGNORE_CHANGED) if context.CLIARGS['format'] == 'display': if isinstance(config[setting], Setting): # proceed normally - if config[setting].origin == 'default': + value = config[setting].value + if config[setting].origin == 'default' or setting in _IGNORE_CHANGED: color = 'green' + value = self.config.template_default(value, get_constants()) elif config[setting].origin == 'REQUIRED': # should include '_terms', '_input', etc color = 'red' else: color = 'yellow' - msg = "%s(%s) = %s" % (setting, config[setting].origin, config[setting].value) + msg = "%s(%s) = %s" % (setting, config[setting].origin, value) else: color = 'green' msg = "%s(%s) = %s" % (setting, 'default', config[setting].get('default')) @@ -421,6 +481,8 @@ class ConfigCLI(CLI): else: entry = {} for key in config[setting]._fields: + if key == 'type': + continue entry[key] = getattr(config[setting], key) if not context.CLIARGS['only_changed'] or changed: @@ -429,7 +491,10 @@ class ConfigCLI(CLI): return entries def _get_global_configs(self): - config = self.config.get_configuration_definitions(ignore_private=True).copy() + + # Add base + config = self.config.get_configuration_definitions(ignore_private=True) + # convert to settings for setting in config.keys(): v, o = C.config.get_config_value_and_origin(setting, cfile=self.config_file, variables=get_constants()) config[setting] = Setting(setting, v, o, None) @@ -441,7 +506,7 @@ class ConfigCLI(CLI): # prep loading 
loader = getattr(plugin_loader, '%s_loader' % ptype) - # acumulators + # accumulators output = [] config_entries = {} @@ -458,7 +523,7 @@ class ConfigCLI(CLI): plugin_cs = loader.all(class_only=True) for plugin in plugin_cs: - # in case of deprecastion they diverge + # in case of deprecation they diverge finalname = name = plugin._load_name if name.startswith('_'): if os.path.islink(plugin._original_path): @@ -481,12 +546,9 @@ class ConfigCLI(CLI): for setting in config_entries[finalname].keys(): try: v, o = C.config.get_config_value_and_origin(setting, cfile=self.config_file, plugin_type=ptype, plugin_name=name, variables=get_constants()) - except AnsibleError as e: - if to_text(e).startswith('No setting was provided for required configuration'): - v = None - o = 'REQUIRED' - else: - raise e + except AnsibleRequiredOptionError: + v = None + o = 'REQUIRED' if v is None and o is None: # not all cases will be error @@ -506,17 +568,60 @@ class ConfigCLI(CLI): return output + def _get_galaxy_server_configs(self): + + output = [] + # add galaxy servers + for server in self._galaxy_servers: + server_config = {} + s_config = self.config.get_configuration_definitions('galaxy_server', server) + for setting in s_config.keys(): + try: + v, o = C.config.get_config_value_and_origin(setting, plugin_type='galaxy_server', plugin_name=server, cfile=self.config_file) + except AnsibleError as e: + if s_config[setting].get('required', False): + v = None + o = 'REQUIRED' + else: + raise e + if v is None and o is None: + # not all cases will be error + o = 'REQUIRED' + server_config[setting] = Setting(setting, v, o, None) + if context.CLIARGS['format'] == 'display': + if not context.CLIARGS['only_changed'] or server_config: + equals = '=' * len(server) + output.append(f'\n{server}\n{equals}') + output.extend(self._render_settings(server_config)) + else: + output.append({server: server_config}) + + return output + def execute_dump(self): - ''' + """ Shows the current settings, merges ansible.cfg if specified - ''' - if context.CLIARGS['type'] == 'base': - # deal with base - output = self._get_global_configs() - elif context.CLIARGS['type'] == 'all': + """ + output = [] + if context.CLIARGS['type'] in ('base', 'all'): # deal with base output = self._get_global_configs() - # deal with plugins + + # add galaxy servers + server_config_list = self._get_galaxy_server_configs() + if context.CLIARGS['format'] == 'display': + output.append('\nGALAXY_SERVERS:\n') + output.extend(server_config_list) + else: + configs = {} + for server_config in server_config_list: + server = list(server_config.keys())[0] + server_reduced_config = server_config.pop(server) + configs[server] = server_reduced_config + output.append({'GALAXY_SERVERS': configs}) + + if context.CLIARGS['type'] == 'all': + # add all plugins for ptype in C.CONFIGURABLE_PLUGINS: plugin_list = self._get_plugin_configs(ptype, context.CLIARGS['args']) if context.CLIARGS['format'] == 'display': @@ -529,8 +634,9 @@ class ConfigCLI(CLI): else: pname = '%s_PLUGINS' % ptype.upper() output.append({pname: plugin_list}) - else: - # deal with plugins + + elif context.CLIARGS['type'] != 'base': + # deal with specific plugin output = self._get_plugin_configs(context.CLIARGS['type'], context.CLIARGS['args']) if context.CLIARGS['format'] == 'display': @@ -542,6 +648,73 @@ class ConfigCLI(CLI): self.pager(to_text(text, errors='surrogate_or_strict')) + def execute_validate(self): + + found = False + config_entries = self._list_entries_from_args() + plugin_types = 
config_entries.pop('PLUGINS', None) + galaxy_servers = config_entries.pop('GALAXY_SERVERS', None) + + if context.CLIARGS['format'] == 'ini': + if C.CONFIG_FILE is not None: + # validate ini config since it is found + + sections = _get_ini_entries(config_entries) + # Also from plugins + if plugin_types: + for ptype in plugin_types: + for plugin in plugin_types[ptype].keys(): + plugin_sections = _get_ini_entries(plugin_types[ptype][plugin]) + for s in plugin_sections: + if s in sections: + sections[s].update(plugin_sections[s]) + else: + sections[s] = plugin_sections[s] + if galaxy_servers: + for server in galaxy_servers: + server_sections = _get_ini_entries(galaxy_servers[server]) + for s in server_sections: + if s in sections: + sections[s].update(server_sections[s]) + else: + sections[s] = server_sections[s] + if sections: + p = C.config._parsers[C.CONFIG_FILE] + for s in p.sections(): + # check for valid sections + if s not in sections: + display.error(f"Found unknown section '{s}' in '{C.CONFIG_FILE}'.") + found = True + continue + + # check keys in valid sections + for k in p.options(s): + if k not in sections[s]: + display.error(f"Found unknown key '{k}' in section '{s}' in '{C.CONFIG_FILE}'.") + found = True + + elif context.CLIARGS['format'] == 'env': + # validate any 'ANSIBLE_' env vars found + evars = [varname for varname in os.environ.keys() if _ansible_env_vars(varname)] + if evars: + data = _get_evar_list(config_entries) + if plugin_types: + for ptype in plugin_types: + for plugin in plugin_types[ptype].keys(): + data.extend(_get_evar_list(plugin_types[ptype][plugin])) + + for evar in evars: + if evar not in data: + display.error(f"Found unknown environment variable '{evar}'.") + found = True + + # we found discrepancies! + if found: + sys.exit(1) + + # allsgood + display.display("All configurations seem valid!") + def main(args=None): ConfigCLI.cli_executor(args) diff --git a/lib/ansible/cli/console.py b/lib/ansible/cli/console.py index 2325bf05d6d..6f355938aa5 100755 --- a/lib/ansible/cli/console.py +++ b/lib/ansible/cli/console.py @@ -5,8 +5,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # PYTHON_ARGCOMPLETE_OK -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first from ansible.cli import CLI @@ -36,7 +35,7 @@ display = Display() class ConsoleCLI(CLI, cmd.Cmd): - ''' + """ A REPL that allows for running ad-hoc tasks against a chosen inventory from a nice shell with built-in tab completion (based on dominis' ``ansible-shell``).
@@ -63,7 +62,7 @@ class ConsoleCLI(CLI, cmd.Cmd): - ``help [command/module]``: display documentation for the command or module - ``exit``: exit ``ansible-console`` - ''' + """ name = 'ansible-console' modules = [] # type: list[str] | None @@ -546,7 +545,7 @@ class ConsoleCLI(CLI, cmd.Cmd): if path: module_loader.add_directory(path) - # dynamically add 'cannonical' modules as commands, aliases coudld be used and dynamically loaded + # dynamically add 'canonical' modules as commands, aliases could be used and dynamically loaded self.modules = self.list_modules() for module in self.modules: setattr(self, 'do_' + module, lambda arg, module=module: self.default(module + ' ' + arg)) @@ -580,7 +579,7 @@ class ConsoleCLI(CLI, cmd.Cmd): self.cmdloop() def __getattr__(self, name): - ''' handle not found to populate dynamically a module function if module matching name exists ''' + """ handle not found to populate dynamically a module function if module matching name exists """ attr = None if name.startswith('do_'): diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py index 06c931272b7..52ec8a6c7b1 100755 --- a/lib/ansible/cli/doc.py +++ b/lib/ansible/cli/doc.py @@ -4,12 +4,12 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # PYTHON_ARGCOMPLETE_OK -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first from ansible.cli import CLI +import importlib import pkgutil import os import os.path @@ -30,7 +30,6 @@ from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.common.collections import is_sequence from ansible.module_utils.common.json import json_dump from ansible.module_utils.common.yaml import yaml_dump -from ansible.module_utils.compat import importlib from ansible.module_utils.six import string_types from ansible.parsing.plugin_docs import read_docstub from ansible.parsing.utils.yaml import from_yaml @@ -39,6 +38,7 @@ from ansible.plugins.list import list_plugins from ansible.plugins.loader import action_loader, fragment_loader from ansible.utils.collection_loader import AnsibleCollectionConfig, AnsibleCollectionRef from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path +from ansible.utils.color import stringc from ansible.utils.display import Display from ansible.utils.plugin_docs import get_plugin_docs, get_docstring, get_versioned_doclink @@ -46,14 +46,39 @@ display = Display() TARGET_OPTIONS = C.DOCUMENTABLE_PLUGINS + ('role', 'keyword',) -PB_OBJECTS = ['Play', 'Role', 'Block', 'Task'] +PB_OBJECTS = ['Play', 'Role', 'Block', 'Task', 'Handler'] PB_LOADED = {} SNIPPETS = ['inventory', 'lookup', 'module'] - -def add_collection_plugins(plugin_list, plugin_type, coll_filter=None): - display.deprecated("add_collection_plugins method, use ansible.plugins.list functions instead.", version='2.17') - plugin_list.update(list_plugins(plugin_type, coll_filter)) +# hardcoded from ascii values +STYLE = { + 'BLINK': '\033[5m', + 'BOLD': '\033[1m', + 'HIDE': '\033[8m', + # 'NORMAL': '\x01b[0m', # newer? + 'NORMAL': '\033[0m', + 'RESET': "\033[0;0m", + # 'REVERSE':"\033[;7m", # newer? 
+ 'REVERSE': "\033[7m", + 'UNDERLINE': '\033[4m', +} + +# previously existing string identifiers +NOCOLOR = { + 'BOLD': r'*%s*', + 'UNDERLINE': r'`%s`', + 'MODULE': r'[%s]', + 'PLUGIN': r'[%s]', +} + +ref_style = { + 'MODULE': C.COLOR_DOC_MODULE, + 'REF': C.COLOR_DOC_REFERENCE, + 'LINK': C.COLOR_DOC_LINK, + 'DEP': C.COLOR_DOC_DEPRECATED, + 'CONSTANT': C.COLOR_DOC_CONSTANT, + 'PLUGIN': C.COLOR_DOC_PLUGIN, +} def jdump(text): @@ -72,37 +97,27 @@ class RoleMixin(object): # Potential locations of the role arg spec file in the meta subdir, with main.yml # having the lowest priority. - ROLE_ARGSPEC_FILES = ['argument_specs' + e for e in C.YAML_FILENAME_EXTENSIONS] + ["main" + e for e in C.YAML_FILENAME_EXTENSIONS] + ROLE_METADATA_FILES = ["main" + e for e in C.YAML_FILENAME_EXTENSIONS] + ROLE_ARGSPEC_FILES = ['argument_specs' + e for e in C.YAML_FILENAME_EXTENSIONS] + ROLE_METADATA_FILES - def _load_argspec(self, role_name, collection_path=None, role_path=None): - """Load the role argument spec data from the source file. + def _load_role_data(self, root, files, role_name, collection): + """ Load and process the YAML for the first found of a set of role files + :param str root: The root path to get the files from + :param str files: List of candidate file names in order of precedence :param str role_name: The name of the role for which we want the argspec data. - :param str collection_path: Path to the collection containing the role. This - will be None for standard roles. - :param str role_path: Path to the standard role. This will be None for - collection roles. - - We support two files containing the role arg spec data: either meta/main.yml - or meta/argument_spec.yml. The argument_spec.yml file will take precedence - over the meta/main.yml file, if it exists. Data is NOT combined between the - two files. + :param str collection: collection name or None in case of stand alone roles - :returns: A dict of all data underneath the ``argument_specs`` top-level YAML - key in the argspec data file. Empty dict is returned if there is no data. + :returns: A dict that contains the data requested, empty if no data found """ - if collection_path: - meta_path = os.path.join(collection_path, 'roles', role_name, 'meta') - elif role_path: - meta_path = os.path.join(role_path, 'meta') + if collection: + meta_path = os.path.join(root, 'roles', role_name, 'meta') else: - raise AnsibleError("A path is required to load argument specs for role '%s'" % role_name) - - path = None + meta_path = os.path.join(root, 'meta') # Check all potential spec files - for specfile in self.ROLE_ARGSPEC_FILES: + for specfile in files: full_path = os.path.join(meta_path, specfile) if os.path.exists(full_path): path = full_path @@ -116,9 +131,50 @@ class RoleMixin(object): data = from_yaml(f.read(), file_name=path) if data is None: data = {} - return data.get('argument_specs', {}) except (IOError, OSError) as e: - raise AnsibleParserError("An error occurred while trying to read the file '%s': %s" % (path, to_native(e)), orig_exc=e) + raise AnsibleParserError("Could not read the role '%s' (at %s)" % (role_name, path), orig_exc=e) + + return data + + def _load_metadata(self, role_name, role_path, collection): + """Load the roles metadata from the source file. + + :param str role_name: The name of the role for which we want the argspec data. + :param str role_path: Path to the role/collection root. 
+        :param str collection: collection name or None in case of standalone roles
+
+        :returns: A dict of all role metadata, except ``argument_specs``, or an empty dict
+        """
+
+        data = self._load_role_data(role_path, self.ROLE_METADATA_FILES, role_name, collection)
+        del data['argument_specs']
+
+        return data
+
+    def _load_argspec(self, role_name, role_path, collection):
+        """Load the role argument spec data from the source file.
+
+        :param str role_name: The name of the role for which we want the argspec data.
+        :param str role_path: Path to the role/collection root.
+        :param str collection: collection name or None in case of standalone roles
+
+        We support two files containing the role arg spec data: either meta/main.yml
+        or meta/argument_spec.yml. The argument_spec.yml file will take precedence
+        over the meta/main.yml file, if it exists. Data is NOT combined between the
+        two files.
+
+        :returns: A dict of all data underneath the ``argument_specs`` top-level YAML
+        key in the argspec data file. Empty dict is returned if there is no data.
+        """
+
+        try:
+            data = self._load_role_data(role_path, self.ROLE_ARGSPEC_FILES, role_name, collection)
+            data = data.get('argument_specs', {})
+
+        except Exception as e:
+            # we keep error info, but let caller deal with it
+            data = {'error': 'Failed to process role (%s): %s' % (role_name, to_native(e)), 'exception': e}
+        return data

     def _find_all_normal_roles(self, role_paths, name_filters=None):
         """Find all non-collection roles that have an argument spec file.
@@ -147,10 +203,13 @@ class RoleMixin(object):
                 full_path = os.path.join(role_path, 'meta', specfile)
                 if os.path.exists(full_path):
                     if name_filters is None or entry in name_filters:
+                        # select first-found role
                         if entry not in found_names:
-                            found.add((entry, role_path))
-                            found_names.add(entry)
-                        # select first-found
+                            found_names.add(entry)
+                            # None here stands for 'collection', which standalone roles don't have;
+                            # it makes downstream code simpler by giving them the same structure as collection roles
+                            found.add((entry, None, role_path))
+                        # only read first existing spec
                         break
         return found
@@ -163,8 +222,8 @@ class RoleMixin(object):
           might be fully qualified with the collection name (e.g., community.general.roleA) or not (e.g., roleA).

-        :param collection_filter: A string containing the FQCN of a collection which will be
-            used to limit results. This filter will take precedence over the name_filters.
+        :param collection_filter: A list of strings containing the FQCN of a collection which will
+            be used to limit results. This filter will take precedence over the name_filters.

         :returns: A set of tuples consisting of: role name, collection name, collection path
@@ -196,7 +255,7 @@ class RoleMixin(object):
                     break
         return found

-    def _build_summary(self, role, collection, argspec):
+    def _build_summary(self, role, collection, meta, argspec):
         """Build a summary dict for a role.

         Returns a simplified role arg spec containing only the role entry points and their
         short descriptions, and the role collection name (if applicable).

         :param role: The simple role name.
         :param collection: The collection containing the role (None or empty string if N/A).
+        :param meta: dictionary with galaxy information (None or empty string if N/A).
         :param argspec: The complete role argspec data dict.

         :returns: A tuple with the FQCN role name and a summary dict.
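A note on the loading order implemented above: `ROLE_ARGSPEC_FILES` tries the `argument_specs.<ext>` variants for every extension in `C.YAML_FILENAME_EXTENSIONS` before falling back to the `main.<ext>` metadata files, and `_load_role_data` stops at the first file that exists; nothing is merged across files. A standalone sketch of that first-found lookup, assuming a trimmed candidate list and `yaml.safe_load` standing in for ansible's `from_yaml`:

```python
import os
import yaml  # PyYAML stand-in for ansible.parsing.utils.yaml.from_yaml

# mirrors ROLE_ARGSPEC_FILES + ROLE_METADATA_FILES, with a reduced extension list
CANDIDATES = ['argument_specs.yml', 'argument_specs.yaml', 'main.yml', 'main.yaml']

def load_first_found_argspec(role_root):
    """Return the argument_specs mapping from the first candidate file that exists."""
    meta_path = os.path.join(role_root, 'meta')
    for name in CANDIDATES:
        full_path = os.path.join(meta_path, name)
        if os.path.exists(full_path):
            with open(full_path) as f:
                data = yaml.safe_load(f.read()) or {}
            # data is NOT merged across files: the first hit wins
            return data.get('argument_specs', {})
    return {}
```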
""" + + if meta and meta.get('galaxy_info'): + summary = meta['galaxy_info'] + else: + summary = {'description': 'UNDOCUMENTED'} + summary['entry_points'] = {} + if collection: fqcn = '.'.join([collection, role]) + summary['collection'] = collection else: fqcn = role - summary = {} - summary['collection'] = collection - summary['entry_points'] = {} + for ep in argspec.keys(): entry_spec = argspec[ep] or {} summary['entry_points'][ep] = entry_spec.get('short_description', '') @@ -228,15 +294,18 @@ class RoleMixin(object): doc = {} doc['path'] = path doc['collection'] = collection - doc['entry_points'] = {} - for ep in argspec.keys(): - if entry_point is None or ep == entry_point: - entry_spec = argspec[ep] or {} - doc['entry_points'][ep] = entry_spec + if 'error' in argspec: + doc.update(argspec) + else: + doc['entry_points'] = {} + for ep in argspec.keys(): + if entry_point is None or ep == entry_point: + entry_spec = argspec[ep] or {} + doc['entry_points'][ep] = entry_spec - # If we didn't add any entry points (b/c of filtering), ignore this entry. - if len(doc['entry_points'].keys()) == 0: - doc = None + # If we didn't add any entry points (b/c of filtering), ignore this entry. + if len(doc['entry_points'].keys()) == 0: + doc = None return (fqcn, doc) @@ -275,34 +344,29 @@ class RoleMixin(object): if not collection_filter: roles = self._find_all_normal_roles(roles_path) else: - roles = [] + roles = set() collroles = self._find_all_collection_roles(collection_filter=collection_filter) result = {} - for role, role_path in roles: - try: - argspec = self._load_argspec(role, role_path=role_path) - fqcn, summary = self._build_summary(role, '', argspec) - result[fqcn] = summary - except Exception as e: - if fail_on_errors: - raise - result[role] = { - 'error': 'Error while loading role argument spec: %s' % to_native(e), - } + for role, collection, role_path in (roles | collroles): - for role, collection, collection_path in collroles: try: - argspec = self._load_argspec(role, collection_path=collection_path) - fqcn, summary = self._build_summary(role, collection, argspec) - result[fqcn] = summary + meta = self._load_metadata(role, role_path, collection) except Exception as e: + display.vvv('No metadata for role (%s) due to: %s' % (role, to_native(e)), True) + meta = {} + + argspec = self._load_argspec(role, role_path, collection) + if 'error' in argspec: if fail_on_errors: - raise - result['%s.%s' % (collection, role)] = { - 'error': 'Error while loading role argument spec: %s' % to_native(e), - } + raise argspec['exception'] + else: + display.warning('Skipping role (%s) due to: %s' % (role, argspec['error']), True) + continue + + fqcn, summary = self._build_summary(role, collection, meta, argspec) + result[fqcn] = summary return result @@ -321,41 +385,64 @@ class RoleMixin(object): result = {} - for role, role_path in roles: - try: - argspec = self._load_argspec(role, role_path=role_path) - fqcn, doc = self._build_doc(role, role_path, '', argspec, entry_point) - if doc: - result[fqcn] = doc - except Exception as e: # pylint:disable=broad-except - result[role] = { - 'error': 'Error while processing role: %s' % to_native(e), - } - - for role, collection, collection_path in collroles: - try: - argspec = self._load_argspec(role, collection_path=collection_path) - fqcn, doc = self._build_doc(role, collection_path, collection, argspec, entry_point) - if doc: - result[fqcn] = doc - except Exception as e: # pylint:disable=broad-except - result['%s.%s' % (collection, role)] = { - 'error': 'Error while 
processing role: %s' % to_native(e), - } + for role, collection, role_path in (roles | collroles): + argspec = self._load_argspec(role, role_path, collection) + if 'error' in argspec: + if fail_on_errors: + raise argspec['exception'] + else: + display.warning('Skipping role (%s) due to: %s' % (role, argspec['error']), True) + continue + fqcn, doc = self._build_doc(role, role_path, collection, argspec, entry_point) + if doc: + result[fqcn] = doc return result +def _doclink(url): + # assume that if it is relative, it is for docsite, ignore rest + if not url.startswith(("http", "..")): + url = get_versioned_doclink(url) + return url + + +def _format(string, *args): + + """ add ascii formatting or delimiters """ + + for style in args: + + if style not in ref_style and style.upper() not in STYLE and style not in C.COLOR_CODES: + raise KeyError("Invalid format value supplied: %s" % style) + + if C.ANSIBLE_NOCOLOR: + # ignore most styles, but some already had 'identifier strings' + if style in NOCOLOR: + string = NOCOLOR[style] % string + elif style in C.COLOR_CODES: + string = stringc(string, style) + elif style in ref_style: + # assumes refs are also always colors + string = stringc(string, ref_style[style]) + else: + # start specific style and 'end' with normal + string = '%s%s%s' % (STYLE[style.upper()], string, STYLE['NORMAL']) + + return string + + class DocCLI(CLI, RoleMixin): - ''' displays information on modules installed in Ansible libraries. + """ displays information on modules installed in Ansible libraries. It displays a terse listing of plugins and their short descriptions, provides a printout of their DOCUMENTATION strings, - and it can create a short "snippet" which can be pasted into a playbook. ''' + and it can create a short "snippet" which can be pasted into a playbook. """ name = 'ansible-doc' # default ignore list for detailed views - IGNORE = ('module', 'docuri', 'version_added', 'version_added_collection', 'short_description', 'now_date', 'plainexamples', 'returndocs', 'collection') + IGNORE = ('module', 'docuri', 'version_added', 'version_added_collection', 'short_description', + 'now_date', 'plainexamples', 'returndocs', 'collection', 'plugin_name') # Warning: If you add more elements here, you also need to add it to the docsite build (in the # ansible-community/antsibull repo) @@ -424,23 +511,20 @@ class DocCLI(CLI, RoleMixin): return f"`{text}' (of {plugin})" return f"`{text}'" - @classmethod - def find_plugins(cls, path, internal, plugin_type, coll_filter=None): - display.deprecated("find_plugins method as it is incomplete/incorrect. 
use ansible.plugins.list functions instead.", version='2.17') - return list_plugins(plugin_type, coll_filter, [path]).keys() - @classmethod def tty_ify(cls, text): # general formatting - t = cls._ITALIC.sub(r"`\1'", text) # I(word) => `word' - t = cls._BOLD.sub(r"*\1*", t) # B(word) => *word* - t = cls._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word] + t = cls._ITALIC.sub(_format(r"\1", 'UNDERLINE'), text) # no ascii code for this + t = cls._BOLD.sub(_format(r"\1", 'BOLD'), t) + t = cls._MODULE.sub(_format(r"\1", 'MODULE'), t) # M(word) => [word] t = cls._URL.sub(r"\1", t) # U(word) => word t = cls._LINK.sub(r"\1 <\2>", t) # L(word, url) => word - t = cls._PLUGIN.sub("[" + r"\1" + "]", t) # P(word#type) => [word] - t = cls._REF.sub(r"\1", t) # R(word, sphinx-ref) => word - t = cls._CONST.sub(r"`\1'", t) # C(word) => `word' + + t = cls._PLUGIN.sub(_format("[" + r"\1" + "]", 'PLUGIN'), t) # P(word#type) => [word] + + t = cls._REF.sub(_format(r"\1", 'REF'), t) # R(word, sphinx-ref) => word + t = cls._CONST.sub(_format(r"`\1'", 'CONSTANT'), t) t = cls._SEM_OPTION_NAME.sub(cls._tty_ify_sem_complex, t) # O(expr) t = cls._SEM_OPTION_VALUE.sub(cls._tty_ify_sem_simle, t) # V(expr) t = cls._SEM_ENV_VARIABLE.sub(cls._tty_ify_sem_simle, t) # E(expr) @@ -449,10 +533,16 @@ class DocCLI(CLI, RoleMixin): # remove rst t = cls._RST_SEEALSO.sub(r"See also:", t) # seealso to See also: - t = cls._RST_NOTE.sub(r"Note:", t) # .. note:: to note: + t = cls._RST_NOTE.sub(_format(r"Note:", 'bold'), t) # .. note:: to note: t = cls._RST_ROLES.sub(r"`", t) # remove :ref: and other tags, keep tilde to match ending one t = cls._RST_DIRECTIVES.sub(r"", t) # remove .. stuff:: in general + # handle docsite refs + # U(word) => word + t = re.sub(cls._URL, lambda m: _format(r"%s" % _doclink(m.group(1)), 'LINK'), t) + # L(word, url) => word + t = re.sub(cls._LINK, lambda m: r"%s <%s>" % (m.group(1), _format(_doclink(m.group(2)), 'LINK')), t) + return t def init_parser(self): @@ -485,8 +575,9 @@ class DocCLI(CLI, RoleMixin): action=opt_help.PrependListAction, help='The path to the directory containing your roles.') - # modifiers + # exclusive modifiers exclusive = self.parser.add_mutually_exclusive_group() + # TODO: warn if not used with -t roles exclusive.add_argument("-e", "--entry-point", dest="entry_point", help="Select the entry point for role(s).") @@ -503,6 +594,7 @@ class DocCLI(CLI, RoleMixin): exclusive.add_argument("--metadata-dump", action="store_true", default=False, dest='dump', help='**For internal use only** Dump json metadata for all entries, ignores other options.') + # generic again self.parser.add_argument("--no-fail-on-errors", action="store_true", default=False, dest='no_fail_on_errors', help='**For internal use only** Only used for --metadata-dump. ' 'Do not fail on errors. 
Report the error message in the JSON instead.')
@@ -567,7 +659,7 @@ class DocCLI(CLI, RoleMixin):
         Output is: fqcn role name, entry point, short description
         """
         roles = list(list_json.keys())
-        entry_point_names = set()
+        entry_point_names = set()  # to find max len
         for role in roles:
             for entry_point in list_json[role]['entry_points'].keys():
                 entry_point_names.add(entry_point)
@@ -575,8 +667,6 @@
         max_role_len = 0
         max_ep_len = 0
-        if roles:
-            max_role_len = max(len(x) for x in roles)
         if entry_point_names:
             max_ep_len = max(len(x) for x in entry_point_names)
@@ -584,12 +674,15 @@
         text = []
         for role in sorted(roles):
-            for entry_point, desc in list_json[role]['entry_points'].items():
-                if len(desc) > linelimit:
-                    desc = desc[:linelimit] + '...'
-                text.append("%-*s %-*s %s" % (max_role_len, role,
-                                              max_ep_len, entry_point,
-                                              desc))
+            if list_json[role]['entry_points']:
+                text.append('%s:' % role)
+                text.append('  specs:')
+                for entry_point, desc in list_json[role]['entry_points'].items():
+                    if len(desc) > linelimit:
+                        desc = desc[:linelimit] + '...'
+                    text.append("    %-*s: %s" % (max_ep_len, entry_point, desc))
+            else:
+                text.append('%s' % role)

         # display results
         DocCLI.pager("\n".join(text))
@@ -598,7 +691,14 @@
         roles = list(role_json.keys())
         text = []
         for role in roles:
-            text += self.get_role_man_text(role, role_json[role])
+            try:
+                if 'error' in role_json[role]:
+                    display.warning("Skipping role '%s' due to: %s" % (role, role_json[role]['error']), True)
+                    continue
+                text += self.get_role_man_text(role, role_json[role])
+            except AnsibleParserError as e:
+                # TODO: warn and skip role?
+                raise AnsibleParserError("Role '%s'" % (role), orig_exc=e)

         # display results
         DocCLI.pager("\n".join(text))
@@ -678,12 +778,11 @@
     def _get_collection_filter(self):

         coll_filter = None
-        if len(context.CLIARGS['args']) == 1:
-            coll_filter = context.CLIARGS['args'][0]
-            if not AnsibleCollectionRef.is_valid_collection_name(coll_filter):
-                raise AnsibleError('Invalid collection name (must be of the form namespace.collection): {0}'.format(coll_filter))
-        elif len(context.CLIARGS['args']) > 1:
-            raise AnsibleOptionsError("Only a single collection filter is supported.")
+        if len(context.CLIARGS['args']) >= 1:
+            coll_filter = context.CLIARGS['args']
+            for coll_name in coll_filter:
+                if not AnsibleCollectionRef.is_valid_collection_name(coll_name):
+                    raise AnsibleError('Invalid collection name (must be of the form namespace.collection): {0}'.format(coll_name))

         return coll_filter
@@ -751,14 +850,14 @@
         return plugin_docs

     def _get_roles_path(self):
-        '''
+        """
        Add any 'roles' subdir in playbook dir to the roles search path.
        And as a last resort, add the playbook dir itself. Order being:

            - 'roles' subdir of playbook dir
            - DEFAULT_ROLES_PATH (default in cliargs)
            - playbook dir (basedir)

        NOTE: This matches logic in RoleDefinition._load_role_path() method.
-        '''
+        """
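The search-path assembly this docstring describes is small enough to show in isolation. A hedged sketch, where `configured` and `basedir` are hypothetical stand-ins for `context.CLIARGS['roles_path']` and `context.CLIARGS['basedir']`:

```python
import os

def build_roles_search_path(configured, basedir=None):
    """Return the role search path in the order the docstring above describes."""
    roles_path = list(configured)  # the DEFAULT_ROLES_PATH equivalent, already a list
    if basedir is not None:
        roles_path.insert(0, os.path.join(basedir, 'roles'))  # highest priority
        roles_path.append(basedir)                            # last resort
    return roles_path

print(build_roles_search_path(['/etc/ansible/roles'], basedir='/srv/playbooks'))
# ['/srv/playbooks/roles', '/etc/ansible/roles', '/srv/playbooks']
```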
         roles_path = context.CLIARGS['roles_path']
         if context.CLIARGS['basedir'] is not None:
             subdir = os.path.join(context.CLIARGS['basedir'], "roles")
@@ -769,7 +868,7 @@

     @staticmethod
     def _prep_loader(plugin_type):
-        ''' return a plugint type specific loader '''
+        """ return a plugin type specific loader """
         loader = getattr(plugin_loader, '%s_loader' % plugin_type)

         # add to plugin paths from command line
@@ -794,6 +893,7 @@
         plugin_type = context.CLIARGS['type'].lower()
         do_json = context.CLIARGS['json_format'] or context.CLIARGS['dump']
         listing = context.CLIARGS['list_files'] or context.CLIARGS['list_dir']
+        no_fail = bool(not context.CLIARGS['no_fail_on_errors'])

         if context.CLIARGS['list_files']:
             content = 'files'
@@ -816,7 +916,6 @@
             docs['all'] = {}
             for ptype in ptypes:
-                no_fail = bool(not context.CLIARGS['no_fail_on_errors'])
                 if ptype == 'role':
                     roles = self._create_role_list(fail_on_errors=no_fail)
                     docs['all'][ptype] = self._create_role_doc(roles.keys(), context.CLIARGS['entry_point'], fail_on_errors=no_fail)
@@ -826,12 +925,12 @@
                 else:
                     plugin_names = self._list_plugins(ptype, None)
                     docs['all'][ptype] = self._get_plugins_docs(ptype, plugin_names, fail_ok=(ptype in ('test', 'filter')), fail_on_errors=no_fail)
-                # reset list after each type to avoid polution
+                # reset list after each type to avoid pollution
         elif listing:
             if plugin_type == 'keyword':
                 docs = DocCLI._list_keywords()
             elif plugin_type == 'role':
-                docs = self._create_role_list()
+                docs = self._create_role_list(fail_on_errors=False)
             else:
                 docs = self._list_plugins(plugin_type, content)
         else:
@@ -842,7 +941,7 @@
             if plugin_type == 'keyword':
                 docs = DocCLI._get_keywords_docs(context.CLIARGS['args'])
             elif plugin_type == 'role':
-                docs = self._create_role_doc(context.CLIARGS['args'], context.CLIARGS['entry_point'])
+                docs = self._create_role_doc(context.CLIARGS['args'], context.CLIARGS['entry_point'], fail_on_errors=no_fail)
             else:
                 # display specific plugin docs
                 docs = self._get_plugins_docs(plugin_type, context.CLIARGS['args'])
@@ -959,7 +1058,7 @@
     @staticmethod
     def format_snippet(plugin, plugin_type, doc):
-        ''' return heavily commented plugin use to insert into play '''
+        """ return heavily commented plugin use to insert into play """
         if plugin_type == 'inventory' and doc.get('options', {}).get('plugin'):
             # these do not take a yaml config that we can write a snippet for
             raise ValueError('The {0} inventory plugin does not take YAML type config source'
@@ -996,7 +1095,7 @@
             text = DocCLI.get_man_text(doc, collection_name, plugin_type)
         except Exception as e:
             display.vvv(traceback.format_exc())
-            raise AnsibleError("Unable to retrieve documentation from '%s' due to: %s" % (plugin, to_native(e)), orig_exc=e)
+            raise AnsibleError("Unable to retrieve documentation from '%s'" % (plugin), orig_exc=e)

         return text
@@ -1041,7 +1140,7 @@
     @staticmethod
     def print_paths(finder):
-        ''' Returns a string suitable for printing of the search path '''
+        """ Returns a string suitable for printing of the search path """

         # Uses a list to get the order right
         ret = []
@@ -1071,7 +1170,16 @@
         return 'version %s' % (version_added, )

     @staticmethod
-    def add_fields(text, fields, limit, opt_indent, return_values=False, base_indent=''):
+    def warp_fill(text, limit, initial_indent='', subsequent_indent='', **kwargs):
+        result = []
+        for paragraph in text.split('\n\n'):
+            result.append(textwrap.fill(paragraph, limit, initial_indent=initial_indent, subsequent_indent=subsequent_indent,
+                                        break_on_hyphens=False, break_long_words=False, drop_whitespace=True, **kwargs))
+            initial_indent = subsequent_indent
+        return '\n'.join(result)
+
+    @staticmethod
+    def add_fields(text, fields, limit, opt_indent, return_values=False, base_indent='', man=False):

         for o in sorted(fields):
             # Create a copy so we don't modify the original (in case YAML anchors have been used)
@@ -1081,25 +1189,38 @@
             required = opt.pop('required', False)
             if not isinstance(required, bool):
                 raise AnsibleError("Incorrect value for 'Required', a boolean is needed.: %s" % required)
+
+            opt_leadin = ' '
+            key = ''
             if required:
-                opt_leadin = "="
+                if C.ANSIBLE_NOCOLOR:
+                    opt_leadin = "="
+                key = "%s%s %s" % (base_indent, opt_leadin, _format(o, 'bold', 'red'))
             else:
-                opt_leadin = "-"
+                if C.ANSIBLE_NOCOLOR:
+                    opt_leadin = "-"
+                key = "%s%s %s" % (base_indent, opt_leadin, _format(o, 'yellow'))

-            text.append("%s%s %s" % (base_indent, opt_leadin, o))
-
-            # description is specifically formated and can either be string or list of strings
+            # description is specifically formatted and can either be string or list of strings
             if 'description' not in opt:
                 raise AnsibleError("All (sub-)options and return values must have a 'description' field")
+            text.append('')
+
+            # TODO: push this to top of for and sort by size, create indent on largest key?
+            inline_indent = base_indent + ' ' * max((len(opt_indent) - len(o)) - len(base_indent), 2)
+            sub_indent = inline_indent + ' ' * (len(o) + 3)
             if is_sequence(opt['description']):
                 for entry_idx, entry in enumerate(opt['description'], 1):
                     if not isinstance(entry, string_types):
                         raise AnsibleError("Expected string in description of %s at index %s, got %s" % (o, entry_idx, type(entry)))
-                    text.append(textwrap.fill(DocCLI.tty_ify(entry), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
+                    if entry_idx == 1:
+                        text.append(key + DocCLI.warp_fill(DocCLI.tty_ify(entry), limit, initial_indent=inline_indent, subsequent_indent=sub_indent))
+                    else:
+                        text.append(DocCLI.warp_fill(DocCLI.tty_ify(entry), limit, initial_indent=sub_indent, subsequent_indent=sub_indent))
             else:
                 if not isinstance(opt['description'], string_types):
                     raise AnsibleError("Expected string in description of %s, got %s" % (o, type(opt['description'])))
-                text.append(textwrap.fill(DocCLI.tty_ify(opt['description']), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
+                text.append(key + DocCLI.warp_fill(DocCLI.tty_ify(opt['description']), limit, initial_indent=inline_indent, subsequent_indent=sub_indent))
             del opt['description']

             suboptions = []
@@ -1118,6 +1239,8 @@
                     conf[config] = [dict(item) for item in opt.pop(config)]
                     for ignore in DocCLI.IGNORE:
                         for item in conf[config]:
+                            if display.verbosity > 0 and 'version_added' in item:
+                                item['added_in'] = DocCLI._format_version_added(item['version_added'], item.get('version_added_collection', 'ansible-core'))
                             if ignore in item:
                                 del item[ignore]
@@ -1149,18 +1272,15 @@
             else:
                 text.append(DocCLI._indent_lines(DocCLI._dump_yaml({k: opt[k]}), opt_indent))

-            if version_added:
-                text.append("%sadded in: %s\n" % (opt_indent, DocCLI._format_version_added(version_added, version_added_collection)))
+            if version_added and not man:
+                text.append("%sadded in: %s" % (opt_indent,
DocCLI._format_version_added(version_added, version_added_collection))) for subkey, subdata in suboptions: - text.append('') - text.append("%s%s:\n" % (opt_indent, subkey.upper())) - DocCLI.add_fields(text, subdata, limit, opt_indent + ' ', return_values, opt_indent) - if not suboptions: - text.append('') + text.append("%s%s:" % (opt_indent, subkey)) + DocCLI.add_fields(text, subdata, limit, opt_indent + ' ', return_values, opt_indent) def get_role_man_text(self, role, role_json): - '''Generate text for the supplied role suitable for display. + """Generate text for the supplied role suitable for display. This is similar to get_man_text(), but roles are different enough that we have a separate method for formatting their display. @@ -1169,54 +1289,78 @@ class DocCLI(CLI, RoleMixin): :param role_json: The JSON for the given role as returned from _create_role_doc(). :returns: A array of text suitable for displaying to screen. - ''' + """ text = [] - opt_indent = " " + opt_indent = " " pad = display.columns * 0.20 limit = max(display.columns - int(pad), 70) - text.append("> %s (%s)\n" % (role.upper(), role_json.get('path'))) + text.append("> ROLE: %s (%s)" % (_format(role, 'BOLD'), role_json.get('path'))) for entry_point in role_json['entry_points']: doc = role_json['entry_points'][entry_point] - + desc = '' if doc.get('short_description'): - text.append("ENTRY POINT: %s - %s\n" % (entry_point, doc.get('short_description'))) - else: - text.append("ENTRY POINT: %s\n" % entry_point) + desc = "- %s" % (doc.get('short_description')) + text.append('') + text.append("ENTRY POINT: %s %s" % (_format(entry_point, "BOLD"), desc)) + text.append('') if doc.get('description'): if isinstance(doc['description'], list): - desc = " ".join(doc['description']) + descs = doc['description'] else: - desc = doc['description'] + descs = [doc['description']] + for desc in descs: + text.append("%s" % DocCLI.warp_fill(DocCLI.tty_ify(desc), limit, initial_indent=opt_indent, subsequent_indent=opt_indent)) + text.append('') - text.append("%s\n" % textwrap.fill(DocCLI.tty_ify(desc), - limit, initial_indent=opt_indent, - subsequent_indent=opt_indent)) if doc.get('options'): - text.append("OPTIONS (= is mandatory):\n") + text.append(_format("Options", 'bold') + " (%s indicates it is required):" % ("=" if C.ANSIBLE_NOCOLOR else 'red')) DocCLI.add_fields(text, doc.pop('options'), limit, opt_indent) - text.append('') - if doc.get('attributes'): - text.append("ATTRIBUTES:\n") - text.append(DocCLI._indent_lines(DocCLI._dump_yaml(doc.pop('attributes')), opt_indent)) - text.append('') + if doc.get('attributes', False): + display.deprecated( + f'The role {role}\'s argument spec {entry_point} contains the key "attributes", ' + 'which will not be displayed by ansible-doc in the future. 
' + 'This was unintentionally allowed when plugin attributes were added, ' + 'but the feature does not map well to role argument specs.', + version='2.20', + collection_name='ansible.builtin', + ) + text.append("") + text.append(_format("ATTRIBUTES:", 'bold')) + for k in doc['attributes'].keys(): + text.append('') + text.append(DocCLI.warp_fill(DocCLI.tty_ify(_format('%s:' % k, 'UNDERLINE')), limit - 6, initial_indent=opt_indent, + subsequent_indent=opt_indent)) + text.append(DocCLI._indent_lines(DocCLI._dump_yaml(doc['attributes'][k]), opt_indent)) + del doc['attributes'] # generic elements we will handle identically for k in ('author',): if k not in doc: continue + text.append('') if isinstance(doc[k], string_types): - text.append('%s: %s' % (k.upper(), textwrap.fill(DocCLI.tty_ify(doc[k]), + text.append('%s: %s' % (k.upper(), DocCLI.warp_fill(DocCLI.tty_ify(doc[k]), limit - (len(k) + 2), subsequent_indent=opt_indent))) elif isinstance(doc[k], (list, tuple)): text.append('%s: %s' % (k.upper(), ', '.join(doc[k]))) else: # use empty indent since this affects the start of the yaml doc, not it's keys text.append(DocCLI._indent_lines(DocCLI._dump_yaml({k.upper(): doc[k]}), '')) + + if doc.get('examples', False): text.append('') + text.append(_format("EXAMPLES:", 'bold')) + if isinstance(doc['examples'], string_types): + text.append(doc.pop('examples').strip()) + else: + try: + text.append(yaml_dump(doc.pop('examples'), indent=2, default_flow_style=False)) + except Exception as e: + raise AnsibleParserError("Unable to parse examples section", orig_exc=e) return text @@ -1227,136 +1371,137 @@ class DocCLI(CLI, RoleMixin): DocCLI.IGNORE = DocCLI.IGNORE + (context.CLIARGS['type'],) opt_indent = " " + base_indent = " " text = [] pad = display.columns * 0.20 limit = max(display.columns - int(pad), 70) - plugin_name = doc.get(context.CLIARGS['type'], doc.get('name')) or doc.get('plugin_type') or plugin_type - if collection_name: - plugin_name = '%s.%s' % (collection_name, plugin_name) - - text.append("> %s (%s)\n" % (plugin_name.upper(), doc.pop('filename'))) + text.append("> %s %s (%s)" % (plugin_type.upper(), _format(doc.pop('plugin_name'), 'bold'), doc.pop('filename'))) if isinstance(doc['description'], list): - desc = " ".join(doc.pop('description')) + descs = doc.pop('description') else: - desc = doc.pop('description') + descs = [doc.pop('description')] - text.append("%s\n" % textwrap.fill(DocCLI.tty_ify(desc), limit, initial_indent=opt_indent, - subsequent_indent=opt_indent)) + text.append('') + for desc in descs: + text.append(DocCLI.warp_fill(DocCLI.tty_ify(desc), limit, initial_indent=base_indent, subsequent_indent=base_indent)) - if 'version_added' in doc: - version_added = doc.pop('version_added') - version_added_collection = doc.pop('version_added_collection', None) - text.append("ADDED IN: %s\n" % DocCLI._format_version_added(version_added, version_added_collection)) + if display.verbosity > 0: + doc['added_in'] = DocCLI._format_version_added(doc.pop('version_added', 'historical'), doc.pop('version_added_collection', 'ansible-core')) if doc.get('deprecated', False): - text.append("DEPRECATED: \n") + text.append(_format("DEPRECATED: ", 'bold', 'DEP')) if isinstance(doc['deprecated'], dict): - if 'removed_at_date' in doc['deprecated']: - text.append( - "\tReason: %(why)s\n\tWill be removed in a release after %(removed_at_date)s\n\tAlternatives: %(alternative)s" % doc.pop('deprecated') - ) - else: - if 'version' in doc['deprecated'] and 'removed_in' not in doc['deprecated']: - 
doc['deprecated']['removed_in'] = doc['deprecated']['version'] - text.append("\tReason: %(why)s\n\tWill be removed in: Ansible %(removed_in)s\n\tAlternatives: %(alternative)s" % doc.pop('deprecated')) + if 'removed_at_date' not in doc['deprecated'] and 'version' in doc['deprecated'] and 'removed_in' not in doc['deprecated']: + doc['deprecated']['removed_in'] = doc['deprecated']['version'] + try: + text.append('\t' + C.config.get_deprecated_msg_from_config(doc['deprecated'], True, collection_name=collection_name)) + except KeyError as e: + raise AnsibleError("Invalid deprecation documentation structure", orig_exc=e) else: - text.append("%s" % doc.pop('deprecated')) - text.append("\n") + text.append("%s" % doc['deprecated']) + del doc['deprecated'] if doc.pop('has_action', False): - text.append(" * note: %s\n" % "This module has a corresponding action plugin.") + text.append("") + text.append(_format(" * note:", 'bold') + " This module has a corresponding action plugin.") if doc.get('options', False): - text.append("OPTIONS (= is mandatory):\n") - DocCLI.add_fields(text, doc.pop('options'), limit, opt_indent) - text.append('') + text.append("") + text.append(_format("OPTIONS", 'bold') + " (%s indicates it is required):" % ("=" if C.ANSIBLE_NOCOLOR else 'red')) + DocCLI.add_fields(text, doc.pop('options'), limit, opt_indent, man=(display.verbosity == 0)) if doc.get('attributes', False): - text.append("ATTRIBUTES:\n") - text.append(DocCLI._indent_lines(DocCLI._dump_yaml(doc.pop('attributes')), opt_indent)) - text.append('') + text.append("") + text.append(_format("ATTRIBUTES:", 'bold')) + for k in doc['attributes'].keys(): + text.append('') + text.append(DocCLI.warp_fill(DocCLI.tty_ify(_format('%s:' % k, 'UNDERLINE')), limit - 6, initial_indent=opt_indent, + subsequent_indent=opt_indent)) + text.append(DocCLI._indent_lines(DocCLI._dump_yaml(doc['attributes'][k]), opt_indent)) + del doc['attributes'] if doc.get('notes', False): - text.append("NOTES:") + text.append("") + text.append(_format("NOTES:", 'bold')) for note in doc['notes']: - text.append(textwrap.fill(DocCLI.tty_ify(note), limit - 6, - initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent)) - text.append('') - text.append('') + text.append(DocCLI.warp_fill(DocCLI.tty_ify(note), limit - 6, + initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent)) del doc['notes'] if doc.get('seealso', False): - text.append("SEE ALSO:") + text.append("") + text.append(_format("SEE ALSO:", 'bold')) for item in doc['seealso']: if 'module' in item: - text.append(textwrap.fill(DocCLI.tty_ify('Module %s' % item['module']), + text.append(DocCLI.warp_fill(DocCLI.tty_ify('Module %s' % item['module']), limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent)) description = item.get('description') if description is None and item['module'].startswith('ansible.builtin.'): description = 'The official documentation on the %s module.' 
% item['module'] if description is not None: - text.append(textwrap.fill(DocCLI.tty_ify(description), + text.append(DocCLI.warp_fill(DocCLI.tty_ify(description), limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' ')) if item['module'].startswith('ansible.builtin.'): relative_url = 'collections/%s_module.html' % item['module'].replace('.', '/', 2) - text.append(textwrap.fill(DocCLI.tty_ify(get_versioned_doclink(relative_url)), + text.append(DocCLI.warp_fill(DocCLI.tty_ify(get_versioned_doclink(relative_url)), limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent)) elif 'plugin' in item and 'plugin_type' in item: plugin_suffix = ' plugin' if item['plugin_type'] not in ('module', 'role') else '' - text.append(textwrap.fill(DocCLI.tty_ify('%s%s %s' % (item['plugin_type'].title(), plugin_suffix, item['plugin'])), + text.append(DocCLI.warp_fill(DocCLI.tty_ify('%s%s %s' % (item['plugin_type'].title(), plugin_suffix, item['plugin'])), limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent)) description = item.get('description') if description is None and item['plugin'].startswith('ansible.builtin.'): description = 'The official documentation on the %s %s%s.' % (item['plugin'], item['plugin_type'], plugin_suffix) if description is not None: - text.append(textwrap.fill(DocCLI.tty_ify(description), + text.append(DocCLI.warp_fill(DocCLI.tty_ify(description), limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' ')) if item['plugin'].startswith('ansible.builtin.'): relative_url = 'collections/%s_%s.html' % (item['plugin'].replace('.', '/', 2), item['plugin_type']) - text.append(textwrap.fill(DocCLI.tty_ify(get_versioned_doclink(relative_url)), + text.append(DocCLI.warp_fill(DocCLI.tty_ify(get_versioned_doclink(relative_url)), limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent)) elif 'name' in item and 'link' in item and 'description' in item: - text.append(textwrap.fill(DocCLI.tty_ify(item['name']), + text.append(DocCLI.warp_fill(DocCLI.tty_ify(item['name']), limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent)) - text.append(textwrap.fill(DocCLI.tty_ify(item['description']), + text.append(DocCLI.warp_fill(DocCLI.tty_ify(item['description']), limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' ')) - text.append(textwrap.fill(DocCLI.tty_ify(item['link']), + text.append(DocCLI.warp_fill(DocCLI.tty_ify(item['link']), limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' ')) elif 'ref' in item and 'description' in item: - text.append(textwrap.fill(DocCLI.tty_ify('Ansible documentation [%s]' % item['ref']), + text.append(DocCLI.warp_fill(DocCLI.tty_ify('Ansible documentation [%s]' % item['ref']), limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent)) - text.append(textwrap.fill(DocCLI.tty_ify(item['description']), + text.append(DocCLI.warp_fill(DocCLI.tty_ify(item['description']), limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' ')) - text.append(textwrap.fill(DocCLI.tty_ify(get_versioned_doclink('/#stq=%s&stp=1' % item['ref'])), + text.append(DocCLI.warp_fill(DocCLI.tty_ify(get_versioned_doclink('/#stq=%s&stp=1' % item['ref'])), limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' ')) - text.append('') - text.append('') del doc['seealso'] if doc.get('requirements', False): + text.append('') req = ", ".join(doc.pop('requirements')) - 
text.append("REQUIREMENTS:%s\n" % textwrap.fill(DocCLI.tty_ify(req), limit - 16, initial_indent=" ", subsequent_indent=opt_indent)) + text.append(_format("REQUIREMENTS:", 'bold') + "%s\n" % DocCLI.warp_fill(DocCLI.tty_ify(req), limit - 16, initial_indent=" ", + subsequent_indent=opt_indent)) # Generic handler for k in sorted(doc): - if k in DocCLI.IGNORE or not doc[k]: + if not doc[k] or k in DocCLI.IGNORE: continue + text.append('') + header = _format(k.upper(), 'bold') if isinstance(doc[k], string_types): - text.append('%s: %s' % (k.upper(), textwrap.fill(DocCLI.tty_ify(doc[k]), limit - (len(k) + 2), subsequent_indent=opt_indent))) + text.append('%s: %s' % (header, DocCLI.warp_fill(DocCLI.tty_ify(doc[k]), limit - (len(k) + 2), subsequent_indent=opt_indent))) elif isinstance(doc[k], (list, tuple)): - text.append('%s: %s' % (k.upper(), ', '.join(doc[k]))) + text.append('%s: %s' % (header, ', '.join(doc[k]))) else: # use empty indent since this affects the start of the yaml doc, not it's keys - text.append(DocCLI._indent_lines(DocCLI._dump_yaml({k.upper(): doc[k]}), '')) + text.append('%s: ' % header + DocCLI._indent_lines(DocCLI._dump_yaml(doc[k]), ' ' * (len(k) + 2))) del doc[k] - text.append('') if doc.get('plainexamples', False): - text.append("EXAMPLES:") text.append('') + text.append(_format("EXAMPLES:", 'bold')) if isinstance(doc['plainexamples'], string_types): text.append(doc.pop('plainexamples').strip()) else: @@ -1364,13 +1509,13 @@ class DocCLI(CLI, RoleMixin): text.append(yaml_dump(doc.pop('plainexamples'), indent=2, default_flow_style=False)) except Exception as e: raise AnsibleParserError("Unable to parse examples section", orig_exc=e) - text.append('') - text.append('') if doc.get('returndocs', False): - text.append("RETURN VALUES:") - DocCLI.add_fields(text, doc.pop('returndocs'), limit, opt_indent, return_values=True) + text.append('') + text.append(_format("RETURN VALUES:", 'bold')) + DocCLI.add_fields(text, doc.pop('returndocs'), limit, opt_indent, return_values=True, man=(display.verbosity == 0)) + text.append('\n') return "\n".join(text) @@ -1407,14 +1552,14 @@ def _do_yaml_snippet(doc): if module: if required: desc = "(required) %s" % desc - text.append(" %-20s # %s" % (o, textwrap.fill(desc, limit, subsequent_indent=subdent))) + text.append(" %-20s # %s" % (o, DocCLI.warp_fill(desc, limit, subsequent_indent=subdent))) else: if required: default = '(required)' else: default = opt.get('default', 'None') - text.append("%s %-9s # %s" % (o, default, textwrap.fill(desc, limit, subsequent_indent=subdent, max_lines=3))) + text.append("%s %-9s # %s" % (o, default, DocCLI.warp_fill(desc, limit, subsequent_indent=subdent, max_lines=3))) return text diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 81f6df22e7b..5e2bef6f151 100755 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -4,13 +4,13 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # PYTHON_ARGCOMPLETE_OK -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first from ansible.cli import CLI import argparse +import functools import json import os.path import pathlib @@ -55,37 +55,16 @@ from ansible.module_utils.common.yaml import yaml_dump, yaml_load from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.module_utils import six from 
ansible.parsing.dataloader import DataLoader -from ansible.parsing.yaml.loader import AnsibleLoader from ansible.playbook.role.requirement import RoleRequirement from ansible.template import Templar from ansible.utils.collection_loader import AnsibleCollectionConfig from ansible.utils.display import Display from ansible.utils.plugin_docs import get_versioned_doclink +from ansible.utils.vars import load_extra_vars display = Display() urlparse = six.moves.urllib.parse.urlparse -# config definition by position: name, required, type -SERVER_DEF = [ - ('url', True, 'str'), - ('username', False, 'str'), - ('password', False, 'str'), - ('token', False, 'str'), - ('auth_url', False, 'str'), - ('api_version', False, 'int'), - ('validate_certs', False, 'bool'), - ('client_id', False, 'str'), - ('timeout', False, 'int'), -] - -# config definition fields -SERVER_ADDITIONAL = { - 'api_version': {'default': None, 'choices': [2, 3]}, - 'validate_certs': {'cli': [{'name': 'validate_certs'}]}, - 'timeout': {'default': C.GALAXY_SERVER_TIMEOUT, 'cli': [{'name': 'timeout'}]}, - 'token': {'default': None}, -} - def with_collection_artifacts_manager(wrapped_method): """Inject an artifacts manager if not passed explicitly. @@ -94,6 +73,7 @@ def with_collection_artifacts_manager(wrapped_method): the related temporary directory auto-cleanup around the target method invocation. """ + @functools.wraps(wrapped_method) def method_wrapper(*args, **kwargs): if 'artifacts_manager' in kwargs: return wrapped_method(*args, **kwargs) @@ -197,11 +177,11 @@ class RoleDistributionServer: class GalaxyCLI(CLI): - '''Command to manage Ansible roles and collections. + """Command to manage Ansible roles and collections. None of the CLI tools are designed to run concurrently with themselves. Use an external scheduler and/or locking to ensure there are no clashing operations. - ''' + """ name = 'ansible-galaxy' @@ -232,7 +212,7 @@ class GalaxyCLI(CLI): super(GalaxyCLI, self).__init__(args) def init_parser(self): - ''' create an options parser for bin/ansible ''' + """ create an options parser for bin/ansible """ super(GalaxyCLI, self).init_parser( desc="Perform various Role and Collection related operations.", @@ -293,6 +273,7 @@ class GalaxyCLI(CLI): # Add sub parser for the Galaxy collection actions collection = type_parser.add_parser('collection', help='Manage an Ansible Galaxy collection.') + collection.set_defaults(func=self.execute_collection) # to satisfy doc build collection_parser = collection.add_subparsers(metavar='COLLECTION_ACTION', dest='action') collection_parser.required = True self.add_download_options(collection_parser, parents=[common, cache_options]) @@ -305,6 +286,7 @@ class GalaxyCLI(CLI): # Add sub parser for the Galaxy role actions role = type_parser.add_parser('role', help='Manage an Ansible Galaxy role.') + role.set_defaults(func=self.execute_role) # to satisfy doc build role_parser = role.add_subparsers(metavar='ROLE_ACTION', dest='action') role_parser.required = True self.add_init_options(role_parser, parents=[common, force, offline]) @@ -363,6 +345,7 @@ class GalaxyCLI(CLI): init_parser.add_argument('--type', dest='role_type', action='store', default='default', help="Initialize using an alternate role type. 
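The `set_defaults(func=self.execute_collection)` and `set_defaults(func=self.execute_role)` calls added above use the stock argparse idiom: attaching a default handler to a parent subparser guarantees `args.func` resolves even when only the parent command is inspected (here, to satisfy the doc build). A minimal standalone illustration of the pattern, with illustrative names rather than the real GalaxyCLI wiring:

```python
import argparse

def handle_collection(args):
    print('collection action:', getattr(args, 'action', None))

parser = argparse.ArgumentParser(prog='galaxy-demo')
type_parser = parser.add_subparsers(metavar='TYPE', dest='type')
collection = type_parser.add_parser('collection', help='Manage collections.')
collection.set_defaults(func=handle_collection)  # default handler for the bare subcommand
collection_parser = collection.add_subparsers(metavar='ACTION', dest='action')
collection_parser.add_parser('list')

args = parser.parse_args(['collection', 'list'])
args.func(args)  # -> collection action: list
```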
Valid types include: 'container', " "'apb' and 'network'.") + opt_help.add_runtask_options(init_parser) def add_remove_options(self, parser, parents=None): remove_parser = parser.add_parser('remove', parents=parents, help='Delete roles from roles_path.') @@ -463,12 +446,15 @@ class GalaxyCLI(CLI): valid_signature_count_help = 'The number of signatures that must successfully verify the collection. This should be a positive integer ' \ 'or all to signify that all signatures must be used to verify the collection. ' \ 'Prepend the value with + to fail if no valid signatures are found for the collection (e.g. +all).' - ignore_gpg_status_help = 'A status code to ignore during signature verification (for example, NO_PUBKEY). ' \ - 'Provide this option multiple times to ignore a list of status codes. ' \ - 'Descriptions for the choices can be seen at L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes).' + ignore_gpg_status_help = 'A space separated list of status codes to ignore during signature verification (for example, NO_PUBKEY FAILURE). ' \ + 'Descriptions for the choices can be seen at L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes).' \ + 'Note: specify these after positional arguments or use -- to separate them.' verify_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count, help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT) verify_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append', + help=opt_help.argparse.SUPPRESS, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES, + choices=list(GPG_ERROR_MAP.keys())) + verify_parser.add_argument('--ignore-signature-status-codes', dest='ignore_gpg_errors', type=str, action='extend', nargs='+', help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES, choices=list(GPG_ERROR_MAP.keys())) @@ -482,12 +468,31 @@ class GalaxyCLI(CLI): ignore_errors_help = 'Ignore errors during installation and continue with the next specified ' \ 'collection. This will not ignore dependency conflict errors.' else: - args_kwargs['help'] = 'Role name, URL or tar file' + args_kwargs['help'] = 'Role name, URL or tar file. This is mutually exclusive with -r.' ignore_errors_help = 'Ignore errors and continue with the next specified role.' + if self._implicit_role: + # might install both roles and collections + description_text = ( + 'Install roles and collections from file(s), URL(s) or Ansible ' + 'Galaxy to the first entry in the config COLLECTIONS_PATH for collections ' + 'and first entry in the config ROLES_PATH for roles. ' + 'The first entry in the config ROLES_PATH can be overridden by --roles-path ' + 'or -p, but this will result in only roles being installed.' 
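The help text above hints at the parsing change: the deprecated `--ignore-signature-status-code` keeps `action='append'` (one value per flag occurrence), while the new plural spelling uses `action='extend'` with `nargs='+'` (a space-separated list, hence the warning about positional ordering and `--`). A standalone demo of the two styles; argparse's `extend` action needs Python 3.8+:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--ignore-code', dest='codes', action='append', default=[])
parser.add_argument('--ignore-codes', dest='codes', action='extend', nargs='+')

# old style: repeat the flag once per value
print(parser.parse_args(['--ignore-code', 'NO_PUBKEY', '--ignore-code', 'FAILURE']).codes)
# new style: several values after a single flag
print(parser.parse_args(['--ignore-codes', 'NO_PUBKEY', 'FAILURE']).codes)
# both print ['NO_PUBKEY', 'FAILURE']
```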
+ ) + prog = 'ansible-galaxy install' + else: + prog = f"ansible-galaxy {galaxy_type} install" + description_text = ( + 'Install {0}(s) from file(s), URL(s) or Ansible ' + 'Galaxy to the first entry in the config {1}S_PATH ' + 'unless overridden by --{0}s-path.'.format(galaxy_type, galaxy_type.upper()) + ) install_parser = parser.add_parser('install', parents=parents, help='Install {0}(s) from file(s), URL(s) or Ansible ' - 'Galaxy'.format(galaxy_type)) + 'Galaxy'.format(galaxy_type), + description=description_text, + prog=prog,) install_parser.set_defaults(func=self.execute_install) install_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', **args_kwargs) @@ -504,9 +509,9 @@ class GalaxyCLI(CLI): valid_signature_count_help = 'The number of signatures that must successfully verify the collection. This should be a positive integer ' \ 'or -1 to signify that all signatures must be used to verify the collection. ' \ 'Prepend the value with + to fail if no valid signatures are found for the collection (e.g. +all).' - ignore_gpg_status_help = 'A status code to ignore during signature verification (for example, NO_PUBKEY). ' \ - 'Provide this option multiple times to ignore a list of status codes. ' \ - 'Descriptions for the choices can be seen at L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes).' + ignore_gpg_status_help = 'A space separated list of status codes to ignore during signature verification (for example, NO_PUBKEY FAILURE). ' \ + 'Descriptions for the choices can be seen at L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes).' \ + 'Note: specify these after positional arguments or use -- to separate them.' if galaxy_type == 'collection': install_parser.add_argument('-p', '--collections-path', dest='collections_path', @@ -530,6 +535,9 @@ class GalaxyCLI(CLI): install_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count, help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT) install_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append', + help=opt_help.argparse.SUPPRESS, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES, + choices=list(GPG_ERROR_MAP.keys())) + install_parser.add_argument('--ignore-signature-status-codes', dest='ignore_gpg_errors', type=str, action='extend', nargs='+', help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES, choices=list(GPG_ERROR_MAP.keys())) install_parser.add_argument('--offline', dest='offline', action='store_true', default=False, @@ -537,8 +545,12 @@ class GalaxyCLI(CLI): 'This does not apply to collections in remote Git repositories or URLs to remote tarballs.' ) else: - install_parser.add_argument('-r', '--role-file', dest='requirements', - help='A file containing a list of roles to be installed.') + if self._implicit_role: + install_parser.add_argument('-r', '--role-file', dest='requirements', + help='A file containing a list of collections and roles to be installed.') + else: + install_parser.add_argument('-r', '--role-file', dest='requirements', + help='A file containing a list of roles to be installed.') r_re = re.compile(r'^(?] for the values url, username, password, and token. 
- config_dict = dict((k, server_config_def(server_key, k, req, ensure_type)) for k, req, ensure_type in SERVER_DEF) - defs = AnsibleLoader(yaml_dump(config_dict)).get_single_data() - C.config.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs) # resolve the config created options above with existing config and user options - server_options = C.config.get_plugin_options('galaxy_server', server_key) + server_options = C.config.get_plugin_options(plugin_type='galaxy_server', name=server_key) # auth_url is used to create the token, but not directly by GalaxyAPI, so # it doesn't need to be passed as kwarg to GalaxyApi, same for others we pop here @@ -822,7 +813,7 @@ class GalaxyCLI(CLI): for role_req in file_requirements: requirements['roles'] += parse_role_req(role_req) - else: + elif isinstance(file_requirements, dict): # Newer format with a collections and/or roles key extra_keys = set(file_requirements.keys()).difference(set(['roles', 'collections'])) if extra_keys: @@ -841,6 +832,9 @@ class GalaxyCLI(CLI): for collection_req in file_requirements.get('collections') or [] ] + else: + raise AnsibleError(f"Expecting requirements yaml to be a list or dictionary but got {type(file_requirements).__name__}") + return requirements def _init_coll_req_dict(self, coll_req): @@ -1039,6 +1033,7 @@ class GalaxyCLI(CLI): @with_collection_artifacts_manager def execute_download(self, artifacts_manager=None): + """Download collections and their dependencies as a tarball for an offline install.""" collections = context.CLIARGS['args'] no_deps = context.CLIARGS['no_deps'] download_path = context.CLIARGS['download_path'] @@ -1155,6 +1150,7 @@ class GalaxyCLI(CLI): ) loader = DataLoader() + inject_data.update(load_extra_vars(loader)) templar = Templar(loader, variables=inject_data) # create role directory @@ -1198,7 +1194,11 @@ class GalaxyCLI(CLI): src_template = os.path.join(root, f) dest_file = os.path.join(obj_path, rel_root, filename) template_data = to_text(loader._get_file_contents(src_template)[0], errors='surrogate_or_strict') - b_rendered = to_bytes(templar.template(template_data), errors='surrogate_or_strict') + try: + b_rendered = to_bytes(templar.template(template_data), errors='surrogate_or_strict') + except AnsibleError as e: + shutil.rmtree(b_obj_path) + raise AnsibleError(f"Failed to create {galaxy_type.title()} {obj_name}. Templating {src_template} failed with the error: {e}") from e with open(dest_file, 'wb') as df: df.write(b_rendered) else: @@ -1251,6 +1251,9 @@ class GalaxyCLI(CLI): if remote_data: role_info.update(remote_data) + else: + data = u"- the role %s was not found" % role + break elif context.CLIARGS['offline'] and not gr._exists: data = u"- the role %s was not found" % role @@ -1270,6 +1273,7 @@ class GalaxyCLI(CLI): @with_collection_artifacts_manager def execute_verify(self, artifacts_manager=None): + """Compare checksums with the collection(s) found on the server and the installed copy. This does not verify dependencies.""" collections = context.CLIARGS['args'] search_paths = AnsibleCollectionConfig.collection_paths @@ -1307,8 +1311,6 @@ class GalaxyCLI(CLI): You can pass in a list (roles or collections) or use the file option listed below (these are mutually exclusive). If you pass in a list, it can be a name (which will be downloaded via the galaxy API and github), or it can be a local tar archive file. - - :param artifacts_manager: Artifacts manager. 
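The `elif isinstance(file_requirements, dict)` branch added above tightens requirements-file handling: a bare YAML list is the legacy roles format, a mapping may only contain `roles` and/or `collections`, and anything else now raises. A condensed sketch of that dispatch, as a hypothetical helper using PyYAML and builtin exceptions in place of ansible's loader and error types:

```python
import yaml

def parse_requirements(text):
    """Classify a requirements.yml payload the way the code above does."""
    data = yaml.safe_load(text)
    requirements = {'roles': [], 'collections': []}
    if isinstance(data, list):
        # legacy format: a bare list of role requirements
        requirements['roles'] = data
    elif isinstance(data, dict):
        extra = set(data) - {'roles', 'collections'}
        if extra:
            raise ValueError('Expecting only roles and/or collections, got: %s' % sorted(extra))
        requirements['roles'] = data.get('roles') or []
        requirements['collections'] = data.get('collections') or []
    else:
        raise TypeError('Expecting requirements yaml to be a list or dictionary'
                        ' but got %s' % type(data).__name__)
    return requirements

print(parse_requirements("roles:\n  - name: geerlingguy.java\n"))
```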
""" install_items = context.CLIARGS['args'] requirements_file = context.CLIARGS['requirements'] @@ -1719,7 +1721,7 @@ class GalaxyCLI(CLI): publish_collection(collection_path, self.api, wait, timeout) def execute_search(self): - ''' searches for roles on the Ansible Galaxy server''' + """ searches for roles on the Ansible Galaxy server""" page_size = 1000 search = None @@ -1773,6 +1775,7 @@ class GalaxyCLI(CLI): github_user = to_text(context.CLIARGS['github_user'], errors='surrogate_or_strict') github_repo = to_text(context.CLIARGS['github_repo'], errors='surrogate_or_strict') + rc = 0 if context.CLIARGS['check_status']: task = self.api.get_import_task(github_user=github_user, github_repo=github_repo) else: @@ -1790,7 +1793,7 @@ class GalaxyCLI(CLI): display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED) display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo), color=C.COLOR_CHANGED) - return 0 + return rc # found a single role as expected display.display("Successfully submitted import request %d" % task[0]['id']) if not context.CLIARGS['wait']: @@ -1807,12 +1810,13 @@ class GalaxyCLI(CLI): if msg['id'] not in msg_list: display.display(msg['message_text'], color=colors[msg['message_type']]) msg_list.append(msg['id']) - if task[0]['state'] in ['SUCCESS', 'FAILED']: + if (state := task[0]['state']) in ['SUCCESS', 'FAILED']: + rc = ['SUCCESS', 'FAILED'].index(state) finished = True else: time.sleep(10) - return 0 + return rc def execute_setup(self): """ Setup an integration from Github or Travis for Ansible Galaxy roles""" diff --git a/lib/ansible/cli/inventory.py b/lib/ansible/cli/inventory.py index ede6288df55..5d99d24ed68 100755 --- a/lib/ansible/cli/inventory.py +++ b/lib/ansible/cli/inventory.py @@ -4,8 +4,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # PYTHON_ARGCOMPLETE_OK -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first from ansible.cli import CLI @@ -25,34 +24,13 @@ from ansible.vars.plugins import get_vars_from_inventory_sources, get_vars_from_ display = Display() -INTERNAL_VARS = frozenset(['ansible_diff_mode', - 'ansible_config_file', - 'ansible_facts', - 'ansible_forks', - 'ansible_inventory_sources', - 'ansible_limit', - 'ansible_playbook_python', - 'ansible_run_tags', - 'ansible_skip_tags', - 'ansible_verbosity', - 'ansible_version', - 'inventory_dir', - 'inventory_file', - 'inventory_hostname', - 'inventory_hostname_short', - 'groups', - 'group_names', - 'omit', - 'playbook_dir', ]) - class InventoryCLI(CLI): - ''' used to display or dump the configured inventory as Ansible sees it ''' + """ used to display or dump the configured inventory as Ansible sees it """ name = 'ansible-inventory' - ARGUMENTS = {'host': 'The name of a host to match in the inventory, relevant when using --list', - 'group': 'The name of a group in the inventory, relevant when using --graph', } + ARGUMENTS = {'group': 'The name of a group in the inventory, relevant when using --graph', } def __init__(self, args): @@ -63,8 +41,8 @@ class InventoryCLI(CLI): def init_parser(self): super(InventoryCLI, self).init_parser( - usage='usage: %prog [options] [host|group]', - epilog='Show Ansible inventory information, by default it uses 
the inventory script JSON format')
+            usage='usage: %prog [options] [group]',
+            desc='Show Ansible inventory information, by default it uses the inventory script JSON format')

         opt_help.add_inventory_options(self.parser)
         opt_help.add_vault_options(self.parser)
@@ -74,7 +52,7 @@ class InventoryCLI(CLI):
         # remove unused default options
         self.parser.add_argument('--list-hosts', help=argparse.SUPPRESS, action=opt_help.UnrecognizedArgument)
-        self.parser.add_argument('args', metavar='host|group', nargs='?')
+        self.parser.add_argument('args', metavar='group', nargs='?', help='The name of a group in the inventory, relevant when using --graph')

         # Actions
         action_group = self.parser.add_argument_group("Actions", "One of following must be used on invocation, ONLY ONE!")
@@ -95,12 +73,12 @@ class InventoryCLI(CLI):
         # list
         self.parser.add_argument("--export", action="store_true", default=C.INVENTORY_EXPORT, dest='export',
-                                 help="When doing an --list, represent in a way that is optimized for export,"
+                                 help="When doing --list, represent in a way that is optimized for export, "
                                       "not as an accurate representation of how Ansible has processed it")
         self.parser.add_argument('--output', default=None, dest='output_file',
                                  help="When doing --list, send the inventory to a file instead of to the screen")
         # self.parser.add_argument("--ignore-vars-plugins", action="store_true", default=False, dest='ignore_vars_plugins',
-        #                          help="When doing an --list, skip vars data from vars plugins, by default, this would include group_vars/ and host_vars/")
+        #                          help="When doing --list, skip vars data from vars plugins, by default, this would include group_vars/ and host_vars/")

     def post_process_args(self, options):
         options = super(InventoryCLI, self).post_process_args(options)
@@ -247,7 +225,7 @@ class InventoryCLI(CLI):

     @staticmethod
     def _remove_internal(dump):
-        for internal in INTERNAL_VARS:
+        for internal in C.INTERNAL_STATIC_VARS:
             if internal in dump:
                 del dump[internal]
@@ -325,7 +303,7 @@ class InventoryCLI(CLI):
             return results

         hosts = self.inventory.get_hosts(top.name)
-        results = format_group(top, [h.name for h in hosts])
+        results = format_group(top, frozenset(h.name for h in hosts))

         # populate meta
         results['_meta'] = {'hostvars': {}}
@@ -381,7 +359,7 @@ class InventoryCLI(CLI):

             return results

-        available_hosts = [h.name for h in self.inventory.get_hosts(top.name)]
+        available_hosts = frozenset(h.name for h in self.inventory.get_hosts(top.name))
         return format_group(top, available_hosts)

     def toml_inventory(self, top):
@@ -425,7 +403,7 @@ class InventoryCLI(CLI):

             return results

-        available_hosts = [h.name for h in self.inventory.get_hosts(top.name)]
+        available_hosts = frozenset(h.name for h in self.inventory.get_hosts(top.name))
         results = format_group(top, available_hosts)

         return results
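Two related inventory changes just above: the module-local `INTERNAL_VARS` frozenset was dropped in favor of the shared `C.INTERNAL_STATIC_VARS`, and the host-name lists handed to `format_group` became frozensets, turning repeated membership checks into O(1) lookups. A small sketch of the stripping step; the set shown is abridged and illustrative, the real one lives in ansible's constants:

```python
INTERNAL_STATIC_VARS = frozenset({'groups', 'group_names', 'inventory_hostname',
                                  'inventory_dir', 'omit', 'playbook_dir'})  # abridged

def remove_internal(hostvars):
    """Drop ansible-internal keys before dumping hostvars, as _remove_internal does."""
    for internal in INTERNAL_STATIC_VARS:
        hostvars.pop(internal, None)
    return hostvars

print(remove_internal({'inventory_hostname': 'web1', 'ansible_port': 22}))
# {'ansible_port': 22}
```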
- See the project home page (https://docs.ansible.com) for more information. ''' + """ the tool to run *Ansible playbooks*, which are a configuration and multinode deployment system. + See the project home page (https://docs.ansible.com) for more information. """ name = 'ansible-playbook' @@ -144,10 +143,6 @@ class PlaybookCLI(CLI): # Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts()) CLI.get_host_list(inventory, context.CLIARGS['subset']) - # flush fact cache if requested - if context.CLIARGS['flush_cache']: - self._flush_cache(inventory, variable_manager) - # create the playbook executor, which manages running the plays via a task queue manager pbex = PlaybookExecutor(playbooks=context.CLIARGS['args'], inventory=inventory, variable_manager=variable_manager, loader=loader, @@ -229,12 +224,6 @@ class PlaybookCLI(CLI): else: return results - @staticmethod - def _flush_cache(inventory, variable_manager): - for host in inventory.list_hosts(): - hostname = host.get_name() - variable_manager.clear_facts(hostname) - def main(args=None): PlaybookCLI.cli_executor(args) diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index c7497b0b695..ee24c9ff9aa 100755 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -4,8 +4,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # PYTHON_ARGCOMPLETE_OK -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first from ansible.cli import CLI @@ -13,7 +12,7 @@ from ansible.cli import CLI import datetime import os import platform -import random +import secrets import shlex import shutil import socket @@ -29,11 +28,12 @@ from ansible.plugins.loader import module_loader from ansible.utils.cmd_functions import run_cmd from ansible.utils.display import Display + display = Display() class PullCLI(CLI): - ''' Used to pull a remote copy of ansible on each managed node, + """ Used to pull a remote copy of ansible on each managed node, each set to run via cron and update playbook source via a source repository. This inverts the default *push* architecture of ansible into a *pull* architecture, which has near-limitless scaling potential. @@ -45,7 +45,7 @@ class PullCLI(CLI): This is useful both for extreme scale-out as well as periodic remediation. Usage of the 'fetch' module to retrieve logs from ansible-pull runs would be an excellent way to gather and analyze remote logs from ansible-pull. - ''' + """ name = 'ansible-pull' @@ -56,9 +56,9 @@ class PullCLI(CLI): 1: 'File does not exist', 2: 'File is not readable', } - ARGUMENTS = {'playbook.yml': 'The name of one the YAML format files to run as an Ansible playbook.' - 'This can be a relative path within the checkout. By default, Ansible will' - "look for a playbook based on the host's fully-qualified domain name," + ARGUMENTS = {'playbook.yml': 'The name of one of the YAML format files to run as an Ansible playbook. ' + 'This can be a relative path within the checkout. 
By default, Ansible will ' + "look for a playbook based on the host's fully-qualified domain name, " 'on the host hostname and finally a playbook named *local.yml*.', } SKIP_INVENTORY_DEFAULTS = True @@ -76,11 +76,11 @@ class PullCLI(CLI): return inv_opts def init_parser(self): - ''' create an options parser for bin/ansible ''' + """ create an options parser for bin/ansible """ super(PullCLI, self).init_parser( usage='%prog -U <repository> [options] [<playbook.yml>]', - desc="pulls playbooks from a VCS repo and executes them for the local host") + desc="pulls playbooks from a VCS repo and executes them on the target host") # Do not add check_options as there's a conflict with --checkout/-C opt_help.add_connect_options(self.parser) @@ -102,8 +102,8 @@ 'This is a useful way to disperse git requests') self.parser.add_argument('-f', '--force', dest='force', default=False, action='store_true', help='run the playbook even if the repository could not be updated') - self.parser.add_argument('-d', '--directory', dest='dest', default=None, - help='absolute path of repository checkout directory (relative paths are not supported)') + self.parser.add_argument('-d', '--directory', dest='dest', default=None, type=opt_help.unfrack_path(), + help='path to the directory to which Ansible will check out the repository.') self.parser.add_argument('-U', '--url', dest='url', default=None, help='URL of the playbook repository') self.parser.add_argument('--full', dest='fullclone', action='store_true', help='Do a full clone, instead of a shallow one.') self.parser.add_argument('-C', '--checkout', dest='checkout', @@ -134,14 +134,13 @@ hostname = socket.getfqdn() # use a hostname dependent directory, in case of $HOME on nfs options.dest = os.path.join(C.ANSIBLE_HOME, 'pull', hostname) - options.dest = os.path.expandvars(os.path.expanduser(options.dest)) if os.path.exists(options.dest) and not os.path.isdir(options.dest): raise AnsibleOptionsError("%s is not a valid or accessible directory." % options.dest) if options.sleep: try: - secs = random.randint(0, int(options.sleep)) + secs = secrets.randbelow(int(options.sleep)) options.sleep = secs except ValueError: raise AnsibleOptionsError("%s is not a number." 
% options.sleep) @@ -158,7 +157,7 @@ class PullCLI(CLI): return options def run(self): - ''' use Runner lib to do SSH things ''' + """ use Runner lib to do SSH things """ super(PullCLI, self).run() @@ -275,8 +274,15 @@ class PullCLI(CLI): for vault_id in context.CLIARGS['vault_ids']: cmd += " --vault-id=%s" % vault_id + if context.CLIARGS['become_password_file']: + cmd += " --become-password-file=%s" % context.CLIARGS['become_password_file'] + + if context.CLIARGS['connection_password_file']: + cmd += " --connection-password-file=%s" % context.CLIARGS['connection_password_file'] + for ev in context.CLIARGS['extra_vars']: cmd += ' -e %s' % shlex.quote(ev) + if context.CLIARGS['become_ask_pass']: cmd += ' --ask-become-pass' if context.CLIARGS['skip_tags']: @@ -292,6 +298,9 @@ class PullCLI(CLI): if context.CLIARGS['diff']: cmd += ' -D' + if context.CLIARGS['flush_cache']: + cmd += ' --flush-cache' + os.chdir(context.CLIARGS['dest']) # redo inventory options as new files might exist now @@ -307,6 +316,7 @@ class PullCLI(CLI): if context.CLIARGS['purge']: os.chdir('/') try: + display.debug("removing: %s" % context.CLIARGS['dest']) shutil.rmtree(context.CLIARGS['dest']) except Exception as e: display.error(u"Failed to remove %s: %s" % (context.CLIARGS['dest'], to_text(e))) diff --git a/lib/ansible/cli/scripts/ansible_connection_cli_stub.py b/lib/ansible/cli/scripts/ansible_connection_cli_stub.py old mode 100755 new mode 100644 index b1ed18c9c69..0c8baa9871f --- a/lib/ansible/cli/scripts/ansible_connection_cli_stub.py +++ b/lib/ansible/cli/scripts/ansible_connection_cli_stub.py @@ -1,13 +1,8 @@ -#!/usr/bin/env python # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - +from __future__ import annotations import fcntl -import hashlib import io import os import pickle @@ -43,13 +38,6 @@ def read_stream(byte_stream): if len(data) < size: raise Exception("EOF found before data was complete") - data_hash = to_text(byte_stream.readline().strip()) - if data_hash != hashlib.sha1(data).hexdigest(): - raise Exception("Read {0} bytes, but data did not match checksum".format(size)) - - # restore escaped loose \r characters - data = data.replace(br'\r', b'\r') - return data @@ -69,10 +57,10 @@ def file_lock(lock_path): class ConnectionProcess(object): - ''' + """ The connection process wraps around a Connection object that manages the connection to a remote device that persists over the playbook - ''' + """ def __init__(self, fd, play_context, socket_path, original_path, task_uuid=None, ansible_playbook_pid=None): self.play_context = play_context self.socket_path = socket_path @@ -224,7 +212,7 @@ def main(args=None): """ Called to initiate the connect to the remote device """ - parser = opt_help.create_base_parser(prog='ansible-connection') + parser = opt_help.create_base_parser(prog=None) opt_help.add_verbosity_options(parser) parser.add_argument('playbook_pid') parser.add_argument('task_uuid') diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py index cf2c9dd901e..8b6dc88a3de 100755 --- a/lib/ansible/cli/vault.py +++ b/lib/ansible/cli/vault.py @@ -4,8 +4,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # PYTHON_ARGCOMPLETE_OK -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # ansible.cli needs to be 
imported first, to ensure the source bin/* scripts run that code first from ansible.cli import CLI @@ -26,7 +25,7 @@ display = Display() class VaultCLI(CLI): - ''' can encrypt any structured data file used by Ansible. + """ can encrypt any structured data file used by Ansible. This can include *group_vars/* or *host_vars/* inventory variables, variables loaded by *include_vars* or *vars_files*, or variable files passed on the ansible-playbook command line with *-e @file.yml* or *-e @file.json*. @@ -34,7 +33,7 @@ class VaultCLI(CLI): Because Ansible tasks, handlers, and other objects are data, these can also be encrypted with vault. If you'd like to not expose what variables you are using, you can keep an individual task file entirely encrypted. - ''' + """ name = 'ansible-vault' @@ -253,7 +252,7 @@ class VaultCLI(CLI): os.umask(old_umask) def execute_encrypt(self): - ''' encrypt the supplied file using the provided vault secret ''' + """ encrypt the supplied file using the provided vault secret """ if not context.CLIARGS['args'] and sys.stdin.isatty(): display.display("Reading plaintext input from stdin", stderr=True) @@ -287,7 +286,7 @@ class VaultCLI(CLI): return yaml_ciphertext def execute_encrypt_string(self): - ''' encrypt the supplied string using the provided vault secret ''' + """ encrypt the supplied string using the provided vault secret """ b_plaintext = None # Holds tuples (the_text, the_source_of_the_string, the variable name if its provided). @@ -432,7 +431,7 @@ class VaultCLI(CLI): return output def execute_decrypt(self): - ''' decrypt the supplied file using the provided vault secret ''' + """ decrypt the supplied file using the provided vault secret """ if not context.CLIARGS['args'] and sys.stdin.isatty(): display.display("Reading ciphertext input from stdin", stderr=True) @@ -444,7 +443,7 @@ class VaultCLI(CLI): display.display("Decryption successful", stderr=True) def execute_create(self): - ''' create and open a file in an editor that will be encrypted with the provided vault secret when closed''' + """ create and open a file in an editor that will be encrypted with the provided vault secret when closed""" if len(context.CLIARGS['args']) != 1: raise AnsibleOptionsError("ansible-vault create can take only one filename argument") @@ -456,12 +455,12 @@ class VaultCLI(CLI): raise AnsibleOptionsError("not a tty, editor cannot be opened") def execute_edit(self): - ''' open and decrypt an existing vaulted file in an editor, that will be encrypted again when closed''' + """ open and decrypt an existing vaulted file in an editor, that will be encrypted again when closed""" for f in context.CLIARGS['args']: self.editor.edit_file(f) def execute_view(self): - ''' open, decrypt and view an existing vaulted file using a pager using the supplied vault secret ''' + """ open, decrypt and view an existing vaulted file using a pager using the supplied vault secret """ for f in context.CLIARGS['args']: # Note: vault should return byte strings because it could encrypt @@ -473,7 +472,7 @@ class VaultCLI(CLI): self.pager(to_text(plaintext)) def execute_rekey(self): - ''' re-encrypt a vaulted file with a new secret, the previous secret is required ''' + """ re-encrypt a vaulted file with a new secret, the previous secret is required """ for f in context.CLIARGS['args']: # FIXME: plumb in vault_id, use the default new_vault_secret for now self.editor.rekey_file(f, self.new_encrypt_secret, diff --git a/lib/ansible/collections/list.py b/lib/ansible/collections/list.py index 
dd428e11f8b..473c56d0945 100644 --- a/lib/ansible/collections/list.py +++ b/lib/ansible/collections/list.py @@ -1,5 +1,6 @@ # (c) 2019 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import annotations from ansible.errors import AnsibleError from ansible.cli.galaxy import with_collection_artifacts_manager @@ -32,16 +33,31 @@ def list_collection_dirs(search_paths=None, coll_filter=None, artifacts_manager= namespace_filter = None collection_filter = None + has_pure_namespace_filter = False # whether at least one coll_filter is a namespace-only filter if coll_filter is not None: - if '.' in coll_filter: - try: - namespace_filter, collection_filter = coll_filter.split('.') - except ValueError: - raise AnsibleError("Invalid collection pattern supplied: %s" % coll_filter) - else: - namespace_filter = coll_filter + if isinstance(coll_filter, str): + coll_filter = [coll_filter] + namespace_filter = set() + for coll_name in coll_filter: + if '.' in coll_name: + try: + namespace, collection = coll_name.split('.') + except ValueError: + raise AnsibleError("Invalid collection pattern supplied: %s" % coll_name) + namespace_filter.add(namespace) + if not has_pure_namespace_filter: + if collection_filter is None: + collection_filter = [] + collection_filter.append(collection) + else: + namespace_filter.add(coll_name) + has_pure_namespace_filter = True + collection_filter = None + namespace_filter = sorted(namespace_filter) for req in find_existing_collections(search_paths, artifacts_manager, namespace_filter=namespace_filter, collection_filter=collection_filter, dedupe=dedupe): + if not has_pure_namespace_filter and coll_filter is not None and req.fqcn not in coll_filter: + continue yield to_bytes(req.src) diff --git a/lib/ansible/compat/__init__.py b/lib/ansible/compat/__init__.py index 2990c6f54c2..3f39dd4c6a0 100644 --- a/lib/ansible/compat/__init__.py +++ b/lib/ansible/compat/__init__.py @@ -15,12 +15,9 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <https://www.gnu.org/licenses/>. -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -''' +""" Compat library for ansible. This contains compatibility definitions for older python When we need to import a module differently depending on python version, do it here. Then in the code we can simply import from compat in order to get what we want. 
-''' +""" +from __future__ import annotations diff --git a/lib/ansible/compat/importlib_resources.py b/lib/ansible/compat/importlib_resources.py index ed104d6c5d0..0df95f0a518 100644 --- a/lib/ansible/compat/importlib_resources.py +++ b/lib/ansible/compat/importlib_resources.py @@ -1,8 +1,7 @@ # Copyright: Contributors to the Ansible project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import sys diff --git a/lib/ansible/compat/selectors/__init__.py b/lib/ansible/compat/selectors/__init__.py deleted file mode 100644 index a7b260e348a..00000000000 --- a/lib/ansible/compat/selectors/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -# (c) 2014, 2017 Toshio Kuratomi -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <https://www.gnu.org/licenses/>. - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# NOT_BUNDLED - -''' -Compat selectors library. Python-3.5 has this builtin. The selectors2 -package exists on pypi to backport the functionality as far as python-2.6. -Implementation previously resided here - maintaining this file after the -move to ansible.module_utils for code backwards compatibility. -''' -import sys -from ansible.module_utils.compat import selectors -sys.modules['ansible.compat.selectors'] = selectors diff --git a/lib/ansible/config/ansible_builtin_runtime.yml b/lib/ansible/config/ansible_builtin_runtime.yml index 570ccb051cf..3630e76ca43 100644 --- a/lib/ansible/config/ansible_builtin_runtime.yml +++ b/lib/ansible/config/ansible_builtin_runtime.yml @@ -9088,6 +9088,8 @@ plugin_routing: tombstone: removal_date: "2023-05-16" warning_text: Use include_tasks or import_tasks instead. + yum: + redirect: ansible.builtin.dnf become: doas: redirect: community.general.doas diff --git a/lib/ansible/config/base.yml b/lib/ansible/config/base.yml index ae846330fd5..24f9464d0a3 100644 --- a/lib/ansible/config/base.yml +++ b/lib/ansible/config/base.yml @@ -1,6 +1,14 @@ # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) --- +_ANSIBLE_CONNECTION_PATH: + env: + - name: _ANSIBLE_CONNECTION_PATH + name: Overrides the location of the Ansible persistent connection helper script. + description: + - For internal use only. + type: path + version_added: "2.18" ANSIBLE_HOME: name: The Ansible home path description: @@ -25,17 +33,20 @@ ANSIBLE_CONNECTION_PATH: - {key: ansible_connection_path, section: persistent_connection} yaml: {key: persistent_connection.ansible_connection_path} version_added: "2.8" + deprecated: + why: This setting has no effect. 
+ version: "2.22" ANSIBLE_COW_SELECTION: name: Cowsay filter selection default: default - description: This allows you to chose a specific cowsay stencil for the banners or use 'random' to cycle through them. + description: This allows you to choose a specific cowsay stencil for the banners or use 'random' to cycle through them. env: [{name: ANSIBLE_COW_SELECTION}] ini: - {key: cow_selection, section: defaults} ANSIBLE_COW_ACCEPTLIST: name: Cowsay filter acceptance list default: ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'dragon', 'elephant-in-snake', 'elephant', 'eyes', 'hellokitty', 'kitty', 'luke-koala', 'meow', 'milk', 'moofasa', 'moose', 'ren', 'sheep', 'small', 'stegosaurus', 'stimpy', 'supermilker', 'three-eyes', 'turkey', 'turtle', 'tux', 'udder', 'vader-koala', 'vader', 'www'] - description: Accept list of cowsay templates that are 'safe' to use, set to empty list if you want to enable all installed templates. + description: Accept a list of cowsay templates that are 'safe' to use, set to an empty list if you want to enable all installed templates. env: - name: ANSIBLE_COW_ACCEPTLIST version_added: '2.11' @@ -78,7 +89,7 @@ ANSIBLE_NOCOWS: ANSIBLE_COW_PATH: name: Set path to cowsay command default: null - description: Specify a custom cowsay path or swap in your cowsay implementation of choice + description: Specify a custom cowsay path or swap in your cowsay implementation of choice. env: [{name: ANSIBLE_COW_PATH}] ini: - {key: cowpath, section: defaults} @@ -119,8 +130,9 @@ BECOME_ALLOW_SAME_USER: name: Allow becoming the same user default: False description: - - This setting controls if become is skipped when remote user and become user are the same. I.E root sudo to root. - - If executable, it will be run and the resulting stdout will be used as the password. + - When ``False`` (default), Ansible will skip using become if the remote user is the same as the become user, as this is normally a redundant operation. In other words, root sudo to root. + - If ``True``, this forces Ansible to use the become plugin anyway, as there are cases in which this is needed. env: [{name: ANSIBLE_BECOME_ALLOW_SAME_USER}] ini: - {key: become_allow_same_user, section: privilege_escalation} @@ -130,7 +142,7 @@ BECOME_PASSWORD_FILE: name: Become password file default: ~ description: - - 'The password file to use for the become plugin. --become-password-file.' + - 'The password file to use for the become plugin. ``--become-password-file``.' - If executable, it will be run and the resulting stdout will be used as the password. env: [{name: ANSIBLE_BECOME_PASSWORD_FILE}] ini: @@ -141,7 +153,7 @@ AGNOSTIC_BECOME_PROMPT: name: Display an agnostic become prompt default: True type: boolean - description: Display an agnostic become prompt instead of displaying a prompt containing the command line supplied become method + description: Display an agnostic become prompt instead of displaying a prompt containing the command line supplied become method. env: [{name: ANSIBLE_AGNOSTIC_BECOME_PROMPT}] ini: - {key: agnostic_become_prompt, section: privilege_escalation} @@ -158,7 +170,7 @@ CACHE_PLUGIN: CACHE_PLUGIN_CONNECTION: name: Cache Plugin URI default: ~ - description: Defines connection or path information for the cache plugin + description: Defines connection or path information for the cache plugin. 
env: [{name: ANSIBLE_CACHE_PLUGIN_CONNECTION}] ini: - {key: fact_caching_connection, section: defaults} @@ -166,7 +178,7 @@ CACHE_PLUGIN_CONNECTION: CACHE_PLUGIN_PREFIX: name: Cache Plugin table prefix default: ansible_facts - description: Prefix to use for cache plugin files/tables + description: Prefix to use for cache plugin files/tables. env: [{name: ANSIBLE_CACHE_PLUGIN_PREFIX}] ini: - {key: fact_caching_prefix, section: defaults} @@ -174,7 +186,7 @@ CACHE_PLUGIN_PREFIX: CACHE_PLUGIN_TIMEOUT: name: Cache Plugin expiration timeout default: 86400 - description: Expiration timeout for the cache plugin data + description: Expiration timeout for the cache plugin data. env: [{name: ANSIBLE_CACHE_PLUGIN_TIMEOUT}] ini: - {key: fact_caching_timeout, section: defaults} @@ -182,7 +194,7 @@ CACHE_PLUGIN_TIMEOUT: yaml: {key: facts.cache.timeout} COLLECTIONS_SCAN_SYS_PATH: name: Scan PYTHONPATH for installed collections - description: A boolean to enable or disable scanning the sys.path for installed collections + description: A boolean to enable or disable scanning the sys.path for installed collections. default: true type: boolean env: @@ -190,9 +202,9 @@ COLLECTIONS_SCAN_SYS_PATH: ini: - {key: collections_scan_sys_path, section: defaults} COLLECTIONS_PATHS: - name: ordered list of root paths for loading installed Ansible collections content + name: An ordered list of root paths for loading installed Ansible collections content. description: > - Colon separated paths in which Ansible will search for collections content. + Colon-separated paths in which Ansible will search for collections content. Collections must be in nested *subdirectories*, not directly in these directories. For example, if ``COLLECTIONS_PATHS`` includes ``'{{ ANSIBLE_HOME ~ "/collections" }}'``, and you want to add ``my.collection`` to that directory, it must be saved as @@ -200,18 +212,9 @@ COLLECTIONS_PATHS: default: '{{ ANSIBLE_HOME ~ "/collections:/usr/share/ansible/collections" }}' type: pathspec env: - - name: ANSIBLE_COLLECTIONS_PATHS - deprecated: - why: does not fit var naming standard, use the singular form ANSIBLE_COLLECTIONS_PATH instead - version: "2.19" - name: ANSIBLE_COLLECTIONS_PATH version_added: '2.10' ini: - - key: collections_paths - section: defaults - deprecated: - why: does not fit var naming standard, use the singular form collections_path instead - version: "2.19" - key: collections_path section: defaults version_added: '2.10' @@ -229,14 +232,14 @@ COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH: COLOR_CHANGED: name: Color for 'changed' task status default: yellow - description: Defines the color to use on 'Changed' task status + description: Defines the color to use on 'Changed' task status. env: [{name: ANSIBLE_COLOR_CHANGED}] ini: - {key: changed, section: colors} COLOR_CONSOLE_PROMPT: name: "Color for ansible-console's prompt task status" default: white - description: Defines the default color to use for ansible-console + description: Defines the default color to use for ansible-console. env: [{name: ANSIBLE_COLOR_CONSOLE_PROMPT}] ini: - {key: console_prompt, section: colors} @@ -244,21 +247,21 @@ COLOR_CONSOLE_PROMPT: COLOR_DEBUG: name: Color for debug statements default: dark gray - description: Defines the color to use when emitting debug messages + description: Defines the color to use when emitting debug messages. 
env: [{name: ANSIBLE_COLOR_DEBUG}] ini: - {key: debug, section: colors} COLOR_DEPRECATE: name: Color for deprecation messages default: purple - description: Defines the color to use when emitting deprecation messages + description: Defines the color to use when emitting deprecation messages. env: [{name: ANSIBLE_COLOR_DEPRECATE}] ini: - {key: deprecate, section: colors} COLOR_DIFF_ADD: name: Color for diff added display default: green - description: Defines the color to use when showing added lines in diffs + description: Defines the color to use when showing added lines in diffs. env: [{name: ANSIBLE_COLOR_DIFF_ADD}] ini: - {key: diff_add, section: colors} @@ -266,21 +269,21 @@ COLOR_DIFF_ADD: COLOR_DIFF_LINES: name: Color for diff lines display default: cyan - description: Defines the color to use when showing diffs + description: Defines the color to use when showing diffs. env: [{name: ANSIBLE_COLOR_DIFF_LINES}] ini: - {key: diff_lines, section: colors} COLOR_DIFF_REMOVE: name: Color for diff removed display default: red - description: Defines the color to use when showing removed lines in diffs + description: Defines the color to use when showing removed lines in diffs. env: [{name: ANSIBLE_COLOR_DIFF_REMOVE}] ini: - {key: diff_remove, section: colors} COLOR_ERROR: name: Color for error messages default: red - description: Defines the color to use when emitting error messages + description: Defines the color to use when emitting error messages. env: [{name: ANSIBLE_COLOR_ERROR}] ini: - {key: error, section: colors} @@ -288,49 +291,105 @@ COLOR_ERROR: COLOR_HIGHLIGHT: name: Color for highlighting default: white - description: Defines the color to use for highlighting + description: Defines the color to use for highlighting. env: [{name: ANSIBLE_COLOR_HIGHLIGHT}] ini: - {key: highlight, section: colors} +COLOR_INCLUDED: + name: Color for 'included' task status + default: cyan + description: Defines the color to use when showing 'Included' task status. + env: [{name: ANSIBLE_COLOR_INCLUDED}] + ini: + - {key: included, section: colors} + version_added: '2.18' COLOR_OK: name: Color for 'ok' task status default: green - description: Defines the color to use when showing 'OK' task status + description: Defines the color to use when showing 'OK' task status. env: [{name: ANSIBLE_COLOR_OK}] ini: - {key: ok, section: colors} COLOR_SKIP: name: Color for 'skip' task status default: cyan - description: Defines the color to use when showing 'Skipped' task status + description: Defines the color to use when showing 'Skipped' task status. env: [{name: ANSIBLE_COLOR_SKIP}] ini: - {key: skip, section: colors} COLOR_UNREACHABLE: name: Color for 'unreachable' host state default: bright red - description: Defines the color to use on 'Unreachable' status + description: Defines the color to use on 'Unreachable' status. env: [{name: ANSIBLE_COLOR_UNREACHABLE}] ini: - {key: unreachable, section: colors} COLOR_VERBOSE: name: Color for verbose messages default: blue - description: Defines the color to use when emitting verbose messages. i.e those that show with '-v's. + description: Defines the color to use when emitting verbose messages. In other words, those that show with '-v's. env: [{name: ANSIBLE_COLOR_VERBOSE}] ini: - {key: verbose, section: colors} COLOR_WARN: name: Color for warning messages default: bright purple - description: Defines the color to use when emitting warning messages + description: Defines the color to use when emitting warning messages. 
env: [{name: ANSIBLE_COLOR_WARN}] ini: - {key: warn, section: colors} +COLOR_DOC_MODULE: + name: Color for module name in the ansible-doc output + default: yellow + description: Defines the color to use when emitting a module name in the ansible-doc output. + env: [{name: ANSIBLE_COLOR_DOC_MODULE}] + ini: + - {key: doc_module, section: colors} + version_added: '2.18' +COLOR_DOC_REFERENCE: + name: Color for cross-reference in the ansible-doc output + default: magenta + description: Defines the color to use when emitting a cross-reference in the ansible-doc output. + env: [{name: ANSIBLE_COLOR_DOC_REFERENCE}] + ini: + - {key: doc_reference, section: colors} + version_added: '2.18' +COLOR_DOC_LINK: + name: Color for link in ansible-doc output + default: cyan + description: Defines the color to use when emitting a link in the ansible-doc output. + env: [{name: ANSIBLE_COLOR_DOC_LINK}] + ini: + - {key: doc_link, section: colors} + version_added: '2.18' +COLOR_DOC_DEPRECATED: + name: Color for deprecated value in ansible-doc output + default: magenta + description: Defines the color to use when emitting a deprecated value in the ansible-doc output. + env: [{name: ANSIBLE_COLOR_DOC_DEPRECATED}] + ini: + - {key: doc_deprecated, section: colors} + version_added: '2.18' +COLOR_DOC_CONSTANT: + name: Color for constant in ansible-doc output + default: dark gray + description: Defines the color to use when emitting a constant in the ansible-doc output. + env: [{name: ANSIBLE_COLOR_DOC_CONSTANT}] + ini: + - {key: doc_constant, section: colors} + version_added: '2.18' +COLOR_DOC_PLUGIN: + name: Color for the plugin in ansible-doc output + default: yellow + description: Defines the color to use when emitting a plugin name in the ansible-doc output. + env: [{name: ANSIBLE_COLOR_DOC_PLUGIN}] + ini: + - {key: doc_plugin, section: colors} + version_added: '2.18' CONNECTION_PASSWORD_FILE: name: Connection password file default: ~ - description: 'The password file to use for the connection plugin. --connection-password-file.' + description: 'The password file to use for the connection plugin. ``--connection-password-file``.' env: [{name: ANSIBLE_CONNECTION_PASSWORD_FILE}] ini: - {key: connection_password_file, section: defaults} @@ -339,7 +398,7 @@ CONNECTION_PASSWORD_FILE: COVERAGE_REMOTE_OUTPUT: name: Sets the output directory and filename prefix to generate coverage run info. description: - - Sets the output directory on the remote host to generate coverage reports to. + - Sets the output directory on the remote host to generate coverage reports into. - Currently only used for remote coverage on PowerShell modules. - This is for internal use only. env: @@ -352,7 +411,7 @@ COVERAGE_REMOTE_PATHS: name: Sets the list of paths to run coverage for. description: - A list of paths for files on the Ansible controller to run coverage for when executing on the remote host. - - Only files that match the path glob will have its coverage collected. + - Only files that match the path glob will have their coverage collected. - Multiple path globs can be specified and are separated by ``:``. - Currently only used for remote coverage on PowerShell modules. - This is for internal use only. env: @@ -365,7 +424,7 @@ ACTION_WARNINGS: name: Toggle action warnings default: True description: - - By default Ansible will issue a warning when received from a task action (module or action plugin) + - By default, Ansible will issue a warning when one is received from a task action (module or action plugin). 
- These warnings can be silenced by adjusting this setting to False. env: [{name: ANSIBLE_ACTION_WARNINGS}] ini: @@ -376,7 +435,7 @@ LOCALHOST_WARNING: name: Warning when using implicit inventory with only localhost default: True description: - - By default Ansible will issue a warning when there are no hosts in the + - By default, Ansible will issue a warning when there are no hosts in the inventory. - These warnings can be silenced by adjusting this setting to False. env: [{name: ANSIBLE_LOCALHOST_WARNING}] @@ -384,11 +443,20 @@ LOCALHOST_WARNING: - {key: localhost_warning, section: defaults} type: boolean version_added: "2.6" +LOG_VERBOSITY: + name: Default log verbosity + description: + - This will set log verbosity if higher than the normal display verbosity, otherwise it will match that. + env: [{name: ANSIBLE_LOG_VERBOSITY}] + ini: + - {key: log_verbosity, section: defaults} + type: int + version_added: "2.17" INVENTORY_UNPARSED_WARNING: name: Warning when no inventory files can be parsed, resulting in an implicit inventory with only localhost default: True description: - - By default Ansible will issue a warning when no inventory was loaded and notes that + - By default, Ansible will issue a warning when no inventory was loaded and notes that it will use an implicit localhost-only inventory. - These warnings can be silenced by adjusting this setting to False. env: [{name: ANSIBLE_INVENTORY_UNPARSED_WARNING}] @@ -399,7 +467,7 @@ INVENTORY_UNPARSED_WARNING: DOC_FRAGMENT_PLUGIN_PATH: name: documentation fragment plugins path default: '{{ ANSIBLE_HOME ~ "/plugins/doc_fragments:/usr/share/ansible/plugins/doc_fragments" }}' - description: Colon separated paths in which Ansible will search for Documentation Fragments Plugins. + description: Colon-separated paths in which Ansible will search for Documentation Fragments Plugins. env: [{name: ANSIBLE_DOC_FRAGMENT_PLUGINS}] ini: - {key: doc_fragment_plugins, section: defaults} @@ -407,7 +475,7 @@ DOC_FRAGMENT_PLUGIN_PATH: DEFAULT_ACTION_PLUGIN_PATH: name: Action plugins path default: '{{ ANSIBLE_HOME ~ "/plugins/action:/usr/share/ansible/plugins/action" }}' - description: Colon separated paths in which Ansible will search for Action Plugins. + description: Colon-separated paths in which Ansible will search for Action Plugins. env: [{name: ANSIBLE_ACTION_PLUGINS}] ini: - {key: action_plugins, section: defaults} @@ -421,8 +489,8 @@ DEFAULT_ALLOW_UNSAFE_LOOKUPS: to return data that is not marked 'unsafe'." - By default, such data is marked as unsafe to prevent the templating engine from evaluating any jinja2 templating language, as this could represent a security risk. This option is provided to allow for backward compatibility, - however users should first consider adding allow_unsafe=True to any lookups which may be expected to contain data which may be run - through the templating engine late + however, users should first consider adding allow_unsafe=True to any lookups that may be expected to contain data that may be run + through the templating engine late. env: [] ini: - {key: allow_unsafe_lookups, section: defaults} @@ -474,7 +542,7 @@ DEFAULT_BECOME_METHOD: DEFAULT_BECOME_EXE: name: Choose 'become' executable default: ~ - description: 'executable to use for privilege escalation, otherwise Ansible will depend on PATH' + description: 'executable to use for privilege escalation, otherwise Ansible will depend on PATH.' 
env: [{name: ANSIBLE_BECOME_EXE}] ini: - {key: become_exe, section: privilege_escalation} @@ -488,7 +556,7 @@ DEFAULT_BECOME_FLAGS: BECOME_PLUGIN_PATH: name: Become plugins path default: '{{ ANSIBLE_HOME ~ "/plugins/become:/usr/share/ansible/plugins/become" }}' - description: Colon separated paths in which Ansible will search for Become Plugins. + description: Colon-separated paths in which Ansible will search for Become Plugins. env: [{name: ANSIBLE_BECOME_PLUGINS}] ini: - {key: become_plugins, section: defaults} @@ -506,7 +574,7 @@ DEFAULT_BECOME_USER: DEFAULT_CACHE_PLUGIN_PATH: name: Cache Plugins Path default: '{{ ANSIBLE_HOME ~ "/plugins/cache:/usr/share/ansible/plugins/cache" }}' - description: Colon separated paths in which Ansible will search for Cache Plugins. + description: Colon-separated paths in which Ansible will search for Cache Plugins. env: [{name: ANSIBLE_CACHE_PLUGINS}] ini: - {key: cache_plugins, section: defaults} @@ -514,7 +582,7 @@ DEFAULT_CACHE_PLUGIN_PATH: DEFAULT_CALLBACK_PLUGIN_PATH: name: Callback Plugins Path default: '{{ ANSIBLE_HOME ~ "/plugins/callback:/usr/share/ansible/plugins/callback" }}' - description: Colon separated paths in which Ansible will search for Callback Plugins. + description: Colon-separated paths in which Ansible will search for Callback Plugins. env: [{name: ANSIBLE_CALLBACK_PLUGINS}] ini: - {key: callback_plugins, section: defaults} @@ -537,7 +605,7 @@ CALLBACKS_ENABLED: DEFAULT_CLICONF_PLUGIN_PATH: name: Cliconf Plugins Path default: '{{ ANSIBLE_HOME ~ "/plugins/cliconf:/usr/share/ansible/plugins/cliconf" }}' - description: Colon separated paths in which Ansible will search for Cliconf Plugins. + description: Colon-separated paths in which Ansible will search for Cliconf Plugins. env: [{name: ANSIBLE_CLICONF_PLUGINS}] ini: - {key: cliconf_plugins, section: defaults} @@ -545,7 +613,7 @@ DEFAULT_CLICONF_PLUGIN_PATH: DEFAULT_CONNECTION_PLUGIN_PATH: name: Connection Plugins Path default: '{{ ANSIBLE_HOME ~ "/plugins/connection:/usr/share/ansible/plugins/connection" }}' - description: Colon separated paths in which Ansible will search for Connection Plugins. + description: Colon-separated paths in which Ansible will search for Connection Plugins. env: [{name: ANSIBLE_CONNECTION_PLUGINS}] ini: - {key: connection_plugins, section: defaults} @@ -556,7 +624,7 @@ DEFAULT_DEBUG: default: False description: - "Toggles debug output in Ansible. This is *very* verbose and can hinder - multiprocessing. Debug output can also include secret information + multiprocessing. Debug output can also include secret information despite no_log settings being enabled, which means debug mode should not be used in production." env: [{name: ANSIBLE_DEBUG}] @@ -567,33 +635,15 @@ DEFAULT_EXECUTABLE: name: Target shell executable default: /bin/sh description: - - "This indicates the command to use to spawn a shell under for Ansible's execution needs on a target. - Users may need to change this in rare instances when shell usage is constrained, but in most cases it may be left as is." + - "This indicates the command to use to spawn a shell under, which is required for Ansible's execution needs on a target. + Users may need to change this in rare instances when shell usage is constrained, but in most cases, it may be left as is." 
env: [{name: ANSIBLE_EXECUTABLE}] ini: - {key: executable, section: defaults} -DEFAULT_FACT_PATH: - name: local fact path - description: - - "This option allows you to globally configure a custom path for 'local_facts' for the implied :ref:`ansible_collections.ansible.builtin.setup_module` task when using fact gathering." - - "If not set, it will fallback to the default from the ``ansible.builtin.setup`` module: ``/etc/ansible/facts.d``." - - "This does **not** affect user defined tasks that use the ``ansible.builtin.setup`` module." - - The real action being created by the implicit task is currently ``ansible.legacy.gather_facts`` module, which then calls the configured fact modules, - by default this will be ``ansible.builtin.setup`` for POSIX systems but other platforms might have different defaults. - env: [{name: ANSIBLE_FACT_PATH}] - ini: - - {key: fact_path, section: defaults} - type: string - deprecated: - # TODO: when removing set playbook/play.py to default=None - why: the module_defaults keyword is a more generic version and can apply to all calls to the - M(ansible.builtin.gather_facts) or M(ansible.builtin.setup) actions - version: "2.18" - alternatives: module_defaults DEFAULT_FILTER_PLUGIN_PATH: name: Jinja2 Filter Plugins Path default: '{{ ANSIBLE_HOME ~ "/plugins/filter:/usr/share/ansible/plugins/filter" }}' - description: Colon separated paths in which Ansible will search for Jinja2 Filter Plugins. + description: Colon-separated paths in which Ansible will search for Jinja2 Filter Plugins. env: [{name: ANSIBLE_FILTER_PLUGINS}] ini: - {key: filter_plugins, section: defaults} @@ -633,39 +683,6 @@ DEFAULT_GATHERING: implicit: "the cache plugin will be ignored and facts will be gathered per play unless 'gather_facts: False' is set." explicit: facts will not be gathered unless directly requested in the play. smart: each new host that has no facts discovered will be scanned, but if the same host is addressed in multiple plays it will not be contacted again in the run. -DEFAULT_GATHER_SUBSET: - name: Gather facts subset - description: - - Set the `gather_subset` option for the :ref:`ansible_collections.ansible.builtin.setup_module` task in the implicit fact gathering. - See the module documentation for specifics. - - "It does **not** apply to user defined ``ansible.builtin.setup`` tasks." - env: [{name: ANSIBLE_GATHER_SUBSET}] - ini: - - key: gather_subset - section: defaults - version_added: "2.1" - type: list - deprecated: - # TODO: when removing set playbook/play.py to default=None - why: the module_defaults keyword is a more generic version and can apply to all calls to the - M(ansible.builtin.gather_facts) or M(ansible.builtin.setup) actions - version: "2.18" - alternatives: module_defaults -DEFAULT_GATHER_TIMEOUT: - name: Gather facts timeout - description: - - Set the timeout in seconds for the implicit fact gathering, see the module documentation for specifics. - - "It does **not** apply to user defined :ref:`ansible_collections.ansible.builtin.setup_module` tasks." 
- env: [{name: ANSIBLE_GATHER_TIMEOUT}] - ini: - - {key: gather_timeout, section: defaults} - type: integer - deprecated: - # TODO: when removing set playbook/play.py to default=None - why: the module_defaults keyword is a more generic version and can apply to all calls to the - M(ansible.builtin.gather_facts) or M(ansible.builtin.setup) actions - version: "2.18" - alternatives: module_defaults DEFAULT_HASH_BEHAVIOUR: name: Hash merge behaviour default: replace @@ -676,10 +693,10 @@ DEFAULT_HASH_BEHAVIOUR: description: - This setting controls how duplicate definitions of dictionary variables (aka hash, map, associative array) are handled in Ansible. - This does not affect variables whose values are scalars (integers, strings) or arrays. - - "**WARNING**, changing this setting is not recommended as this is fragile and makes your content (plays, roles, collections) non portable, + - "**WARNING**, changing this setting is not recommended as this is fragile and makes your content (plays, roles, collections) nonportable, leading to continual confusion and misuse. Don't change this setting unless you think you have an absolute need for it." - We recommend avoiding reusing variable names and relying on the ``combine`` filter and ``vars`` and ``varnames`` lookups - to create merged versions of the individual variables. In our experience this is rarely really needed and a sign that too much + to create merged versions of the individual variables. In our experience, this is rarely needed and is a sign that too much complexity has been introduced into the data structures and plays. - For some uses you can also look into custom vars_plugins to merge on input, even substituting the default ``host_group_vars`` that is in charge of parsing the ``host_vars/`` and ``group_vars/`` directories. Most users of this setting are only interested in inventory scope, @@ -696,7 +713,7 @@ DEFAULT_HASH_BEHAVIOUR: DEFAULT_HOST_LIST: name: Inventory Source default: /etc/ansible/hosts - description: Comma separated list of Ansible inventory sources + description: Comma-separated list of Ansible inventory sources env: - name: ANSIBLE_INVENTORY expand_relative_paths: True @@ -708,7 +725,7 @@ DEFAULT_HOST_LIST: DEFAULT_HTTPAPI_PLUGIN_PATH: name: HttpApi Plugins Path default: '{{ ANSIBLE_HOME ~ "/plugins/httpapi:/usr/share/ansible/plugins/httpapi" }}' - description: Colon separated paths in which Ansible will search for HttpApi Plugins. + description: Colon-separated paths in which Ansible will search for HttpApi Plugins. env: [{name: ANSIBLE_HTTPAPI_PLUGINS}] ini: - {key: httpapi_plugins, section: defaults} @@ -724,13 +741,13 @@ DEFAULT_INTERNAL_POLL_INTERVAL: description: - This sets the interval (in seconds) of Ansible internal processes polling each other. Lower values improve performance with large playbooks at the expense of extra CPU load. - Higher values are more suitable for Ansible usage in automation scenarios, + Higher values are more suitable for Ansible usage in automation scenarios when UI responsiveness is not required but CPU usage might be a concern. - "The default corresponds to the value hardcoded in Ansible <= 2.1" DEFAULT_INVENTORY_PLUGIN_PATH: name: Inventory Plugins Path default: '{{ ANSIBLE_HOME ~ "/plugins/inventory:/usr/share/ansible/plugins/inventory" }}' - description: Colon separated paths in which Ansible will search for Inventory Plugins. + description: Colon-separated paths in which Ansible will search for Inventory Plugins. 
env: [{name: ANSIBLE_INVENTORY_PLUGINS}] ini: - {key: inventory_plugins, section: defaults} @@ -765,11 +782,10 @@ DEFAULT_KEEP_REMOTE_FILES: - {key: keep_remote_files, section: defaults} type: boolean DEFAULT_LIBVIRT_LXC_NOSECLABEL: - # TODO: move to plugin name: No security label on Lxc default: False description: - - "This setting causes libvirt to connect to lxc containers by passing --noseclabel to virsh. + - "This setting causes libvirt to connect to LXC containers by passing the ``--noseclabel`` parameter to the ``virsh`` command. This is necessary when running on systems which do not have SELinux." env: - name: ANSIBLE_LIBVIRT_LXC_NOSECLABEL @@ -777,6 +793,10 @@ - {key: libvirt_lxc_noseclabel, section: selinux} type: boolean version_added: "2.1" + deprecated: + why: This option was moved to the plugin itself + version: "2.22" + alternatives: Use the option from the plugin itself. DEFAULT_LOAD_CALLBACK_PLUGINS: name: Load callbacks for adhoc default: False @@ -800,7 +820,9 @@ DEFAULT_LOCAL_TMP: DEFAULT_LOG_PATH: name: Ansible log file path default: ~ - description: File to which Ansible will log on the controller. When empty logging is disabled. + description: + - File to which Ansible will log on the controller. + - When not set, logging is disabled. env: [{name: ANSIBLE_LOG_PATH}] ini: - {key: log_path, section: defaults} @@ -808,14 +830,14 @@ DEFAULT_LOG_FILTER: name: Name filters for python logger default: [] - description: List of logger names to filter out of the log file + description: List of logger names to filter out of the log file. env: [{name: ANSIBLE_LOG_FILTER}] ini: - {key: log_filter, section: defaults} type: list DEFAULT_LOOKUP_PLUGIN_PATH: name: Lookup Plugins Path - description: Colon separated paths in which Ansible will search for Lookup Plugins. + description: Colon-separated paths in which Ansible will search for Lookup Plugins. default: '{{ ANSIBLE_HOME ~ "/plugins/lookup:/usr/share/ansible/plugins/lookup" }}' env: [{name: ANSIBLE_LOOKUP_PLUGINS}] ini: @@ -825,7 +847,7 @@ DEFAULT_MANAGED_STR: name: Ansible managed default: 'Ansible managed' - description: Sets the macro for the 'ansible_managed' variable available for :ref:`ansible_collections.ansible.builtin.template_module` and :ref:`ansible_collections.ansible.windows.win_template_module`. This is only relevant for those two modules. + description: Sets the macro for the 'ansible_managed' variable available for :ref:`ansible_collections.ansible.builtin.template_module` and :ref:`ansible_collections.ansible.windows.win_template_module`. This is only relevant to those two modules. env: [] ini: - {key: ansible_managed, section: defaults} @@ -845,8 +867,8 @@ DEFAULT_MODULE_COMPRESSION: env: [] ini: - {key: module_compression, section: defaults} -# vars: -# - name: ansible_module_compression + vars: + - name: ansible_module_compression DEFAULT_MODULE_NAME: name: Default adhoc module default: command @@ -856,7 +878,7 @@ - {key: module_name, section: defaults} DEFAULT_MODULE_PATH: name: Modules Path - description: Colon separated paths in which Ansible will search for Modules. + description: Colon-separated paths in which Ansible will search for Modules. 
default: '{{ ANSIBLE_HOME ~ "/plugins/modules:/usr/share/ansible/plugins/modules" }}' env: [{name: ANSIBLE_LIBRARY}] ini: @@ -864,7 +886,7 @@ type: pathspec DEFAULT_MODULE_UTILS_PATH: name: Module Utils Path - description: Colon separated paths in which Ansible will search for Module utils files, which are shared by modules. + description: Colon-separated paths in which Ansible will search for Module utils files, which are shared by modules. default: '{{ ANSIBLE_HOME ~ "/plugins/module_utils:/usr/share/ansible/plugins/module_utils" }}' env: [{name: ANSIBLE_MODULE_UTILS}] ini: @@ -873,7 +895,7 @@ DEFAULT_NETCONF_PLUGIN_PATH: name: Netconf Plugins Path default: '{{ ANSIBLE_HOME ~ "/plugins/netconf:/usr/share/ansible/plugins/netconf" }}' - description: Colon separated paths in which Ansible will search for Netconf Plugins. + description: Colon-separated paths in which Ansible will search for Netconf Plugins. env: [{name: ANSIBLE_NETCONF_PLUGINS}] ini: - {key: netconf_plugins, section: defaults} @@ -890,7 +912,7 @@ DEFAULT_NO_TARGET_SYSLOG: name: No syslog on target default: False description: - - Toggle Ansible logging to syslog on the target when it executes tasks. On Windows hosts this will disable a newer + - Toggle Ansible logging to syslog on the target when it executes tasks. On Windows hosts, this will disable a newer style PowerShell modules from writing to the event log. env: [{name: ANSIBLE_NO_TARGET_SYSLOG}] ini: @@ -925,7 +947,7 @@ DEFAULT_PRIVATE_KEY_FILE: default: ~ description: - Option for connections using a certificate or key file to authenticate, rather than an agent or passwords, - you can set the default value here to avoid re-specifying --private-key with every invocation. + you can set the default value here to avoid re-specifying ``--private-key`` with every invocation. env: [{name: ANSIBLE_PRIVATE_KEY_FILE}] ini: - {key: private_key_file, section: defaults} @@ -934,9 +956,12 @@ DEFAULT_PRIVATE_ROLE_VARS: name: Private role variables default: False description: - - Makes role variables inaccessible from other roles. - - This was introduced as a way to reset role variables to default values if - a role is used more than once in a playbook. + - By default, imported roles publish their variables to the play and other roles; this setting can avoid that. + - This was introduced as a way to reset role variables to default values if a role is used more than once + in a playbook. + - Starting in version '2.17', M(ansible.builtin.include_role) and M(ansible.builtin.import_role) can + individually override this via the C(public) parameter. + - Included roles only make their variables public at execution, unlike imported roles, which do so at playbook compile time. env: [{name: ANSIBLE_PRIVATE_ROLE_VARS}] ini: - {key: private_role_vars, section: defaults} @@ -962,7 +987,7 @@ DEFAULT_REMOTE_USER: DEFAULT_ROLES_PATH: name: Roles path default: '{{ ANSIBLE_HOME ~ "/roles:/usr/share/ansible/roles:/etc/ansible/roles" }}' - description: Colon separated paths in which Ansible will search for Roles. + description: Colon-separated paths in which Ansible will search for Roles. env: [{name: ANSIBLE_ROLES_PATH}] expand_relative_paths: True ini: @@ -974,7 +999,7 @@ DEFAULT_SELINUX_SPECIAL_FS: default: fuse, nfs, vboxsf, ramfs, 9p, vfat description: - "Some filesystems do not support safe operations and/or return inconsistent errors, this setting makes Ansible 'tolerate' those in the list w/o causing fatal errors."
+ this setting makes Ansible 'tolerate' those in the list without causing fatal errors." - Data corruption may occur and writes are not always verified when a filesystem is in the list. env: - name: ANSIBLE_SELINUX_SPECIAL_FS @@ -993,10 +1018,10 @@ DEFAULT_STDOUT_CALLBACK: ini: - {key: stdout_callback, section: defaults} EDITOR: - name: editor application touse + name: editor application to use default: vi - descrioption: - - for the cases in which Ansible needs to return a file within an editor, this chooses the application to use + description: + - for the cases in which Ansible needs to return a file within an editor, this chooses the application to use. ini: - section: defaults key: editor @@ -1023,7 +1048,7 @@ TASK_DEBUGGER_IGNORE_ERRORS: description: - This option defines whether the task debugger will be invoked on a failed task when ignore_errors=True is specified. - - True specifies that the debugger will honor ignore_errors, False will not honor ignore_errors. + - True specifies that the debugger will honor ignore_errors, and False will not honor ignore_errors. type: boolean env: [{name: ANSIBLE_TASK_DEBUGGER_IGNORE_ERRORS}] ini: @@ -1039,7 +1064,7 @@ DEFAULT_STRATEGY: version_added: "2.3" DEFAULT_STRATEGY_PLUGIN_PATH: name: Strategy Plugins Path - description: Colon separated paths in which Ansible will search for Strategy Plugins. + description: Colon-separated paths in which Ansible will search for Strategy Plugins. default: '{{ ANSIBLE_HOME ~ "/plugins/strategy:/usr/share/ansible/plugins/strategy" }}' env: [{name: ANSIBLE_STRATEGY_PLUGINS}] ini: @@ -1056,21 +1081,21 @@ DEFAULT_SU: DEFAULT_SYSLOG_FACILITY: name: syslog facility default: LOG_USER - description: Syslog facility to use when Ansible logs to the remote target + description: Syslog facility to use when Ansible logs to the remote target. env: [{name: ANSIBLE_SYSLOG_FACILITY}] ini: - {key: syslog_facility, section: defaults} DEFAULT_TERMINAL_PLUGIN_PATH: name: Terminal Plugins Path default: '{{ ANSIBLE_HOME ~ "/plugins/terminal:/usr/share/ansible/plugins/terminal" }}' - description: Colon separated paths in which Ansible will search for Terminal Plugins. + description: Colon-separated paths in which Ansible will search for Terminal Plugins. env: [{name: ANSIBLE_TERMINAL_PLUGINS}] ini: - {key: terminal_plugins, section: defaults} type: pathspec DEFAULT_TEST_PLUGIN_PATH: name: Jinja2 Test Plugins Path - description: Colon separated paths in which Ansible will search for Jinja2 Test Plugins. + description: Colon-separated paths in which Ansible will search for Jinja2 Test Plugins. default: '{{ ANSIBLE_HOME ~ "/plugins/test:/usr/share/ansible/plugins/test" }}' env: [{name: ANSIBLE_TEST_PLUGINS}] ini: @@ -1107,7 +1132,7 @@ DEFAULT_UNDEFINED_VAR_BEHAVIOR: DEFAULT_VARS_PLUGIN_PATH: name: Vars Plugins Path default: '{{ ANSIBLE_HOME ~ "/plugins/vars:/usr/share/ansible/plugins/vars" }}' - description: Colon separated paths in which Ansible will search for Vars Plugins. + description: Colon-separated paths in which Ansible will search for Vars Plugins. env: [{name: ANSIBLE_VARS_PLUGINS}] ini: - {key: vars_plugins, section: defaults} @@ -1124,7 +1149,7 @@ DEFAULT_VARS_PLUGIN_PATH: DEFAULT_VAULT_ID_MATCH: name: Force vault id match default: False - description: 'If true, decrypting vaults with a vault id will only try the password from the matching vault-id' + description: 'If true, decrypting vaults with a vault id will only try the password from the matching vault-id.' 
env: [{name: ANSIBLE_VAULT_ID_MATCH}] ini: - {key: vault_id_match, section: defaults} @@ -1132,7 +1157,7 @@ DEFAULT_VAULT_IDENTITY: name: Vault id label default: default - description: 'The label to use for the default vault id label in cases where a vault id label is not provided' + description: 'The label to use for the default vault id label in cases where a vault id label is not provided.' env: [{name: ANSIBLE_VAULT_IDENTITY}] ini: - {key: vault_identity, section: defaults} @@ -1147,7 +1172,7 @@ VAULT_ENCRYPT_SALT: version_added: '2.15' DEFAULT_VAULT_ENCRYPT_IDENTITY: name: Vault id to use for encryption - description: 'The vault_id to use for encrypting by default. If multiple vault_ids are provided, this specifies which to use for encryption. The --encrypt-vault-id cli option overrides the configured value.' + description: 'The vault_id to use for encrypting by default. If multiple vault_ids are provided, this specifies which to use for encryption. The ``--encrypt-vault-id`` CLI option overrides the configured value.' env: [{name: ANSIBLE_VAULT_ENCRYPT_IDENTITY}] ini: - {key: vault_encrypt_identity, section: defaults} @@ -1155,7 +1180,7 @@ DEFAULT_VAULT_IDENTITY_LIST: name: Default vault ids default: [] - description: 'A list of vault-ids to use by default. Equivalent to multiple --vault-id args. Vault-ids are tried in order.' + description: 'A list of vault-ids to use by default. Equivalent to multiple ``--vault-id`` args. Vault-ids are tried in order.' env: [{name: ANSIBLE_VAULT_IDENTITY_LIST}] ini: - {key: vault_identity_list, section: defaults} @@ -1165,7 +1190,7 @@ DEFAULT_VAULT_PASSWORD_FILE: name: Vault password file default: ~ description: - - 'The vault password file to use. Equivalent to --vault-password-file or --vault-id' + - 'The vault password file to use. Equivalent to ``--vault-password-file`` or ``--vault-id``.' - If executable, it will be run and the resulting stdout will be used as the password. env: [{name: ANSIBLE_VAULT_PASSWORD_FILE}] ini: @@ -1191,7 +1216,7 @@ DEPRECATION_WARNINGS: DEVEL_WARNING: name: Running devel warning default: True - description: Toggle to control showing warnings related to running devel + description: Toggle to control showing warnings related to running devel. env: [{name: ANSIBLE_DEVEL_WARNING}] ini: - {key: devel_warning, section: defaults} @@ -1207,7 +1232,7 @@ DIFF_ALWAYS: DIFF_CONTEXT: name: Difference context default: 3 - description: How many lines of context to show when displaying the differences between files. + description: Number of lines of context to show when displaying the differences between files. env: [{name: ANSIBLE_DIFF_CONTEXT}] ini: - {key: context, section: diff} @@ -1225,8 +1250,8 @@ DISPLAY_ARGS_TO_STDOUT: you do not want those to be printed." - "If you set this to True you should be sure that you have secured your environment's stdout (no one can shoulder surf your screen and you aren't saving stdout to an insecure file) or - made sure that all of your playbooks explicitly added the ``no_log: True`` parameter to tasks which have sensitive values - See How do I keep secret data in my playbook? for more information." + made sure that all of your playbooks explicitly added the ``no_log: True`` parameter to tasks that have sensitive values. See 
env: [{name: ANSIBLE_DISPLAY_ARGS_TO_STDOUT}] ini: - {key: display_args_to_stdout, section: defaults} @@ -1235,7 +1260,7 @@ DISPLAY_ARGS_TO_STDOUT: DISPLAY_SKIPPED_HOSTS: name: Show skipped results default: True - description: "Toggle to control displaying skipped task/host entries in a task in the default callback" + description: "Toggle to control displaying skipped task/host entries in a task in the default callback." env: - name: ANSIBLE_DISPLAY_SKIPPED_HOSTS ini: @@ -1245,7 +1270,7 @@ DOCSITE_ROOT_URL: name: Root docsite URL default: https://docs.ansible.com/ansible-core/ description: Root docsite URL used to generate docs URLs in warning/error text; - must be an absolute URL with valid scheme and trailing slash. + must be an absolute URL with a valid scheme and trailing slash. ini: - {key: docsite_root_url, section: defaults} version_added: "2.8" @@ -1253,7 +1278,7 @@ DUPLICATE_YAML_DICT_KEY: name: Controls ansible behaviour when finding duplicate keys in YAML. default: warn description: - - By default Ansible will issue a warning when a duplicate dict key is encountered in YAML. + - By default, Ansible will issue a warning when a duplicate dict key is encountered in YAML. - These warnings can be silenced by adjusting this setting to False. env: [{name: ANSIBLE_DUPLICATE_YAML_DICT_KEY}] ini: @@ -1345,7 +1370,7 @@ GALAXY_ROLE_SKELETON: GALAXY_ROLE_SKELETON_IGNORE: name: Galaxy role skeleton ignore default: ["^.git$", "^.*/.git_keep$"] - description: patterns of files to ignore inside a Galaxy role or collection skeleton directory + description: patterns of files to ignore inside a Galaxy role or collection skeleton directory. env: [{name: ANSIBLE_GALAXY_ROLE_SKELETON_IGNORE}] ini: - {key: role_skeleton_ignore, section: galaxy} @@ -1360,14 +1385,14 @@ GALAXY_COLLECTION_SKELETON: GALAXY_COLLECTION_SKELETON_IGNORE: name: Galaxy collection skeleton ignore default: ["^.git$", "^.*/.git_keep$"] - description: patterns of files to ignore inside a Galaxy collection skeleton directory + description: patterns of files to ignore inside a Galaxy collection skeleton directory. env: [{name: ANSIBLE_GALAXY_COLLECTION_SKELETON_IGNORE}] ini: - {key: collection_skeleton_ignore, section: galaxy} type: list GALAXY_COLLECTIONS_PATH_WARNING: - name: "ansible-galaxy collection install colections path warnings" - description: "whether ``ansible-galaxy collection install`` should warn about ``--collections-path`` missing from configured :ref:`collections_paths`" + name: "ansible-galaxy collection install collections path warnings" + description: "whether ``ansible-galaxy collection install`` should warn about ``--collections-path`` missing from configured :ref:`collections_paths`." default: true type: bool env: [{name: ANSIBLE_GALAXY_COLLECTIONS_PATH_WARNING}] @@ -1395,7 +1420,7 @@ GALAXY_SERVER_LIST: - A list of Galaxy servers to use when installing a collection. - The value corresponds to the config ini header ``[galaxy_server.{{item}}]`` which defines the server details. - 'See :ref:`galaxy_server_config` for more details on how to define a Galaxy server.' - - The order of servers in this list is used to as the order in which a collection is resolved. + - The order of servers in this list is used as the order in which a collection is resolved. - Setting this config option will ignore the :ref:`galaxy_server` config option. 
env: [{name: ANSIBLE_GALAXY_SERVER_LIST}] ini: @@ -1497,12 +1522,31 @@ GALAXY_REQUIRED_VALID_SIGNATURE_COUNT: - The number of signatures that must be successful during GPG signature verification while installing or verifying collections. - This should be a positive integer or all to indicate all signatures must successfully validate the collection. - Prepend + to the value to fail if no valid signatures are found for the collection. +GALAXY_COLLECTION_IMPORT_POLL_INTERVAL: + description: + - The initial interval in seconds for polling the import status of a collection. + - This interval increases exponentially based on the :ref:`galaxy_collection_import_poll_factor`, with a maximum delay of 30 seconds. + type: float + default: 2.0 + env: + - name: ANSIBLE_GALAXY_COLLECTION_IMPORT_POLL_INTERVAL + version_added: '2.18' +GALAXY_COLLECTION_IMPORT_POLL_FACTOR: + description: + - The multiplier used to increase the :ref:`galaxy_collection_import_poll_interval` when checking the collection import status. + type: float + default: 1.5 + env: + - name: ANSIBLE_GALAXY_COLLECTION_IMPORT_POLL_FACTOR + version_added: "2.18" HOST_KEY_CHECKING: - # note: constant not in use by ssh plugin anymore + # NOTE: constant not in use by ssh/paramiko plugins anymore, but they do support the same configuration sources # TODO: check non ssh connection plugins for use/migration - name: Check host keys + name: Toggle host/key check default: True - description: 'Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host' + description: + - Set this to "False" if you want to avoid host key checking by the underlying connection plugin Ansible uses to connect to the host. + - Please read the documentation of the specific connection plugin used for details. env: [{name: ANSIBLE_HOST_KEY_CHECKING}] ini: - {key: host_key_checking, section: defaults} @@ -1510,7 +1554,7 @@ HOST_KEY_CHECKING: HOST_PATTERN_MISMATCH: name: Control host pattern mismatch behaviour default: 'warning' - description: This setting changes the behaviour of mismatched host patterns, it allows you to force a fatal error, a warning or just ignore it + description: This setting changes the behaviour of mismatched host patterns; it allows you to force a fatal error, a warning, or just ignore it. env: [{name: ANSIBLE_HOST_PATTERN_MISMATCH}] ini: - {key: host_pattern_mismatch, section: inventory} @@ -1533,41 +1577,28 @@ INTERPRETER_PYTHON: falling back to a fixed ordered list of well-known Python interpreter locations if a platform-specific default is not available. The fallback behavior will issue a warning that the interpreter should be set explicitly (since interpreters installed later may change which one is used). This warning behavior can be disabled by setting ``auto_silent`` or - ``auto_legacy_silent``. The value of ``auto_legacy`` provides all the same behavior, but for backwards-compatibility + ``auto_legacy_silent``. The value of ``auto_legacy`` provides all the same behavior, but for backward-compatibility with older Ansible releases that always defaulted to ``/usr/bin/python``, will use that interpreter if present.
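The two new Galaxy options above (GALAXY_COLLECTION_IMPORT_POLL_INTERVAL and GALAXY_COLLECTION_IMPORT_POLL_FACTOR) combine into an exponential backoff while ansible-galaxy waits for a server-side collection import to finish. A minimal sketch of the wait schedule they imply, assuming the 30-second ceiling documented above is applied per step; the helper name is illustrative, not the actual client code:

    def poll_delays(interval=2.0, factor=1.5, cap=30.0, attempts=10):
        # Start at the configured interval and multiply by the factor
        # after every poll, never waiting longer than the cap.
        delays = []
        for _ in range(attempts):
            delays.append(min(interval, cap))
            interval *= factor
        return delays

    print(poll_delays())
    # [2.0, 3.0, 4.5, 6.75, 10.125, 15.1875, 22.78125, 30.0, 30.0, 30.0]

With the defaults, the delay reaches the 30-second ceiling on the eighth attempt and stays there.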
_INTERPRETER_PYTHON_DISTRO_MAP: name: Mapping of known included platform pythons for various Linux distros default: - redhat: - '6': /usr/bin/python - '8': /usr/libexec/platform-python - '9': /usr/bin/python3 - debian: - '8': /usr/bin/python - '10': /usr/bin/python3 - fedora: - '23': /usr/bin/python3 - ubuntu: - '14': /usr/bin/python - '16': /usr/bin/python3 + # Entry only for testing + ansible test: + '99': /usr/bin/python99 version_added: "2.8" # FUTURE: add inventory override once we're sure it can't be abused by a rogue target # FUTURE: add a platform layer to the map so we could use for, eg, freebsd/macos/etc? INTERPRETER_PYTHON_FALLBACK: name: Ordered list of Python interpreters to check for in discovery default: + - python3.13 - python3.12 - python3.11 - python3.10 - python3.9 - python3.8 - - python3.7 - - python3.6 - /usr/bin/python3 - - /usr/libexec/platform-python - - python2.7 - - /usr/bin/python - - python + - python3 vars: - name: ansible_interpreter_python_fallback type: list @@ -1590,7 +1621,7 @@ TRANSFORM_INVALID_GROUP_CHARS: INVALID_TASK_ATTRIBUTE_FAILED: name: Controls whether invalid attributes for a task result in errors instead of warnings default: True - description: If 'false', invalid attributes for a task will result in warnings instead of errors + description: If 'false', invalid attributes for a task will result in warnings instead of errors. type: boolean env: - name: ANSIBLE_INVALID_TASK_ATTRIBUTE_FAILED @@ -1610,60 +1641,6 @@ INVENTORY_ANY_UNPARSED_IS_FAILED: ini: - {key: any_unparsed_is_failed, section: inventory} version_added: "2.7" -INVENTORY_CACHE_ENABLED: - name: Inventory caching enabled - default: False - description: - - Toggle to turn on inventory caching. - - This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`. - - The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory configuration. - - This message will be removed in 2.16. - env: [{name: ANSIBLE_INVENTORY_CACHE}] - ini: - - {key: cache, section: inventory} - type: bool -INVENTORY_CACHE_PLUGIN: - name: Inventory cache plugin - description: - - The plugin for caching inventory. - - This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`. - - The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration. - - This message will be removed in 2.16. - env: [{name: ANSIBLE_INVENTORY_CACHE_PLUGIN}] - ini: - - {key: cache_plugin, section: inventory} -INVENTORY_CACHE_PLUGIN_CONNECTION: - name: Inventory cache plugin URI to override the defaults section - description: - - The inventory cache connection. - - This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`. - - The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration. - - This message will be removed in 2.16. - env: [{name: ANSIBLE_INVENTORY_CACHE_CONNECTION}] - ini: - - {key: cache_connection, section: inventory} -INVENTORY_CACHE_PLUGIN_PREFIX: - name: Inventory cache plugin table prefix - description: - - The table prefix for the cache plugin. - - This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`. 
- - The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration. - - This message will be removed in 2.16. - env: [{name: ANSIBLE_INVENTORY_CACHE_PLUGIN_PREFIX}] - default: ansible_inventory_ - ini: - - {key: cache_prefix, section: inventory} -INVENTORY_CACHE_TIMEOUT: - name: Inventory cache plugin expiration timeout - description: - - Expiration timeout for the inventory cache plugin data. - - This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`. - - The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration. - - This message will be removed in 2.16. - default: 3600 - env: [{name: ANSIBLE_INVENTORY_CACHE_TIMEOUT}] - ini: - - {key: cache_timeout, section: inventory} INVENTORY_ENABLED: name: Active Inventory plugins default: ['host_list', 'script', 'auto', 'yaml', 'ini', 'toml'] @@ -1682,8 +1659,8 @@ INVENTORY_EXPORT: type: bool INVENTORY_IGNORE_EXTS: name: Inventory ignore extensions - default: "{{(REJECT_EXTS + ('.orig', '.ini', '.cfg', '.retry'))}}" - description: List of extensions to ignore when using a directory as an inventory source + default: "{{(REJECT_EXTS + ('.orig', '.cfg', '.retry'))}}" + description: List of extensions to ignore when using a directory as an inventory source. env: [{name: ANSIBLE_INVENTORY_IGNORE}] ini: - {key: inventory_ignore_extensions, section: defaults} @@ -1692,7 +1669,7 @@ INVENTORY_IGNORE_EXTS: INVENTORY_IGNORE_PATTERNS: name: Inventory ignore patterns default: [] - description: List of patterns to ignore when using a directory as an inventory source + description: List of patterns to ignore when using a directory as an inventory source. env: [{name: ANSIBLE_INVENTORY_IGNORE_REGEX}] ini: - {key: inventory_ignore_patterns, section: defaults} @@ -1703,29 +1680,16 @@ INVENTORY_UNPARSED_IS_FAILED: default: False description: > If 'true' it is a fatal error if every single potential inventory - source fails to parse, otherwise this situation will only attract a + source fails to parse; otherwise, this situation will only attract a warning. env: [{name: ANSIBLE_INVENTORY_UNPARSED_FAILED}] ini: - {key: unparsed_is_failed, section: inventory} type: bool -JINJA2_NATIVE_WARNING: - name: Running older than required Jinja version for jinja2_native warning - default: True - description: Toggle to control showing warnings related to running a Jinja version older than required for jinja2_native - env: - - name: ANSIBLE_JINJA2_NATIVE_WARNING - deprecated: - why: This option is no longer used in the Ansible Core code base. - version: "2.17" - ini: - - {key: jinja2_native_warning, section: defaults} - type: boolean MAX_FILE_SIZE_FOR_DIFF: name: Diff maximum file size default: 104448 - description: Maximum size of files to be considered for diff display + description: Maximum size of files to be considered for diff display. env: [{name: ANSIBLE_MAX_DIFF_SIZE}] ini: - {key: max_diff_size, section: defaults} @@ -1744,7 +1708,7 @@ INJECT_FACTS_AS_VARS: default: True description: - Facts are available inside the `ansible_facts` variable, this setting also pushes them as their own vars in the main namespace. - - Unlike inside the `ansible_facts` dictionary, these will have an `ansible_` prefix.
+ - Unlike inside the `ansible_facts` dictionary where the prefix `ansible_` is removed from fact names, these will have the exact names that are returned by the module. env: [{name: ANSIBLE_INJECT_FACT_VARS}] ini: - {key: inject_facts_as_vars, section: defaults} @@ -1754,8 +1718,8 @@ MODULE_IGNORE_EXTS: name: Module ignore extensions default: "{{(REJECT_EXTS + ('.yaml', '.yml', '.ini'))}}" description: - - List of extensions to ignore when looking for modules to load - - This is for rejecting script and binary module fallback extensions + - List of extensions to ignore when looking for modules to load. + - This is for rejecting script and binary module fallback extensions. env: [{name: ANSIBLE_MODULE_IGNORE_EXTS}] ini: - {key: module_ignore_exts, section: defaults} @@ -1763,16 +1727,16 @@ MODULE_STRICT_UTF8_RESPONSE: name: Module strict UTF-8 response description: - - Enables whether module responses are evaluated for containing non UTF-8 data - - Disabling this may result in unexpected behavior - - Only ansible-core should evaluate this configuration + - Controls whether module responses are evaluated for containing non-UTF-8 data. + - Disabling this may result in unexpected behavior. + - Only ansible-core should evaluate this configuration. env: [{name: ANSIBLE_MODULE_STRICT_UTF8_RESPONSE}] ini: - {key: module_strict_utf8_response, section: defaults} type: bool default: True OLD_PLUGIN_CACHE_CLEARING: - description: Previously Ansible would only clear some of the plugin loading caches when loading new roles, this led to some behaviours in which a plugin loaded in previous plays would be unexpectedly 'sticky'. This setting allows to return to that behaviour. + description: Previously Ansible would only clear some of the plugin loading caches when loading new roles; this led to some behaviors in which a plugin loaded in previous plays would be unexpectedly 'sticky'. This setting allows the user to return to that behavior. env: [{name: ANSIBLE_OLD_PLUGIN_CACHE_CLEAR}] ini: - {key: old_plugin_cache_clear, section: defaults} @@ -1782,8 +1746,8 @@ PAGER: name: pager application to use default: less - descrioption: - - for the cases in which Ansible needs to return output in pageable fashion, this chooses the application to use + description: - - for the cases in which Ansible needs to return output in a pageable fashion, this chooses the application to use. ini: - section: defaults key: pager @@ -1793,13 +1757,16 @@ PAGER: version_added: '2.15' - name: PAGER PARAMIKO_HOST_KEY_AUTO_ADD: - # TODO: move to plugin default: False description: 'TODO: write it' env: [{name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD}] ini: - {key: host_key_auto_add, section: paramiko_connection} type: boolean + deprecated: + why: This option was moved to the plugin itself + version: "2.20" + alternatives: Use the option from the plugin itself. PARAMIKO_LOOK_FOR_KEYS: name: look for keys default: True @@ -1808,10 +1775,14 @@ PARAMIKO_LOOK_FOR_KEYS: ini: - {key: look_for_keys, section: paramiko_connection} type: boolean + deprecated: + why: This option was moved to the plugin itself + version: "2.20" + alternatives: Use the option from the plugin itself. PERSISTENT_CONTROL_PATH_DIR: name: Persistence socket path default: '{{ ANSIBLE_HOME ~ "/pc" }}' - description: Path to socket to be used by the connection persistence system. + description: Path to the socket to be used by the connection persistence system.
env: [{name: ANSIBLE_PERSISTENT_CONTROL_PATH_DIR}] ini: - {key: control_path_dir, section: persistent_connection} @@ -1835,7 +1806,7 @@ PERSISTENT_CONNECT_RETRY_TIMEOUT: PERSISTENT_COMMAND_TIMEOUT: name: Persistence command timeout default: 30 - description: This controls the amount of time to wait for response from remote device before timing out persistent connection. + description: This controls the amount of time to wait for a response from a remote device before timing out a persistent connection. env: [{name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT}] ini: - {key: command_timeout, section: persistent_connection} @@ -1853,7 +1824,7 @@ PLAYBOOK_VARS_ROOT: default: top version_added: "2.4.1" description: - - This sets which playbook dirs will be used as a root to process vars plugins, which includes finding host_vars/group_vars + - This sets which playbook dirs will be used as a root to process vars plugins, which includes finding host_vars/group_vars. env: [{name: ANSIBLE_PLAYBOOK_VARS_ROOT}] ini: - {key: playbook_vars_root, section: defaults} @@ -1909,7 +1880,7 @@ RUN_VARS_PLUGINS: name: When should vars plugins run relative to inventory default: demand description: - - This setting can be used to optimize vars_plugin usage depending on user's inventory size and play selection. + - This setting can be used to optimize vars_plugin usage depending on the user's inventory size and play selection. env: [{name: ANSIBLE_RUN_VARS_PLUGINS}] ini: - {key: run_vars_plugins, section: defaults} @@ -1921,7 +1892,7 @@ SHOW_CUSTOM_STATS: name: Display custom stats default: False - description: 'This adds the custom stats set via the set_stats plugin to the default output' + description: 'This adds the custom stats set via the set_stats plugin to the default output.' env: [{name: ANSIBLE_SHOW_CUSTOM_STATS}] ini: - {key: show_custom_stats, section: defaults} @@ -1930,7 +1901,7 @@ STRING_TYPE_FILTERS: name: Filters to preserve strings default: [string, to_json, to_nice_json, to_yaml, to_nice_yaml, ppretty, json] description: - - "This list of filters avoids 'type conversion' when templating variables" + - "This list of filters avoids 'type conversion' when templating variables." - Useful when you want to avoid conversion into lists or dictionaries for JSON strings, for example. env: [{name: ANSIBLE_STRING_TYPE_FILTERS}] ini: @@ -1940,8 +1911,8 @@ SYSTEM_WARNINGS: name: System warnings default: True description: - - Allows disabling of warnings related to potential issues on the system running ansible itself (not on the managed hosts) - - These may include warnings about 3rd party packages or other conditions that should be resolved if possible. + - Allows disabling of warnings related to potential issues on the system running Ansible itself (not on the managed hosts). + - These may include warnings about third-party packages or other conditions that should be resolved if possible. env: [{name: ANSIBLE_SYSTEM_WARNINGS}] ini: - {key: system_warnings, section: defaults} @@ -1964,11 +1935,24 @@ TAGS_SKIP: ini: - {key: skip, section: tags} version_added: "2.5" +TARGET_LOG_INFO: + name: Target log info + description: A string to insert into target logging for tracking purposes. + env: [{name: ANSIBLE_TARGET_LOG_INFO}] + ini: + - {key: target_log_info, section: defaults} + vars: + - name: ansible_target_log_info + version_added: "2.17" TASK_TIMEOUT: name: Task Timeout default: 0 description: - - Set the maximum time (in seconds) that a task can run for.
+ - Set the maximum time (in seconds) that a task's action is allowed to execute. + - Timeout runs independently from templating or looping. It applies to each attempt at executing the task's action and is unaffected by the total time spent on a task. + - When the action execution exceeds the timeout, Ansible interrupts the process. This is registered as a failure due to outside circumstances, not as a task failure, so that an appropriate response and recovery process can follow. - If set to 0 (the default) there is no timeout. env: [{name: ANSIBLE_TASK_TIMEOUT}] ini: @@ -2058,21 +2042,6 @@ NETCONF_SSH_CONFIG: - {key: ssh_config, section: netconf_connection} yaml: {key: netconf_connection.ssh_config} default: null -STRING_CONVERSION_ACTION: - version_added: '2.8' - description: - - Action to take when a module parameter value is converted to a string (this does not affect variables). For string parameters, values such as '1.00', "['a', 'b',]", and 'yes', 'y', etc. - will be converted by the YAML parser unless fully quoted. - - Valid options are 'error', 'warn', and 'ignore'. - - Since 2.8, this option defaults to 'warn' but will change to 'error' in 2.12. - default: 'warn' - env: - - name: ANSIBLE_STRING_CONVERSION_ACTION - ini: - - section: defaults - key: string_conversion_action - type: string VALIDATE_ACTION_GROUP_METADATA: version_added: '2.12' description: @@ -2095,4 +2064,35 @@ VERBOSE_TO_STDERR: - section: defaults key: verbose_to_stderr type: bool -... +_Z_TEST_ENTRY: + name: testentry + description: for tests + env: + - name: ANSIBLE_TEST_ENTRY + - name: ANSIBLE_TEST_ENTRY_D + deprecated: + why: for testing + version: '3.30' + alternatives: nothing + ini: + - section: testing + key: valid + - section: testing + key: deprecated + deprecated: + why: for testing + version: '3.30' + alternatives: nothing +_Z_TEST_ENTRY_2: + version_added: '2.18' + name: testentry + description: for tests + deprecated: + why: for testing + version: '3.30' + alternatives: nothing + env: + - name: ANSIBLE_TEST_ENTRY2 + ini: + - section: testing + key: valid2 diff --git a/lib/ansible/config/manager.py b/lib/ansible/config/manager.py index 418528aefa6..818219b1304 100644 --- a/lib/ansible/config/manager.py +++ b/lib/ansible/config/manager.py @@ -1,10 +1,10 @@ # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import atexit +import decimal import configparser import os import os.path @@ -16,25 +16,44 @@ from collections import namedtuple from collections.abc import Mapping, Sequence from jinja2.nativetypes import NativeEnvironment -from ansible.errors import AnsibleOptionsError, AnsibleError +from ansible.errors import AnsibleOptionsError, AnsibleError, AnsibleRequiredOptionError +from ansible.module_utils.common.sentinel import Sentinel from ansible.module_utils.common.text.converters import to_text, to_bytes, to_native from ansible.module_utils.common.yaml import yaml_load from ansible.module_utils.six import string_types from ansible.module_utils.parsing.convert_bool import boolean from ansible.parsing.quoting import unquote from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode -from ansible.utils import py3compat from ansible.utils.path import cleanup_tmp_file, makedirs_safe, unfrackpath -Plugin = namedtuple('Plugin', 'name type') Setting = namedtuple('Setting', 'name value origin type')
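For context on the ensure_type change in the next hunk: integer coercion now routes through decimal.Decimal, so values such as '42' or '42.0' become 42, while '42.5' raises instead of being silently accepted in truncated form (previously int('42.0') simply failed). A standalone sketch of that rule, under the assumption it captures the new branch's intent; this is not the manager.py code itself:

    import decimal

    def coerce_int(value):
        # Accept anything exactly representable as an integer; reject the rest.
        if isinstance(value, int):
            return value
        try:
            decimal_value = decimal.Decimal(value)
        except decimal.DecimalException as e:
            raise ValueError(f'{value!r} is not a number') from e
        int_part = int(decimal_value)
        if decimal_value != int_part:
            raise ValueError(f'{value!r} is not an exact integer')
        return int_part

    assert coerce_int('42') == 42
    assert coerce_int('42.0') == 42
    # coerce_int('42.5') and coerce_int('abc') both raise ValueError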
INTERNAL_DEFS = {'lookup': ('_terms',)} +GALAXY_SERVER_DEF = [ + ('url', True, 'str'), + ('username', False, 'str'), + ('password', False, 'str'), + ('token', False, 'str'), + ('auth_url', False, 'str'), + ('api_version', False, 'int'), + ('validate_certs', False, 'bool'), + ('client_id', False, 'str'), + ('timeout', False, 'int'), +] + +# config definition fields +GALAXY_SERVER_ADDITIONAL = { + 'api_version': {'default': None, 'choices': [2, 3]}, + 'validate_certs': {'cli': [{'name': 'validate_certs'}]}, + 'timeout': {'cli': [{'name': 'timeout'}]}, + 'token': {'default': None}, +} + def _get_entry(plugin_type, plugin_name, config): - ''' construct entry for requested config ''' + """ construct entry for requested config """ entry = '' if plugin_type: entry += 'plugin_type: %s ' % plugin_type @@ -45,8 +64,8 @@ def _get_entry(plugin_type, plugin_name, config): # FIXME: see if we can unify in module_utils with similar function used by argspec -def ensure_type(value, value_type, origin=None): - ''' return a configuration variable with casting +def ensure_type(value, value_type, origin=None, origin_ftype=None): + """ return a configuration variable with casting :arg value: The value to ensure correct typing of :kwarg value_type: The type of the value. This can be any of the following strings: :boolean: sets the value to a True or False value @@ -69,7 +88,7 @@ def ensure_type(value, value_type, origin=None): tildes's in the value. :str: Sets the value to string types. :string: Same as 'str' - ''' + """ errmsg = '' basedir = None @@ -84,10 +103,18 @@ def ensure_type(value, value_type, origin=None): value = boolean(value, strict=False) elif value_type in ('integer', 'int'): - value = int(value) + if not isinstance(value, int): + try: + if (decimal_value := decimal.Decimal(value)) == (int_part := int(decimal_value)): + value = int_part + else: + errmsg = 'int' + except decimal.DecimalException as e: + raise ValueError from e elif value_type == 'float': - value = float(value) + if not isinstance(value, float): + value = float(value) elif value_type == 'list': if isinstance(value, string_types): @@ -144,7 +171,7 @@ def ensure_type(value, value_type, origin=None): elif value_type in ('str', 'string'): if isinstance(value, (string_types, AnsibleVaultEncryptedUnicode, bool, int, float, complex)): value = to_text(value, errors='surrogate_or_strict') - if origin == 'ini': + if origin_ftype and origin_ftype == 'ini': value = unquote(value) else: errmsg = 'string' @@ -152,18 +179,18 @@ def ensure_type(value, value_type, origin=None): # defaults to string type elif isinstance(value, (string_types, AnsibleVaultEncryptedUnicode)): value = to_text(value, errors='surrogate_or_strict') - if origin == 'ini': + if origin_ftype and origin_ftype == 'ini': value = unquote(value) if errmsg: - raise ValueError('Invalid type provided for "%s": %s' % (errmsg, to_native(value))) + raise ValueError(f'Invalid type provided for "{errmsg}": {value!r}') return to_text(value, errors='surrogate_or_strict', nonstring='passthru') # FIXME: see if this can live in utils/path def resolve_path(path, basedir=None): - ''' resolve relative or 'variable' paths ''' + """ resolve relative or 'variable' paths """ if '{{CWD}}' in path: # allow users to force CWD using 'magic' {{CWD}} path = path.replace('{{CWD}}', os.getcwd()) @@ -188,7 +215,7 @@ def get_config_type(cfile): # FIXME: can move to module_utils for use for ini plugins also? 
def get_ini_config_value(p, entry): - ''' returns the value of last ini entry found ''' + """ returns the value of last ini entry found """ value = None if p is not None: try: @@ -199,22 +226,20 @@ def get_ini_config_value(p, entry): def find_ini_config_file(warnings=None): - ''' Load INI Config File order(first found is used): ENV, CWD, HOME, /etc/ansible ''' + """ Load INI config file order (first found is used): ENV, CWD, HOME, /etc/ansible """ # FIXME: eventually deprecate ini configs if warnings is None: # Note: In this case, warnings does nothing warnings = set() - # A value that can never be a valid path so that we can tell if ANSIBLE_CONFIG was set later - # We can't use None because we could set path to None. - SENTINEL = object - potential_paths = [] + potential_paths = [] + # A value that can never be a valid path so that we can tell if ANSIBLE_CONFIG was set later + # We can't use None because we could set path to None. # Environment setting - path_from_env = os.getenv("ANSIBLE_CONFIG", SENTINEL) - if path_from_env is not SENTINEL: + path_from_env = os.getenv("ANSIBLE_CONFIG", Sentinel) + if path_from_env is not Sentinel: path_from_env = unfrackpath(path_from_env, follow=False) if os.path.isdir(to_bytes(path_from_env)): path_from_env = os.path.join(path_from_env, "ansible.cfg") @@ -264,7 +289,7 @@ def find_ini_config_file(warnings=None): def _add_base_defs_deprecations(base_defs): - '''Add deprecation source 'ansible.builtin' to deprecations in base.yml''' + """Add deprecation source 'ansible.builtin' to deprecations in base.yml""" def process(entry): if 'deprecated' in entry: entry['deprecated']['collection_name'] = 'ansible.builtin' @@ -305,6 +330,53 @@ class ConfigManager(object): # ensure we always have config def entry self._base_defs['CONFIG_FILE'] = {'default': None, 'type': 'path'} + def load_galaxy_server_defs(self, server_list): + + def server_config_def(section, key, required, option_type): + config_def = { + 'description': 'The %s of the %s Galaxy server' % (key, section), + 'ini': [ + { + 'section': 'galaxy_server.%s' % section, + 'key': key, + } + ], + 'env': [ + {'name': 'ANSIBLE_GALAXY_SERVER_%s_%s' % (section.upper(), key.upper())}, + ], + 'required': required, + 'type': option_type, + } + if key in GALAXY_SERVER_ADDITIONAL: + config_def.update(GALAXY_SERVER_ADDITIONAL[key]) + # ensure we always have a default timeout + if key == 'timeout' and 'default' not in config_def: + config_def['default'] = self.get_config_value('GALAXY_SERVER_TIMEOUT') + + return config_def + + if server_list: + for server_key in server_list: + if not server_key: + # To filter out empty strings or non-truthy values, as an empty server list env var is equal to ['']. + continue + + # Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the + # section [galaxy_server.<server key>] for the values url, username, password, and token.
+ defs = dict((k, server_config_def(server_key, k, req, value_type)) for k, req, value_type in GALAXY_SERVER_DEF) + self.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs) + + def template_default(self, value, variables): + if isinstance(value, string_types) and (value.startswith('{{') and value.endswith('}}')) and variables is not None: + # template default values if possible + # NOTE: cannot use is_template due to circular dep + try: + t = NativeEnvironment().from_string(value) + value = t.render(variables) + except Exception: + pass # not templatable + return value + def _read_config_yaml_file(self, yml_file): # TODO: handle relative paths as relative to the directory containing the current playbook instead of CWD # Currently this is only used with absolute paths to the `ansible/config` directory @@ -316,7 +388,7 @@ class ConfigManager(object): "Missing base YAML definition file (bad install?): %s" % to_native(yml_file)) def _parse_config_file(self, cfile=None): - ''' return flat configuration settings from file(s) ''' + """ return flat configuration settings from file(s) """ # TODO: take list of files with merge/nomerge if cfile is None: @@ -343,13 +415,13 @@ class ConfigManager(object): raise AnsibleOptionsError("Unsupported configuration file type: %s" % to_native(ftype)) def _find_yaml_config_files(self): - ''' Load YAML Config Files in order, check merge flags, keep origin of settings''' + """ Load YAML Config Files in order, check merge flags, keep origin of settings""" pass def get_plugin_options(self, plugin_type, name, keys=None, variables=None, direct=None): options = {} - defs = self.get_configuration_definitions(plugin_type, name) + defs = self.get_configuration_definitions(plugin_type=plugin_type, name=name) for option in defs: options[option] = self.get_config_value(option, plugin_type=plugin_type, plugin_name=name, keys=keys, variables=variables, direct=direct) @@ -358,7 +430,7 @@ class ConfigManager(object): def get_plugin_vars(self, plugin_type, name): pvars = [] - for pdef in self.get_configuration_definitions(plugin_type, name).values(): + for pdef in self.get_configuration_definitions(plugin_type=plugin_type, name=name).values(): if 'vars' in pdef and pdef['vars']: for var_entry in pdef['vars']: pvars.append(var_entry['name']) @@ -367,7 +439,7 @@ class ConfigManager(object): def get_plugin_options_from_var(self, plugin_type, name, variable): options = [] - for option_name, pdef in self.get_configuration_definitions(plugin_type, name).items(): + for option_name, pdef in self.get_configuration_definitions(plugin_type=plugin_type, name=name).items(): if 'vars' in pdef and pdef['vars']: for var_entry in pdef['vars']: if variable == var_entry['name']: @@ -395,7 +467,7 @@ class ConfigManager(object): return has def get_configuration_definitions(self, plugin_type=None, name=None, ignore_private=False): - ''' just list the possible settings, either base or for specific plugins or plugin ''' + """ just list the possible settings, either base or for specific plugins or plugin """ ret = {} if plugin_type is None: @@ -409,11 +481,10 @@ class ConfigManager(object): for cdef in list(ret.keys()): if cdef.startswith('_'): del ret[cdef] - return ret def _loop_entries(self, container, entry_list): - ''' repeat code for value entry assignment ''' + """ repeat code for value entry assignment """ value = None origin = None @@ -439,7 +510,7 @@ class ConfigManager(object): return value, origin def get_config_value(self, config, cfile=None, plugin_type=None, 
plugin_name=None, keys=None, variables=None, direct=None): - ''' wrapper ''' + """ wrapper """ try: value, _drop = self.get_config_value_and_origin(config, cfile=cfile, plugin_type=plugin_type, plugin_name=plugin_name, @@ -451,7 +522,7 @@ class ConfigManager(object): return value def get_config_value_and_origin(self, config, cfile=None, plugin_type=None, plugin_name=None, keys=None, variables=None, direct=None): - ''' Given a config key figure out the actual value and report on the origin of the settings ''' + """ Given a config key figure out the actual value and report on the origin of the settings """ if cfile is None: # use default config cfile = self._config_file @@ -462,8 +533,9 @@ class ConfigManager(object): # Note: sources that are lists listed in low to high precedence (last one wins) value = None origin = None + origin_ftype = None - defs = self.get_configuration_definitions(plugin_type, plugin_name) + defs = self.get_configuration_definitions(plugin_type=plugin_type, name=plugin_name) if config in defs: aliases = defs[config].get('aliases', []) @@ -514,58 +586,58 @@ class ConfigManager(object): # env vars are next precedence if value is None and defs[config].get('env'): - value, origin = self._loop_entries(py3compat.environ, defs[config]['env']) + value, origin = self._loop_entries(os.environ, defs[config]['env']) origin = 'env: %s' % origin # try config file entries next, if we have one if self._parsers.get(cfile, None) is None: self._parse_config_file(cfile) + # attempt to read from config file if value is None and cfile is not None: ftype = get_config_type(cfile) if ftype and defs[config].get(ftype): - if ftype == 'ini': - # load from ini config - try: # FIXME: generalize _loop_entries to allow for files also, most of this code is dupe - for ini_entry in defs[config]['ini']: - temp_value = get_ini_config_value(self._parsers[cfile], ini_entry) - if temp_value is not None: - value = temp_value - origin = cfile - if 'deprecated' in ini_entry: - self.DEPRECATED.append(('[%s]%s' % (ini_entry['section'], ini_entry['key']), ini_entry['deprecated'])) - except Exception as e: - sys.stderr.write("Error while loading ini config %s: %s" % (cfile, to_native(e))) - elif ftype == 'yaml': - # FIXME: implement, also , break down key from defs (. notation???) 
- origin = cfile + try: + for entry in defs[config][ftype]: + # load from config + if ftype == 'ini': + temp_value = get_ini_config_value(self._parsers[cfile], entry) + elif ftype == 'yaml': + raise AnsibleError('YAML configuration type has not been implemented yet') + else: + raise AnsibleError('Invalid configuration file type: %s' % ftype) + + if temp_value is not None: + # set value and origin + value = temp_value + origin = cfile + origin_ftype = ftype + if 'deprecated' in entry: + if ftype == 'ini': + self.DEPRECATED.append(('[%s]%s' % (entry['section'], entry['key']), entry['deprecated'])) + else: + raise AnsibleError('Unimplemented file type: %s' % ftype) + + except Exception as e: + sys.stderr.write("Error while loading config %s: %s" % (cfile, to_native(e))) # set default if we got here w/o a value if value is None: if defs[config].get('required', False): if not plugin_type or config not in INTERNAL_DEFS.get(plugin_type, {}): - raise AnsibleError("No setting was provided for required configuration %s" % - to_native(_get_entry(plugin_type, plugin_name, config))) + raise AnsibleRequiredOptionError("No setting was provided for required configuration %s" % + to_native(_get_entry(plugin_type, plugin_name, config))) else: origin = 'default' - value = defs[config].get('default') - if isinstance(value, string_types) and (value.startswith('{{') and value.endswith('}}')) and variables is not None: - # template default values if possible - # NOTE: cannot use is_template due to circular dep - try: - t = NativeEnvironment().from_string(value) - value = t.render(variables) - except Exception: - pass # not templatable - - # ensure correct type, can raise exceptions on mismatched types + value = self.template_default(defs[config].get('default'), variables) try: - value = ensure_type(value, defs[config].get('type'), origin=origin) + # ensure correct type, can raise exceptions on mismatched types + value = ensure_type(value, defs[config].get('type'), origin=origin, origin_ftype=origin_ftype) except ValueError as e: if origin.startswith('env:') and value == '': # this is empty env var for non string so we can set to default origin = 'default' - value = ensure_type(defs[config].get('default'), defs[config].get('type'), origin=origin) + value = ensure_type(defs[config].get('default'), defs[config].get('type'), origin=origin, origin_ftype=origin_ftype) else: raise AnsibleOptionsError('Invalid type for configuration option %s (from %s): %s' % (to_native(_get_entry(plugin_type, plugin_name, config)).strip(), origin, to_native(e))) @@ -608,3 +680,19 @@ class ConfigManager(object): self._plugins[plugin_type] = {} self._plugins[plugin_type][name] = defs + + @staticmethod + def get_deprecated_msg_from_config(dep_docs, include_removal=False, collection_name=None): + + removal = '' + if include_removal: + if 'removed_at_date' in dep_docs: + removal = f"Will be removed in a release after {dep_docs['removed_at_date']}\n\t" + elif collection_name: + removal = f"Will be removed in: {collection_name} {dep_docs['removed_in']}\n\t" + else: + removal = f"Will be removed in: Ansible {dep_docs['removed_in']}\n\t" + + # TODO: choose to deprecate either singular or plural + alt = dep_docs.get('alternatives', dep_docs.get('alternative', 'none')) + return f"Reason: {dep_docs['why']}\n\t{removal}Alternatives: {alt}" diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 514357b0bc0..af60053a3dd 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -2,8 +2,7 @@ # Copyright: (c) 2017, Ansible 
Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re @@ -16,9 +15,13 @@ from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE from ansible.release import __version__ from ansible.utils.fqcn import add_internal_fqcns +# initialize config manager/config data to read/store global settings +# and generate 'pseudo constants' for app consumption. +config = ConfigManager() + def _warning(msg): - ''' display is not guaranteed here, nor it being the full class, but try anyways, fallback to sys.stderr.write ''' + """ display is not guaranteed here, nor is it guaranteed to be the full class, but try anyway; fall back to sys.stderr.write """ try: from ansible.utils.display import Display Display().warning(msg) @@ -28,7 +31,7 @@ def _warning(msg): def _deprecated(msg, version): - ''' display is not guaranteed here, nor it being the full class, but try anyways, fallback to sys.stderr.write ''' + """ display is not guaranteed here, nor is it guaranteed to be the full class, but try anyway; fall back to sys.stderr.write """ try: from ansible.utils.display import Display Display().deprecated(msg, version=version) @@ -37,8 +40,30 @@ def _deprecated(msg, version): sys.stderr.write(' [DEPRECATED] %s, to be removed in %s\n' % (msg, version)) +def handle_config_noise(display=None): + + if display is not None: + w = display.warning + d = display.deprecated + else: + w = _warning + d = _deprecated + + while config.WARNINGS: + warn = config.WARNINGS.pop() + w(warn) + + while config.DEPRECATED: + # tuple with name and options + dep = config.DEPRECATED.pop(0) + msg = config.get_deprecated_msg_from_config(dep[1]) + # use tabs only for ansible-doc? + msg = msg.replace("\t", "") + d(f"{dep[0]} option. 
{msg}", version=dep[1]['version']) + + def set_constant(name, value, export=vars()): - ''' sets constants and returns resolved options dict ''' + """ sets constants and returns resolved options dict """ export[name] = value @@ -112,11 +137,51 @@ CONFIGURABLE_PLUGINS = ('become', 'cache', 'callback', 'cliconf', 'connection', DOCUMENTABLE_PLUGINS = CONFIGURABLE_PLUGINS + ('module', 'strategy', 'test', 'filter') IGNORE_FILES = ("COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES", "MANIFEST", "Makefile") # ignore during module search INTERNAL_RESULT_KEYS = ('add_host', 'add_group') +INTERNAL_STATIC_VARS = frozenset( + [ + "ansible_async_path", + "ansible_collection_name", + "ansible_config_file", + "ansible_dependent_role_names", + "ansible_diff_mode", + "ansible_config_file", + "ansible_facts", + "ansible_forks", + "ansible_inventory_sources", + "ansible_limit", + "ansible_play_batch", + "ansible_play_hosts", + "ansible_play_hosts_all", + "ansible_play_role_names", + "ansible_playbook_python", + "ansible_role_name", + "ansible_role_names", + "ansible_run_tags", + "ansible_skip_tags", + "ansible_verbosity", + "ansible_version", + "inventory_dir", + "inventory_file", + "inventory_hostname", + "inventory_hostname_short", + "groups", + "group_names", + "omit", + "hostvars", + "playbook_dir", + "play_hosts", + "role_name", + "role_names", + "role_path", + "role_uuid", + "role_names", + ] +) LOCALHOST = ('127.0.0.1', 'localhost', '::1') -MODULE_REQUIRE_ARGS = tuple(add_internal_fqcns(('command', 'win_command', 'ansible.windows.win_command', 'shell', 'win_shell', - 'ansible.windows.win_shell', 'raw', 'script'))) -MODULE_NO_JSON = tuple(add_internal_fqcns(('command', 'win_command', 'ansible.windows.win_command', 'shell', 'win_shell', - 'ansible.windows.win_shell', 'raw'))) +WIN_MOVED = ['ansible.windows.win_command', 'ansible.windows.win_shell'] +MODULE_REQUIRE_ARGS_SIMPLE = ['command', 'raw', 'script', 'shell', 'win_command', 'win_shell'] +MODULE_REQUIRE_ARGS = tuple(add_internal_fqcns(MODULE_REQUIRE_ARGS_SIMPLE) + WIN_MOVED) +MODULE_NO_JSON = tuple(add_internal_fqcns(('command', 'win_command', 'shell', 'win_shell', 'raw')) + WIN_MOVED) RESTRICTED_RESULT_KEYS = ('ansible_rsync_path', 'ansible_playbook_python', 'ansible_facts') SYNTHETIC_COLLECTIONS = ('ansible.builtin', 'ansible.legacy') TREE_DIR = None @@ -179,11 +244,8 @@ MAGIC_VARIABLE_MAPPING = dict( ) # POPULATE SETTINGS FROM CONFIG ### -config = ConfigManager() - -# Generate constants from config for setting in config.get_configuration_definitions(): set_constant(setting, config.get_config_value(setting, variables=vars())) -for warn in config.WARNINGS: - _warning(warn) +# emit any warnings or deprecations +handle_config_noise() diff --git a/lib/ansible/context.py b/lib/ansible/context.py index 216c135e5d3..4b4ed84e13a 100644 --- a/lib/ansible/context.py +++ b/lib/ansible/context.py @@ -1,10 +1,6 @@ # Copyright: (c) 2018, Toshio Kuratomi # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - """ Context of the running Ansible. @@ -14,6 +10,7 @@ running the ansible command line tools. 
These APIs are still in flux so do not use them unless you are willing to update them with every Ansible release """ +from __future__ import annotations from collections.abc import Mapping, Set diff --git a/lib/ansible/errors/__init__.py b/lib/ansible/errors/__init__.py index a10be9958ee..31ee4bdf1da 100644 --- a/lib/ansible/errors/__init__.py +++ b/lib/ansible/errors/__init__.py @@ -15,9 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import traceback @@ -38,7 +36,7 @@ from ansible.module_utils.common.text.converters import to_native, to_text class AnsibleError(Exception): - ''' + """ This is the base class for all errors raised from Ansible code, and can be instantiated with two optional parameters beyond the error message to control whether detailed information is displayed @@ -50,7 +48,7 @@ class AnsibleError(Exception): Where "obj" is some subclass of ansible.parsing.yaml.objects.AnsibleBaseYAMLObject, which should be returned by the DataLoader() class. - ''' + """ def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None): super(AnsibleError, self).__init__(message) @@ -68,14 +66,18 @@ class AnsibleError(Exception): from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject message = [self._message] + + # Add from previous exceptions + if self.orig_exc: + message.append('. %s' % to_native(self.orig_exc)) + + # Add from yaml to give specific file/line no if isinstance(self.obj, AnsibleBaseYAMLObject): extended_error = self._get_extended_error() if extended_error and not self._suppress_extended_error: message.append( '\n\n%s' % to_native(extended_error) ) - elif self.orig_exc: - message.append('. %s' % to_native(self.orig_exc)) return ''.join(message) @@ -90,11 +92,11 @@ class AnsibleError(Exception): return self.message def _get_error_lines_from_file(self, file_name, line_number): - ''' + """ Returns the line in the file which corresponds to the reported error location, as well as the line preceding it (if the error did not occur on the first line), to provide context to the error. - ''' + """ target_line = '' prev_line = '' @@ -123,7 +125,7 @@ class AnsibleError(Exception): return (target_line, prev_line) def _get_extended_error(self): - ''' + """ Given an object reporting the location of the exception in a file, return detailed information regarding it including: @@ -132,7 +134,7 @@ class AnsibleError(Exception): If this error was created with show_content=False, the reporting of content is suppressed, as the file contents may be sensitive (ie. vault data). 
- ''' + """ error_message = '' @@ -212,80 +214,85 @@ class AnsibleError(Exception): class AnsiblePromptInterrupt(AnsibleError): - '''User interrupt''' + """User interrupt""" class AnsiblePromptNoninteractive(AnsibleError): - '''Unable to get user input''' + """Unable to get user input""" class AnsibleAssertionError(AnsibleError, AssertionError): - '''Invalid assertion''' + """Invalid assertion""" pass class AnsibleOptionsError(AnsibleError): - ''' bad or incomplete options passed ''' + """ bad or incomplete options passed """ + pass + + +class AnsibleRequiredOptionError(AnsibleOptionsError): + """ a required option was not provided """ pass class AnsibleParserError(AnsibleError): - ''' something was detected early that is wrong about a playbook or data file ''' + """ something was detected early that is wrong about a playbook or data file """ pass class AnsibleInternalError(AnsibleError): - ''' internal safeguards tripped, something happened in the code that should never happen ''' + """ internal safeguards tripped, something happened in the code that should never happen """ pass class AnsibleRuntimeError(AnsibleError): - ''' ansible had a problem while running a playbook ''' + """ ansible had a problem while running a playbook """ pass class AnsibleModuleError(AnsibleRuntimeError): - ''' a module failed somehow ''' + """ a module failed somehow """ pass class AnsibleConnectionFailure(AnsibleRuntimeError): - ''' the transport / connection_plugin had a fatal error ''' + """ the transport / connection_plugin had a fatal error """ pass class AnsibleAuthenticationFailure(AnsibleConnectionFailure): - '''invalid username/password/key''' + """invalid username/password/key""" pass class AnsibleCallbackError(AnsibleRuntimeError): - ''' a callback failure ''' + """ a callback failure """ pass class AnsibleTemplateError(AnsibleRuntimeError): - '''A template related error''' + """A template related error""" pass class AnsibleFilterError(AnsibleTemplateError): - ''' a templating failure ''' + """ a templating failure """ pass class AnsibleLookupError(AnsibleTemplateError): - ''' a lookup failure ''' + """ a lookup failure """ pass class AnsibleUndefinedVariable(AnsibleTemplateError): - ''' a templating failure ''' + """ a templating failure """ pass class AnsibleFileNotFound(AnsibleRuntimeError): - ''' a file missing failure ''' + """ a file missing failure """ def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, paths=None, file_name=None): @@ -315,7 +322,7 @@ class AnsibleFileNotFound(AnsibleRuntimeError): # DO NOT USE as they will probably be removed soon. # We will port the action modules in our tree to use a context manager instead.
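One behavioural note on the AnsibleError.message change earlier in this file: the text of orig_exc is now always appended to the message, ahead of any YAML file/line context, rather than only when no YAML object was attached. A toy reproduction (only the joining behaviour is being shown; the message wording is whatever the caller passes):

    from ansible.errors import AnsibleError

    try:
        raise ValueError('bad int')
    except ValueError as e:
        err = AnsibleError('could not parse value', orig_exc=e)

    print(err.message)  # -> could not parse value. bad int
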
class AnsibleAction(AnsibleRuntimeError): - ''' Base Exception for Action plugin flow control ''' + """ Base Exception for Action plugin flow control """ def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None): @@ -328,7 +335,7 @@ class AnsibleAction(AnsibleRuntimeError): class AnsibleActionSkip(AnsibleAction): - ''' an action runtime skip''' + """ an action runtime skip""" def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None): super(AnsibleActionSkip, self).__init__(message=message, obj=obj, show_content=show_content, @@ -337,7 +344,7 @@ class AnsibleActionSkip(AnsibleAction): class AnsibleActionFail(AnsibleAction): - ''' an action runtime failure''' + """ an action runtime failure""" def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None): super(AnsibleActionFail, self).__init__(message=message, obj=obj, show_content=show_content, suppress_extended_error=suppress_extended_error, orig_exc=orig_exc, result=result) @@ -345,37 +352,37 @@ class AnsibleActionFail(AnsibleAction): class _AnsibleActionDone(AnsibleAction): - ''' an action runtime early exit''' + """ an action runtime early exit""" pass class AnsiblePluginError(AnsibleError): - ''' base class for Ansible plugin-related errors that do not need AnsibleError contextual data ''' + """ base class for Ansible plugin-related errors that do not need AnsibleError contextual data """ def __init__(self, message=None, plugin_load_context=None): super(AnsiblePluginError, self).__init__(message) self.plugin_load_context = plugin_load_context class AnsiblePluginRemovedError(AnsiblePluginError): - ''' a requested plugin has been removed ''' + """ a requested plugin has been removed """ pass class AnsiblePluginCircularRedirect(AnsiblePluginError): - '''a cycle was detected in plugin redirection''' + """a cycle was detected in plugin redirection""" pass class AnsibleCollectionUnsupportedVersionError(AnsiblePluginError): - '''a collection is not supported by this version of Ansible''' + """a collection is not supported by this version of Ansible""" pass class AnsibleFilterTypeError(AnsibleTemplateError, TypeError): - ''' a Jinja filter templating failure due to bad type''' + """ a Jinja filter templating failure due to bad type""" pass class AnsiblePluginNotFound(AnsiblePluginError): - ''' Indicates we did not find an Ansible plugin ''' + """ Indicates we did not find an Ansible plugin """ pass diff --git a/lib/ansible/errors/yaml_strings.py b/lib/ansible/errors/yaml_strings.py index e10a3f9d919..cc5cfb6c45a 100644 --- a/lib/ansible/errors/yaml_strings.py +++ b/lib/ansible/errors/yaml_strings.py @@ -15,9 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations __all__ = [ 'YAML_SYNTAX_ERROR', diff --git a/lib/ansible/executor/__init__.py b/lib/ansible/executor/__init__.py index ae8ccff5952..64fee52484f 100644 --- a/lib/ansible/executor/__init__.py +++ b/lib/ansible/executor/__init__.py @@ -15,6 +15,4 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations diff --git a/lib/ansible/executor/discovery/python_target.py b/lib/ansible/executor/discovery/python_target.py index 71377332a81..f66588dedc5 100644 --- a/lib/ansible/executor/discovery/python_target.py +++ b/lib/ansible/executor/discovery/python_target.py @@ -4,8 +4,7 @@ # FUTURE: this could be swapped out for our bundled version of distro to move more complete platform # logic to the targets, so long as we maintain Py2.6 compat and don't need to do any kind of script assembly -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json import platform diff --git a/lib/ansible/executor/interpreter_discovery.py b/lib/ansible/executor/interpreter_discovery.py index c95cf2ed8fc..24b2174d3c8 100644 --- a/lib/ansible/executor/interpreter_discovery.py +++ b/lib/ansible/executor/interpreter_discovery.py @@ -1,8 +1,7 @@ # Copyright: (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import bisect import json @@ -10,6 +9,7 @@ import pkgutil import re from ansible import constants as C +from ansible.errors import AnsibleError from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.distro import LinuxDistribution from ansible.utils.display import Display @@ -41,7 +41,7 @@ class InterpreterDiscoveryRequiredError(Exception): def discover_interpreter(action, interpreter_name, discovery_mode, task_vars): # interpreter discovery is a 2-step process with the target. First, we use a simple shell-agnostic bootstrap to # get the system type from uname, and find any random Python that can get us the info we need. For supported - # target OS types, we'll dispatch a Python script that calls plaform.dist() (for older platforms, where available) + # target OS types, we'll dispatch a Python script that calls platform.dist() (for older platforms, where available) # and brings back /etc/os-release (if present). The proper Python path is looked up in a table of known # distros/versions with included Pythons; if nothing is found, depending on the discovery mode, either the # default fallback of /usr/bin/python is used (if we know it's there), or discovery fails. 
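The comment above summarizes the two-step discovery flow, and the hunks below move the fixed fallback from /usr/bin/python to /usr/bin/python3. A simplified sketch of the final selection step, assuming the bootstrap already produced a found_interpreters list; the helper name is illustrative, not the module's actual API:

    def choose_interpreter(platform_interpreter, found_interpreters,
                           fallback=u'/usr/bin/python3'):
        # Prefer the distro-table answer when the bootstrap actually found
        # it on the target; otherwise take the first interpreter discovered,
        # or the fixed fallback when nothing was found at all.
        if platform_interpreter in found_interpreters:
            return platform_interpreter
        if found_interpreters:
            return found_interpreters[0]
        return fallback
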
@@ -53,7 +53,7 @@ def discover_interpreter(action, interpreter_name, discovery_mode, task_vars): host = task_vars.get('inventory_hostname', 'unknown') res = None platform_type = 'unknown' - found_interpreters = [u'/usr/bin/python'] # fallback value + found_interpreters = [u'/usr/bin/python3'] # fallback value is_auto_legacy = discovery_mode.startswith('auto_legacy') is_silent = discovery_mode.endswith('_silent') @@ -89,7 +89,7 @@ def discover_interpreter(action, interpreter_name, discovery_mode, task_vars): action._discovery_warnings.append(u'No python interpreters found for ' u'host {0} (tried {1})'.format(host, bootstrap_python_list)) # this is lame, but returning None or throwing an exception is uglier - return u'/usr/bin/python' + return u'/usr/bin/python3' if platform_type != 'linux': raise NotImplementedError('unsupported platform for extended discovery: {0}'.format(to_native(platform_type))) @@ -106,7 +106,6 @@ def discover_interpreter(action, interpreter_name, discovery_mode, task_vars): platform_info = json.loads(res.get('stdout')) distro, version = _get_linux_distro(platform_info) - if not distro or not version: raise NotImplementedError('unable to get Linux distribution/version info') @@ -120,15 +119,15 @@ def discover_interpreter(action, interpreter_name, discovery_mode, task_vars): # provide a transition period for hosts that were using /usr/bin/python previously (but shouldn't have been) if is_auto_legacy: - if platform_interpreter != u'/usr/bin/python' and u'/usr/bin/python' in found_interpreters: + if platform_interpreter != u'/usr/bin/python3' and u'/usr/bin/python3' in found_interpreters: if not is_silent: action._discovery_warnings.append( u"Distribution {0} {1} on host {2} should use {3}, but is using " - u"/usr/bin/python for backward compatibility with prior Ansible releases. " + u"/usr/bin/python3 for backward compatibility with prior Ansible releases. " u"See {4} for more information" .format(distro, version, host, platform_interpreter, get_versioned_doclink('reference_appendices/interpreter_discovery.html'))) - return u'/usr/bin/python' + return u'/usr/bin/python3' if platform_interpreter not in found_interpreters: if platform_interpreter not in bootstrap_python_list: @@ -150,6 +149,8 @@ def discover_interpreter(action, interpreter_name, discovery_mode, task_vars): return platform_interpreter except NotImplementedError as ex: display.vvv(msg=u'Python interpreter discovery fallback ({0})'.format(to_text(ex)), host=host) + except AnsibleError: + raise except Exception as ex: if not is_silent: display.warning(msg=u'Unhandled error in Python interpreter discovery for host {0}: {1}'.format(host, to_text(ex))) diff --git a/lib/ansible/executor/module_common.py b/lib/ansible/executor/module_common.py index b09601fc207..d4c2eab600f 100644 --- a/lib/ansible/executor/module_common.py +++ b/lib/ansible/executor/module_common.py @@ -16,9 +16,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import ast import base64 @@ -26,6 +24,7 @@ import datetime import json import os import shlex +import time import zipfile import re import pkgutil @@ -75,7 +74,7 @@ _MODULE_UTILS_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils # ****************************************************************************** -ANSIBALLZ_TEMPLATE = u'''%(shebang)s +ANSIBALLZ_TEMPLATE = u"""%(shebang)s %(coding)s _ANSIBALLZ_WRAPPER = True # For test-module.py script to tell this is a ANSIBALLZ_WRAPPER # This code is part of Ansible, but is an independent component. @@ -166,7 +165,7 @@ def _ansiballz_main(): else: PY3 = True - ZIPDATA = """%(zipdata)s""" + ZIPDATA = %(zipdata)r # Note: temp_path isn't needed once we switch to zipimport def invoke_module(modlib_path, temp_path, json_params): @@ -177,13 +176,13 @@ def _ansiballz_main(): z = zipfile.ZipFile(modlib_path, mode='a') # py3: modlib_path will be text, py2: it's bytes. Need bytes at the end - sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% modlib_path + sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% modlib_path sitecustomize = sitecustomize.encode('utf-8') # Use a ZipInfo to work around zipfile limitation on hosts with # clocks set to a pre-1980 year (for instance, Raspberry Pi) zinfo = zipfile.ZipInfo() zinfo.filename = 'sitecustomize.py' - zinfo.date_time = ( %(year)i, %(month)i, %(day)i, %(hour)i, %(minute)i, %(second)i) + zinfo.date_time = %(date_time)s z.writestr(zinfo, sitecustomize) z.close() @@ -196,7 +195,7 @@ def _ansiballz_main(): basic._ANSIBLE_ARGS = json_params %(coverage)s # Run the module! By importing it as '__main__', it thinks it is executing as a script - runpy.run_module(mod_name='%(module_fqn)s', init_globals=dict(_module_fqn='%(module_fqn)s', _modlib_path=modlib_path), + runpy.run_module(mod_name=%(module_fqn)r, init_globals=dict(_module_fqn=%(module_fqn)r, _modlib_path=modlib_path), run_name='__main__', alter_sys=True) # Ansible modules must exit themselves @@ -287,7 +286,7 @@ def _ansiballz_main(): basic._ANSIBLE_ARGS = json_params # Run the module! 
By importing it as '__main__', it thinks it is executing as a script - runpy.run_module(mod_name='%(module_fqn)s', init_globals=None, run_name='__main__', alter_sys=True) + runpy.run_module(mod_name=%(module_fqn)r, init_globals=None, run_name='__main__', alter_sys=True) # Ansible modules must exit themselves print('{"msg": "New-style module did not handle its own exit", "failed": true}') @@ -312,9 +311,9 @@ def _ansiballz_main(): # store this in remote_tmpdir (use system tempdir instead) # Only need to use [ansible_module]_payload_ in the temp_path until we move to zipimport # (this helps ansible-test produce coverage stats) - temp_path = tempfile.mkdtemp(prefix='ansible_%(ansible_module)s_payload_') + temp_path = tempfile.mkdtemp(prefix='ansible_' + %(ansible_module)r + '_payload_') - zipped_mod = os.path.join(temp_path, 'ansible_%(ansible_module)s_payload.zip') + zipped_mod = os.path.join(temp_path, 'ansible_' + %(ansible_module)r + '_payload.zip') with open(zipped_mod, 'wb') as modlib: modlib.write(base64.b64decode(ZIPDATA)) @@ -334,10 +333,10 @@ def _ansiballz_main(): if __name__ == '__main__': _ansiballz_main() -''' +""" -ANSIBALLZ_COVERAGE_TEMPLATE = ''' - os.environ['COVERAGE_FILE'] = '%(coverage_output)s=python-%%s=coverage' %% '.'.join(str(v) for v in sys.version_info[:2]) +ANSIBALLZ_COVERAGE_TEMPLATE = """ + os.environ['COVERAGE_FILE'] = %(coverage_output)r + '=python-%%s=coverage' %% '.'.join(str(v) for v in sys.version_info[:2]) import atexit @@ -347,7 +346,7 @@ ANSIBALLZ_COVERAGE_TEMPLATE = ''' print('{"msg": "Could not import `coverage` module.", "failed": true}') sys.exit(1) - cov = coverage.Coverage(config_file='%(coverage_config)s') + cov = coverage.Coverage(config_file=%(coverage_config)r) def atexit_coverage(): cov.stop() @@ -356,9 +355,9 @@ ANSIBALLZ_COVERAGE_TEMPLATE = ''' atexit.register(atexit_coverage) cov.start() -''' +""" -ANSIBALLZ_COVERAGE_CHECK_TEMPLATE = ''' +ANSIBALLZ_COVERAGE_CHECK_TEMPLATE = """ try: if PY3: import importlib.util @@ -370,9 +369,9 @@ ANSIBALLZ_COVERAGE_CHECK_TEMPLATE = ''' except ImportError: print('{"msg": "Could not find `coverage` module.", "failed": true}') sys.exit(1) -''' +""" -ANSIBALLZ_RLIMIT_TEMPLATE = ''' +ANSIBALLZ_RLIMIT_TEMPLATE = """ import resource existing_soft, existing_hard = resource.getrlimit(resource.RLIMIT_NOFILE) @@ -386,7 +385,7 @@ ANSIBALLZ_RLIMIT_TEMPLATE = ''' except ValueError: # some platforms (eg macOS) lie about their hard limit pass -''' +""" def _strip_comments(source): @@ -870,7 +869,17 @@ class CollectionModuleUtilLocator(ModuleUtilLocatorBase): return name_parts[5:] # eg, foo.bar for ansible_collections.ns.coll.plugins.module_utils.foo.bar -def recursive_finder(name, module_fqn, module_data, zf): +def _make_zinfo(filename, date_time, zf=None): + zinfo = zipfile.ZipInfo( + filename=filename, + date_time=date_time + ) + if zf: + zinfo.compress_type = zf.compression + return zinfo + + +def recursive_finder(name, module_fqn, module_data, zf, date_time=None): """ Using ModuleDepFinder, make sure we have all of the module_utils files that the module and its module_utils files needs. (no longer actually recursive) @@ -880,6 +889,8 @@ def recursive_finder(name, module_fqn, module_data, zf): :arg zf: An open :python:class:`zipfile.ZipFile` object that holds the Ansible module payload which we're assembling """ + if date_time is None: + date_time = time.gmtime()[:6] # py_module_cache maps python module names to a tuple of the code in the module # and the pathname to the module. 
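The date_time threading above exists because zipfile stamps each entry with the current local time unless handed a prebuilt ZipInfo, which defeats reproducible payloads (and, per the comment in the wrapper, misbehaves on clocks set before 1980). A standard-library sketch of the pattern _make_zinfo wraps:

    import io
    import time
    import zipfile

    date_time = time.gmtime()[:6]  # one (Y, M, D, h, m, s) tuple reused for every member

    buf = io.BytesIO()
    with zipfile.ZipFile(buf, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:
        zinfo = zipfile.ZipInfo(filename='ansible/modules/example.py', date_time=date_time)
        zinfo.compress_type = zf.compression  # ZipInfo defaults to ZIP_STORED otherwise
        zf.writestr(zinfo, b'# module source goes here\n')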
@@ -976,7 +987,10 @@ def recursive_finder(name, module_fqn, module_data, zf): for py_module_name in py_module_cache: py_module_file_name = py_module_cache[py_module_name][1] - zf.writestr(py_module_file_name, py_module_cache[py_module_name][0]) + zf.writestr( + _make_zinfo(py_module_file_name, date_time, zf=zf), + py_module_cache[py_module_name][0] + ) mu_file = to_text(py_module_file_name, errors='surrogate_or_strict') display.vvvvv("Including module_utils file %s" % mu_file) @@ -1020,13 +1034,16 @@ def _get_ansible_module_fqn(module_path): return remote_module_fqn -def _add_module_to_zip(zf, remote_module_fqn, b_module_data): +def _add_module_to_zip(zf, date_time, remote_module_fqn, b_module_data): """Add a module from ansible or from an ansible collection into the module zip""" module_path_parts = remote_module_fqn.split('.') # Write the module module_path = '/'.join(module_path_parts) + '.py' - zf.writestr(module_path, b_module_data) + zf.writestr( + _make_zinfo(module_path, date_time, zf=zf), + b_module_data + ) # Write the __init__.py's necessary to get there if module_path_parts[0] == 'ansible': @@ -1045,7 +1062,10 @@ def _add_module_to_zip(zf, remote_module_fqn, b_module_data): continue # Note: We don't want to include more than one ansible module in a payload at this time # so no need to fill the __init__.py with namespace code - zf.writestr(package_path, b'') + zf.writestr( + _make_zinfo(package_path, date_time, zf=zf), + b'' + ) def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression, async_timeout, become, @@ -1110,6 +1130,10 @@ def _find_module_utils(module_name, b_module_data, module_path, module_args, tas remote_module_fqn = 'ansible.modules.%s' % module_name if module_substyle == 'python': + date_time = time.gmtime()[:6] + if date_time[0] < 1980: + date_string = datetime.datetime(*date_time, tzinfo=datetime.timezone.utc).strftime('%c') + raise AnsibleError(f'Cannot create zipfile due to pre-1980 configured date: {date_string}') params = dict(ANSIBLE_MODULE_ARGS=module_args,) try: python_repred_params = repr(json.dumps(params, cls=AnsibleJSONEncoder, vault_to_text=True)) @@ -1155,10 +1179,10 @@ def _find_module_utils(module_name, b_module_data, module_path, module_args, tas zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method) # walk the module imports, looking for module_utils to send- they'll be added to the zipfile - recursive_finder(module_name, remote_module_fqn, b_module_data, zf) + recursive_finder(module_name, remote_module_fqn, b_module_data, zf, date_time) display.debug('ANSIBALLZ: Writing module into payload') - _add_module_to_zip(zf, remote_module_fqn, b_module_data) + _add_module_to_zip(zf, date_time, remote_module_fqn, b_module_data) zf.close() zipdata = base64.b64encode(zipoutput.getvalue()) @@ -1241,7 +1265,6 @@ def _find_module_utils(module_name, b_module_data, module_path, module_args, tas else: coverage = '' - now = datetime.datetime.utcnow() output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict( zipdata=zipdata, ansible_module=module_name, @@ -1249,12 +1272,7 @@ def _find_module_utils(module_name, b_module_data, module_path, module_args, tas params=python_repred_params, shebang=shebang, coding=ENCODING_STRING, - year=now.year, - month=now.month, - day=now.day, - hour=now.hour, - minute=now.minute, - second=now.second, + date_time=date_time, coverage=coverage, rlimit=rlimit, ))) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index 
820db017fee..e512b64b840 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -15,9 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import fnmatch @@ -52,7 +50,7 @@ class FailedStates(IntFlag): TASKS = 2 RESCUE = 4 ALWAYS = 8 - HANDLERS = 16 + HANDLERS = 16 # NOTE not in use anymore class HostState: @@ -429,31 +427,51 @@ class PlayIterator: # might be there from previous flush state.handlers = self.handlers[:] state.update_handlers = False - state.cur_handlers_task = 0 - if state.fail_state & FailedStates.HANDLERS == FailedStates.HANDLERS: - state.update_handlers = True - state.run_state = IteratingStates.COMPLETE - else: - while True: - try: - task = state.handlers[state.cur_handlers_task] - except IndexError: - task = None - state.run_state = state.pre_flushing_run_state - state.update_handlers = True - break - else: - state.cur_handlers_task += 1 - if task.is_host_notified(host): - break + while True: + try: + task = state.handlers[state.cur_handlers_task] + except IndexError: + task = None + state.cur_handlers_task = 0 + state.run_state = state.pre_flushing_run_state + state.update_handlers = True + break + else: + state.cur_handlers_task += 1 + if task.is_host_notified(host): + return state, task elif state.run_state == IteratingStates.COMPLETE: return (state, None) # if something above set the task, break out of the loop now if task: - break + # skip implicit flush_handlers if there are no handlers notified + if ( + task.implicit + and task.action in C._ACTION_META + and task.args.get('_raw_params', None) == 'flush_handlers' + and ( + # the state store in the `state` variable could be a nested state, + # notifications are always stored in the top level state, get it here + not self.get_state_for_host(host.name).handler_notifications + # in case handlers notifying other handlers, the notifications are not + # saved in `handler_notifications` and handlers are notified directly + # to prevent duplicate handler runs, so check whether any handler + # is notified + and all(not h.notified_hosts for h in self.handlers) + ) + ): + display.debug("No handler notifications for %s, skipping." 
% host.name) + elif ( + (role := task._role) + and role._metadata.allow_duplicates is False + and host.name in self._play._get_cached_role(role)._completed + ): + display.debug("'%s' skipped because role has already run" % task) + else: + break return (state, task) @@ -487,20 +505,16 @@ class PlayIterator: else: state.fail_state |= FailedStates.ALWAYS state.run_state = IteratingStates.COMPLETE - elif state.run_state == IteratingStates.HANDLERS: - state.fail_state |= FailedStates.HANDLERS - state.update_handlers = True - if state._blocks[state.cur_block].rescue: - state.run_state = IteratingStates.RESCUE - elif state._blocks[state.cur_block].always: - state.run_state = IteratingStates.ALWAYS - else: - state.run_state = IteratingStates.COMPLETE return state def mark_host_failed(self, host): s = self.get_host_state(host) display.debug("marking host %s failed, current state: %s" % (host, s)) + if s.run_state == IteratingStates.HANDLERS: + # we are failing `meta: flush_handlers`, so just reset the state to whatever + # it was before and let `_set_failed_state` figure out the next state + s.run_state = s.pre_flushing_run_state + s.update_handlers = True s = self._set_failed_state(s) display.debug("^ failed state is now: %s" % s) self.set_state_for_host(host.name, s) @@ -516,8 +530,6 @@ class PlayIterator: return True elif state.run_state == IteratingStates.ALWAYS and self._check_failed_state(state.always_child_state): return True - elif state.run_state == IteratingStates.HANDLERS and state.fail_state & FailedStates.HANDLERS == FailedStates.HANDLERS: - return True elif state.fail_state != FailedStates.NONE: if state.run_state == IteratingStates.RESCUE and state.fail_state & FailedStates.RESCUE == 0: return False @@ -551,9 +563,9 @@ class PlayIterator: self._clear_state_errors(state.always_child_state) def get_active_state(self, state): - ''' + """ Finds the active state, recursively if necessary when there are child states. - ''' + """ if state.run_state == IteratingStates.TASKS and state.tasks_child_state is not None: return self.get_active_state(state.tasks_child_state) elif state.run_state == IteratingStates.RESCUE and state.rescue_child_state is not None: @@ -563,10 +575,10 @@ class PlayIterator: return state def is_any_block_rescuing(self, state): - ''' + """ Given the current HostState state, determines if the current block, or any child blocks, are in rescue mode. - ''' + """ if state.run_state == IteratingStates.TASKS and state.get_current_block().rescue: return True if state.tasks_child_state is not None: @@ -647,3 +659,19 @@ class PlayIterator: def clear_notification(self, hostname: str, notification: str) -> None: self._host_states[hostname].handler_notifications.remove(notification) + + def end_host(self, hostname: str) -> None: + """Used by ``end_host``, ``end_batch`` and ``end_play`` meta tasks to end executing given host.""" + state = self.get_active_state(self.get_state_for_host(hostname)) + if state.run_state == IteratingStates.RESCUE: + # This is a special case for when ending a host occurs in rescue. + # By definition the meta task responsible for ending the host + # is the last task, so we need to clear the fail state to mark + # the host as rescued. + # The reason we need to do that is because this operation is + # normally done when PlayIterator transitions from rescue to + # always when only then we can say that rescue didn't fail + # but with ending a host via meta task, we don't get to that transition. 
+ self.set_fail_state_for_host(hostname, FailedStates.NONE) + self.set_run_state_for_host(hostname, IteratingStates.COMPLETE) + self._play._removed_hosts.append(hostname) diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index 3feb971d774..468c4bdc709 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -15,9 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os @@ -42,10 +40,10 @@ display = Display() class PlaybookExecutor: - ''' + """ This is the primary class for executing playbooks, and thus the basis for bin/ansible-playbook operation. - ''' + """ def __init__(self, playbooks, inventory, variable_manager, loader, passwords): self._playbooks = playbooks @@ -76,10 +74,10 @@ class PlaybookExecutor: set_default_transport() def run(self): - ''' + """ Run the given playbook, based on the settings in the play which may limit the runs to serialized groups, etc. - ''' + """ result = 0 entrylist = [] @@ -148,7 +146,7 @@ class PlaybookExecutor: encrypt = var.get("encrypt", None) salt_size = var.get("salt_size", None) salt = var.get("salt", None) - unsafe = var.get("unsafe", None) + unsafe = boolean(var.get("unsafe", False)) if vname not in self._variable_manager.extra_vars: if self._tqm: @@ -197,10 +195,7 @@ class PlaybookExecutor: result = self._tqm.RUN_FAILED_HOSTS break_play = True - # check the number of failures here, to see if they're above the maximum - # failure percentage allowed, or if any errors are fatal. If either of those - # conditions are met, we break out, otherwise we only break out if the entire - # batch failed + # check the number of failures here and break out if the entire batch failed failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \ (previously_failed + previously_unreachable) @@ -272,10 +267,10 @@ class PlaybookExecutor: return result def _get_serialized_batches(self, play): - ''' + """ Returns a list of hosts, subdivided into batches based on the serial size specified in the play. - ''' + """ # make sure we have a unique list of hosts all_hosts = self._inventory.get_hosts(play.hosts, order=play.order) @@ -318,11 +313,11 @@ class PlaybookExecutor: return serialized_batches def _generate_retry_inventory(self, retry_path, replay_hosts): - ''' + """ Called when a playbook run fails. It generates an inventory which allows re-running on ONLY the failed hosts. This may duplicate some variable information in group_vars/host_vars but that is ok, and expected. - ''' + """ try: makedirs_safe(os.path.dirname(retry_path)) with open(retry_path, 'w') as fd: diff --git a/lib/ansible/executor/powershell/become_wrapper.ps1 b/lib/ansible/executor/powershell/become_wrapper.ps1 index f40e2658f5f..cea42c128aa 100644 --- a/lib/ansible/executor/powershell/become_wrapper.ps1 +++ b/lib/ansible/executor/powershell/become_wrapper.ps1 @@ -116,12 +116,11 @@ Write-AnsibleLog "INFO - parsed become input, user: '$username', type: '$logon_t # set to Stop and cannot be changed. Also need to split the payload from the wrapper to prevent potentially # sensitive content from being logged by the scriptblock logger. 
$bootstrap_wrapper = { - &chcp.com 65001 > $null - $exec_wrapper_str = [System.Console]::In.ReadToEnd() - $split_parts = $exec_wrapper_str.Split(@("`0`0`0`0"), 2, [StringSplitOptions]::RemoveEmptyEntries) + [Console]::InputEncoding = [Console]::OutputEncoding = New-Object System.Text.UTF8Encoding + $ew = [System.Console]::In.ReadToEnd() + $split_parts = $ew.Split(@("`0`0`0`0"), 2, [StringSplitOptions]::RemoveEmptyEntries) Set-Variable -Name json_raw -Value $split_parts[1] - $exec_wrapper = [ScriptBlock]::Create($split_parts[0]) - &$exec_wrapper + &([ScriptBlock]::Create($split_parts[0])) } $exec_command = [System.Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($bootstrap_wrapper.ToString())) $lp_command_line = "powershell.exe -NonInteractive -NoProfile -ExecutionPolicy Bypass -EncodedCommand $exec_command" diff --git a/lib/ansible/executor/powershell/bootstrap_wrapper.ps1 b/lib/ansible/executor/powershell/bootstrap_wrapper.ps1 index cdba80cbb01..8e7141eb515 100644 --- a/lib/ansible/executor/powershell/bootstrap_wrapper.ps1 +++ b/lib/ansible/executor/powershell/bootstrap_wrapper.ps1 @@ -1,4 +1,4 @@ -&chcp.com 65001 > $null +try { [Console]::InputEncoding = [Console]::OutputEncoding = New-Object System.Text.UTF8Encoding } catch { $null = $_ } if ($PSVersionTable.PSVersion -lt [Version]"3.0") { '{"failed":true,"msg":"Ansible requires PowerShell v3.0 or newer"}' @@ -9,5 +9,4 @@ $exec_wrapper_str = $input | Out-String $split_parts = $exec_wrapper_str.Split(@("`0`0`0`0"), 2, [StringSplitOptions]::RemoveEmptyEntries) If (-not $split_parts.Length -eq 2) { throw "invalid payload" } Set-Variable -Name json_raw -Value $split_parts[1] -$exec_wrapper = [ScriptBlock]::Create($split_parts[0]) -&$exec_wrapper +& ([ScriptBlock]::Create($split_parts[0])) diff --git a/lib/ansible/executor/powershell/exec_wrapper.ps1 b/lib/ansible/executor/powershell/exec_wrapper.ps1 index 0f97bdfb8a5..4ecc1367c84 100644 --- a/lib/ansible/executor/powershell/exec_wrapper.ps1 +++ b/lib/ansible/executor/powershell/exec_wrapper.ps1 @@ -16,7 +16,7 @@ begin { .SYNOPSIS Converts a JSON string to a Hashtable/Array in the fastest way possible. Unfortunately ConvertFrom-Json is still faster but outputs - a PSCustomObject which is combersone for module consumption. + a PSCustomObject which is cumbersome for module consumption. .PARAMETER InputObject [String] The JSON string to deserialize. 
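For reference, the $exec_command construction in become_wrapper.ps1 above maps onto two standard-library calls: -EncodedCommand expects base64 over the UTF-16LE ("Unicode") bytes of the script. The same encoding in Python:

    import base64

    bootstrap = '[Console]::InputEncoding = [Console]::OutputEncoding = New-Object System.Text.UTF8Encoding'
    encoded = base64.b64encode(bootstrap.encode('utf-16-le')).decode('ascii')
    command_line = 'powershell.exe -NonInteractive -NoProfile -ExecutionPolicy Bypass -EncodedCommand ' + encoded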
@@ -178,6 +178,7 @@ $($ErrorRecord.InvocationInfo.PositionMessage) Write-AnsibleLog "INFO - converting json raw to a payload" "exec_wrapper" $payload = ConvertFrom-AnsibleJson -InputObject $json_raw + $payload.module_args._ansible_exec_wrapper_warnings = [System.Collections.Generic.List[string]]@() # TODO: handle binary modules # TODO: handle persistence diff --git a/lib/ansible/executor/powershell/module_manifest.py b/lib/ansible/executor/powershell/module_manifest.py index 0720d23e971..da69c9dacb5 100644 --- a/lib/ansible/executor/powershell/module_manifest.py +++ b/lib/ansible/executor/powershell/module_manifest.py @@ -1,23 +1,22 @@ # (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import base64 import errno import json import os import pkgutil -import random +import secrets import re +from importlib import import_module from ansible.module_utils.compat.version import LooseVersion from ansible import constants as C from ansible.errors import AnsibleError from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text -from ansible.module_utils.compat.importlib import import_module from ansible.plugins.loader import ps_module_utils_loader from ansible.utils.collection_loader import resource_from_fqcr @@ -255,9 +254,8 @@ def _slurp(path): if not os.path.exists(path): raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path)) - fd = open(path, 'rb') - data = fd.read() - fd.close() + with open(path, 'rb') as fd: + data = fd.read() return data @@ -319,7 +317,7 @@ def _create_powershell_wrapper(b_module_data, module_path, module_args, exec_manifest["actions"].insert(0, 'async_watchdog') exec_manifest["actions"].insert(0, 'async_wrapper') - exec_manifest["async_jid"] = f'j{random.randint(0, 999999999999)}' + exec_manifest["async_jid"] = f'j{secrets.randbelow(999999999999)}' exec_manifest["async_timeout_sec"] = async_timeout exec_manifest["async_startup_timeout"] = C.config.get_config_value("WIN_ASYNC_STARTUP_TIMEOUT", variables=task_vars) diff --git a/lib/ansible/executor/powershell/module_powershell_wrapper.ps1 b/lib/ansible/executor/powershell/module_powershell_wrapper.ps1 index c35c84cfc86..f79dd6fbc86 100644 --- a/lib/ansible/executor/powershell/module_powershell_wrapper.ps1 +++ b/lib/ansible/executor/powershell/module_powershell_wrapper.ps1 @@ -29,7 +29,18 @@ if ($csharp_utils.Count -gt 0) { # add any C# references so the module does not have to do so $new_tmp = [System.Environment]::ExpandEnvironmentVariables($Payload.module_args["_ansible_remote_tmp"]) - Add-CSharpType -References $csharp_utils -TempPath $new_tmp -IncludeDebugInfo + + # We use a fake module object to capture warnings + $fake_module = [PSCustomObject]@{ + Tmpdir = $new_tmp + Verbosity = 3 + } + $warning_func = New-Object -TypeName System.Management.Automation.PSScriptMethod -ArgumentList Warn, { + param($message) + $Payload.module_args._ansible_exec_wrapper_warnings.Add($message) + } + $fake_module.PSObject.Members.Add($warning_func) + Add-CSharpType -References $csharp_utils -AnsibleModule $fake_module } if ($Payload.ContainsKey("coverage") -and $null -ne $host.Runspace -and $null -ne $host.Runspace.Debugger) { diff --git a/lib/ansible/executor/powershell/module_wrapper.ps1 b/lib/ansible/executor/powershell/module_wrapper.ps1 index 20a967731b7..1cfaf3ceae1 100644 --- 
a/lib/ansible/executor/powershell/module_wrapper.ps1 +++ b/lib/ansible/executor/powershell/module_wrapper.ps1 @@ -207,7 +207,10 @@ if ($null -ne $rc) { # with the trap handler that's now in place, this should only write to the output if # $ErrorActionPreference != "Stop", that's ok because this is sent to the stderr output # for a user to manually debug if something went horribly wrong -if ($ps.HadErrors -or ($PSVersionTable.PSVersion.Major -lt 4 -and $ps.Streams.Error.Count -gt 0)) { +if ( + $ps.Streams.Error.Count -and + ($ps.HadErrors -or $PSVersionTable.PSVersion.Major -lt 4) +) { Write-AnsibleLog "WARN - module had errors, outputting error info $ModuleName" "module_wrapper" # if the rc wasn't explicitly set, we return an exit code of 1 if ($null -eq $rc) { diff --git a/lib/ansible/executor/process/__init__.py b/lib/ansible/executor/process/__init__.py index ae8ccff5952..64fee52484f 100644 --- a/lib/ansible/executor/process/__init__.py +++ b/lib/ansible/executor/process/__init__.py @@ -15,6 +15,4 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations diff --git a/lib/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py index c043137c95f..f5e7b979f42 100644 --- a/lib/ansible/executor/process/worker.py +++ b/lib/ansible/executor/process/worker.py @@ -15,9 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import sys @@ -49,11 +47,11 @@ class WorkerQueue(Queue): class WorkerProcess(multiprocessing_context.Process): # type: ignore[name-defined] - ''' + """ The worker thread class, which uses TaskExecutor to run tasks read from a job queue and pushes results into a results queue for reading later. - ''' + """ def __init__(self, final_q, task_vars, host, task, play_context, loader, variable_manager, shared_loader_obj, worker_id): @@ -93,13 +91,13 @@ class WorkerProcess(multiprocessing_context.Process): # type: ignore[name-defin self._new_stdin = open(os.devnull) def start(self): - ''' + """ multiprocessing.Process replaces the worker's stdin with a new file but we wish to preserve it if it is connected to a terminal. Therefore dup a copy prior to calling the real start(), ensuring the descriptor is preserved somewhere in the new child, and make sure it is closed in the parent when start() completes. - ''' + """ self._save_stdin() # FUTURE: this lock can be removed once a more generalized pre-fork thread pause is in place @@ -110,12 +108,12 @@ class WorkerProcess(multiprocessing_context.Process): # type: ignore[name-defin self._new_stdin.close() def _hard_exit(self, e): - ''' + """ There is no safe exception to return to higher level code that does not risk an innocent try/except finding itself executing in the wrong process. All code executing above WorkerProcess.run() on the stack conceptually belongs to another program. 
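A compact sketch of the guarantee the _hard_exit docstring above describes: os._exit() terminates the forked worker immediately, without unwinding into try/except/finally frames or atexit handlers inherited from the parent, which is exactly why it is safe where raising is not:

    import os
    import sys

    def hard_exit(exc):
        try:
            sys.stderr.write('WORKER HARD EXIT: %s\n' % exc)  # best-effort breadcrumb
        finally:
            os._exit(1)  # never returns; nothing above this frame gets to run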
- ''' + """ try: display.debug(u"WORKER HARD EXIT: %s" % to_text(e)) @@ -128,7 +126,7 @@ class WorkerProcess(multiprocessing_context.Process): # type: ignore[name-defin os._exit(1) def run(self): - ''' + """ Wrap _run() to ensure no possibility an errant exception can cause control to return to the StrategyBase task loop, or any other code higher in the stack. @@ -136,7 +134,7 @@ class WorkerProcess(multiprocessing_context.Process): # type: ignore[name-defin As multiprocessing in Python 2.x provides no protection, it is possible a try/except added in far-away code can cause a crashed child process to suddenly assume the role and prior state of its parent. - ''' + """ try: return self._run() except BaseException as e: @@ -157,11 +155,11 @@ class WorkerProcess(multiprocessing_context.Process): # type: ignore[name-defin sys.stdout = sys.stderr = open(os.devnull, 'w') def _run(self): - ''' + """ Called when the process is started. Pushes the result onto the results queue. We also remove the host from the blocked hosts list, to signify that they are ready for their next task. - ''' + """ # import cProfile, pstats, StringIO # pr = cProfile.Profile() diff --git a/lib/ansible/executor/stats.py b/lib/ansible/executor/stats.py index 13a053ba3ab..acedf10759f 100644 --- a/lib/ansible/executor/stats.py +++ b/lib/ansible/executor/stats.py @@ -15,9 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from collections.abc import MutableMapping @@ -25,7 +23,7 @@ from ansible.utils.vars import merge_hash class AggregateStats: - ''' holds stats about per-host activity during playbook runs ''' + """ holds stats about per-host activity during playbook runs """ def __init__(self): @@ -42,7 +40,7 @@ class AggregateStats: self.custom = {} def increment(self, what, host): - ''' helper function to bump a statistic ''' + """ helper function to bump a statistic """ self.processed[host] = 1 prev = (getattr(self, what)).get(host, 0) @@ -59,7 +57,7 @@ class AggregateStats: _what[host] = 0 def summarize(self, host): - ''' return information about a particular host ''' + """ return information about a particular host """ return dict( ok=self.ok.get(host, 0), @@ -72,7 +70,7 @@ class AggregateStats: ) def set_custom_stats(self, which, what, host=None): - ''' allow setting of a custom stat''' + """ allow setting of a custom stat""" if host is None: host = '_run' @@ -82,7 +80,7 @@ class AggregateStats: self.custom[host][which] = what def update_custom_stats(self, which, what, host=None): - ''' allow aggregation of a custom stat''' + """ allow aggregation of a custom stat""" if host is None: host = '_run' diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 784f80a5a1c..ff1c33871f2 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -1,27 +1,27 @@ # (c) 2012-2014, Michael DeHaan # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os -import pty import time import json +import pathlib import signal import subprocess import sys -import termios import traceback from ansible import constants as C +from ansible.cli import scripts 
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure, AnsibleActionFail, AnsibleActionSkip from ansible.executor.task_result import TaskResult from ansible.executor.module_common import get_action_args_with_defaults from ansible.module_utils.parsing.convert_bool import boolean from ansible.module_utils.six import binary_type from ansible.module_utils.common.text.converters import to_text, to_native -from ansible.module_utils.connection import write_to_file_descriptor +from ansible.module_utils.connection import write_to_stream +from ansible.module_utils.six import string_types from ansible.playbook.conditional import Conditional from ansible.playbook.task import Task from ansible.plugins import get_plugin_class @@ -32,7 +32,7 @@ from ansible.utils.listify import listify_lookup_plugin_terms from ansible.utils.unsafe_proxy import to_unsafe_text, wrap_var from ansible.vars.clean import namespace_facts, clean_facts from ansible.utils.display import Display -from ansible.utils.vars import combine_vars, isidentifier +from ansible.utils.vars import combine_vars display = Display() @@ -43,18 +43,28 @@ __all__ = ['TaskExecutor'] class TaskTimeoutError(BaseException): - pass + def __init__(self, message="", frame=None): + + if frame is not None: + orig = frame + root = pathlib.Path(__file__).parent + while not pathlib.Path(frame.f_code.co_filename).is_relative_to(root): + frame = frame.f_back + + self.frame = 'Interrupted at %s called from %s' % (orig, frame) + + super(TaskTimeoutError, self).__init__(message) def task_timeout(signum, frame): - raise TaskTimeoutError + raise TaskTimeoutError(frame=frame) def remove_omit(task_args, omit_token): - ''' + """ Remove args with a value equal to the ``omit_token`` recursively to align with now having suboptions in the argument_spec - ''' + """ if not isinstance(task_args, dict): return task_args @@ -75,12 +85,12 @@ def remove_omit(task_args, omit_token): class TaskExecutor: - ''' + """ This is the main worker class for the executor pipeline, which handles loading an action plugin to actually dispatch the task to a given host. This class roughly corresponds to the old Runner() class. - ''' + """ def __init__(self, host, task, job_vars, play_context, new_stdin, loader, shared_loader_obj, final_q, variable_manager): self._host = host @@ -98,12 +108,12 @@ class TaskExecutor: self._task.squash() def run(self): - ''' + """ The main executor entrypoint, where we determine if the specified task requires looping and either runs the task with self._run_loop() or self._execute(). After that, the returned results are parsed and returned as a dict. - ''' + """ display.debug("in run() - task %s" % self._task._uuid) @@ -140,6 +150,7 @@ class TaskExecutor: if 'unreachable' in item and item['unreachable']: item_ignore_unreachable = item.pop('_ansible_ignore_unreachable') if not res.get('unreachable'): + res['unreachable'] = True self._task.ignore_unreachable = item_ignore_unreachable elif self._task.ignore_unreachable and not item_ignore_unreachable: self._task.ignore_unreachable = item_ignore_unreachable @@ -208,10 +219,10 @@ class TaskExecutor: display.debug(u"error closing connection: %s" % to_text(e)) def _get_loop_items(self): - ''' + """ Loads a lookup plugin to handle the with_* portion of a task (if specified), and returns the items result. 
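The TaskTimeoutError added earlier in this file walks f_back from the frame the SIGALRM landed in until it reaches one whose file lives under the executor package, so the timeout report names a recognisable caller. A standalone sketch of that walk (requires Python 3.9+ for is_relative_to; the None guard is an addition this sketch needs when run outside ansible):

    import pathlib
    import sys

    def describe_interrupt(frame, root):
        orig = frame
        while frame and not pathlib.Path(frame.f_code.co_filename).is_relative_to(root):
            frame = frame.f_back
        return 'Interrupted at %s called from %s' % (orig, frame)

    print(describe_interrupt(sys._getframe(), pathlib.Path(__file__).parent))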
- ''' + """ # get search path for this task to pass to lookup plugins self._job_vars['ansible_search_path'] = self._task.get_search_path() @@ -225,7 +236,7 @@ class TaskExecutor: if self._task.loop_with: if self._task.loop_with in self._shared_loader_obj.lookup_loader: - # TODO: hardcoded so it fails for non first_found lookups, but thhis shoudl be generalized for those that don't do their own templating + # TODO: hardcoded so it fails for non first_found lookups, but this should be generalized for those that don't do their own templating # lookup prop/attribute? fail = bool(self._task.loop_with != 'first_found') loop_terms = listify_lookup_plugin_terms(terms=self._task.loop, templar=templar, fail_on_undefined=fail, convert_bare=False) @@ -256,11 +267,11 @@ class TaskExecutor: return items def _run_loop(self, items): - ''' + """ Runs the task with the loop items specified and collates the result into an array named 'results' which is inserted into the final result along with the item for which the loop ran. - ''' + """ task_vars = self._job_vars templar = Templar(loader=self._loader, variables=task_vars) @@ -333,6 +344,13 @@ class TaskExecutor: (self._task, tmp_task) = (tmp_task, self._task) (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context) res = self._execute(variables=task_vars) + + if self._task.register: + # Ensure per loop iteration results are registered in case `_execute()` + # returns early (when conditional, failure, ...). + # This is needed in case the registered variable is used in the loop label template. + task_vars[self._task.register] = res + task_fields = self._task.dump_attrs() (self._task, tmp_task) = (tmp_task, self._task) (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context) @@ -363,12 +381,17 @@ class TaskExecutor: 'msg': 'Failed to template loop_control.label: %s' % to_text(e) }) + # if plugin is loaded, get resolved name, otherwise leave original task connection + if self._connection and not isinstance(self._connection, string_types): + task_fields['connection'] = getattr(self._connection, 'ansible_name') + tr = TaskResult( self._host.name, self._task._uuid, res, task_fields=task_fields, ) + if tr.is_failed() or tr.is_unreachable(): self._final_q.send_callback('v2_runner_item_on_failed', tr) elif tr.is_skipped(): @@ -380,6 +403,19 @@ class TaskExecutor: self._final_q.send_callback('v2_runner_item_on_ok', tr) results.append(res) + + # break loop if break_when conditions are met + if self._task.loop_control and self._task.loop_control.break_when: + cond = Conditional(loader=self._loader) + cond.when = self._task.loop_control.get_validated_value( + 'break_when', self._task.loop_control.fattributes.get('break_when'), self._task.loop_control.break_when, templar + ) + if cond.evaluate_conditional(templar, task_vars): + # delete loop vars before exiting loop + del task_vars[loop_var] + break + + # done with loop var, remove for next iteration del task_vars[loop_var] # clear 'connection related' plugin variables for next iteration @@ -408,11 +444,7 @@ class TaskExecutor: """This method is responsible for effectively pre-validating Task.delegate_to and will happen before Task.post_validate is executed """ - delegated_vars, delegated_host_name = self._variable_manager.get_delegated_vars_and_hostname( - templar, - self._task, - variables - ) + delegated_vars, delegated_host_name = self._variable_manager.get_delegated_vars_and_hostname(templar, self._task, variables) # At the point this is executed it is safe to mutate 
self._task, # since `self._task` is either a copy referred to by `tmp_task` in `_run_loop` # or just a singular non-looped task @@ -421,11 +453,11 @@ class TaskExecutor: variables.update(delegated_vars) def _execute(self, variables=None): - ''' + """ The primary workhorse of the executor system, this runs the task on the specified host (which may be the delegated_to host) and handles the retry/until and block rescue/always execution - ''' + """ if variables is None: variables = self._job_vars @@ -595,9 +627,9 @@ class TaskExecutor: # feed back into pc to ensure plugins not using get_option can get correct value self._connection._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=vars_copy, templar=templar) - # TODO: eventually remove this block as this should be a 'consequence' of 'forced_local' modules + # TODO: eventually remove this block as this should be a 'consequence' of 'forced_local' modules, right now rely on remote_is_local connection # special handling for python interpreter for network_os, default to ansible python unless overridden - if 'ansible_network_os' in cvars and 'ansible_python_interpreter' not in cvars: + if 'ansible_python_interpreter' not in cvars and 'ansible_network_os' in cvars and getattr(self._connection, '_remote_is_local', False): # this also avoids 'python discovery' cvars['ansible_python_interpreter'] = sys.executable @@ -620,17 +652,11 @@ class TaskExecutor: if omit_token is not None: self._task.args = remove_omit(self._task.args, omit_token) - # Read some values from the task, so that we can modify them if need be - if self._task.until: - retries = self._task.retries - if retries is None: - retries = 3 - elif retries <= 0: - retries = 1 - else: - retries += 1 - else: - retries = 1 + retries = 1 # includes the default actual run + retries set by user/default + if self._task.retries is not None: + retries += max(0, self._task.retries) + elif self._task.until: + retries += 3 # the default is not set in FA because we need to differentiate "unset" value delay = self._task.delay if delay < 0: @@ -651,7 +677,7 @@ class TaskExecutor: return dict(unreachable=True, msg=to_text(e)) except TaskTimeoutError as e: msg = 'The %s action failed to execute in the expected time frame (%d) and was terminated' % (self._task.action, self._task.timeout) - return dict(failed=True, msg=msg) + return dict(failed=True, msg=msg, timedout={'frame': e.frame, 'period': self._task.timeout}) finally: if self._task.timeout: signal.alarm(0) @@ -659,8 +685,8 @@ class TaskExecutor: self._handler.cleanup() display.debug("handler run complete") - # preserve no log - result["_ansible_no_log"] = no_log + # propagate no log to result- the action can set this, so only overwrite it with the task's value if missing or falsey + result["_ansible_no_log"] = bool(no_log or result.get('_ansible_no_log', False)) if self._task.action not in C._ACTION_WITH_CLEAN_FACTS: result = wrap_var(result) @@ -668,9 +694,6 @@ class TaskExecutor: # update the local copy of vars with the registered value, if specified, # or any facts which may have been generated by the module execution if self._task.register: - if not isidentifier(self._task.register): - raise AnsibleError("Invalid variable name in 'register' specified: '%s'" % self._task.register) - vars_copy[self._task.register] = result if self._task.async_val > 0: @@ -736,7 +759,7 @@ class TaskExecutor: result['failed'] = False # Make attempts and retries available early to allow their use in changed/failed_when - if 
self._task.until: + if retries > 1: result['attempts'] = attempt # set the changed property if it was missing. @@ -768,7 +791,7 @@ class TaskExecutor: if retries > 1: cond = Conditional(loader=self._loader) - cond.when = self._task.until + cond.when = self._task.until or [not result['failed']] if cond.evaluate_conditional(templar, vars_copy): break else: @@ -836,9 +859,9 @@ class TaskExecutor: return result def _poll_async_result(self, result, templar, task_vars=None): - ''' + """ Polls for the specified JID to be complete - ''' + """ if task_vars is None: task_vars = self._job_vars @@ -851,7 +874,12 @@ class TaskExecutor: # that (with a sleep for "poll" seconds between each retry) until the # async time limit is exceeded. - async_task = Task.load(dict(action='async_status', args={'jid': async_jid}, environment=self._task.environment)) + async_task = Task.load(dict( + action='async_status', + args={'jid': async_jid}, + check_mode=self._task.check_mode, + environment=self._task.environment, + )) # FIXME: this is no longer the case, normal takes care of all, see if this can just be generalized # Because this is an async task, the action handler is async. However, @@ -923,6 +951,7 @@ class TaskExecutor: 'jid': async_jid, 'mode': 'cleanup', }, + 'check_mode': self._task.check_mode, 'environment': self._task.environment, } ) @@ -948,10 +977,10 @@ class TaskExecutor: return become def _get_connection(self, cvars, templar, current_connection): - ''' + """ Reads the connection property for the host, and returns the correct connection object from the list of connection plugins - ''' + """ self._play_context.connection = current_connection @@ -1053,7 +1082,7 @@ class TaskExecutor: # add extras if plugin supports them if getattr(self._connection, 'allow_extras', False): for k in variables: - if k.startswith('ansible_%s_' % self._connection._load_name) and k not in options: + if k.startswith('ansible_%s_' % self._connection.extras_prefix) and k not in options: options['_extras'][k] = templar.template(variables[k]) task_keys = self._task.dump_attrs() @@ -1096,7 +1125,7 @@ class TaskExecutor: # deals with networking sub_plugins (network_cli/httpapi/netconf) sub = getattr(self._connection, '_sub_plugin', None) - if sub is not None and sub.get('type') != 'external': + if sub and sub.get('type') != 'external': plugin_type = get_plugin_class(sub.get("obj")) varnames.extend(self._set_plugin_options(plugin_type, variables, templar, task_keys)) sub_conn = getattr(self._connection, 'ssh_type_conn', None) @@ -1106,15 +1135,15 @@ class TaskExecutor: return varnames def _get_action_handler(self, templar): - ''' + """ Returns the correct action plugin to handle the requestion task action - ''' + """ return self._get_action_handler_with_module_context(templar)[0] def _get_action_handler_with_module_context(self, templar): - ''' + """ Returns the correct action plugin to handle the requestion task action and the module context - ''' + """ module_collection, separator, module_name = self._task.action.rpartition(".") module_prefix = module_name.split('_')[0] if module_collection: @@ -1184,26 +1213,19 @@ class TaskExecutor: return handler, module +CLI_STUB_NAME = 'ansible_connection_cli_stub.py' + + def start_connection(play_context, options, task_uuid): - ''' + """ Starts the persistent connection - ''' - candidate_paths = [C.ANSIBLE_CONNECTION_PATH or os.path.dirname(sys.argv[0])] - candidate_paths.extend(os.environ.get('PATH', '').split(os.pathsep)) - for dirname in candidate_paths: - ansible_connection = 
os.path.join(dirname, 'ansible-connection') - if os.path.isfile(ansible_connection): - display.vvvv("Found ansible-connection at path {0}".format(ansible_connection)) - break - else: - raise AnsibleError("Unable to find location of 'ansible-connection'. " - "Please set or check the value of ANSIBLE_CONNECTION_PATH") + """ env = os.environ.copy() env.update({ # HACK; most of these paths may change during the controller's lifetime # (eg, due to late dynamic role includes, multi-playbook execution), without a way - # to invalidate/update, ansible-connection won't always see the same plugins the controller + # to invalidate/update, the persistent connection helper won't always see the same plugins the controller # can. 'ANSIBLE_BECOME_PLUGINS': become_loader.print_paths(), 'ANSIBLE_CLICONF_PLUGINS': cliconf_loader.print_paths(), @@ -1216,30 +1238,19 @@ def start_connection(play_context, options, task_uuid): verbosity = [] if display.verbosity: verbosity.append('-%s' % ('v' * display.verbosity)) - python = sys.executable - master, slave = pty.openpty() + + if not (cli_stub_path := C.config.get_config_value('_ANSIBLE_CONNECTION_PATH')): + cli_stub_path = str(pathlib.Path(scripts.__file__).parent / CLI_STUB_NAME) + p = subprocess.Popen( - [python, ansible_connection, *verbosity, to_text(os.getppid()), to_text(task_uuid)], - stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env + [sys.executable, cli_stub_path, *verbosity, to_text(os.getppid()), to_text(task_uuid)], + stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, ) - os.close(slave) - - # We need to set the pty into noncanonical mode. This ensures that we - # can receive lines longer than 4095 characters (plus newline) without - # truncating. - old = termios.tcgetattr(master) - new = termios.tcgetattr(master) - new[3] = new[3] & ~termios.ICANON - - try: - termios.tcsetattr(master, termios.TCSANOW, new) - write_to_file_descriptor(master, options) - write_to_file_descriptor(master, play_context.serialize()) - - (stdout, stderr) = p.communicate() - finally: - termios.tcsetattr(master, termios.TCSANOW, old) - os.close(master) + + write_to_stream(p.stdin, options) + write_to_stream(p.stdin, play_context.serialize()) + + (stdout, stderr) = p.communicate() if p.returncode == 0: result = json.loads(to_text(stdout, errors='surrogate_then_replace')) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 3bbf3d592e1..75f8a698612 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -15,9 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import sys @@ -114,7 +112,7 @@ class AnsibleEndPlay(Exception): class TaskQueueManager: - ''' + """ This class handles the multiprocessing requirements of Ansible by creating a pool of worker forks, a result handler fork, and a manager object with shared datastructures/queues for coordinating @@ -122,7 +120,7 @@ class TaskQueueManager: The queue manager is responsible for loading the play strategy plugin, which dispatches the Play's tasks to hosts. 
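Reduced to its minimal standard-library shape, the arrangement this docstring describes is a set of worker forks feeding one shared results queue; the strategy plugin, callbacks and per-host bookkeeping are what the real class layers on top. A hedged sketch, with the worker body standing in for TaskExecutor:

    import multiprocessing as mp

    def worker(task, results):
        results.put({'task': task, 'ok': True})  # real workers run TaskExecutor here

    if __name__ == '__main__':
        results = mp.Queue()
        procs = [mp.Process(target=worker, args=(n, results)) for n in range(4)]
        for p in procs:
            p.start()
        for _ in procs:
            print(results.get())  # drain before join to avoid feeder-thread stalls
        for p in procs:
            p.join()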
- ''' + """ RUN_OK = 0 RUN_ERROR = 1 @@ -178,11 +176,11 @@ class TaskQueueManager: self._workers.append(None) def load_callbacks(self): - ''' + """ Loads all available callbacks, with the exception of those which utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout', only one such callback plugin will be loaded. - ''' + """ if self._callbacks_loaded: return @@ -225,7 +223,7 @@ class TaskQueueManager: callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', '') callback_needs_enabled = getattr(callback_plugin, 'CALLBACK_NEEDS_ENABLED', getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False)) - # try to get colleciotn world name first + # try to get collection world name first cnames = getattr(callback_plugin, '_redirected_names', []) if cnames: # store the name the plugin was loaded as, as that's what we'll need to compare to the configured callback list later @@ -271,13 +269,13 @@ class TaskQueueManager: self._callbacks_loaded = True def run(self, play): - ''' + """ Iterates over the roles/tasks in a play, using the given (or default) strategy for queueing tasks. The default is the linear strategy, which operates like classic Ansible by keeping all hosts in lock-step with a given task (meaning no hosts move on to the next task until all hosts are done with the current task). - ''' + """ if not self._callbacks_loaded: self.load_callbacks() diff --git a/lib/ansible/executor/task_result.py b/lib/ansible/executor/task_result.py index 543b860ebe7..06e9af72e3c 100644 --- a/lib/ansible/executor/task_result.py +++ b/lib/ansible/executor/task_result.py @@ -2,8 +2,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible import constants as C from ansible.parsing.dataloader import DataLoader @@ -23,11 +22,11 @@ CLEAN_EXCEPTIONS = ( class TaskResult: - ''' + """ This class is responsible for interpreting the resulting data from an executed task, and provides helper methods for determining the result of a given task. - ''' + """ def __init__(self, host, task, return_data, task_fields=None): self._host = host @@ -55,7 +54,7 @@ class TaskResult: if 'results' in self._result: results = self._result['results'] # Loop tasks are only considered skipped if all items were skipped. 
- # some squashed results (eg, yum) are not dicts and can't be skipped individually + # some squashed results (eg, dnf) are not dicts and can't be skipped individually if results and all(isinstance(res, dict) and res.get('skipped', False) for res in results): return True @@ -94,7 +93,7 @@ class TaskResult: return ret def _check_key(self, key): - '''get a specific key from the result or its items''' + """get a specific key from the result or its items""" if isinstance(self._result, dict) and key in self._result: return self._result.get(key, False) @@ -107,7 +106,7 @@ class TaskResult: def clean_copy(self): - ''' returns 'clean' taskresult object ''' + """ returns 'clean' taskresult object """ # FIXME: clean task_fields, _task and _host copies result = TaskResult(self._host, self._task, {}, self._task_fields) @@ -140,7 +139,7 @@ class TaskResult: elif self._result: result._result = module_response_deepcopy(self._result) - # actualy remove + # actually remove for remove_key in ignore: if remove_key in result._result: del result._result[remove_key] diff --git a/lib/ansible/galaxy/__init__.py b/lib/ansible/galaxy/__init__.py index 26d9f143650..7b6fa569f4b 100644 --- a/lib/ansible/galaxy/__init__.py +++ b/lib/ansible/galaxy/__init__.py @@ -18,10 +18,9 @@ # along with Ansible. If not, see . # ######################################################################## -''' This manages remote shared Ansible objects, mainly roles''' +""" This manages remote shared Ansible objects, mainly roles""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os @@ -41,7 +40,7 @@ def get_collections_galaxy_meta_info(): class Galaxy(object): - ''' Keeps global galaxy info ''' + """ Keeps global galaxy info """ def __init__(self): # TODO: eventually remove this as it contains a mismash of properties that aren't really global diff --git a/lib/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py index e1cbc83a93c..6765b087b35 100644 --- a/lib/ansible/galaxy/api.py +++ b/lib/ansible/galaxy/api.py @@ -2,8 +2,7 @@ # Copyright: (c) 2019, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import collections import datetime @@ -63,8 +62,7 @@ def should_retry_error(exception): if isinstance(orig_exc, URLError): orig_exc = orig_exc.reason - # Handle common URL related errors such as TimeoutError, and BadStatusLine - # Note: socket.timeout is only required for Py3.9 + # Handle common URL related errors if isinstance(orig_exc, (TimeoutError, BadStatusLine, IncompleteRead)): return True @@ -134,6 +132,15 @@ def g_connect(versions): % (method.__name__, ", ".join(versions), ", ".join(available_versions), self.name, self.api_server)) + # Warn only when we know we are talking to a collections API + if common_versions == {'v2'}: + display.deprecated( + 'The v2 Ansible Galaxy API is deprecated and no longer supported. 
' + 'Ensure that you have configured the ansible-galaxy CLI to utilize an ' + 'updated and supported version of Ansible Galaxy.', + version='2.20' + ) + return method(self, *args, **kwargs) return wrapped return decorator @@ -359,7 +366,8 @@ class GalaxyAPI: valid = False if cache_key in server_cache: expires = datetime.datetime.strptime(server_cache[cache_key]['expires'], iso_datetime_format) - valid = datetime.datetime.utcnow() < expires + expires = expires.replace(tzinfo=datetime.timezone.utc) + valid = datetime.datetime.now(datetime.timezone.utc) < expires is_paginated_url = 'page' in query or 'offset' in query if valid and not is_paginated_url: @@ -384,7 +392,7 @@ class GalaxyAPI: elif not is_paginated_url: # The cache entry had expired or does not exist, start a new blank entry to be filled later. - expires = datetime.datetime.utcnow() + expires = datetime.datetime.now(datetime.timezone.utc) expires += datetime.timedelta(days=1) server_cache[cache_key] = { 'expires': expires.strftime(iso_datetime_format), @@ -482,8 +490,6 @@ class GalaxyAPI: } if role_name: args['alternate_role_name'] = role_name - elif github_repo.startswith('ansible-role'): - args['alternate_role_name'] = github_repo[len('ansible-role') + 1:] data = self._call_galaxy(url, args=urlencode(args), method="POST") if data.get('results', None): return data['results'] @@ -713,7 +719,7 @@ class GalaxyAPI: display.display("Waiting until Galaxy import task %s has completed" % full_url) start = time.time() - wait = 2 + wait = C.GALAXY_COLLECTION_IMPORT_POLL_INTERVAL while timeout == 0 or (time.time() - start) < timeout: try: @@ -737,7 +743,7 @@ class GalaxyAPI: time.sleep(wait) # poor man's exponential backoff algo so we don't flood the Galaxy API, cap at 30 seconds. - wait = min(30, wait * 1.5) + wait = min(30, wait * C.GALAXY_COLLECTION_IMPORT_POLL_FACTOR) if state == 'waiting': raise AnsibleError("Timeout while waiting for the Galaxy import process to finish, check progress at '%s'" % to_native(full_url)) diff --git a/lib/ansible/galaxy/collection/__init__.py b/lib/ansible/galaxy/collection/__init__.py index cf726538755..829f7aa19d2 100644 --- a/lib/ansible/galaxy/collection/__init__.py +++ b/lib/ansible/galaxy/collection/__init__.py @@ -3,12 +3,13 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """Installed collections management package.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import errno import fnmatch import functools +import glob +import inspect import json import os import pathlib @@ -26,7 +27,7 @@ import typing as t from collections import namedtuple from contextlib import contextmanager -from dataclasses import dataclass, fields as dc_fields +from dataclasses import dataclass from hashlib import sha256 from io import BytesIO from importlib.metadata import distribution @@ -124,13 +125,14 @@ from ansible.galaxy.dependency_resolution.dataclasses import ( ) from ansible.galaxy.dependency_resolution.versioning import meets_requirements from ansible.plugins.loader import get_all_plugin_loaders +from ansible.module_utils.common.file import S_IRWU_RG_RO, S_IRWXU_RXG_RXO, S_IXANY +from ansible.module_utils.common.sentinel import Sentinel from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.module_utils.common.collections import is_sequence from ansible.module_utils.common.yaml import yaml_dump from ansible.utils.collection_loader import 
AnsibleCollectionRef from ansible.utils.display import Display from ansible.utils.hashing import secure_hash, secure_hash_s -from ansible.utils.sentinel import Sentinel display = Display() @@ -152,9 +154,9 @@ class ManifestControl: # Allow a dict representing this dataclass to be splatted directly. # Requires attrs to have a default value, so anything with a default # of None is swapped for its, potentially mutable, default - for field in dc_fields(self): - if getattr(self, field.name) is None: - super().__setattr__(field.name, field.type()) + for field_name, field_type in inspect.get_annotations(type(self), eval_str=True).items(): + if getattr(self, field_name) is None: + super().__setattr__(field_name, field_type()) class CollectionSignatureError(Exception): @@ -333,11 +335,18 @@ def verify_local_collection(local_collection, remote_collection, artifacts_manag os.path.join(b_collection_path, to_bytes(name, errors='surrogate_or_strict')) ) + b_ignore_patterns = [ + b'*.pyc', + ] + # Find any paths not in the FILES.json for root, dirs, files in os.walk(b_collection_path): for name in files: full_path = os.path.join(root, name) path = to_text(full_path[len(b_collection_path) + 1::], errors='surrogate_or_strict') + if any(fnmatch.fnmatch(full_path, b_pattern) for b_pattern in b_ignore_patterns): + display.v("Ignoring verification for %s" % full_path) + continue if full_path not in collection_files: modified_content.append( @@ -544,7 +553,7 @@ def download_collections( for fqcn, concrete_coll_pin in dep_map.copy().items(): # FIXME: move into the provider if concrete_coll_pin.is_virtual: display.display( - 'Virtual collection {coll!s} is not downloadable'. + '{coll!s} is not downloadable'. format(coll=to_text(concrete_coll_pin)), ) continue @@ -741,7 +750,7 @@ def install_collections( for fqcn, concrete_coll_pin in dependency_map.items(): if concrete_coll_pin.is_virtual: display.vvvv( - "'{coll!s}' is virtual, skipping.". + "Encountered {coll!s}, skipping.". 
format(coll=to_text(concrete_coll_pin)), ) continue @@ -1203,10 +1212,17 @@ def _build_files_manifest_walk(b_collection_path, namespace, name, ignore_patter manifest = _make_manifest() + def _discover_relative_base_directory(b_path: bytes, b_top_level_dir: bytes) -> bytes: + if b_path == b_top_level_dir: + return b'' + common_prefix = os.path.commonpath((b_top_level_dir, b_path)) + b_rel_base_dir = os.path.relpath(b_path, common_prefix) + return b_rel_base_dir.lstrip(os.path.sep.encode()) + def _walk(b_path, b_top_level_dir): + b_rel_base_dir = _discover_relative_base_directory(b_path, b_top_level_dir) for b_item in os.listdir(b_path): b_abs_path = os.path.join(b_path, b_item) - b_rel_base_dir = b'' if b_path == b_top_level_dir else b_path[len(b_top_level_dir) + 1:] b_rel_path = os.path.join(b_rel_base_dir, b_item) rel_path = to_text(b_rel_path, errors='surrogate_or_strict') @@ -1303,7 +1319,7 @@ def _build_collection_tar( tar_info = tarfile.TarInfo(name) tar_info.size = len(b) tar_info.mtime = int(time.time()) - tar_info.mode = 0o0644 + tar_info.mode = S_IRWU_RG_RO tar_file.addfile(tarinfo=tar_info, fileobj=b_io) for file_info in file_manifest['files']: # type: ignore[union-attr] @@ -1317,7 +1333,7 @@ def _build_collection_tar( def reset_stat(tarinfo): if tarinfo.type != tarfile.SYMTYPE: existing_is_exec = tarinfo.mode & stat.S_IXUSR - tarinfo.mode = 0o0755 if existing_is_exec or tarinfo.isdir() else 0o0644 + tarinfo.mode = S_IRWXU_RXG_RXO if existing_is_exec or tarinfo.isdir() else S_IRWU_RG_RO tarinfo.uid = tarinfo.gid = 0 tarinfo.uname = tarinfo.gname = '' @@ -1325,6 +1341,8 @@ def _build_collection_tar( if os.path.islink(b_src_path): b_link_target = os.path.realpath(b_src_path) + if not os.path.exists(b_link_target): + raise AnsibleError(f"Failed to find the target path '{to_native(b_link_target)}' for the symlink '{to_native(b_src_path)}'.") if _is_child_path(b_link_target, b_collection_path): b_rel_path = os.path.relpath(b_link_target, start=os.path.dirname(b_src_path)) @@ -1357,7 +1375,7 @@ def _build_collection_dir(b_collection_path, b_collection_output, collection_man This should follow the same pattern as _build_collection_tar. 
""" - os.makedirs(b_collection_output, mode=0o0755) + os.makedirs(b_collection_output, mode=S_IRWXU_RXG_RXO) files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict') collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256) @@ -1369,7 +1387,7 @@ def _build_collection_dir(b_collection_path, b_collection_output, collection_man with open(b_path, 'wb') as file_obj, BytesIO(b) as b_io: shutil.copyfileobj(b_io, file_obj) - os.chmod(b_path, 0o0644) + os.chmod(b_path, S_IRWU_RG_RO) base_directories = [] for file_info in sorted(file_manifest['files'], key=lambda x: x['name']): @@ -1380,11 +1398,11 @@ def _build_collection_dir(b_collection_path, b_collection_output, collection_man dest_file = os.path.join(b_collection_output, to_bytes(file_info['name'], errors='surrogate_or_strict')) existing_is_exec = os.stat(src_file, follow_symlinks=False).st_mode & stat.S_IXUSR - mode = 0o0755 if existing_is_exec else 0o0644 + mode = S_IRWXU_RXG_RXO if existing_is_exec else S_IRWU_RG_RO # ensure symlinks to dirs are not translated to empty dirs if os.path.isdir(src_file) and not os.path.islink(src_file): - mode = 0o0755 + mode = S_IRWXU_RXG_RXO base_directories.append(src_file) os.mkdir(dest_file, mode) else: @@ -1420,6 +1438,10 @@ def find_existing_collections(path_filter, artifacts_manager, namespace_filter=N if path_filter and not is_sequence(path_filter): path_filter = [path_filter] + if namespace_filter and not is_sequence(namespace_filter): + namespace_filter = [namespace_filter] + if collection_filter and not is_sequence(collection_filter): + collection_filter = [collection_filter] paths = set() for path in files('ansible_collections').glob('*/*/'): @@ -1441,9 +1463,9 @@ def find_existing_collections(path_filter, artifacts_manager, namespace_filter=N for path in paths: namespace = path.parent.name name = path.name - if namespace_filter and namespace != namespace_filter: + if namespace_filter and namespace not in namespace_filter: continue - if collection_filter and name != collection_filter: + if collection_filter and name not in collection_filter: continue if dedupe: @@ -1504,6 +1526,7 @@ def install(collection, path, artifacts_manager): # FIXME: mv to dataclasses? artifacts_manager.required_successful_signature_count, artifacts_manager.ignore_signature_errors, ) + remove_source_metadata(collection, b_collection_path) if (collection.is_online_index_pointer and isinstance(collection.src, GalaxyAPI)): write_source_metadata( collection, @@ -1529,10 +1552,10 @@ def write_source_metadata(collection, b_collection_path, artifacts_manager): shutil.rmtree(b_info_dir) try: - os.mkdir(b_info_dir, mode=0o0755) + os.mkdir(b_info_dir, mode=S_IRWXU_RXG_RXO) with open(b_info_dest, mode='w+b') as fd: fd.write(b_yaml_source_data) - os.chmod(b_info_dest, 0o0644) + os.chmod(b_info_dest, S_IRWU_RG_RO) except Exception: # Ensure we don't leave the dir behind in case of a failure. 
if os.path.isdir(b_info_dir): @@ -1540,6 +1563,22 @@ def write_source_metadata(collection, b_collection_path, artifacts_manager): raise +def remove_source_metadata(collection, b_collection_path): + pattern = f"{collection.namespace}.{collection.name}-*.info" + info_path = os.path.join( + b_collection_path, + b'../../', + to_bytes(pattern, errors='surrogate_or_strict') + ) + if (outdated_info := glob.glob(info_path)): + display.vvvv(f"Removing {pattern} metadata from previous installations") + for info_dir in outdated_info: + try: + shutil.rmtree(info_dir) + except Exception: + pass + + def verify_artifact_manifest(manifest_file, signatures, keyring, required_signature_count, ignore_signature_errors): # type: (str, list[str], str, str, list[str]) -> None failed_verify = False @@ -1563,13 +1602,6 @@ def install_artifact(b_coll_targz_path, b_collection_path, b_temp_path, signatur """ try: with tarfile.open(b_coll_targz_path, mode='r') as collection_tar: - # Remove this once py3.11 is our controller minimum - # Workaround for https://bugs.python.org/issue47231 - # See _extract_tar_dir - collection_tar._ansible_normalized_cache = { - m.name.removesuffix(os.path.sep): m for m in collection_tar.getmembers() - } # deprecated: description='TarFile member index' core_version='2.18' python_version='3.11' - # Verify the signature on the MANIFEST.json before extracting anything else _extract_tar_file(collection_tar, MANIFEST_FILENAME, b_collection_path, b_temp_path) @@ -1650,10 +1682,10 @@ def install_src(collection, b_collection_path, b_collection_output_path, artifac def _extract_tar_dir(tar, dirname, b_dest): """ Extracts a directory from a collection tar. """ - dirname = to_native(dirname, errors='surrogate_or_strict').removesuffix(os.path.sep) + dirname = to_native(dirname, errors='surrogate_or_strict') try: - tar_member = tar._ansible_normalized_cache[dirname] + tar_member = tar.getmember(dirname) except KeyError: raise AnsibleError("Unable to extract '%s' from collection" % dirname) @@ -1661,7 +1693,7 @@ def _extract_tar_dir(tar, dirname, b_dest): b_parent_path = os.path.dirname(b_dir_path) try: - os.makedirs(b_parent_path, mode=0o0755) + os.makedirs(b_parent_path, mode=S_IRWXU_RXG_RXO) except OSError as e: if e.errno != errno.EEXIST: raise @@ -1676,7 +1708,7 @@ def _extract_tar_dir(tar, dirname, b_dest): else: if not os.path.isdir(b_dir_path): - os.mkdir(b_dir_path, 0o0755) + os.mkdir(b_dir_path, S_IRWXU_RXG_RXO) def _extract_tar_file(tar, filename, b_dest, b_temp_path, expected_hash=None): @@ -1702,7 +1734,7 @@ def _extract_tar_file(tar, filename, b_dest, b_temp_path, expected_hash=None): if not os.path.exists(b_parent_dir): # Seems like Galaxy does not validate if all file entries have a corresponding dir ftype entry. This check # makes sure we create the parent directory even if it wasn't set in the metadata. - os.makedirs(b_parent_dir, mode=0o0755) + os.makedirs(b_parent_dir, mode=S_IRWXU_RXG_RXO) if tar_member.type == tarfile.SYMTYPE: b_link_path = to_bytes(tar_member.linkname, errors='surrogate_or_strict') @@ -1717,9 +1749,9 @@ def _extract_tar_file(tar, filename, b_dest, b_temp_path, expected_hash=None): # Default to rw-r--r-- and only add execute if the tar file has execute. 
tar_member = tar.getmember(to_native(filename, errors='surrogate_or_strict')) - new_mode = 0o644 + new_mode = S_IRWU_RG_RO if stat.S_IMODE(tar_member.mode) & stat.S_IXUSR: - new_mode |= 0o0111 + new_mode |= S_IXANY os.chmod(b_dest_filepath, new_mode) @@ -1811,15 +1843,15 @@ def _resolve_depenency_map( elif not req.specifier.contains(RESOLVELIB_VERSION.vstring): raise AnsibleError(f"ansible-galaxy requires {req.name}{req.specifier}") - if allow_pre_release: - pre_release_hint = '' - else: - pre_release_hint = 'Hint: Pre-releases are not installed by default unless the specific version is given. To enable pre-releases, use --pre.' + pre_release_hint = '' if allow_pre_release else ( + 'Hint: Pre-releases hosted on Galaxy or Automation Hub are not ' + 'installed by default unless a specific version is requested. ' + 'To enable pre-releases globally, use --pre.' + ) collection_dep_resolver = build_collection_dependency_resolver( galaxy_apis=galaxy_apis, concrete_artifacts_manager=concrete_artifacts_manager, - user_requirements=requested_requirements, preferred_candidates=preferred_candidates, with_deps=not no_deps, with_pre_releases=allow_pre_release, @@ -1855,8 +1887,7 @@ def _resolve_depenency_map( raise AnsibleError('\n'.join(error_msg_lines)) from dep_exc except CollectionDependencyInconsistentCandidate as dep_exc: parents = [ - "%s.%s:%s" % (p.namespace, p.name, p.ver) - for p in dep_exc.criterion.iter_parent() + str(p) for p in dep_exc.criterion.iter_parent() if p is not None ] @@ -1876,7 +1907,7 @@ def _resolve_depenency_map( for req in dep_exc.criterion.iter_requirement(): error_msg_lines.append( - '* {req.fqcn!s}:{req.ver!s}'.format(req=req) + f'* {req.fqcn!s}:{req.ver!s}' ) error_msg_lines.append(pre_release_hint) diff --git a/lib/ansible/galaxy/collection/concrete_artifact_manager.py b/lib/ansible/galaxy/collection/concrete_artifact_manager.py index fe1a81b15aa..fb807766f5c 100644 --- a/lib/ansible/galaxy/collection/concrete_artifact_manager.py +++ b/lib/ansible/galaxy/collection/concrete_artifact_manager.py @@ -3,14 +3,14 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """Concrete collection candidate management helper module.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json import os import tarfile import subprocess import typing as t +import yaml from contextlib import contextmanager from hashlib import sha256 @@ -25,6 +25,7 @@ if t.TYPE_CHECKING: ) from ansible.galaxy.token import GalaxyToken +from ansible import context from ansible.errors import AnsibleError from ansible.galaxy import get_collections_galaxy_meta_info from ansible.galaxy.api import should_retry_error @@ -34,12 +35,12 @@ from ansible.module_utils.common.text.converters import to_bytes, to_native, to_ from ansible.module_utils.api import retry_with_delays_and_condition from ansible.module_utils.api import generate_jittered_backoff from ansible.module_utils.common.process import get_bin_path +from ansible.module_utils.common.sentinel import Sentinel from ansible.module_utils.common.yaml import yaml_load from ansible.module_utils.urls import open_url from ansible.utils.display import Display -from ansible.utils.sentinel import Sentinel -import yaml +import ansible.constants as C display = Display() @@ -62,7 +63,7 @@ class ConcreteArtifactsManager: """ def __init__(self, b_working_directory, validate_certs=True, keyring=None, timeout=60, required_signature_count=None, 
ignore_signature_errors=None): # type: (bytes, bool, str, int, str, list[str]) -> None - """Initialize ConcreteArtifactsManager caches and costraints.""" + """Initialize ConcreteArtifactsManager caches and constraints.""" self._validate_certs = validate_certs # type: bool self._artifact_cache = {} # type: dict[bytes, bytes] self._galaxy_artifact_cache = {} # type: dict[Candidate | Requirement, bytes] @@ -140,7 +141,7 @@ class ConcreteArtifactsManager: url, sha256_hash, token = self._galaxy_collection_cache[collection] except KeyError as key_err: raise RuntimeError( - 'The is no known source for {coll!s}'. + 'There is no known source for {coll!s}'. format(coll=collection), ) from key_err @@ -414,7 +415,7 @@ def _extract_collection_from_git(repo_url, coll_ver, b_path): b_checkout_path = mkdtemp( dir=b_path, prefix=to_bytes(name, errors='surrogate_or_strict'), - ) # type: bytes + ) try: git_executable = get_bin_path('git') @@ -426,11 +427,14 @@ def _extract_collection_from_git(repo_url, coll_ver, b_path): # Perform a shallow clone if simply cloning HEAD if version == 'HEAD': - git_clone_cmd = git_executable, 'clone', '--depth=1', git_url, to_text(b_checkout_path) + git_clone_cmd = [git_executable, 'clone', '--depth=1', git_url, to_text(b_checkout_path)] else: - git_clone_cmd = git_executable, 'clone', git_url, to_text(b_checkout_path) + git_clone_cmd = [git_executable, 'clone', git_url, to_text(b_checkout_path)] # FIXME: '--branch', version + if context.CLIARGS['ignore_certs'] or C.GALAXY_IGNORE_CERTS: + git_clone_cmd.extend(['-c', 'http.sslVerify=false']) + try: subprocess.check_call(git_clone_cmd) except subprocess.CalledProcessError as proc_err: @@ -702,6 +706,11 @@ def _get_meta_from_installed_dir( def _get_meta_from_tar( b_path, # type: bytes ): # type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None, t.Type[Sentinel]]] + if not os.path.exists(b_path): + raise AnsibleError( + f"Unable to find collection artifact file at '{to_native(b_path)}'." + ) + if not tarfile.is_tarfile(b_path): raise AnsibleError( "Collection artifact at '{path!s}' is not a valid tar file.". 
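
The _get_meta_from_tar hunk above adds an explicit existence check ahead of the tar validity check, so a missing artifact path fails with a precise error instead of whatever tarfile.open() happens to raise. A minimal standalone sketch of that validation order, assuming a made-up helper name (read_manifest_from_artifact is illustrative, not an ansible API):

import json
import os
import tarfile


def read_manifest_from_artifact(path):
    # Fail first on a path that does not exist at all ...
    if not os.path.exists(path):
        raise FileNotFoundError(f"Unable to find collection artifact file at '{path}'.")
    # ... then on a file that exists but is not a tar archive.
    if not tarfile.is_tarfile(path):
        raise ValueError(f"Collection artifact at '{path}' is not a valid tar file.")
    # Only after both checks does it make sense to read members.
    with tarfile.open(path, mode='r') as tar:
        with tar.extractfile('MANIFEST.json') as member:
            return json.load(member)
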
diff --git a/lib/ansible/galaxy/collection/galaxy_api_proxy.py b/lib/ansible/galaxy/collection/galaxy_api_proxy.py index 64d545f7f05..046354a395d 100644 --- a/lib/ansible/galaxy/collection/galaxy_api_proxy.py +++ b/lib/ansible/galaxy/collection/galaxy_api_proxy.py @@ -3,8 +3,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """A facade for interfacing with multiple Galaxy instances.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import typing as t @@ -28,8 +27,7 @@ display = Display() class MultiGalaxyAPIProxy: """A proxy that abstracts talking to multiple Galaxy instances.""" - def __init__(self, apis, concrete_artifacts_manager, offline=False): - # type: (t.Iterable[GalaxyAPI], ConcreteArtifactsManager, bool) -> None + def __init__(self, apis: t.Iterable[GalaxyAPI], concrete_artifacts_manager: ConcreteArtifactsManager, offline: bool = False) -> None: """Initialize the target APIs list.""" self._apis = apis self._concrete_art_mgr = concrete_artifacts_manager @@ -39,22 +37,21 @@ class MultiGalaxyAPIProxy: def is_offline_mode_requested(self): return self._offline - def _assert_that_offline_mode_is_not_requested(self): # type: () -> None + def _assert_that_offline_mode_is_not_requested(self) -> None: if self.is_offline_mode_requested: raise NotImplementedError("The calling code is not supposed to be invoked in 'offline' mode.") - def _get_collection_versions(self, requirement): - # type: (Requirement) -> t.Iterator[tuple[GalaxyAPI, str]] + def _get_collection_versions(self, requirement: Requirement) -> t.Iterator[tuple[GalaxyAPI, str]]: """Helper for get_collection_versions. Yield api, version pairs for all APIs, and reraise the last error if no valid API was found. """ if self._offline: - return [] + return found_api = False - last_error = None # type: Exception | None + last_error: Exception | None = None api_lookup_order = ( (requirement.src, ) @@ -87,8 +84,7 @@ class MultiGalaxyAPIProxy: if not found_api and last_error is not None: raise last_error - def get_collection_versions(self, requirement): - # type: (Requirement) -> t.Iterable[tuple[str, GalaxyAPI]] + def get_collection_versions(self, requirement: Requirement) -> t.Iterable[tuple[str, GalaxyAPI]]: """Get a set of unique versions for FQCN on Galaxy servers.""" if requirement.is_concrete_artifact: return { @@ -111,8 +107,7 @@ class MultiGalaxyAPIProxy: ) ) - def get_collection_version_metadata(self, collection_candidate): - # type: (Candidate) -> CollectionVersionMetadata + def get_collection_version_metadata(self, collection_candidate: Candidate) -> CollectionVersionMetadata: """Retrieve collection metadata of a given candidate.""" self._assert_that_offline_mode_is_not_requested() @@ -161,8 +156,7 @@ class MultiGalaxyAPIProxy: raise last_err - def get_collection_dependencies(self, collection_candidate): - # type: (Candidate) -> dict[str, str] + def get_collection_dependencies(self, collection_candidate: Candidate) -> dict[str, str]: # FIXME: return Requirement instances instead? 
"""Retrieve collection dependencies of a given candidate.""" if collection_candidate.is_concrete_artifact: @@ -178,13 +172,12 @@ class MultiGalaxyAPIProxy: dependencies ) - def get_signatures(self, collection_candidate): - # type: (Candidate) -> list[str] + def get_signatures(self, collection_candidate: Candidate) -> list[str]: self._assert_that_offline_mode_is_not_requested() namespace = collection_candidate.namespace name = collection_candidate.name version = collection_candidate.ver - last_err = None # type: Exception | None + last_err: Exception | None = None api_lookup_order = ( (collection_candidate.src, ) diff --git a/lib/ansible/galaxy/collection/gpg.py b/lib/ansible/galaxy/collection/gpg.py index 8641f0d7f72..9d41cdcde8c 100644 --- a/lib/ansible/galaxy/collection/gpg.py +++ b/lib/ansible/galaxy/collection/gpg.py @@ -2,28 +2,24 @@ # Copyright: (c) 2022, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """Signature verification helpers.""" +from __future__ import annotations from ansible.errors import AnsibleError from ansible.galaxy.user_agent import user_agent from ansible.module_utils.urls import open_url import contextlib +import inspect import os import subprocess -import sys import typing as t from dataclasses import dataclass, fields as dc_fields -from functools import partial from urllib.error import HTTPError, URLError if t.TYPE_CHECKING: from ansible.utils.display import Display -IS_PY310_PLUS = sys.version_info[:2] >= (3, 10) - -frozen_dataclass = partial(dataclass, frozen=True, **({'slots': True} if IS_PY310_PLUS else {})) - def get_signature_from_source(source, display=None): # type: (str, t.Optional[Display]) -> str if display is not None: @@ -126,7 +122,7 @@ def parse_gpg_errors(status_out): # type: (str) -> t.Iterator[GpgBaseError] yield cls(*fields) -@frozen_dataclass +@dataclass(frozen=True, slots=True) class GpgBaseError(Exception): status: str @@ -136,39 +132,39 @@ class GpgBaseError(Exception): return ' '.join(cls.__doc__.split()) def __post_init__(self): - for field in dc_fields(self): - super(GpgBaseError, self).__setattr__(field.name, field.type(getattr(self, field.name))) + for field_name, field_type in inspect.get_annotations(type(self), eval_str=True).items(): + super(GpgBaseError, self).__setattr__(field_name, field_type(getattr(self, field_name))) -@frozen_dataclass +@dataclass(frozen=True, slots=True) class GpgExpSig(GpgBaseError): """The signature with the keyid is good, but the signature is expired.""" keyid: str username: str -@frozen_dataclass +@dataclass(frozen=True, slots=True) class GpgExpKeySig(GpgBaseError): """The signature with the keyid is good, but the signature was made by an expired key.""" keyid: str username: str -@frozen_dataclass +@dataclass(frozen=True, slots=True) class GpgRevKeySig(GpgBaseError): """The signature with the keyid is good, but the signature was made by a revoked key.""" keyid: str username: str -@frozen_dataclass +@dataclass(frozen=True, slots=True) class GpgBadSig(GpgBaseError): """The signature with the keyid has not been verified okay.""" keyid: str username: str -@frozen_dataclass +@dataclass(frozen=True, slots=True) class GpgErrSig(GpgBaseError): """"It was not possible to check the signature. This may be caused by a missing public key or an unsupported algorithm. 
A RC of 4 @@ -184,24 +180,24 @@ class GpgErrSig(GpgBaseError): fpr: str -@frozen_dataclass +@dataclass(frozen=True, slots=True) class GpgNoPubkey(GpgBaseError): """The public key is not available.""" keyid: str -@frozen_dataclass +@dataclass(frozen=True, slots=True) class GpgMissingPassPhrase(GpgBaseError): """No passphrase was supplied.""" -@frozen_dataclass +@dataclass(frozen=True, slots=True) class GpgBadPassphrase(GpgBaseError): """The supplied passphrase was wrong or not given.""" keyid: str -@frozen_dataclass +@dataclass(frozen=True, slots=True) class GpgNoData(GpgBaseError): """No data has been found. Codes for WHAT are: - 1 :: No armored data. @@ -213,7 +209,7 @@ class GpgNoData(GpgBaseError): what: str -@frozen_dataclass +@dataclass(frozen=True, slots=True) class GpgUnexpected(GpgBaseError): """No data has been found. Codes for WHAT are: - 1 :: No armored data. @@ -225,7 +221,7 @@ class GpgUnexpected(GpgBaseError): what: str -@frozen_dataclass +@dataclass(frozen=True, slots=True) class GpgError(GpgBaseError): """This is a generic error status message, it might be followed by error location specific data.""" location: str @@ -233,30 +229,30 @@ class GpgError(GpgBaseError): more: str = "" -@frozen_dataclass +@dataclass(frozen=True, slots=True) class GpgFailure(GpgBaseError): """This is the counterpart to SUCCESS and used to indicate a program failure.""" location: str code: int -@frozen_dataclass +@dataclass(frozen=True, slots=True) class GpgBadArmor(GpgBaseError): """The ASCII armor is corrupted.""" -@frozen_dataclass +@dataclass(frozen=True, slots=True) class GpgKeyExpired(GpgBaseError): """The key has expired.""" timestamp: int -@frozen_dataclass +@dataclass(frozen=True, slots=True) class GpgKeyRevoked(GpgBaseError): """The used key has been revoked by its owner.""" -@frozen_dataclass +@dataclass(frozen=True, slots=True) class GpgNoSecKey(GpgBaseError): """The secret key is not available.""" keyid: str diff --git a/lib/ansible/galaxy/data/COPYING b/lib/ansible/galaxy/data/COPYING new file mode 100644 index 00000000000..87a9639c92f --- /dev/null +++ b/lib/ansible/galaxy/data/COPYING @@ -0,0 +1,7 @@ +All templates, files and files generated from them in the subdirectories of this one +are subject to the MIT license when applicable. + +MIT License: +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
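
The gpg.py changes above drop the version-gated frozen_dataclass partial in favor of @dataclass(frozen=True, slots=True), which assumes Python 3.10+, and drive field coercion from inspect.get_annotations() instead of dataclasses.fields(). A runnable sketch of the same pattern, using a made-up Example class (GPG status fields arrive as strings and are re-cast to their annotated types):

import inspect
from dataclasses import dataclass


@dataclass(frozen=True, slots=True)
class Example:
    status: str
    code: int

    def __post_init__(self):
        # frozen=True rejects normal assignment, so the coercion bypasses
        # __setattr__, just as GpgBaseError.__post_init__ does above.
        for name, annotated_type in inspect.get_annotations(type(self), eval_str=True).items():
            object.__setattr__(self, name, annotated_type(getattr(self, name)))


print(Example(status='FAILURE', code='9'))  # prints Example(status='FAILURE', code=9)
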
diff --git a/lib/ansible/galaxy/data/apb/Dockerfile.j2 b/lib/ansible/galaxy/data/apb/Dockerfile.j2 index 4d99a8b0c37..f39abc3bd89 100644 --- a/lib/ansible/galaxy/data/apb/Dockerfile.j2 +++ b/lib/ansible/galaxy/data/apb/Dockerfile.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 FROM ansibleplaybookbundle/apb-base LABEL "com.redhat.apb.spec"=\ diff --git a/lib/ansible/galaxy/data/apb/Makefile.j2 b/lib/ansible/galaxy/data/apb/Makefile.j2 index ebeaa61f168..9278d246094 100644 --- a/lib/ansible/galaxy/data/apb/Makefile.j2 +++ b/lib/ansible/galaxy/data/apb/Makefile.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 DOCKERHOST = DOCKERHOST DOCKERORG = DOCKERORG IMAGENAME = {{ role_name }} diff --git a/lib/ansible/galaxy/data/apb/README.md b/lib/ansible/galaxy/data/apb/README.md index 2e350a03fde..0f51845fbd3 100644 --- a/lib/ansible/galaxy/data/apb/README.md +++ b/lib/ansible/galaxy/data/apb/README.md @@ -6,17 +6,21 @@ A brief description of the APB goes here. Requirements ------------ -Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. +For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. APB Variables -------------- -A description of the settable variables for this APB should go here, including any variables that are in defaults/main.yml, vars/main.yml, apb.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well. +A description of the settable variables for this APB should go here, including any variables that are in defaults/main.yml, vars/main.yml, apb.yml, and +any variables that can/should be set via parameters to the role. +Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well. Dependencies ------------ -A list of other APBs/roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. +A list of other APBs/roles hosted on Galaxy should go here, plus any details in regards to +parameters that may need to be set for other roles, or variables that are used from other roles. 
Example Playbook ---------------- diff --git a/lib/ansible/galaxy/data/apb/apb.yml.j2 b/lib/ansible/galaxy/data/apb/apb.yml.j2 index f96880196fe..e9405dcc359 100644 --- a/lib/ansible/galaxy/data/apb/apb.yml.j2 +++ b/lib/ansible/galaxy/data/apb/apb.yml.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 version: '1.0.0' name: {{ role_name }} description: {{ description }} diff --git a/lib/ansible/galaxy/data/apb/defaults/main.yml.j2 b/lib/ansible/galaxy/data/apb/defaults/main.yml.j2 index 3818e64c335..8842d94e345 100644 --- a/lib/ansible/galaxy/data/apb/defaults/main.yml.j2 +++ b/lib/ansible/galaxy/data/apb/defaults/main.yml.j2 @@ -1,2 +1,3 @@ +#SPDX-License-Identifier: MIT-0 --- # defaults file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/apb/handlers/main.yml.j2 b/lib/ansible/galaxy/data/apb/handlers/main.yml.j2 index 3f4c49674d4..89371a09bab 100644 --- a/lib/ansible/galaxy/data/apb/handlers/main.yml.j2 +++ b/lib/ansible/galaxy/data/apb/handlers/main.yml.j2 @@ -1,2 +1,3 @@ +#SPDX-License-Identifier: MIT-0 --- # handlers file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/apb/meta/main.yml.j2 b/lib/ansible/galaxy/data/apb/meta/main.yml.j2 index 862f8ef8b4c..23f870c4c50 100644 --- a/lib/ansible/galaxy/data/apb/meta/main.yml.j2 +++ b/lib/ansible/galaxy/data/apb/meta/main.yml.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 galaxy_info: author: {{ author }} description: {{ description }} @@ -16,21 +17,6 @@ galaxy_info: # - CC-BY-4.0 license: {{ license }} - # - # platforms is a list of platforms, and each platform has a name and a list of versions. - # - # platforms: - # - name: Fedora - # versions: - # - all - # - 25 - # - name: SomePlatform - # versions: - # - all - # - 1.0 - # - 7 - # - 99.99 - galaxy_tags: - apb # List tags for your role here, one per line. 
A tag is a keyword that describes diff --git a/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2 b/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2 index 19527310a59..0a863784990 100644 --- a/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2 +++ b/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 - name: "{{ role_name }} playbook to deprovision the application" hosts: localhost gather_facts: false diff --git a/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2 b/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2 index 7b08605ec58..f0691e2b875 100644 --- a/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2 +++ b/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 - name: "{{ role_name }} playbook to provision the application" hosts: localhost gather_facts: false diff --git a/lib/ansible/galaxy/data/apb/tasks/main.yml.j2 b/lib/ansible/galaxy/data/apb/tasks/main.yml.j2 index a9880650590..1bba65a7566 100644 --- a/lib/ansible/galaxy/data/apb/tasks/main.yml.j2 +++ b/lib/ansible/galaxy/data/apb/tasks/main.yml.j2 @@ -1,2 +1,3 @@ +#SPDX-License-Identifier: MIT-0 --- # tasks file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/apb/tests/ansible.cfg b/lib/ansible/galaxy/data/apb/tests/ansible.cfg index 2f74f1b2722..e2b73526706 100644 --- a/lib/ansible/galaxy/data/apb/tests/ansible.cfg +++ b/lib/ansible/galaxy/data/apb/tests/ansible.cfg @@ -1,2 +1,3 @@ +#SPDX-License-Identifier: MIT-0 [defaults] inventory=./inventory diff --git a/lib/ansible/galaxy/data/apb/tests/inventory b/lib/ansible/galaxy/data/apb/tests/inventory index ea69cbf1225..a24f8243f1f 100644 --- a/lib/ansible/galaxy/data/apb/tests/inventory +++ b/lib/ansible/galaxy/data/apb/tests/inventory @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 localhost diff --git a/lib/ansible/galaxy/data/apb/tests/test.yml.j2 b/lib/ansible/galaxy/data/apb/tests/test.yml.j2 index fb14f85c97b..1b03869978c 100644 --- a/lib/ansible/galaxy/data/apb/tests/test.yml.j2 +++ b/lib/ansible/galaxy/data/apb/tests/test.yml.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 --- - hosts: localhost gather_facts: no diff --git a/lib/ansible/galaxy/data/apb/vars/main.yml.j2 b/lib/ansible/galaxy/data/apb/vars/main.yml.j2 index 092d511a1e6..8fc2f46c5e0 100644 --- a/lib/ansible/galaxy/data/apb/vars/main.yml.j2 +++ b/lib/ansible/galaxy/data/apb/vars/main.yml.j2 @@ -1,2 +1,3 @@ +#SPDX-License-Identifier: MIT-0 --- # vars file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/collections_galaxy_meta.yml b/lib/ansible/galaxy/data/collections_galaxy_meta.yml index 5c4472cda1a..f47f1a7efff 100644 --- a/lib/ansible/galaxy/data/collections_galaxy_meta.yml +++ b/lib/ansible/galaxy/data/collections_galaxy_meta.yml @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 # Copyright (c) 2019 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) diff --git a/lib/ansible/galaxy/data/container/defaults/main.yml.j2 b/lib/ansible/galaxy/data/container/defaults/main.yml.j2 index 3818e64c335..8842d94e345 100644 --- a/lib/ansible/galaxy/data/container/defaults/main.yml.j2 +++ b/lib/ansible/galaxy/data/container/defaults/main.yml.j2 @@ -1,2 +1,3 @@ +#SPDX-License-Identifier: MIT-0 --- # defaults file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/container/handlers/main.yml.j2 b/lib/ansible/galaxy/data/container/handlers/main.yml.j2 index 3f4c49674d4..89371a09bab 100644 --- 
a/lib/ansible/galaxy/data/container/handlers/main.yml.j2 +++ b/lib/ansible/galaxy/data/container/handlers/main.yml.j2 @@ -1,2 +1,3 @@ +#SPDX-License-Identifier: MIT-0 --- # handlers file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/container/meta/container.yml.j2 b/lib/ansible/galaxy/data/container/meta/container.yml.j2 index f033d34110e..97b39617192 100644 --- a/lib/ansible/galaxy/data/container/meta/container.yml.j2 +++ b/lib/ansible/galaxy/data/container/meta/container.yml.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 # Add your Ansible Container service definitions here. # For example: # diff --git a/lib/ansible/galaxy/data/container/meta/main.yml.j2 b/lib/ansible/galaxy/data/container/meta/main.yml.j2 index 72fc9a22e8a..d3fe1495a25 100644 --- a/lib/ansible/galaxy/data/container/meta/main.yml.j2 +++ b/lib/ansible/galaxy/data/container/meta/main.yml.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 galaxy_info: author: {{ author }} description: {{ description }} @@ -21,24 +22,6 @@ galaxy_info: # If Ansible is required outside of the build container, provide the minimum version: # min_ansible_version: - # - # Provide a list of supported platforms, and for each platform a list of versions. - # If you don't wish to enumerate all versions for a particular platform, use 'all'. - # To view available platforms and versions (or releases), visit: - # https://galaxy.ansible.com/api/v1/platforms/ - # - # platforms: - # - name: Fedora - # versions: - # - all - # - 25 - # - name: SomePlatform - # versions: - # - all - # - 1.0 - # - 7 - # - 99.99 - galaxy_tags: - container # List tags for your role here, one per line. A tag is a keyword that describes diff --git a/lib/ansible/galaxy/data/container/tasks/main.yml.j2 b/lib/ansible/galaxy/data/container/tasks/main.yml.j2 index a9880650590..1bba65a7566 100644 --- a/lib/ansible/galaxy/data/container/tasks/main.yml.j2 +++ b/lib/ansible/galaxy/data/container/tasks/main.yml.j2 @@ -1,2 +1,3 @@ +#SPDX-License-Identifier: MIT-0 --- # tasks file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/container/tests/ansible.cfg b/lib/ansible/galaxy/data/container/tests/ansible.cfg index 2f74f1b2722..e2b73526706 100644 --- a/lib/ansible/galaxy/data/container/tests/ansible.cfg +++ b/lib/ansible/galaxy/data/container/tests/ansible.cfg @@ -1,2 +1,3 @@ +#SPDX-License-Identifier: MIT-0 [defaults] inventory=./inventory diff --git a/lib/ansible/galaxy/data/container/tests/inventory b/lib/ansible/galaxy/data/container/tests/inventory index ea69cbf1225..a24f8243f1f 100644 --- a/lib/ansible/galaxy/data/container/tests/inventory +++ b/lib/ansible/galaxy/data/container/tests/inventory @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 localhost diff --git a/lib/ansible/galaxy/data/container/tests/test.yml.j2 b/lib/ansible/galaxy/data/container/tests/test.yml.j2 index fb14f85c97b..1b03869978c 100644 --- a/lib/ansible/galaxy/data/container/tests/test.yml.j2 +++ b/lib/ansible/galaxy/data/container/tests/test.yml.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 --- - hosts: localhost gather_facts: no diff --git a/lib/ansible/galaxy/data/container/vars/main.yml.j2 b/lib/ansible/galaxy/data/container/vars/main.yml.j2 index 092d511a1e6..8fc2f46c5e0 100644 --- a/lib/ansible/galaxy/data/container/vars/main.yml.j2 +++ b/lib/ansible/galaxy/data/container/vars/main.yml.j2 @@ -1,2 +1,3 @@ +#SPDX-License-Identifier: MIT-0 --- # vars file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/default/collection/README.md.j2 
b/lib/ansible/galaxy/data/default/collection/README.md.j2 index 5e5162206ec..ff8d7a3e8b1 100644 --- a/lib/ansible/galaxy/data/default/collection/README.md.j2 +++ b/lib/ansible/galaxy/data/default/collection/README.md.j2 @@ -1,3 +1,4 @@ +{# SPDX-License-Identifier: MIT-0 #} # Ansible Collection - {{ namespace }}.{{ collection_name }} Documentation for the collection. diff --git a/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2 b/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2 index 7821491b257..842bdb10ce1 100644 --- a/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2 +++ b/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 ### REQUIRED {% for option in required_config %} {{ option.description | comment_ify }} diff --git a/lib/ansible/galaxy/data/default/collection/meta/runtime.yml b/lib/ansible/galaxy/data/default/collection/meta/runtime.yml index 20f709edff5..936cae9f714 100644 --- a/lib/ansible/galaxy/data/default/collection/meta/runtime.yml +++ b/lib/ansible/galaxy/data/default/collection/meta/runtime.yml @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 --- # Collections must specify a minimum required ansible version to upload # to galaxy diff --git a/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2 b/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2 index 7c006cfa76f..795e371cd60 100644 --- a/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2 +++ b/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2 @@ -1,3 +1,4 @@ +{# SPDX-License-Identifier: MIT-0 #} # Collections Plugins Directory This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder that diff --git a/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2 b/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2 index 3818e64c335..8842d94e345 100644 --- a/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2 +++ b/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2 @@ -1,2 +1,3 @@ +#SPDX-License-Identifier: MIT-0 --- # defaults file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2 b/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2 index 3f4c49674d4..89371a09bab 100644 --- a/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2 +++ b/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2 @@ -1,2 +1,3 @@ +#SPDX-License-Identifier: MIT-0 --- # handlers file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/default/role/meta/main.yml.j2 b/lib/ansible/galaxy/data/default/role/meta/main.yml.j2 index 4891a68b490..b23f47cc5bc 100644 --- a/lib/ansible/galaxy/data/default/role/meta/main.yml.j2 +++ b/lib/ansible/galaxy/data/default/role/meta/main.yml.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 galaxy_info: author: {{ author }} description: {{ description }} @@ -21,24 +22,6 @@ galaxy_info: # If this is a Container Enabled role, provide the minimum Ansible Container version. # min_ansible_container_version: - # - # Provide a list of supported platforms, and for each platform a list of versions. - # If you don't wish to enumerate all versions for a particular platform, use 'all'. 
- # To view available platforms and versions (or releases), visit: - # https://galaxy.ansible.com/api/v1/platforms/ - # - # platforms: - # - name: Fedora - # versions: - # - all - # - 25 - # - name: SomePlatform - # versions: - # - all - # - 1.0 - # - 7 - # - 99.99 - galaxy_tags: [] # List tags for your role here, one per line. A tag is a keyword that describes # and categorizes the role. Users find roles by searching for tags. Be sure to diff --git a/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2 b/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2 index a9880650590..1bba65a7566 100644 --- a/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2 +++ b/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2 @@ -1,2 +1,3 @@ +#SPDX-License-Identifier: MIT-0 --- # tasks file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/default/role/tests/inventory b/lib/ansible/galaxy/data/default/role/tests/inventory index 878877b0776..03ca42fd173 100644 --- a/lib/ansible/galaxy/data/default/role/tests/inventory +++ b/lib/ansible/galaxy/data/default/role/tests/inventory @@ -1,2 +1,3 @@ +#SPDX-License-Identifier: MIT-0 localhost diff --git a/lib/ansible/galaxy/data/default/role/tests/test.yml.j2 b/lib/ansible/galaxy/data/default/role/tests/test.yml.j2 index 0c40f95a697..bf4f028593e 100644 --- a/lib/ansible/galaxy/data/default/role/tests/test.yml.j2 +++ b/lib/ansible/galaxy/data/default/role/tests/test.yml.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 --- - hosts: localhost remote_user: root diff --git a/lib/ansible/galaxy/data/default/role/vars/main.yml.j2 b/lib/ansible/galaxy/data/default/role/vars/main.yml.j2 index 092d511a1e6..8fc2f46c5e0 100644 --- a/lib/ansible/galaxy/data/default/role/vars/main.yml.j2 +++ b/lib/ansible/galaxy/data/default/role/vars/main.yml.j2 @@ -1,2 +1,3 @@ +#SPDX-License-Identifier: MIT-0 --- # vars file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2 b/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2 index 02f234acb23..51e41111117 100644 --- a/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2 +++ b/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 # # (c) 2018 Red Hat Inc. # @@ -16,9 +17,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations from ansible.errors import AnsibleError -__metaclass__ = type try: from ansible.plugins.cliconf import CliconfBase diff --git a/lib/ansible/galaxy/data/network/defaults/main.yml.j2 b/lib/ansible/galaxy/data/network/defaults/main.yml.j2 index 3818e64c335..8842d94e345 100644 --- a/lib/ansible/galaxy/data/network/defaults/main.yml.j2 +++ b/lib/ansible/galaxy/data/network/defaults/main.yml.j2 @@ -1,2 +1,3 @@ +#SPDX-License-Identifier: MIT-0 --- # defaults file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/network/library/example_command.py.j2 b/lib/ansible/galaxy/data/network/library/example_command.py.j2 index 0f3dac2d986..9aa6ef62d4d 100644 --- a/lib/ansible/galaxy/data/network/library/example_command.py.j2 +++ b/lib/ansible/galaxy/data/network/library/example_command.py.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 # # (c) 2018 Red Hat Inc. # @@ -16,9 +17,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
# -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations from ansible.errors import AnsibleError -__metaclass__ = type ### Documentation diff --git a/lib/ansible/galaxy/data/network/library/example_config.py.j2 b/lib/ansible/galaxy/data/network/library/example_config.py.j2 index 2c2c72be9c0..2913af08a0c 100644 --- a/lib/ansible/galaxy/data/network/library/example_config.py.j2 +++ b/lib/ansible/galaxy/data/network/library/example_config.py.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 # # (c) 2018 Red Hat Inc. # @@ -16,9 +17,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations from ansible.errors import AnsibleError -__metaclass__ = type ### Documentation diff --git a/lib/ansible/galaxy/data/network/library/example_facts.py.j2 b/lib/ansible/galaxy/data/network/library/example_facts.py.j2 index 9f7608c39a1..f90f456eab0 100644 --- a/lib/ansible/galaxy/data/network/library/example_facts.py.j2 +++ b/lib/ansible/galaxy/data/network/library/example_facts.py.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 # # (c) 2018 Red Hat Inc. # @@ -16,9 +17,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations from ansible.errors import AnsibleError -__metaclass__ = type ### Documentation diff --git a/lib/ansible/galaxy/data/network/meta/main.yml.j2 b/lib/ansible/galaxy/data/network/meta/main.yml.j2 index d0184ae8cea..0cd67263113 100644 --- a/lib/ansible/galaxy/data/network/meta/main.yml.j2 +++ b/lib/ansible/galaxy/data/network/meta/main.yml.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 galaxy_info: author: {{ author }} description: {{ description }} @@ -21,21 +22,6 @@ galaxy_info: # If this is a Container Enabled role, provide the minimum Ansible Container version. # min_ansible_container_version: - # - # platforms is a list of platforms, and each platform has a name and a list of versions. - # - # platforms: - # - name: VYOS - # versions: - # - all - # - 25 - # - name: SomePlatform - # versions: - # - all - # - 1.0 - # - 7 - # - 99.99 - galaxy_tags: [] # List tags for your role here, one per line. A tag is a keyword that describes # and categorizes the role. Users find roles by searching for tags. Be sure to diff --git a/lib/ansible/galaxy/data/network/module_utils/example.py.j2 b/lib/ansible/galaxy/data/network/module_utils/example.py.j2 index 9bf2d3f61ea..a3d9aeac236 100644 --- a/lib/ansible/galaxy/data/network/module_utils/example.py.j2 +++ b/lib/ansible/galaxy/data/network/module_utils/example.py.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 # # (c) 2018 Red Hat Inc. # @@ -16,9 +17,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
# -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations from ansible.errors import AnsibleError -__metaclass__ = type ### Imports try: diff --git a/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2 b/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2 index e3a1ce6160f..fb9ddfad86c 100644 --- a/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2 +++ b/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 # # (c) 2018 Red Hat Inc. # @@ -16,9 +17,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations from ansible.errors import AnsibleError -__metaclass__ = type try: from ansible.plugins.terminal import NetconfBase diff --git a/lib/ansible/galaxy/data/network/tasks/main.yml.j2 b/lib/ansible/galaxy/data/network/tasks/main.yml.j2 index a9880650590..1bba65a7566 100644 --- a/lib/ansible/galaxy/data/network/tasks/main.yml.j2 +++ b/lib/ansible/galaxy/data/network/tasks/main.yml.j2 @@ -1,2 +1,3 @@ +#SPDX-License-Identifier: MIT-0 --- # tasks file for {{ role_name }} diff --git a/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2 b/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2 index 621a140c59b..d3562d15136 100644 --- a/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2 +++ b/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 # # (c) 2018 Red Hat Inc. # @@ -16,9 +17,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
# -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations from ansible.errors import AnsibleError -__metaclass__ = type try: from ansible.plugins.terminal import TerminalBase diff --git a/lib/ansible/galaxy/data/network/tests/inventory b/lib/ansible/galaxy/data/network/tests/inventory index 878877b0776..03ca42fd173 100644 --- a/lib/ansible/galaxy/data/network/tests/inventory +++ b/lib/ansible/galaxy/data/network/tests/inventory @@ -1,2 +1,3 @@ +#SPDX-License-Identifier: MIT-0 localhost diff --git a/lib/ansible/galaxy/data/network/tests/test.yml.j2 b/lib/ansible/galaxy/data/network/tests/test.yml.j2 index 11284eb5b8e..93263043bf1 100644 --- a/lib/ansible/galaxy/data/network/tests/test.yml.j2 +++ b/lib/ansible/galaxy/data/network/tests/test.yml.j2 @@ -1,3 +1,4 @@ +#SPDX-License-Identifier: MIT-0 --- - hosts: localhost connection: network_cli diff --git a/lib/ansible/galaxy/data/network/vars/main.yml.j2 b/lib/ansible/galaxy/data/network/vars/main.yml.j2 index 092d511a1e6..8fc2f46c5e0 100644 --- a/lib/ansible/galaxy/data/network/vars/main.yml.j2 +++ b/lib/ansible/galaxy/data/network/vars/main.yml.j2 @@ -1,2 +1,3 @@ +#SPDX-License-Identifier: MIT-0 --- # vars file for {{ role_name }} diff --git a/lib/ansible/galaxy/dependency_resolution/__init__.py b/lib/ansible/galaxy/dependency_resolution/__init__.py index cfde7df0f98..2e8ef147723 100644 --- a/lib/ansible/galaxy/dependency_resolution/__init__.py +++ b/lib/ansible/galaxy/dependency_resolution/__init__.py @@ -3,8 +3,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """Dependency resolution machinery.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import typing as t @@ -13,10 +12,7 @@ if t.TYPE_CHECKING: from ansible.galaxy.collection.concrete_artifact_manager import ( ConcreteArtifactsManager, ) - from ansible.galaxy.dependency_resolution.dataclasses import ( - Candidate, - Requirement, - ) + from ansible.galaxy.dependency_resolution.dataclasses import Candidate from ansible.galaxy.collection.galaxy_api_proxy import MultiGalaxyAPIProxy from ansible.galaxy.dependency_resolution.providers import CollectionDependencyProvider @@ -27,7 +23,6 @@ from ansible.galaxy.dependency_resolution.resolvers import CollectionDependencyR def build_collection_dependency_resolver( galaxy_apis, # type: t.Iterable[GalaxyAPI] concrete_artifacts_manager, # type: ConcreteArtifactsManager - user_requirements, # type: t.Iterable[Requirement] preferred_candidates=None, # type: t.Iterable[Candidate] with_deps=True, # type: bool with_pre_releases=False, # type: bool @@ -44,7 +39,6 @@ def build_collection_dependency_resolver( CollectionDependencyProvider( apis=MultiGalaxyAPIProxy(galaxy_apis, concrete_artifacts_manager, offline=offline), concrete_artifacts_manager=concrete_artifacts_manager, - user_requirements=user_requirements, preferred_candidates=preferred_candidates, with_deps=with_deps, with_pre_releases=with_pre_releases, diff --git a/lib/ansible/galaxy/dependency_resolution/dataclasses.py b/lib/ansible/galaxy/dependency_resolution/dataclasses.py index 1ca98610b99..ea4c875adb4 100644 --- a/lib/ansible/galaxy/dependency_resolution/dataclasses.py +++ b/lib/ansible/galaxy/dependency_resolution/dataclasses.py @@ -4,8 +4,7 @@ """Dependency structs.""" # FIXME: add caching all over the place -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ 
import annotations import os import typing as t @@ -463,8 +462,8 @@ class _ComputedReqKindsMixin: def __unicode__(self): if self.fqcn is None: return ( - u'"virtual collection Git repo"' if self.is_scm - else u'"virtual collection namespace"' + f'{self.type} collection from a Git repo' if self.is_scm + else f'{self.type} collection from a namespace' ) return ( @@ -504,14 +503,14 @@ class _ComputedReqKindsMixin: @property def namespace(self): if self.is_virtual: - raise TypeError('Virtual collections do not have a namespace') + raise TypeError(f'{self.type} collections do not have a namespace') return self._get_separate_ns_n_name()[0] @property def name(self): if self.is_virtual: - raise TypeError('Virtual collections do not have a name') + raise TypeError(f'{self.type} collections do not have a name') return self._get_separate_ns_n_name()[-1] @@ -564,6 +563,27 @@ class _ComputedReqKindsMixin: def is_online_index_pointer(self): return not self.is_concrete_artifact + @property + def is_pinned(self): + """Indicate if the version set is considered pinned. + + This essentially computes whether the version field of the current + requirement explicitly requests a specific version and not an allowed + version range. + + It is then used to help the resolvelib-based dependency resolver judge + whether it's acceptable to consider a pre-release candidate version + despite pre-release installs not being requested by the end-user + explicitly. + + See https://github.com/ansible/ansible/pull/81606 for extra context. + """ + version_string = self.ver[0] + return version_string.isdigit() or not ( + version_string == '*' or + version_string.startswith(('<', '>', '!=')) + ) + @property def source_info(self): return self._source_info diff --git a/lib/ansible/galaxy/dependency_resolution/errors.py b/lib/ansible/galaxy/dependency_resolution/errors.py index acd885750d9..1e183e9a11c 100644 --- a/lib/ansible/galaxy/dependency_resolution/errors.py +++ b/lib/ansible/galaxy/dependency_resolution/errors.py @@ -3,8 +3,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """Dependency resolution exceptions.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations try: from resolvelib.resolvers import ( # pylint: disable=unused-import diff --git a/lib/ansible/galaxy/dependency_resolution/providers.py b/lib/ansible/galaxy/dependency_resolution/providers.py index 19fb2acae54..d336c3441e2 100644 --- a/lib/ansible/galaxy/dependency_resolution/providers.py +++ b/lib/ansible/galaxy/dependency_resolution/providers.py @@ -3,8 +3,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """Requirement provider interfaces.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import functools import typing as t @@ -40,7 +39,7 @@ except ImportError: # TODO: add python requirements to ansible-test's ansible-core distribution info and remove the hardcoded lowerbound/upperbound fallback RESOLVELIB_LOWERBOUND = SemanticVersion("0.5.3") -RESOLVELIB_UPPERBOUND = SemanticVersion("1.1.0") +RESOLVELIB_UPPERBOUND = SemanticVersion("2.0.0") RESOLVELIB_VERSION = SemanticVersion.from_loose_version(LooseVersion(resolvelib_version)) @@ -51,7 +50,6 @@ class CollectionDependencyProviderBase(AbstractProvider): self, # type: CollectionDependencyProviderBase apis, # type: MultiGalaxyAPIProxy concrete_artifacts_manager=None, 
# type: ConcreteArtifactsManager - user_requirements=None, # type: t.Iterable[Requirement] preferred_candidates=None, # type: t.Iterable[Candidate] with_deps=True, # type: bool with_pre_releases=False, # type: bool @@ -87,58 +85,12 @@ class CollectionDependencyProviderBase(AbstractProvider): Requirement.from_requirement_dict, art_mgr=concrete_artifacts_manager, ) - self._pinned_candidate_requests = set( - # NOTE: User-provided signatures are supplemental, so signatures - # NOTE: are not used to determine if a candidate is user-requested - Candidate(req.fqcn, req.ver, req.src, req.type, None) - for req in (user_requirements or ()) - if req.is_concrete_artifact or ( - req.ver != '*' and - not req.ver.startswith(('<', '>', '!=')) - ) - ) self._preferred_candidates = set(preferred_candidates or ()) self._with_deps = with_deps self._with_pre_releases = with_pre_releases self._upgrade = upgrade self._include_signatures = include_signatures - def _is_user_requested(self, candidate): # type: (Candidate) -> bool - """Check if the candidate is requested by the user.""" - if candidate in self._pinned_candidate_requests: - return True - - if candidate.is_online_index_pointer and candidate.src is not None: - # NOTE: Candidate is a namedtuple, it has a source server set - # NOTE: to a specific GalaxyAPI instance or `None`. When the - # NOTE: user runs - # NOTE: - # NOTE: $ ansible-galaxy collection install ns.coll - # NOTE: - # NOTE: then it's saved in `self._pinned_candidate_requests` - # NOTE: as `('ns.coll', '*', None, 'galaxy')` but then - # NOTE: `self.find_matches()` calls `self.is_satisfied_by()` - # NOTE: with Candidate instances bound to each specific - # NOTE: server available, those look like - # NOTE: `('ns.coll', '*', GalaxyAPI(...), 'galaxy')` and - # NOTE: wouldn't match the user requests saved in - # NOTE: `self._pinned_candidate_requests`. This is why we - # NOTE: normalize the collection to have `src=None` and try - # NOTE: again. - # NOTE: - # NOTE: When the user request comes from `requirements.yml` - # NOTE: with the `source:` set, it'll match the first check - # NOTE: but it still can have entries with `src=None` so this - # NOTE: normalized check is still necessary. - # NOTE: - # NOTE: User-provided signatures are supplemental, so signatures - # NOTE: are not used to determine if a candidate is user-requested - return Candidate( - candidate.fqcn, candidate.ver, None, candidate.type, None - ) in self._pinned_candidate_requests - - return False - def identify(self, requirement_or_candidate): # type: (t.Union[Candidate, Requirement]) -> str """Given requirement or candidate, return an identifier for it. @@ -174,7 +126,7 @@ class CollectionDependencyProviderBase(AbstractProvider): the current candidate list * ``parent`` specifies the candidate that provides - (dependend on) the requirement, or `None` + (depended on) the requirement, or `None` to indicate a root requirement. resolvelib >=0.7.0, < 0.8.0 @@ -183,7 +135,7 @@ class CollectionDependencyProviderBase(AbstractProvider): :param resolutions: Mapping of identifier, candidate pairs. - :param candidates: Possible candidates for the identifer. + :param candidates: Possible candidates for the identifier. Mapping of identifier, list of candidate pairs. :param information: Requirement information of each package. 
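
With the user_requirements plumbing removed above, the pre-release decision now keys off the Requirement.is_pinned property added in dataclasses.py earlier in this diff. A condensed model of that predicate, illustrative only and operating on the whole version string (the find_matches hunk that follows additionally normalizes '=' prefixes with lstrip('=').strip()):

def is_pinned(version):
    # Pinned: the spec names one exact version (optionally '='-prefixed),
    # rather than the '*' wildcard or a comparison range.
    return not (version == '*' or version.startswith(('<', '>', '!=')))


assert is_pinned('1.2.3-alpha.4')       # exact pre-release pin
assert is_pinned('=1.2.3')              # '=' still pins a single version
assert not is_pinned('*')               # any version at all
assert not is_pinned('>=1.0.0,<2.0.0')  # a range, not a pin
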
@@ -250,7 +202,7 @@ class CollectionDependencyProviderBase(AbstractProvider): remote archives), the one-and-only match is returned For a "named" requirement, Galaxy-compatible APIs are consulted - to find concrete candidates for this requirement. Of theres a + to find concrete candidates for this requirement. If there's a pre-installed candidate, it's prepended in front of others. resolvelib >=0.5.3, <0.6.0 @@ -342,25 +294,79 @@ class CollectionDependencyProviderBase(AbstractProvider): latest_matches = [] signatures = [] extra_signature_sources = [] # type: list[str] + + discarding_pre_releases_acceptable = any( + not is_pre_release(candidate_version) + for candidate_version, _src_server in coll_versions + ) + + # NOTE: The optimization of conditionally looping over the requirements + # NOTE: is used to skip having to compute the pinned status of all + # NOTE: requirements and apply version normalization to the found ones. + all_pinned_requirement_version_numbers = { + # NOTE: Pinned versions can start with a number, but also with an + # NOTE: equals sign. Stripping it at the beginning should be + # NOTE: enough. If there's a space after equals, the second strip + # NOTE: will take care of it. + # NOTE: Without this conversion, requirements versions like + # NOTE: '1.2.3-alpha.4' work, but '=1.2.3-alpha.4' don't. + requirement.ver.lstrip('=').strip() + for requirement in requirements + if requirement.is_pinned + } if discarding_pre_releases_acceptable else set() + for version, src_server in coll_versions: tmp_candidate = Candidate(fqcn, version, src_server, 'galaxy', None) - unsatisfied = False for requirement in requirements: - unsatisfied |= not self.is_satisfied_by(requirement, tmp_candidate) + candidate_satisfies_requirement = self.is_satisfied_by( + requirement, tmp_candidate, + ) + if not candidate_satisfies_requirement: + break + + should_disregard_pre_release_candidate = ( + # NOTE: Do not discard pre-release candidates in the + # NOTE: following cases: + # NOTE: * the end-user requested pre-releases explicitly; + # NOTE: * the candidate is a concrete artifact (e.g. a + # NOTE: Git repository, subdirs, a tarball URL, or a + # NOTE: local dir or file etc.); + # NOTE: * the candidate's pre-release version exactly + # NOTE: matches a version specifically requested by one + # NOTE: of the requirements in the current match + # NOTE: discovery round (i.e. matching a requirement + # NOTE: that is not a range but an explicit specific + # NOTE: version pin). This works when some requirements + # NOTE: request version ranges but others (possibly on + # NOTE: different dependency tree level depths) demand + # NOTE: pre-release dependency versions, even if those + # NOTE: dependencies are transitive. 
+ is_pre_release(tmp_candidate.ver) + and discarding_pre_releases_acceptable + and not ( + self._with_pre_releases + or tmp_candidate.is_concrete_artifact + or version in all_pinned_requirement_version_numbers + ) + ) + if should_disregard_pre_release_candidate: + break + # FIXME - # unsatisfied |= not self.is_satisfied_by(requirement, tmp_candidate) or not ( - # requirement.src is None or # if this is true for some candidates but not all it will break key param - Nonetype can't be compared to str + # candidate_is_from_requested_source = ( + # requirement.src is None # if this is true for some candidates but not all it will break key param - Nonetype can't be compared to str # or requirement.src == candidate.src # ) - if unsatisfied: - break + # if not candidate_is_from_requested_source: + # break + if not self._include_signatures: continue extra_signature_sources.extend(requirement.signature_sources or []) - if not unsatisfied: + else: # candidate satisfies requirements, `break` never happened if self._include_signatures: for extra_source in extra_signature_sources: signatures.append(get_signature_from_source(extra_source)) @@ -405,21 +411,6 @@ class CollectionDependencyProviderBase(AbstractProvider): :returns: Indication whether the `candidate` is a viable \ solution to the `requirement`. """ - # NOTE: Only allow pre-release candidates if we want pre-releases - # NOTE: or the req ver was an exact match with the pre-release - # NOTE: version. Another case where we'd want to allow - # NOTE: pre-releases is when there are several user requirements - # NOTE: and one of them is a pre-release that also matches a - # NOTE: transitive dependency of another requirement. - allow_pre_release = self._with_pre_releases or not ( - requirement.ver == '*' or - requirement.ver.startswith('<') or - requirement.ver.startswith('>') or - requirement.ver.startswith('!=') - ) or self._is_user_requested(candidate) - if is_pre_release(candidate.ver) and not allow_pre_release: - return False - # NOTE: This is a set of Pipenv-inspired optimizations. Ref: # https://github.com/sarugaku/passa/blob/2ac00f1/src/passa/models/providers.py#L58-L74 if ( @@ -446,12 +437,12 @@ class CollectionDependencyProviderBase(AbstractProvider): # FIXME: differs. So how do we resolve this case? Priority? # FIXME: Taking into account a pinned hash? Exploding on # FIXME: any differences? - # NOTE: The underlying implmentation currently uses first found + # NOTE: The underlying implementation currently uses first found req_map = self._api_proxy.get_collection_dependencies(candidate) # NOTE: This guard expression MUST perform an early exit only # NOTE: after the `get_collection_dependencies()` call because - # NOTE: internally it polulates the artifact URL of the candidate, + # NOTE: internally it populates the artifact URL of the candidate, # NOTE: its SHA hash and the Galaxy API token. 
These are still # NOTE: necessary with `--no-deps` because even with the disabled # NOTE: dependency resolution the outer layer will still need to diff --git a/lib/ansible/galaxy/dependency_resolution/reporters.py b/lib/ansible/galaxy/dependency_resolution/reporters.py index 69908b22435..a9da75a8674 100644 --- a/lib/ansible/galaxy/dependency_resolution/reporters.py +++ b/lib/ansible/galaxy/dependency_resolution/reporters.py @@ -1,10 +1,9 @@ # -*- coding: utf-8 -*- # Copyright: (c) 2020-2021, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -"""Requiement reporter implementations.""" +"""Requirement reporter implementations.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations try: from resolvelib import BaseReporter diff --git a/lib/ansible/galaxy/dependency_resolution/resolvers.py b/lib/ansible/galaxy/dependency_resolution/resolvers.py index 87ca38d5d42..d15537dddbd 100644 --- a/lib/ansible/galaxy/dependency_resolution/resolvers.py +++ b/lib/ansible/galaxy/dependency_resolution/resolvers.py @@ -3,8 +3,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """Requirement resolver implementations.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations try: from resolvelib import Resolver diff --git a/lib/ansible/galaxy/dependency_resolution/versioning.py b/lib/ansible/galaxy/dependency_resolution/versioning.py index 93adce45f24..74f956cf1e8 100644 --- a/lib/ansible/galaxy/dependency_resolution/versioning.py +++ b/lib/ansible/galaxy/dependency_resolution/versioning.py @@ -3,8 +3,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """Version comparison helpers.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import operator diff --git a/lib/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py index 2d7927a6fac..9ee7f3b9054 100644 --- a/lib/ansible/galaxy/role.py +++ b/lib/ansible/galaxy/role.py @@ -19,11 +19,11 @@ # ######################################################################## -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import errno import datetime +import functools import os import tarfile import tempfile @@ -41,10 +41,37 @@ from ansible.module_utils.compat.version import LooseVersion from ansible.module_utils.urls import open_url from ansible.playbook.role.requirement import RoleRequirement from ansible.utils.display import Display +from ansible.utils.path import is_subpath, unfrackpath display = Display() +@functools.cache +def _check_working_data_filter() -> bool: + """ + Check if tarfile.data_filter implementation is working + for the current Python version or not + """ + + # Implemented the following code to circumvent broken implementation of data_filter + # in tarfile. 
See for more information - https://github.com/python/cpython/issues/107845 + # deprecated: description='probing broken data filter implementation' python_version='3.11' + ret = False + if hasattr(tarfile, 'data_filter'): + # We explicitly check if tarfile.data_filter is broken or not + ti = tarfile.TarInfo('docs/README.md') + ti.type = tarfile.SYMTYPE + ti.linkname = '../README.md' + + try: + tarfile.data_filter(ti, '/foo') + except tarfile.LinkOutsideDestinationError: + pass + else: + ret = True + return ret + + class GalaxyRole(object): SUPPORTED_SCMS = set(['git', 'hg']) @@ -158,13 +185,11 @@ class GalaxyRole(object): info_path = os.path.join(self.path, self.META_INSTALL) if os.path.isfile(info_path): try: - f = open(info_path, 'r') - self._install_info = yaml_load(f) + with open(info_path, 'r') as f: + self._install_info = yaml_load(f) except Exception: display.vvvvv("Unable to load Galaxy install info for %s" % self.name) return False - finally: - f.close() return self._install_info @property @@ -184,7 +209,7 @@ class GalaxyRole(object): info = dict( version=self.version, - install_date=datetime.datetime.utcnow().strftime("%c"), + install_date=datetime.datetime.now(datetime.timezone.utc).strftime("%c"), ) if not os.path.exists(os.path.join(self.path, 'meta')): os.makedirs(os.path.join(self.path, 'meta')) @@ -229,7 +254,7 @@ class GalaxyRole(object): display.display("- downloading role from %s" % archive_url) try: - url_file = open_url(archive_url, validate_certs=self._validate_certs, http_agent=user_agent()) + url_file = open_url(archive_url, validate_certs=self._validate_certs, http_agent=user_agent(), timeout=60) temp_file = tempfile.NamedTemporaryFile(delete=False) data = url_file.read() while data: @@ -270,7 +295,7 @@ class GalaxyRole(object): # are no versions in the list, we'll grab the head # of the master branch if len(role_versions) > 0: - loose_versions = [LooseVersion(a.get('name', None)) for a in role_versions] + loose_versions = [v for a in role_versions if (v := LooseVersion()) and v.parse(a.get('name') or '') is None] try: loose_versions.sort() except TypeError: @@ -359,6 +384,8 @@ class GalaxyRole(object): else: os.makedirs(self.path) + resolved_archive = unfrackpath(archive_parent_dir, follow=False) + # We strip off any higher-level directories for all of the files # contained within the tar file here. The default is 'github_repo-target'. # Gerrit instances, on the other hand, does not have a parent directory at all. @@ -366,19 +393,36 @@ class GalaxyRole(object): # we only extract files, and remove any relative path # bits that might be in the file for security purposes # and drop any containing directory, as mentioned above - if member.isreg() or member.issym(): - n_member_name = to_native(member.name) - n_archive_parent_dir = to_native(archive_parent_dir) - n_parts = n_member_name.replace(n_archive_parent_dir, "", 1).split(os.sep) - n_final_parts = [] - for n_part in n_parts: - # TODO if the condition triggers it produces a broken installation. - # It will create the parent directory as an empty file and will - # explode if the directory contains valid files. - # Leaving this as is since the whole module needs a rewrite. - if n_part != '..' 
and not n_part.startswith('~') and '$' not in n_part: - n_final_parts.append(n_part) - member.name = os.path.join(*n_final_parts) + if not (member.isreg() or member.issym()): + continue + + for attr in ('name', 'linkname'): + if not (attr_value := getattr(member, attr, None)): + continue + + if attr == 'linkname': + # Symlinks are relative to the link + relative_to = os.path.dirname(getattr(member, 'name', '')) + else: + # Normalize paths that start with the archive dir + attr_value = attr_value.replace(archive_parent_dir, "", 1) + attr_value = os.path.join(*attr_value.split(os.sep)) # remove leading os.sep + relative_to = '' + + full_path = os.path.join(resolved_archive, relative_to, attr_value) + if not is_subpath(full_path, resolved_archive, real=True): + err = f"Invalid {attr} for tarfile member: path {full_path} is not a subpath of the role {resolved_archive}" + raise AnsibleError(err) + + relative_path_dir = os.path.join(resolved_archive, relative_to) + relative_path = os.path.join(*full_path.replace(relative_path_dir, "", 1).split(os.sep)) + setattr(member, attr, relative_path) + + if _check_working_data_filter(): + # deprecated: description='extract fallback without filter' python_version='3.11' + role_tar_file.extract(member, to_native(self.path), filter='data') # type: ignore[call-arg] + else: + # Remove along with manual path filter once Python 3.12 is minimum supported version role_tar_file.extract(member, to_native(self.path)) # write out the install info file for later use @@ -424,12 +468,10 @@ class GalaxyRole(object): meta_path = os.path.join(self.path, meta_requirements) if os.path.isfile(meta_path): try: - f = open(meta_path, 'r') - self._requirements = yaml_load(f) + with open(meta_path, 'r') as f: + self._requirements = yaml_load(f) except Exception: display.vvvvv("Unable to load requirements for %s" % self.name) - finally: - f.close() break diff --git a/lib/ansible/galaxy/token.py b/lib/ansible/galaxy/token.py index 313d007379b..9b82ad6c62c 100644 --- a/lib/ansible/galaxy/token.py +++ b/lib/ansible/galaxy/token.py @@ -18,16 +18,19 @@ # along with Ansible. If not, see . # ######################################################################## -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import base64 -import os import json +import os +import time from stat import S_IRUSR, S_IWUSR +from urllib.error import HTTPError from ansible import constants as C +from ansible.galaxy.api import GalaxyError from ansible.galaxy.user_agent import user_agent +from ansible.module_utils.common.sentinel import Sentinel as NoTokenSentinel from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.module_utils.common.yaml import yaml_dump, yaml_load from ansible.module_utils.urls import open_url @@ -36,17 +39,11 @@ from ansible.utils.display import Display display = Display() -class NoTokenSentinel(object): - """ Represents an ansible.cfg server with not token defined (will ignore cmdline and GALAXY_TOKEN_PATH. """ - def __new__(cls, *args, **kwargs): - return cls - - class KeycloakToken(object): - '''A token granted by a Keycloak server. + """A token granted by a Keycloak server. 
Like sso.redhat.com as used by cloud.redhat.com - ie Automation Hub""" token_type = 'Bearer' @@ -58,12 +55,16 @@ self.client_id = client_id if self.client_id is None: self.client_id = 'cloud-services' + self._expiration = None def _form_payload(self): return 'grant_type=refresh_token&client_id=%s&refresh_token=%s' % (self.client_id, self.access_token) def get(self): + if self._expiration and time.time() >= self._expiration: + self._token = None + if self._token: return self._token @@ -77,15 +78,20 @@ # or 'azp' (Authorized party - the party to which the ID Token was issued) payload = self._form_payload() - resp = open_url(to_native(self.auth_url), - data=payload, - validate_certs=self.validate_certs, - method='POST', - http_agent=user_agent()) + try: + resp = open_url(to_native(self.auth_url), + data=payload, + validate_certs=self.validate_certs, + method='POST', + http_agent=user_agent()) + except HTTPError as e: + raise GalaxyError(e, 'Unable to get access token') - # TODO: handle auth errors + data = json.load(resp) - data = json.loads(to_text(resp.read(), errors='surrogate_or_strict')) + # So that we have a buffer, expire the token in ~2/3 the given value + expires_in = data['expires_in'] // 3 * 2 + self._expiration = time.time() + expires_in # - extract 'access_token' self._token = data.get('access_token') @@ -99,7 +105,7 @@ class GalaxyToken(object): - ''' Class to storing and retrieving local galaxy token ''' + """ Class for storing and retrieving local galaxy token """ token_type = 'Token' diff --git a/lib/ansible/galaxy/user_agent.py b/lib/ansible/galaxy/user_agent.py index c860bcdb612..a049e88dfb5 100644 --- a/lib/ansible/galaxy/user_agent.py +++ b/lib/ansible/galaxy/user_agent.py @@ -1,8 +1,7 @@ # Copyright: (c) 2019, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import platform import sys diff --git a/lib/ansible/inventory/data.py b/lib/ansible/inventory/data.py index 15a64202d90..691ad5bed42 100644 --- a/lib/ansible/inventory/data.py +++ b/lib/ansible/inventory/data.py @@ -16,8 +16,7 @@ # along with Ansible. If not, see .
############################################# -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import sys @@ -102,7 +101,7 @@ class InventoryData(object): return new_host def reconcile_inventory(self): - ''' Ensure inventory basic rules, run after updates ''' + """ Ensure inventory basic rules, run after updates """ display.debug('Reconcile groups and hosts in inventory.') self.current_source = None @@ -146,7 +145,7 @@ self._groups_dict_cache = {} def get_host(self, hostname): - ''' fetch host object using name deal with implicit localhost ''' + """ fetch host object using name, deal with implicit localhost """ matching_host = self.hosts.get(hostname, None) @@ -158,7 +157,7 @@ return matching_host def add_group(self, group): - ''' adds a group to inventory if not there already, returns named actually used ''' + """ adds a group to inventory if not there already, returns the name actually used """ if group: if not isinstance(group, string_types): @@ -189,7 +188,7 @@ h.remove_group(group) def add_host(self, host, group=None, port=None): - ''' adds a host to inventory and possibly a group if not there already ''' + """ adds a host to inventory and possibly a group if not there already """ if host: if not isinstance(host, string_types): @@ -243,7 +242,7 @@ g.remove_host(host) def set_variable(self, entity, varname, value): - ''' sets a variable for an inventory object ''' + """ sets a variable for an inventory object """ if entity in self.groups: inv_object = self.groups[entity] @@ -256,7 +255,7 @@ display.debug('set %s for %s' % (varname, entity)) def add_child(self, group, child): - ''' Add host or group to group ''' + """ Add host or group to group """ added = False if group in self.groups: g = self.groups[group] diff --git a/lib/ansible/inventory/group.py b/lib/ansible/inventory/group.py index e81cca1b3da..335f60127c3 100644 --- a/lib/ansible/inventory/group.py +++ b/lib/ansible/inventory/group.py @@ -14,10 +14,10 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see .
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from collections.abc import Mapping, MutableMapping +from enum import Enum from itertools import chain from ansible import constants as C @@ -53,8 +53,14 @@ def to_safe_group_name(name, replacer="_", force=False, silent=False): return name +class InventoryObjectType(Enum): + HOST = 0 + GROUP = 1 + + class Group: - ''' a group of ansible hosts ''' + """ a group of ansible hosts """ + base_type = InventoryObjectType.GROUP # __slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ] @@ -114,7 +120,7 @@ class Group: self.parent_groups.append(g) def _walk_relationship(self, rel, include_self=False, preserve_ordering=False): - ''' + """ Given `rel` that is an iterable property of Group, consitituting a directed acyclic graph among all groups, Returns a set of all groups in full tree @@ -126,7 +132,7 @@ class Group: | / are directed upward F Called on F, returns set of (A, B, C, D, E) - ''' + """ seen = set([]) unprocessed = set(getattr(self, rel)) if include_self: diff --git a/lib/ansible/inventory/helpers.py b/lib/ansible/inventory/helpers.py index 39c72210918..8293f905266 100644 --- a/lib/ansible/inventory/helpers.py +++ b/lib/ansible/inventory/helpers.py @@ -16,8 +16,7 @@ # along with Ansible. If not, see . ############################################# -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.utils.vars import combine_vars diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py index 18569ce50b6..fafa9520928 100644 --- a/lib/ansible/inventory/host.py +++ b/lib/ansible/inventory/host.py @@ -15,13 +15,11 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from collections.abc import Mapping, MutableMapping -from ansible.inventory.group import Group +from ansible.inventory.group import Group, InventoryObjectType from ansible.parsing.utils.addresses import patterns from ansible.utils.vars import combine_vars, get_unique_id @@ -30,7 +28,8 @@ __all__ = ['Host'] class Host: - ''' a single ansible host ''' + """ a single ansible host """ + base_type = InventoryObjectType.HOST # __slots__ = [ 'name', 'vars', 'groups' ] diff --git a/lib/ansible/inventory/manager.py b/lib/ansible/inventory/manager.py index a95c9d2bbd4..ba6397f1787 100644 --- a/lib/ansible/inventory/manager.py +++ b/lib/ansible/inventory/manager.py @@ -16,8 +16,7 @@ # along with Ansible. If not, see . ############################################# -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import fnmatch import os @@ -51,7 +50,7 @@ IGNORED_EXTS = [b'%s$' % to_bytes(re.escape(x)) for x in C.INVENTORY_IGNORE_EXTS IGNORED = re.compile(b'|'.join(IGNORED_ALWAYS + IGNORED_PATTERNS + IGNORED_EXTS)) PATTERN_WITH_SUBSCRIPT = re.compile( - r'''^ + r"""^ (.+) # A pattern expression ending with... 
\[(?: # A [subscript] expression comprising: (-?[0-9]+)| # A single positive or negative number @@ -59,12 +58,12 @@ PATTERN_WITH_SUBSCRIPT = re.compile( ([0-9]*) )\] $ - ''', re.X + """, re.X ) def order_patterns(patterns): - ''' takes a list of patterns and reorders them by modifier to apply them consistently ''' + """ takes a list of patterns and reorders them by modifier to apply them consistently """ # FIXME: this goes away if we apply patterns incrementally or by groups pattern_regular = [] @@ -126,19 +125,19 @@ def split_host_pattern(pattern): # This mishandles IPv6 addresses, and is retained only for backwards # compatibility. patterns = re.findall( - to_text(r'''(?: # We want to match something comprising: + to_text(r"""(?: # We want to match something comprising: [^\s:\[\]] # (anything other than whitespace or ':[]' | # ...or... \[[^\]]*\] # a single complete bracketed expression) )+ # occurring once or more - '''), pattern, re.X + """), pattern, re.X ) return [p.strip() for p in patterns if p.strip()] class InventoryManager(object): - ''' Creates and manages inventory ''' + """ Creates and manages inventory """ def __init__(self, loader, sources=None, parse=True, cache=True): @@ -198,7 +197,7 @@ class InventoryManager(object): return self._inventory.get_host(hostname) def _fetch_inventory_plugins(self): - ''' sets up loaded inventory plugins for usage ''' + """ sets up loaded inventory plugins for usage """ display.vvvv('setting up inventory plugins') @@ -216,7 +215,7 @@ class InventoryManager(object): return plugins def parse_sources(self, cache=False): - ''' iterate over inventory sources and parse each one to populate it''' + """ iterate over inventory sources and parse each one to populate it""" parsed = False # allow for multiple inventory parsing @@ -244,7 +243,7 @@ class InventoryManager(object): host.vars = combine_vars(host.vars, get_vars_from_inventory_sources(self._loader, self._sources, [host], 'inventory')) def parse_source(self, source, cache=False): - ''' Generate or update inventory for the source provided ''' + """ Generate or update inventory for the source provided """ parsed = False failures = [] @@ -336,12 +335,12 @@ class InventoryManager(object): return parsed def clear_caches(self): - ''' clear all caches ''' + """ clear all caches """ self._hosts_patterns_cache = {} self._pattern_cache = {} def refresh_inventory(self): - ''' recalculate inventory ''' + """ recalculate inventory """ self.clear_caches() self._inventory = InventoryData() @@ -658,9 +657,9 @@ class InventoryManager(object): self._pattern_cache = {} def add_dynamic_host(self, host_info, result_item): - ''' + """ Helper function to add a new host to inventory based on a task result. - ''' + """ changed = False if not result_item.get('refresh'): @@ -698,10 +697,10 @@ class InventoryManager(object): result_item['changed'] = changed def add_dynamic_group(self, host, result_item): - ''' + """ Helper function to add a group (if it does not exist), and to assign the specified host to that group. - ''' + """ changed = False diff --git a/lib/ansible/keyword_desc.yml b/lib/ansible/keyword_desc.yml index 1e8d844a3d4..4aea8234b61 100644 --- a/lib/ansible/keyword_desc.yml +++ b/lib/ansible/keyword_desc.yml @@ -5,7 +5,7 @@ action: "The 'action' to execute for a task, it normally translates into a C(mod args: "A secondary way to add arguments into a task. Takes a dictionary in which keys map to options and values." 
always: List of tasks, in a block, that execute no matter if there is an error in the block or not. any_errors_fatal: Force any un-handled task errors on any host to propagate to all hosts and end the play. -async: Run a task asynchronously if the C(action) supports this; value is maximum runtime in seconds. +async: Run a task asynchronously if the C(action) supports this; the value is the maximum runtime in seconds. become: Boolean that controls if privilege escalation is used or not on :term:`Task` execution. Implemented by the become plugin. See :ref:`become_plugins`. become_exe: Path to the executable used to elevate privileges. Implemented by the become plugin. See :ref:`become_plugins`. become_flags: A string of flag(s) to pass to the privilege escalation program when :term:`become` is True. @@ -23,25 +23,25 @@ collections: | connection: Allows you to change the connection plugin used for tasks to execute on the target. See :ref:`using_connection`. -debugger: Enable debugging tasks based on state of the task result. See :ref:`playbook_debugger`. +debugger: Enable debugging tasks based on the state of the task result. See :ref:`playbook_debugger`. delay: Number of seconds to delay between retries. This setting is only used in combination with :term:`until`. delegate_facts: Boolean that allows you to apply facts to a delegated host instead of inventory_hostname. delegate_to: Host to execute task instead of the target (inventory_hostname). Connection vars from the delegated host will also be used for the task. diff: "Toggle to make tasks return 'diff' information or not." -environment: A dictionary that gets converted into environment vars to be provided for the task upon execution. This can ONLY be used with modules. This isn't supported for any other type of plugins nor Ansible itself nor its configuration, it just sets the variables for the code responsible for executing the task. This is not a recommended way to pass in confidential data. +environment: A dictionary that gets converted into environment vars to be provided for the task upon execution. This can ONLY be used with modules. This is not supported for any other type of plugins nor Ansible itself nor its configuration, it just sets the variables for the code responsible for executing the task. This is not a recommended way to pass in confidential data. fact_path: Set the fact path option for the fact gathering plugin controlled by :term:`gather_facts`. failed_when: "Conditional expression that overrides the task's normal 'failed' status." force_handlers: Will force notified handler execution for hosts even if they failed during the play. Will not trigger if the play itself fails. gather_facts: "A boolean that controls if the play will automatically run the 'setup' task to gather facts for the hosts." -gather_subset: Allows you to pass subset options to the fact gathering plugin controlled by :term:`gather_facts`. +gather_subset: Allows you to pass subset options to the fact gathering plugin controlled by :term:`gather_facts`. gather_timeout: Allows you to set the timeout for the fact gathering plugin controlled by :term:`gather_facts`. handlers: "A section with tasks that are treated as handlers, these won't get executed normally, only when notified after each section of tasks is complete. A handler's `listen` field is not templatable." hosts: "A list of groups, hosts or host pattern that translates into a list of hosts that are the play's target." 
ignore_errors: Boolean that allows you to ignore task failures and continue with play. It does not affect connection errors. ignore_unreachable: Boolean that allows you to ignore task failures due to an unreachable host and continue with the play. This does not affect other task errors (see :term:`ignore_errors`) but is useful for groups of volatile/ephemeral hosts. loop: "Takes a list for the task to iterate over, saving each list element into the ``item`` variable (configurable via loop_control)" -loop_control: Several keys here allow you to modify/set loop behaviour in a task. See :ref:`loop_control`. -max_fail_percentage: can be used to abort the run after a given percentage of hosts in the current batch has failed. This only works on linear or linear derived strategies. +loop_control: Several keys here allow you to modify/set loop behavior in a task. See :ref:`loop_control`. +max_fail_percentage: can be used to abort the run after a given percentage of hosts in the current batch has failed. This only works on linear or linear-derived strategies. module_defaults: Specifies default parameter values for modules. name: "Identifier. Can be used for documentation, or in tasks/handlers." no_log: Boolean that controls information disclosure. @@ -56,13 +56,13 @@ register: Name of variable that will contain task status and module return data. rescue: List of tasks in a :term:`block` that run if there is a task error in the main :term:`block` list. retries: "Number of retries before giving up in a :term:`until` loop. This setting is only used in combination with :term:`until`." roles: List of roles to be imported into the play -run_once: Boolean that will bypass the host loop, forcing the task to attempt to execute on the first host available and afterwards apply any results and facts to all active hosts in the same batch. +run_once: Boolean that will bypass the host loop, forcing the task to attempt to execute on the first host available and afterward apply any results and facts to all active hosts in the same batch. serial: Explicitly define how Ansible batches the execution of the current play on the play's target. See :ref:`rolling_update_batch_size`. -strategy: Allows you to choose the connection plugin to use for the play. +strategy: Allows you to choose the strategy plugin to use for the play. See :ref:`strategy_plugins`. tags: Tags applied to the task or included tasks, this allows selecting subsets of tasks from the command line. tasks: Main list of tasks to execute in the play, they run after :term:`roles` and before :term:`post_tasks`. -timeout: Time limit for task to execute in, if exceeded Ansible will interrupt and fail the task. -throttle: Limit number of concurrent task runs on task, block and playbook level. This is independent of the forks and serial settings, but cannot be set higher than those limits. For example, if forks is set to 10 and the throttle is set to 15, at most 10 hosts will be operated on in parallel. +timeout: Time limit for the task action to execute in, if exceeded, Ansible will interrupt the process. Timeout does not include templating or looping. +throttle: Limit the number of concurrent task runs on task, block and playbook level. This is independent of the forks and serial settings, but cannot be set higher than those limits. For example, if forks is set to 10 and the throttle is set to 15, at most 10 hosts will be operated on in parallel. 
until: "This keyword implies a ':term:`retries` loop' that will go on until the condition supplied here is met or we hit the :term:`retries` limit." vars: Dictionary/map of variables vars_files: List of files that contain vars to include in the play. diff --git a/test/units/compat/__init__.py b/lib/ansible/module_utils/_internal/__init__.py similarity index 100% rename from test/units/compat/__init__.py rename to lib/ansible/module_utils/_internal/__init__.py diff --git a/lib/ansible/module_utils/_internal/_concurrent/__init__.py b/lib/ansible/module_utils/_internal/_concurrent/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/lib/ansible/module_utils/_internal/_concurrent/_daemon_threading.py b/lib/ansible/module_utils/_internal/_concurrent/_daemon_threading.py new file mode 100644 index 00000000000..0b32a062fed --- /dev/null +++ b/lib/ansible/module_utils/_internal/_concurrent/_daemon_threading.py @@ -0,0 +1,28 @@ +"""Proxy stdlib threading module that only supports non-joinable daemon threads.""" +# NB: all new local module attrs are _ prefixed to ensure an identical public attribute surface area to the module we're proxying + +from __future__ import annotations as _annotations + +import threading as _threading +import typing as _t + + +class _DaemonThread(_threading.Thread): + """ + Daemon-only Thread subclass; prevents running threads of this type from blocking interpreter shutdown and process exit. + The join() method is a no-op. + """ + + def __init__(self, *args, daemon: bool | None = None, **kwargs) -> None: + super().__init__(*args, daemon=daemon or True, **kwargs) + + def join(self, timeout=None) -> None: + """ThreadPoolExecutor's atexit handler joins all queue threads before allowing shutdown; prevent them from blocking.""" + + +Thread = _DaemonThread # shadow the real Thread attr with our _DaemonThread + + +def __getattr__(name: str) -> _t.Any: + """Delegate anything not defined locally to the real `threading` module.""" + return getattr(_threading, name) diff --git a/lib/ansible/module_utils/_internal/_concurrent/_futures.py b/lib/ansible/module_utils/_internal/_concurrent/_futures.py new file mode 100644 index 00000000000..2ca493f6873 --- /dev/null +++ b/lib/ansible/module_utils/_internal/_concurrent/_futures.py @@ -0,0 +1,21 @@ +"""Utilities for concurrent code execution using futures.""" + +from __future__ import annotations + +import concurrent.futures +import types + +from . 
import _daemon_threading + + +class DaemonThreadPoolExecutor(concurrent.futures.ThreadPoolExecutor): + """ThreadPoolExecutor subclass that creates non-joinable daemon threads for non-blocking pool and process shutdown with abandoned threads.""" + + atc = concurrent.futures.ThreadPoolExecutor._adjust_thread_count + + # clone the base class `_adjust_thread_count` method with a copy of its globals dict + _adjust_thread_count = types.FunctionType(atc.__code__, atc.__globals__.copy(), name=atc.__name__, argdefs=atc.__defaults__, closure=atc.__closure__) + # patch the method closure's `threading` module import to use our daemon-only thread factory instead + _adjust_thread_count.__globals__.update(threading=_daemon_threading) + + del atc # don't expose this as a class attribute diff --git a/lib/ansible/module_utils/_text.py b/lib/ansible/module_utils/_text.py index f30a5e97126..b6dd62074f6 100644 --- a/lib/ansible/module_utils/_text.py +++ b/lib/ansible/module_utils/_text.py @@ -1,11 +1,10 @@ # Copyright (c), Toshio Kuratomi 2016 # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type """ .. warn:: Use ansible.module_utils.common.text.converters instead. """ +from __future__ import annotations # Backwards compat for people still calling it from this package # pylint: disable=unused-import diff --git a/lib/ansible/module_utils/api.py b/lib/ansible/module_utils/api.py index 2de8a4efc14..2415c38a839 100644 --- a/lib/ansible/module_utils/api.py +++ b/lib/ansible/module_utils/api.py @@ -23,13 +23,12 @@ The 'api' module provides the following common argument specs: - retries: number of attempts - retry_pause: delay between attempts in seconds """ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import copy import functools import itertools -import random +import secrets import sys import time @@ -132,7 +131,7 @@ def generate_jittered_backoff(retries=10, delay_base=3, delay_threshold=60): :param delay_threshold: The maximum time in seconds for any delay. """ for retry in range(0, retries): - yield random.randint(0, min(delay_threshold, delay_base * 2 ** retry)) + yield secrets.randbelow(min(delay_threshold, delay_base * 2 ** retry)) def retry_never(exception_or_result): diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 190e84099c1..fbc5ea17630 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -2,46 +2,22 @@ # Copyright (c), Toshio Kuratomi 2016 # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations +import json import sys +import typing as t # Used for determining if the system is running a new enough python version # and should only restrict on our documented minimum versions -_PY3_MIN = sys.version_info >= (3, 6) -_PY2_MIN = (2, 7) <= sys.version_info < (3,) -_PY_MIN = _PY3_MIN or _PY2_MIN - -if not _PY_MIN: - print( - '\n{"failed": true, ' - '"msg": "ansible-core requires a minimum of Python2 version 2.7 or Python3 version 3.6. 
Current version: %s"}' % ''.join(sys.version.splitlines()) - ) - sys.exit(1) +_PY_MIN = (3, 8) -FILE_ATTRIBUTES = { - 'A': 'noatime', - 'a': 'append', - 'c': 'compressed', - 'C': 'nocow', - 'd': 'nodump', - 'D': 'dirsync', - 'e': 'extents', - 'E': 'encrypted', - 'h': 'blocksize', - 'i': 'immutable', - 'I': 'indexed', - 'j': 'journalled', - 'N': 'inline', - 's': 'zero', - 'S': 'synchronous', - 't': 'notail', - 'T': 'blockroot', - 'u': 'undelete', - 'X': 'compressedraw', - 'Z': 'compresseddirty', -} +if sys.version_info < _PY_MIN: + print(json.dumps(dict( + failed=True, + msg=f"ansible-core requires a minimum of Python version {'.'.join(map(str, _PY_MIN))}. Current version: {''.join(sys.version.splitlines())}", + ))) + sys.exit(1) # Ansible modules can be written in any language. # The functions available here can be used to do many common tasks, @@ -50,7 +26,6 @@ FILE_ATTRIBUTES = { import __main__ import atexit import errno -import datetime import grp import fcntl import locale @@ -59,17 +34,16 @@ import pwd import platform import re import select +import selectors import shlex import shutil -import signal import stat import subprocess import tempfile import time import traceback -import types -from itertools import chain, repeat +from functools import reduce try: import syslog @@ -97,8 +71,6 @@ except ImportError: # Python2 & 3 way to get NoneType NoneType = type(None) -from ansible.module_utils.compat import selectors - from ._text import to_native, to_bytes, to_text from ansible.module_utils.common.text.converters import ( jsonify, @@ -120,21 +92,9 @@ import hashlib def _get_available_hash_algorithms(): """Return a dictionary of available hash function names and their associated function.""" - try: - # Algorithms available in Python 2.7.9+ and Python 3.2+ - # https://docs.python.org/2.7/library/hashlib.html#hashlib.algorithms_available - # https://docs.python.org/3.2/library/hashlib.html#hashlib.algorithms_available - algorithm_names = hashlib.algorithms_available - except AttributeError: - # Algorithms in Python 2.7.x (used only for Python 2.7.0 through 2.7.8) - # https://docs.python.org/2.7/library/hashlib.html#hashlib.hashlib.algorithms - algorithm_names = set(hashlib.algorithms) - algorithms = {} - - for algorithm_name in algorithm_names: + for algorithm_name in hashlib.algorithms_available: algorithm_func = getattr(hashlib, algorithm_name, None) - if algorithm_func: try: # Make sure the algorithm is actually available for use. 
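The simplification above leans on `hashlib.algorithms_available` existing on every supported Python; the probe step is kept because a listed algorithm can still refuse construction (for example `md5` on a FIPS-enabled host). A minimal sketch of that probing pattern, written independently of the module's helpers:

```python
import functools
import hashlib

# Sketch of the probe: names in hashlib.algorithms_available are not all
# guaranteed to be constructible, so try each one before offering it.
def usable_hash_algorithms() -> dict:
    usable = {}
    for name in hashlib.algorithms_available:
        try:
            hashlib.new(name)  # may raise ValueError, e.g. md5 under FIPS
        except Exception:
            continue
        usable[name] = functools.partial(hashlib.new, name)
    return usable
```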
@@ -151,12 +111,6 @@ def _get_available_hash_algorithms(): AVAILABLE_HASH_ALGORITHMS = _get_available_hash_algorithms() -try: - from ansible.module_utils.common._json_compat import json -except ImportError as e: - print('\n{{"msg": "Error: ansible requires the stdlib json: {0}", "failed": true}}'.format(to_native(e))) - sys.exit(1) - from ansible.module_utils.six.moves.collections_abc import ( KeysView, Mapping, MutableMapping, @@ -167,18 +121,19 @@ from ansible.module_utils.common.locale import get_best_parsable_locale from ansible.module_utils.common.process import get_bin_path from ansible.module_utils.common.file import ( _PERM_BITS as PERM_BITS, - _EXEC_PERM_BITS as EXEC_PERM_BITS, _DEFAULT_PERM as DEFAULT_PERM, is_executable, format_attributes, get_flags_from_attributes, + FILE_ATTRIBUTES, + S_IXANY, + S_IRWU_RWG_RWO, ) from ansible.module_utils.common.sys_info import ( get_distribution, get_distribution_version, get_platform_subclass, ) -from ansible.module_utils.pycompat24 import get_exception, literal_eval from ansible.module_utils.common.parameters import ( env_fallback, remove_values, @@ -189,17 +144,6 @@ from ansible.module_utils.common.parameters import ( ) from ansible.module_utils.errors import AnsibleFallbackNotFound, AnsibleValidationErrorMultiple, UnsupportedError -from ansible.module_utils.six import ( - PY2, - PY3, - b, - binary_type, - integer_types, - iteritems, - string_types, - text_type, -) -from ansible.module_utils.six.moves import map, reduce, shlex_quote from ansible.module_utils.common.validation import ( check_missing_parameters, safe_eval, @@ -221,24 +165,6 @@ PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd) imap = map -try: - # Python 2 - unicode # type: ignore[used-before-def] # pylint: disable=used-before-assignment -except NameError: - # Python 3 - unicode = text_type - -try: - # Python 2 - basestring # type: ignore[used-before-def,has-type] # pylint: disable=used-before-assignment -except NameError: - # Python 3 - basestring = string_types - -_literal_eval = literal_eval - -# End of deprecated names - # Internal global holding passed in params. This is consulted in case # multiple AnsibleModules are created. Otherwise each AnsibleModule would # attempt to read from stdin. Other code should not use this directly as it @@ -274,14 +200,14 @@ PERMS_RE = re.compile(r'^[rwxXstugo]*$') # def get_platform(): - ''' + """ **Deprecated** Use :py:func:`platform.system` directly. :returns: Name of the platform the module is running on in a native string Returns a native string that labels the platform ("Linux", "Solaris", etc). Currently, this is the result of calling :py:func:`platform.system`. - ''' + """ return platform.system() # End deprecated functions @@ -306,7 +232,7 @@ def get_all_subclasses(cls): def heuristic_log_sanitize(data, no_log_values=None): - ''' Remove strings that look like passwords from log messages ''' + """ Remove strings that look like passwords from log messages """ # Currently filters: # user:pass@foo/whatever and http://username:pass@wherever/foo # This code has false positives and consumes parts of logs that are @@ -371,7 +297,7 @@ def heuristic_log_sanitize(data, no_log_values=None): def _load_params(): - ''' read the modules parameters and store them globally. + """ read the modules parameters and store them globally. This function may be needed for certain very dynamic custom modules which want to process the parameters that are being handed the module. 
Since @@ -380,7 +306,7 @@ def _load_params(): will try not to break it gratuitously. It is certainly more future-proof to call this function and consume its outputs than to implement the logic inside it as a copy in your own code. - ''' + """ global _ANSIBLE_ARGS if _ANSIBLE_ARGS is not None: buffer = _ANSIBLE_ARGS @@ -391,37 +317,28 @@ def _load_params(): # We control the args and we pass them as utf8 if len(sys.argv) > 1: if os.path.isfile(sys.argv[1]): - fd = open(sys.argv[1], 'rb') - buffer = fd.read() - fd.close() + with open(sys.argv[1], 'rb') as fd: + buffer = fd.read() else: - buffer = sys.argv[1] - if PY3: - buffer = buffer.encode('utf-8', errors='surrogateescape') + buffer = sys.argv[1].encode('utf-8', errors='surrogateescape') # default case, read from stdin else: - if PY2: - buffer = sys.stdin.read() - else: - buffer = sys.stdin.buffer.read() + buffer = sys.stdin.buffer.read() _ANSIBLE_ARGS = buffer try: params = json.loads(buffer.decode('utf-8')) except ValueError: - # This helper used too early for fail_json to work. - print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}') + # This helper is used too early for fail_json to work. + print('\n{"msg": "Error: Module unable to decode stdin/parameters as valid JSON. Unable to parse what parameters were passed", "failed": true}') sys.exit(1) - if PY2: - params = json_dict_unicode_to_bytes(params) - try: return params['ANSIBLE_MODULE_ARGS'] except KeyError: # This helper does not have access to fail_json so we have to print # json output on our own. - print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", ' + print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in JSON data from stdin. Unable to figure out what parameters were passed", ' '"failed": true}') sys.exit(1) @@ -446,13 +363,13 @@ class AnsibleModule(object): required_one_of=None, add_file_common_args=False, supports_check_mode=False, required_if=None, required_by=None): - ''' + """ Common code for quickly building an ansible module in Python (although you can write modules with anything that can return JSON). See :ref:`developing_modules_general` for a general introduction and :ref:`developing_program_flow_modules` for more detailed explanation. 
- ''' + """ self._name = os.path.basename(__file__) # initialize name until we can parse from options self.argument_spec = argument_spec @@ -477,7 +394,6 @@ class AnsibleModule(object): # run_command invocation self.run_command_environ_update = {} self._clean = {} - self._string_conversion_action = '' self.aliases = {} self._legal_inputs = [] @@ -514,6 +430,8 @@ class AnsibleModule(object): try: error = self.validation_result.errors[0] + if isinstance(error, UnsupportedError) and self._ignore_unknown_opts: + error = None except IndexError: error = None @@ -539,7 +457,7 @@ class AnsibleModule(object): self._selinux_mls_enabled = None self._selinux_initial_context = None - # finally, make sure we're in a sane working dir + # finally, make sure we're in a logical working dir self._set_cwd() @property @@ -590,20 +508,20 @@ class AnsibleModule(object): raise AssertionError("implementation error -- version and date must not both be set") deprecate(msg, version=version, date=date, collection_name=collection_name) # For compatibility, we accept that neither version nor date is set, - # and treat that the same as if version would haven been set + # and treat that the same as if version would not have been set if date is not None: self.log('[DEPRECATION WARNING] %s %s' % (msg, date)) else: self.log('[DEPRECATION WARNING] %s %s' % (msg, version)) def load_file_common_arguments(self, params, path=None): - ''' + """ many modules deal with files, this encapsulates common options that the file module accepts such that it is directly available to all modules and they can share code. Allows to overwrite the path/dest module argument by providing path. - ''' + """ if path is None: path = params.get('path', params.get('dest', None)) @@ -716,12 +634,12 @@ class AnsibleModule(object): return (uid, gid) def find_mount_point(self, path): - ''' - Takes a path and returns it's mount point + """ + Takes a path and returns its mount point :param path: a string type with a filesystem path :returns: the path to the mount point as a text type - ''' + """ b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict')) while not os.path.ismount(b_path): @@ -735,9 +653,8 @@ class AnsibleModule(object): NFS or other 'special' fs mount point, otherwise the return will be (False, None). 
""" try: - f = open('/proc/mounts', 'r') - mount_data = f.readlines() - f.close() + with open('/proc/mounts', 'r') as f: + mount_data = f.readlines() except Exception: return (False, None) @@ -913,7 +830,7 @@ class AnsibleModule(object): details=to_native(e)) if mode != stat.S_IMODE(mode): - # prevent mode from having extra info orbeing invalid long number + # prevent mode from having extra info or being invalid long number path = to_text(b_path) self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode) @@ -990,7 +907,7 @@ class AnsibleModule(object): attr_mod = attributes[0] attributes = attributes[1:] - if existing.get('attr_flags', '') != attributes or attr_mod == '-': + if attributes and (existing.get('attr_flags', '') != attributes or attr_mod == '-'): attrcmd = self.get_bin_path('chattr') if attrcmd: attrcmd = [attrcmd, '%s%s' % (attr_mod, attributes), b_path] @@ -1103,7 +1020,7 @@ class AnsibleModule(object): if prev_mode is None: prev_mode = stat.S_IMODE(path_stat.st_mode) is_directory = stat.S_ISDIR(path_stat.st_mode) - has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0 + has_x_permissions = (prev_mode & S_IXANY) > 0 apply_X_permission = is_directory or has_x_permissions # Get the umask, if the 'user' part is empty, the effect is as if (a) were @@ -1196,10 +1113,10 @@ class AnsibleModule(object): return self.set_fs_attributes_if_different(file_args, changed, diff, expand) def add_path_info(self, kwargs): - ''' + """ for results that are files, supplement the info about the file in the return path with stats about the file path. - ''' + """ path = kwargs.get('path', kwargs.get('dest', None)) if path is None: @@ -1236,10 +1153,10 @@ class AnsibleModule(object): return kwargs def _check_locale(self): - ''' + """ Uses the locale module to test the currently set locale (per the LANG and LC_CTYPE environment settings) - ''' + """ try: # setting the locale to '' uses the default locale # as it would be returned by locale.getdefaultlocale() @@ -1283,14 +1200,15 @@ class AnsibleModule(object): setattr(self, PASS_VARS[k][0], PASS_VARS[k][1]) def safe_eval(self, value, locals=None, include_exceptions=False): + # deprecated: description='no longer used in the codebase' core_version='2.21' return safe_eval(value, locals, include_exceptions) def _load_params(self): - ''' read the input and set the params attribute. + """ read the input and set the params attribute. This method is for backwards compatibility. The guts of the function were moved out in 2.1 so that custom modules could read the parameters. - ''' + """ # debug overrides to read args from file or cmdline self.params = _load_params() @@ -1301,7 +1219,7 @@ class AnsibleModule(object): facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER) syslog.openlog(str(module), 0, facility) syslog.syslog(syslog.LOG_INFO, msg) - except TypeError as e: + except (TypeError, ValueError) as e: self.fail_json( msg='Failed to log to syslog (%s). 
To proceed anyway, ' 'disable syslog logging by setting no_target_syslog ' @@ -1322,25 +1240,26 @@ class AnsibleModule(object): log_args = dict() module = 'ansible-%s' % self._name - if isinstance(module, binary_type): + if isinstance(module, bytes): module = module.decode('utf-8', 'replace') # 6655 - allow for accented characters - if not isinstance(msg, (binary_type, text_type)): + if not isinstance(msg, (bytes, str)): raise TypeError("msg should be a string (got %s)" % type(msg)) # We want journal to always take text type # syslog takes bytes on py2, text type on py3 - if isinstance(msg, binary_type): - journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values) + if isinstance(msg, bytes): + journal_msg = msg.decode('utf-8', 'replace') else: # TODO: surrogateescape is a danger here on Py3 - journal_msg = remove_values(msg, self.no_log_values) + journal_msg = msg - if PY3: - syslog_msg = journal_msg - else: - syslog_msg = journal_msg.encode('utf-8', 'replace') + if self._target_log_info: + journal_msg = ' '.join([self._target_log_info, journal_msg]) + + # ensure we clean up secrets! + journal_msg = remove_values(journal_msg, self.no_log_values) if has_journal: journal_args = [("MODULE", os.path.basename(__file__))] @@ -1371,12 +1290,12 @@ class AnsibleModule(object): **dict(journal_args)) except IOError: # fall back to syslog since logging to journal failed - self._log_to_syslog(syslog_msg) + self._log_to_syslog(journal_msg) else: - self._log_to_syslog(syslog_msg) + self._log_to_syslog(journal_msg) def _log_invocation(self): - ''' log that ansible ran the module ''' + """ log that ansible ran the module """ # TODO: generalize a separate log function and make log_invocation use it # Sanitize possible password argument when logging. log_args = dict() @@ -1394,9 +1313,9 @@ class AnsibleModule(object): log_args[param] = 'NOT_LOGGING_PARAMETER' else: param_val = self.params[param] - if not isinstance(param_val, (text_type, binary_type)): + if not isinstance(param_val, (str, bytes)): param_val = str(param_val) - elif isinstance(param_val, text_type): + elif isinstance(param_val, str): param_val = param_val.encode('utf-8') log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values) @@ -1429,14 +1348,15 @@ class AnsibleModule(object): return None def get_bin_path(self, arg, required=False, opt_dirs=None): - ''' + """ Find system executable in PATH. :param arg: The executable to find. 
- :param required: if executable is not found and required is ``True``, fail_json + :param required: if the executable is not found and required is ``True``, fail_json :param opt_dirs: optional list of directories to search in addition to ``PATH`` - :returns: if found return full path; otherwise return None - ''' + :returns: if found return full path; otherwise return original arg, unless 'warning' then return None + :raises: Sysexit: if arg is not found and required=True (via fail_json) + """ bin_path = None try: @@ -1444,13 +1364,11 @@ class AnsibleModule(object): except ValueError as e: if required: self.fail_json(msg=to_text(e)) - else: - return bin_path return bin_path def boolean(self, arg): - '''Convert the argument to a boolean''' + """Convert the argument to a boolean""" if arg is None: return arg @@ -1511,18 +1429,26 @@ class AnsibleModule(object): if deprecations: kwargs['deprecations'] = deprecations + # preserve bools/none from no_log + preserved = {k: v for k, v in kwargs.items() if v is None or isinstance(v, bool)} + + # strip no_log collisions kwargs = remove_values(kwargs, self.no_log_values) + + # return preserved + kwargs.update(preserved) + print('\n%s' % self.jsonify(kwargs)) - def exit_json(self, **kwargs): - ''' return from the module, without error ''' + def exit_json(self, **kwargs) -> t.NoReturn: + """ return from the module, without error """ self.do_cleanup_files() self._return_formatted(kwargs) sys.exit(0) - def fail_json(self, msg, **kwargs): - ''' return from the module, with an error message ''' + def fail_json(self, msg, **kwargs) -> t.NoReturn: + """ return from the module, with an error message """ kwargs['failed'] = True kwargs['msg'] = msg @@ -1530,12 +1456,7 @@ class AnsibleModule(object): # Add traceback if debug or high verbosity and it is missing # NOTE: Badly named as exception, it really always has been a traceback if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3): - if PY2: - # On Python 2 this is the last (stack frame) exception and as such may be unrelated to the failure - kwargs['exception'] = 'WARNING: The below traceback may *not* be related to the actual failure.\n' +\ - ''.join(traceback.format_tb(sys.exc_info()[2])) - else: - kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2])) + kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2])) self.do_cleanup_files() self._return_formatted(kwargs) @@ -1550,7 +1471,7 @@ class AnsibleModule(object): self.fail_json(msg=to_native(e)) def digest_from_file(self, filename, algorithm): - ''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. ''' + """ Return hex digest of local file for a digest_method specified by name, or None if file is not present. """ b_filename = to_bytes(filename, errors='surrogate_or_strict') if not os.path.exists(b_filename): @@ -1578,7 +1499,7 @@ class AnsibleModule(object): return digest_method.hexdigest() def md5(self, filename): - ''' Return MD5 hex digest of local file using digest_from_file(). + """ Return MD5 hex digest of local file using digest_from_file(). Do not use this function unless you have no other choice for: 1) Optional backwards compatibility @@ -1587,21 +1508,21 @@ class AnsibleModule(object): This function will not work on systems complying with FIPS-140-2. Most uses of this function can use the module.sha1 function instead. - ''' + """ if 'md5' not in AVAILABLE_HASH_ALGORITHMS: raise ValueError('MD5 not available. 
Possibly running in FIPS mode') return self.digest_from_file(filename, 'md5') def sha1(self, filename): - ''' Return SHA1 hex digest of local file using digest_from_file(). ''' + """ Return SHA1 hex digest of local file using digest_from_file(). """ return self.digest_from_file(filename, 'sha1') def sha256(self, filename): - ''' Return SHA-256 hex digest of local file using digest_from_file(). ''' + """ Return SHA-256 hex digest of local file using digest_from_file(). """ return self.digest_from_file(filename, 'sha256') def backup_local(self, fn): - '''make a date-marked backup of the specified file, return True or False on success or failure''' + """make a date-marked backup of the specified file, return True or False on success or failure""" backupdest = '' if os.path.exists(fn): @@ -1630,7 +1551,7 @@ class AnsibleModule(object): # Similar to shutil.copy(), but metadata is copied as well - in fact, # this is just shutil.copy() followed by copystat(). This is similar # to the Unix command cp -p. - # + # shutil.copystat(src, dst) # Copy the permission bits, last access time, last modification time, # and flags from src to dst. The file contents, owner, and group are @@ -1658,32 +1579,20 @@ class AnsibleModule(object): current_attribs = current_attribs.get('attr_flags', '') self.set_attributes_if_different(dest, current_attribs, True) - def atomic_move(self, src, dest, unsafe_writes=False): - '''atomically move src to dest, copying attributes from dest, returns true on success + def atomic_move(self, src, dest, unsafe_writes=False, keep_dest_attrs=True): + """atomically move src to dest, copying attributes from dest, returns true on success it uses os.rename to ensure this as it is an atomic operation, rest of the function is - to work around limitations, corner cases and ensure selinux context is saved if possible''' + to work around limitations, corner cases and ensure selinux context is saved if possible""" context = None dest_stat = None b_src = to_bytes(src, errors='surrogate_or_strict') b_dest = to_bytes(dest, errors='surrogate_or_strict') - if os.path.exists(b_dest): + if os.path.exists(b_dest) and keep_dest_attrs: try: dest_stat = os.stat(b_dest) - - # copy mode and ownership - os.chmod(b_src, dest_stat.st_mode & PERM_BITS) os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid) - - # try to copy flags if possible - if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'): - try: - os.chflags(b_src, dest_stat.st_flags) - except OSError as e: - for err in 'EOPNOTSUPP', 'ENOTSUP': - if hasattr(errno, err) and e.errno == getattr(errno, err): - break - else: - raise + shutil.copystat(b_dest, b_src) + os.utime(b_src, times=(time.time(), time.time())) except OSError as e: if e.errno != errno.EPERM: raise @@ -1731,19 +1640,24 @@ class AnsibleModule(object): os.close(tmp_dest_fd) # leaves tmp file behind when sudo and not root try: - shutil.move(b_src, b_tmp_dest_name) + shutil.move(b_src, b_tmp_dest_name, copy_function=shutil.copy if keep_dest_attrs else shutil.copy2) except OSError: # cleanup will happen by 'rm' of tmpdir # copy2 will preserve some metadata - shutil.copy2(b_src, b_tmp_dest_name) + if keep_dest_attrs: + shutil.copy(b_src, b_tmp_dest_name) + else: + shutil.copy2(b_src, b_tmp_dest_name) if self.selinux_enabled(): self.set_context_if_different( b_tmp_dest_name, context, False) try: tmp_stat = os.stat(b_tmp_dest_name) - if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid): - os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid) 
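As a standalone sketch of the attribute-preserving replace that atomic_move() implements around this hunk (the file names and the helper are illustrative, not part of the patch):

import os
import shutil
import time

def replace_keeping_attrs(src, dest):
    # mirror the dest's ownership and mode/flags onto src, then refresh the
    # timestamps so the swapped-in file still reads as newly modified
    if os.path.exists(dest):
        st = os.stat(dest)
        os.chown(src, st.st_uid, st.st_gid)  # may require privileges
        shutil.copystat(dest, src)
        os.utime(src, times=(time.time(), time.time()))
    os.rename(src, dest)  # atomic when src and dest are on the same filesystem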
+ if keep_dest_attrs: + if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid): + os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid) + os.utime(b_tmp_dest_name, times=(time.time(), time.time())) except OSError as e: if e.errno != errno.EPERM: raise @@ -1768,9 +1682,13 @@ class AnsibleModule(object): # based on the current value of umask umask = os.umask(0) os.umask(umask) - os.chmod(b_dest, DEFAULT_PERM & ~umask) + os.chmod(b_dest, S_IRWU_RWG_RWO & ~umask) + dest_dir_stat = os.stat(os.path.dirname(b_dest)) try: - os.chown(b_dest, os.geteuid(), os.getegid()) + if dest_dir_stat.st_mode & stat.S_ISGID: + os.chown(b_dest, os.geteuid(), dest_dir_stat.st_gid) + else: + os.chown(b_dest, os.geteuid(), os.getegid()) except OSError: # We're okay with trying our best here. If the user is not # root (or old Unices) they won't be able to chown. @@ -1804,13 +1722,9 @@ class AnsibleModule(object): # create a printable version of the command for use in reporting later, # which strips out things like passwords from the args list to_clean_args = args - if PY2: - if isinstance(args, text_type): - to_clean_args = to_bytes(args) - else: - if isinstance(args, binary_type): - to_clean_args = to_text(args) - if isinstance(args, (text_type, binary_type)): + if isinstance(args, bytes): + to_clean_args = to_text(args) + if isinstance(args, (str, bytes)): to_clean_args = shlex.split(to_clean_args) clean_args = [] @@ -1829,21 +1743,24 @@ class AnsibleModule(object): is_passwd = True arg = heuristic_log_sanitize(arg, self.no_log_values) clean_args.append(arg) - self._clean = ' '.join(shlex_quote(arg) for arg in clean_args) + self._clean = ' '.join(shlex.quote(arg) for arg in clean_args) return self._clean - def _restore_signal_handlers(self): - # Reset SIGPIPE to SIG_DFL, otherwise in Python2.7 it gets ignored in subprocesses. - if PY2 and sys.platform != 'win32': - signal.signal(signal.SIGPIPE, signal.SIG_DFL) - def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict', expand_user_and_vars=True, pass_fds=None, before_communicate_callback=None, ignore_invalid_cwd=True, handle_exceptions=True): - ''' + """ Execute a command, returns rc, stdout, and stderr. + The mechanism of this method for reading stdout and stderr differs from + that of CPython subprocess.Popen.communicate, in that this method will + stop reading once the spawned command has exited and stdout and stderr + have been consumed, as opposed to waiting until stdout/stderr are + closed. This can be an important distinction, when taken into account + that a forked or backgrounded process may hold stdout or stderr open + for longer than the spawned command. + :arg args: is the command to run * If args is a list, the command will be run with shell=False. * If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False @@ -1902,11 +1819,11 @@ class AnsibleModule(object): byte strings. On python3, stdout and stderr are text strings converted according to the encoding and errors parameters. If you want byte strings on python3, use encoding=None to turn decoding to text off. 
- ''' + """ # used by clean args later on self._clean = None - if not isinstance(args, (list, binary_type, text_type)): + if not isinstance(args, (list, bytes, str)): msg = "Argument 'args' to run_command must be list or string" self.fail_json(rc=257, cmd=args, msg=msg) @@ -1915,7 +1832,7 @@ class AnsibleModule(object): # stringify args for unsafe/direct shell usage if isinstance(args, list): - args = b" ".join([to_bytes(shlex_quote(x), errors='surrogate_or_strict') for x in args]) + args = b" ".join([to_bytes(shlex.quote(x), errors='surrogate_or_strict') for x in args]) else: args = to_bytes(args, errors='surrogate_or_strict') @@ -1929,14 +1846,8 @@ class AnsibleModule(object): shell = True else: # ensure args are a list - if isinstance(args, (binary_type, text_type)): - # On python2.6 and below, shlex has problems with text type - # On python3, shlex needs a text type. - if PY2: - args = to_bytes(args, errors='surrogate_or_strict') - elif PY3: - args = to_text(args, errors='surrogateescape') - args = shlex.split(args) + if isinstance(args, (bytes, str)): + args = shlex.split(to_text(args, errors='surrogateescape')) # expand ``~`` in paths, and all environment vars if expand_user_and_vars: @@ -1946,11 +1857,8 @@ class AnsibleModule(object): prompt_re = None if prompt_regex: - if isinstance(prompt_regex, text_type): - if PY3: - prompt_regex = to_bytes(prompt_regex, errors='surrogateescape') - elif PY2: - prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict') + if isinstance(prompt_regex, str): + prompt_regex = to_bytes(prompt_regex, errors='surrogateescape') try: prompt_re = re.compile(prompt_regex, re.MULTILINE) except re.error: @@ -1989,7 +1897,6 @@ class AnsibleModule(object): st_in = subprocess.PIPE def preexec(): - self._restore_signal_handlers() if umask: os.umask(umask) @@ -2003,10 +1910,8 @@ class AnsibleModule(object): preexec_fn=preexec, env=env, ) - if PY3 and pass_fds: + if pass_fds: kwargs["pass_fds"] = pass_fds - elif PY2 and pass_fds: - kwargs['close_fds'] = False # make sure we're in the right working directory if cwd: @@ -2023,71 +1928,76 @@ class AnsibleModule(object): if before_communicate_callback: before_communicate_callback(cmd) - # the communication logic here is essentially taken from that - # of the _communicate() function in ssh.py - stdout = b'' stderr = b'' - try: - selector = selectors.DefaultSelector() - except (IOError, OSError): - # Failed to detect default selector for the given platform - # Select PollSelector which is supported by major platforms + + # Mirror the CPython subprocess logic and preference for the selector to use. + # poll/select have the advantage of not requiring any extra file + # descriptor, contrarily to epoll/kqueue (also, they require a single + # syscall). 
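Outside the module, the selector preference introduced below can be sketched with just the stdlib (a minimal illustration, not the module code):

import selectors

def make_selector():
    # poll() needs no extra file descriptor and services all fds in a single
    # syscall; select() is the universally available fallback
    if hasattr(selectors, 'PollSelector'):
        return selectors.PollSelector()
    return selectors.SelectSelector()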
+ if hasattr(selectors, 'PollSelector'): selector = selectors.PollSelector() + else: + selector = selectors.SelectSelector() if data: if not binary_data: data += '\n' - if isinstance(data, text_type): + if isinstance(data, str): data = to_bytes(data) - if not prompt_re: - stdout, stderr = cmd.communicate(input=data) - else: - # We only need this to look for a prompt, to abort instead of hanging - selector.register(cmd.stdout, selectors.EVENT_READ) - selector.register(cmd.stderr, selectors.EVENT_READ) - if os.name == 'posix': - fcntl.fcntl(cmd.stdout.fileno(), fcntl.F_SETFL, fcntl.fcntl(cmd.stdout.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK) - fcntl.fcntl(cmd.stderr.fileno(), fcntl.F_SETFL, fcntl.fcntl(cmd.stderr.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK) - - if data: - cmd.stdin.write(data) - cmd.stdin.close() - - while True: - events = selector.select(1) - for key, event in events: - b_chunk = key.fileobj.read() - if b_chunk == b(''): - selector.unregister(key.fileobj) - if key.fileobj == cmd.stdout: - stdout += b_chunk - elif key.fileobj == cmd.stderr: - stderr += b_chunk - # if we're checking for prompts, do it now - if prompt_re: - if prompt_re.search(stdout) and not data: - if encoding: - stdout = to_native(stdout, encoding=encoding, errors=errors) - return (257, stdout, "A prompt was encountered while running a command, but no input data was specified") - # only break out if no pipes are left to read or - # the pipes are completely read and - # the process is terminated - if (not events or not selector.get_map()) and cmd.poll() is not None: - break - # No pipes are left to read but process is not yet terminated - # Only then it is safe to wait for the process to be finished - # NOTE: Actually cmd.poll() is always None here if no selectors are left - elif not selector.get_map() and cmd.poll() is None: - cmd.wait() - # The process is terminated. Since no pipes to read from are - # left, there is no need to call select() again. - break - - cmd.stdout.close() - cmd.stderr.close() - selector.close() + selector.register(cmd.stdout, selectors.EVENT_READ) + selector.register(cmd.stderr, selectors.EVENT_READ) + + if os.name == 'posix': + fcntl.fcntl(cmd.stdout.fileno(), fcntl.F_SETFL, fcntl.fcntl(cmd.stdout.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK) + fcntl.fcntl(cmd.stderr.fileno(), fcntl.F_SETFL, fcntl.fcntl(cmd.stderr.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK) + + if data: + cmd.stdin.write(data) + cmd.stdin.close() + + while True: + # A timeout of 1 is both a little short and a little long. + # With None we could deadlock, with a lower value we would + # waste cycles. 
As it is, this is a mild inconvenience if + # we need to exit, and likely doesn't waste too many cycles + events = selector.select(1) + stdout_changed = False + for key, event in events: + b_chunk = key.fileobj.read(32768) + if not b_chunk: + selector.unregister(key.fileobj) + elif key.fileobj == cmd.stdout: + stdout += b_chunk + stdout_changed = True + elif key.fileobj == cmd.stderr: + stderr += b_chunk + + # if we're checking for prompts, do it now, but only if stdout + # actually changed since the last loop + if prompt_re and stdout_changed and prompt_re.search(stdout) and not data: + if encoding: + stdout = to_native(stdout, encoding=encoding, errors=errors) + return (257, stdout, "A prompt was encountered while running a command, but no input data was specified") + + # break out if no pipes are left to read or the pipes are completely read + # and the process is terminated + if (not events or not selector.get_map()) and cmd.poll() is not None: + break + + # No pipes are left to read but process is not yet terminated + # Only then it is safe to wait for the process to be finished + # NOTE: Actually cmd.poll() is always None here if no selectors are left + elif not selector.get_map() and cmd.poll() is None: + cmd.wait() + # The process is terminated. Since no pipes to read from are + # left, there is no need to call select() again. + break + + cmd.stdout.close() + cmd.stderr.close() + selector.close() rc = cmd.returncode except (OSError, IOError) as e: @@ -2115,9 +2025,8 @@ class AnsibleModule(object): def append_to_file(self, filename, str): filename = os.path.expandvars(os.path.expanduser(filename)) - fh = open(filename, 'a') - fh.write(str) - fh.close() + with open(filename, 'a') as fh: + fh.write(str) def bytes_to_human(self, size): return bytes_to_human(size) @@ -2145,10 +2054,53 @@ class AnsibleModule(object): # not as exact as above, but should be good enough for most platforms that fail the previous call buffer_size = select.PIPE_BUF except Exception: - buffer_size = 9000 # use sane default JIC + buffer_size = 9000 # use logical default JIC return buffer_size def get_module_path(): return os.path.dirname(os.path.realpath(__file__)) + + +def __getattr__(importable_name): + """Inject import-time deprecation warnings.""" + if importable_name == 'datetime': + import datetime + importable = datetime + elif importable_name == 'signal': + import signal + importable = signal + elif importable_name == 'types': + import types + importable = types + elif importable_name == 'chain': + from itertools import chain + importable = chain + elif importable_name == 'repeat': + from itertools import repeat + importable = repeat + elif importable_name in { + 'PY2', 'PY3', 'b', 'binary_type', 'integer_types', + 'iteritems', 'string_types', 'text_type' + }: + import importlib + importable = getattr( + importlib.import_module('ansible.module_utils.six'), + importable_name + ) + elif importable_name == 'map': + importable = map + elif importable_name == 'shlex_quote': + importable = shlex.quote + else: + raise AttributeError( + f'cannot import name {importable_name !r} ' + f"from '{__name__}' ({__file__ !s})" + ) + + deprecate( + msg=f"Importing '{importable_name}' from '{__name__}' is deprecated.", + version="2.21", + ) + return importable diff --git a/lib/ansible/module_utils/common/_collections_compat.py b/lib/ansible/module_utils/common/_collections_compat.py index f0f8f0d01cc..25f7889d8ef 100644 --- a/lib/ansible/module_utils/common/_collections_compat.py +++ 
b/lib/ansible/module_utils/common/_collections_compat.py @@ -6,8 +6,7 @@ Use `ansible.module_utils.six.moves.collections_abc` instead, which has been ava This module exists only for backwards compatibility. """ -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations # Although this was originally intended for internal use only, it has wide adoption in collections. # This is due in part to sanity tests previously recommending its use over `collections` imports. diff --git a/lib/ansible/module_utils/common/_json_compat.py b/lib/ansible/module_utils/common/_json_compat.py deleted file mode 100644 index 787af0ff1ab..00000000000 --- a/lib/ansible/module_utils/common/_json_compat.py +++ /dev/null @@ -1,16 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2019 Ansible Project -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -import types -import json - -# Detect the python-json library which is incompatible -try: - if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType): - raise ImportError('json.loads or json.dumps were not found in the imported json library.') -except AttributeError: - raise ImportError('python-json was detected, which is incompatible.') diff --git a/lib/ansible/module_utils/common/_utils.py b/lib/ansible/module_utils/common/_utils.py index 66df3167771..deab1fcdf9c 100644 --- a/lib/ansible/module_utils/common/_utils.py +++ b/lib/ansible/module_utils/common/_utils.py @@ -1,18 +1,16 @@ # Copyright (c) 2018, Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - """ Modules in _utils are waiting to find a better home. If you need to use them, be prepared for them to move to a different location in the future. """ +from __future__ import annotations def get_all_subclasses(cls): - ''' + """ Recursively search and find all subclasses of a given class :arg cls: A python class @@ -23,7 +21,7 @@ def get_all_subclasses(cls): of a class exist. However, `__subclasses__` only goes one level deep. This function searches each child class's `__subclasses__` method to find all of the descendent classes. It then returns an iterable of the descendent classes. 
- ''' + """ # Retrieve direct subclasses subclasses = set(cls.__subclasses__()) to_visit = list(subclasses) diff --git a/lib/ansible/module_utils/common/arg_spec.py b/lib/ansible/module_utils/common/arg_spec.py index d9f716efce6..37019e7df33 100644 --- a/lib/ansible/module_utils/common/arg_spec.py +++ b/lib/ansible/module_utils/common/arg_spec.py @@ -2,8 +2,7 @@ # Copyright (c) 2021 Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations from copy import deepcopy diff --git a/lib/ansible/module_utils/common/collections.py b/lib/ansible/module_utils/common/collections.py index 06f08a82d7e..28c53e14e2c 100644 --- a/lib/ansible/module_utils/common/collections.py +++ b/lib/ansible/module_utils/common/collections.py @@ -3,8 +3,7 @@ # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) """Collection of low-level utility functions.""" -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.six import binary_type, text_type @@ -66,7 +65,7 @@ class ImmutableDict(Hashable, Mapping): def is_string(seq): - """Identify whether the input has a string-like type (inclding bytes).""" + """Identify whether the input has a string-like type (including bytes).""" # AnsibleVaultEncryptedUnicode inherits from Sequence, but is expected to be a string like object return isinstance(seq, (text_type, binary_type)) or getattr(seq, '__ENCRYPTED__', False) diff --git a/lib/ansible/module_utils/common/dict_transformations.py b/lib/ansible/module_utils/common/dict_transformations.py index 9ee7878f393..8d318f5ef63 100644 --- a/lib/ansible/module_utils/common/dict_transformations.py +++ b/lib/ansible/module_utils/common/dict_transformations.py @@ -3,8 +3,7 @@ # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import re @@ -110,9 +109,9 @@ def _camel_to_snake(name, reversible=False): def dict_merge(a, b): - '''recursively merges dicts. not just simple a['key'] = b['key'], if + """recursively merges dicts. 
not just simple a['key'] = b['key'], if both a and b have a key whose value is a dict then dict_merge is called - on both values and the result stored in the returned dictionary.''' + on both values and the result stored in the returned dictionary.""" if not isinstance(b, dict): return b result = deepcopy(a) diff --git a/lib/ansible/module_utils/common/file.py b/lib/ansible/module_utils/common/file.py index 72b0d2cf0f0..4c54b184111 100644 --- a/lib/ansible/module_utils/common/file.py +++ b/lib/ansible/module_utils/common/file.py @@ -1,19 +1,12 @@ # Copyright (c) 2018, Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import stat import re -try: - import selinux # pylint: disable=unused-import - HAVE_SELINUX = True -except ImportError: - HAVE_SELINUX = False - FILE_ATTRIBUTES = { 'A': 'noatime', @@ -45,9 +38,15 @@ USERS_RE = re.compile(r'[^ugo]') PERMS_RE = re.compile(r'[^rwxXstugo]') -_PERM_BITS = 0o7777 # file mode permission bits -_EXEC_PERM_BITS = 0o0111 # execute permission bits -_DEFAULT_PERM = 0o0666 # default file permission bits +S_IRANY = 0o0444 # read by user, group, others +S_IWANY = 0o0222 # write by user, group, others +S_IXANY = 0o0111 # execute by user, group, others +S_IRWU_RWG_RWO = S_IRANY | S_IWANY # read, write by user, group, others +S_IRWU_RG_RO = S_IRANY | stat.S_IWUSR # read by user, group, others and write only by user +S_IRWXU_RXG_RXO = S_IRANY | S_IXANY | stat.S_IWUSR # read, execute by user, group, others and write only by user +_PERM_BITS = 0o7777 # file mode permission bits +_EXEC_PERM_BITS = S_IXANY # execute permission bits +_DEFAULT_PERM = S_IRWU_RWG_RWO # default file permission bits def is_executable(path): @@ -56,7 +55,7 @@ def is_executable(path): # This method is reused by the basic module, # the repetition helps the basic module's html documentation come out right. # http://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_docstring_signature - '''is_executable(path) + """is_executable(path) is the given path executable? @@ -67,7 +66,7 @@ def is_executable(path): * Does not account for FSACLs. * Most times we really want to know "Can the current user execute this file". This function does not tell us that, only if any execute bit is set. - ''' + """ # These are all bitfields so first bitwise-or all the permissions we're # looking for, then bitwise-and with the file's mode to determine if any # execute bits are set. 
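A quick sanity check that the new named constants compose back to the octal values they replace (the constants are restated here so the snippet is self-contained):

import stat

S_IRANY = 0o0444
S_IWANY = 0o0222
S_IXANY = 0o0111

assert S_IRANY | S_IWANY == 0o0666                 # S_IRWU_RWG_RWO, the old _DEFAULT_PERM
assert S_IRANY | stat.S_IWUSR == 0o0644            # S_IRWU_RG_RO
assert S_IRANY | S_IXANY | stat.S_IWUSR == 0o0755  # S_IRWXU_RXG_RXO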
diff --git a/lib/ansible/module_utils/common/json.py b/lib/ansible/module_utils/common/json.py index 33f1010424b..fe65a8d701c 100644 --- a/lib/ansible/module_utils/common/json.py +++ b/lib/ansible/module_utils/common/json.py @@ -2,9 +2,7 @@ # Copyright (c) 2019 Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json @@ -44,9 +42,9 @@ def json_dump(structure): class AnsibleJSONEncoder(json.JSONEncoder): - ''' + """ Simple encoder class to deal with JSON encoding of Ansible internal types - ''' + """ def __init__(self, preprocess_unsafe=False, vault_to_text=False, **kwargs): self._preprocess_unsafe = preprocess_unsafe diff --git a/lib/ansible/module_utils/common/locale.py b/lib/ansible/module_utils/common/locale.py index 08216f59f00..872931ced10 100644 --- a/lib/ansible/module_utils/common/locale.py +++ b/lib/ansible/module_utils/common/locale.py @@ -1,14 +1,13 @@ # Copyright (c), Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.common.text.converters import to_native def get_best_parsable_locale(module, preferences=None, raise_on_locale=False): - ''' + """ Attempts to return the best possible locale for parsing output in English useful for scraping output with i18n tools. When this raises an exception and the caller wants to continue, it should use the 'C' locale. @@ -18,7 +17,7 @@ def get_best_parsable_locale(module, preferences=None, raise_on_locale=False): :param raise_on_locale: boolean that determines if we raise exception or not due to locale CLI issues :returns: The first matched preferred locale or 'C' which is the default - ''' + """ found = 'C' # default posix, its ascii but always there try: diff --git a/lib/ansible/module_utils/common/network.py b/lib/ansible/module_utils/common/network.py index c3874f89e1d..a85fc1ce4ab 100644 --- a/lib/ansible/module_utils/common/network.py +++ b/lib/ansible/module_utils/common/network.py @@ -3,8 +3,7 @@ # General networking tools that may be used by all modules -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re from struct import pack @@ -62,7 +61,7 @@ def to_masklen(val): def to_subnet(addr, mask, dotted_notation=False): - """ coverts an addr / mask pair to a subnet in cidr notation """ + """ converts an addr / mask pair to a subnet in cidr notation """ try: if not is_masklen(mask): raise ValueError diff --git a/lib/ansible/module_utils/common/parameters.py b/lib/ansible/module_utils/common/parameters.py index 386eb875cef..c80ca6ccf16 100644 --- a/lib/ansible/module_utils/common/parameters.py +++ b/lib/ansible/module_utils/common/parameters.py @@ -2,8 +2,7 @@ # Copyright (c) 2019 Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import datetime import os @@ -83,25 +82,27 @@ _ADDITIONAL_CHECKS = ( # if adding boolean attribute, also add to PASS_BOOL # some of this dupes defaults from controller config +# keep in 
sync with copy in lib/ansible/module_utils/csharp/Ansible.Basic.cs PASS_VARS = { 'check_mode': ('check_mode', False), 'debug': ('_debug', False), 'diff': ('_diff', False), 'keep_remote_files': ('_keep_remote_files', False), + 'ignore_unknown_opts': ('_ignore_unknown_opts', False), 'module_name': ('_name', None), 'no_log': ('no_log', False), 'remote_tmp': ('_remote_tmp', None), + 'target_log_info': ('_target_log_info', None), 'selinux_special_fs': ('_selinux_special_fs', ['fuse', 'nfs', 'vboxsf', 'ramfs', '9p', 'vfat']), 'shell_executable': ('_shell', '/bin/sh'), 'socket': ('_socket_path', None), - 'string_conversion_action': ('_string_conversion_action', 'warn'), 'syslog_facility': ('_syslog_facility', 'INFO'), 'tmpdir': ('_tmpdir', None), 'verbosity': ('_verbosity', 0), 'version': ('ansible_version', '0.0'), } -PASS_BOOLS = ('check_mode', 'debug', 'diff', 'keep_remote_files', 'no_log') +PASS_BOOLS = ('check_mode', 'debug', 'diff', 'keep_remote_files', 'ignore_unknown_opts', 'no_log') DEFAULT_TYPE_VALIDATORS = { 'str': check_type_str, @@ -345,7 +346,7 @@ def _list_no_log_values(argument_spec, params): sub_param = check_type_dict(sub_param) if not isinstance(sub_param, Mapping): - raise TypeError("Value '{1}' in the sub parameter field '{0}' must by a {2}, " + raise TypeError("Value '{1}' in the sub parameter field '{0}' must be a {2}, " "not '{1.__class__.__name__}'".format(arg_name, sub_param, wanted_type)) no_log_values.update(_list_no_log_values(sub_argument_spec, sub_param)) @@ -363,12 +364,10 @@ def _return_datastructure_name(obj): return elif isinstance(obj, Mapping): for element in obj.items(): - for subelement in _return_datastructure_name(element[1]): - yield subelement + yield from _return_datastructure_name(element[1]) elif is_iterable(obj): for element in obj: - for subelement in _return_datastructure_name(element): - yield subelement + yield from _return_datastructure_name(element) elif obj is None or isinstance(obj, bool): # This must come before int because bools are also ints return @@ -663,7 +662,7 @@ def _validate_argument_values(argument_spec, parameters, options_context=None, e diff_list = [item for item in parameters[param] if item not in choices] if diff_list: choices_str = ", ".join([to_native(c) for c in choices]) - diff_str = ", ".join(diff_list) + diff_str = ", ".join([to_native(c) for c in diff_list]) msg = "value of %s must be one or more of: %s. Got no match for: %s" % (param, choices_str, diff_str) if options_context: msg = "{0} found in {1}".format(msg, " -> ".join(options_context)) diff --git a/lib/ansible/module_utils/common/process.py b/lib/ansible/module_utils/common/process.py index 97761a4707d..eb11f8e44d1 100644 --- a/lib/ansible/module_utils/common/process.py +++ b/lib/ansible/module_utils/common/process.py @@ -1,45 +1,65 @@ # Copyright (c) 2018, Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os from ansible.module_utils.common.file import is_executable +from ansible.module_utils.common.warnings import deprecate def get_bin_path(arg, opt_dirs=None, required=None): - ''' - Find system executable in PATH. Raises ValueError if executable is not found. - Optional arguments: - - required: [Deprecated] Prior to 2.10, if executable is not found and required is true it raises an Exception. - In 2.10 and later, an Exception is always raised. 
This parameter will be removed in 2.14. - - opt_dirs: optional list of directories to search in addition to PATH + """ + Find system executable in PATH. Raises ValueError if the executable is not found. + + :param arg: the executable to find + :type arg: string + :param opt_dirs: optional list of directories to search in addition to PATH + :type opt_dirs: list of strings + :param required: DEPRECATED. This parameter will be removed in 2.21 + :type required: boolean + :returns: path to arg (should be abs path unless PATH or opt_dirs are relative paths) + :raises: ValueError: if arg is not found + In addition to PATH and opt_dirs, this function also looks through /sbin, /usr/sbin and /usr/local/sbin. A lot of modules, especially for gathering facts, depend on this behaviour. - If found return full path, otherwise raise ValueError. - ''' - opt_dirs = [] if opt_dirs is None else opt_dirs + """ + if required is not None: + deprecate( + msg="The `required` parameter in `get_bin_path` API is deprecated.", + version="2.21", + collection_name="ansible.builtin", + ) - sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin'] paths = [] + sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin'] + opt_dirs = [] if opt_dirs is None else opt_dirs + + # Construct possible paths with precedence + # passed in paths for d in opt_dirs: if d is not None and os.path.exists(d): paths.append(d) + # system configured paths paths += os.environ.get('PATH', '').split(os.pathsep) - bin_path = None - # mangle PATH to include /sbin dirs + + # existing /sbin dirs, if not there already for p in sbin_paths: if p not in paths and os.path.exists(p): paths.append(p) + + # Search for binary + bin_path = None for d in paths: if not d: continue path = os.path.join(d, arg) if os.path.exists(path) and not os.path.isdir(path) and is_executable(path): + # first found wins bin_path = path break + if bin_path is None: raise ValueError('Failed to find required executable "%s" in paths: %s' % (arg, os.pathsep.join(paths))) diff --git a/lib/ansible/module_utils/common/respawn.py b/lib/ansible/module_utils/common/respawn.py index 3bc526af840..2938c86a487 100644 --- a/lib/ansible/module_utils/common/respawn.py +++ b/lib/ansible/module_utils/common/respawn.py @@ -1,26 +1,26 @@ # Copyright: (c) 2021, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import subprocess import sys +import typing as t -from ansible.module_utils.common.text.converters import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes def has_respawned(): return hasattr(sys.modules['__main__'], '_respawned') -def respawn_module(interpreter_path): +def respawn_module(interpreter_path) -> t.NoReturn: """ Respawn the currently-running Ansible Python module under the specified Python interpreter. Ansible modules that require libraries that are typically available only under well-known interpreters - (eg, ``yum``, ``apt``, ``dnf``) can use bespoke logic to determine the libraries they need are not + (eg, ``apt``, ``dnf``) can use bespoke logic to determine the libraries they need are not available, then call `respawn_module` to re-execute the current module under a different interpreter and exit the current process when the new subprocess has completed. The respawned process inherits only stdout/stderr from the current process.
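A typical caller-side sketch of the respawn pattern described above; the interpreter path and the dnf import are illustrative only, and this works only inside a module launched by AnsiballZ:

from ansible.module_utils.common.respawn import has_respawned, respawn_module

try:
    import dnf  # noqa: F401  # importable only under the system interpreter
except ImportError:
    if not has_respawned():
        respawn_module('/usr/bin/python3')  # re-executes this module; does not return
    raise  # already respawned once and the library is still missing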
@@ -75,14 +75,13 @@ def _create_payload(): raise Exception('unable to access ansible.module_utils.basic._ANSIBLE_ARGS (not launched by AnsiballZ?)') module_fqn = sys.modules['__main__']._module_fqn modlib_path = sys.modules['__main__']._modlib_path - respawn_code_template = ''' + respawn_code_template = """ import runpy import sys -module_fqn = '{module_fqn}' -modlib_path = '{modlib_path}' -smuggled_args = b"""{smuggled_args}""".strip() - +module_fqn = {module_fqn!r} +modlib_path = {modlib_path!r} +smuggled_args = {smuggled_args!r} if __name__ == '__main__': sys.path.insert(0, modlib_path) @@ -91,8 +90,8 @@ if __name__ == '__main__': basic._ANSIBLE_ARGS = smuggled_args runpy.run_module(module_fqn, init_globals=dict(_respawned=True), run_name='__main__', alter_sys=True) - ''' + """ - respawn_code = respawn_code_template.format(module_fqn=module_fqn, modlib_path=modlib_path, smuggled_args=to_native(smuggled_args)) + respawn_code = respawn_code_template.format(module_fqn=module_fqn, modlib_path=modlib_path, smuggled_args=smuggled_args.strip()) return respawn_code diff --git a/lib/ansible/module_utils/common/sentinel.py b/lib/ansible/module_utils/common/sentinel.py new file mode 100644 index 00000000000..0fdbf4ce318 --- /dev/null +++ b/lib/ansible/module_utils/common/sentinel.py @@ -0,0 +1,66 @@ +# Copyright (c) 2019 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import annotations + + +class Sentinel: + """ + Object which can be used to mark an entry as being special + + A sentinel value demarcates a value or marks an entry as having a special meaning. In C, the + Null byte is used as a sentinel for the end of a string. In Python, None is often used as + a Sentinel in optional parameters to mean that the parameter was not set by the user. + + You should use None as a Sentinel value in any Python code where None is not a valid entry. If + None is a valid entry, though, then you need to create a different value, which is the purpose + of this class. + + Example of using Sentinel as a default parameter value:: + + def confirm_big_red_button(tristate=Sentinel): + if tristate is Sentinel: + print('You must explicitly press the big red button to blow up the base') + elif tristate is True: + print('Countdown to destruction activated') + elif tristate is False: + print('Countdown stopped') + elif tristate is None: + print('Waiting for more input') + + Example of using Sentinel to tell whether a dict which has a default value has been changed:: + + values = {'one': Sentinel, 'two': Sentinel} + defaults = {'one': 1, 'two': 2} + + # [.. Other code which does things including setting a new value for 'one' ..] + values['one'] = None + # [..] + + print('You made changes to:') + for key, value in values.items(): + if value is Sentinel: + continue + print('%s: %s' % (key, value)) + """ + + def __new__(cls): + """ + Return the cls itself. This makes both equality and identity True for comparing the class + to an instance of the class, preventing common usage errors.
+ + Preferred usage:: + + a = Sentinel + if a is Sentinel: + print('Sentinel value') + + However, these are True as well, eliminating common usage errors:: + + if Sentinel is Sentinel(): + print('Sentinel value') + + if Sentinel == Sentinel(): + print('Sentinel value') + """ + return cls diff --git a/lib/ansible/module_utils/common/sys_info.py b/lib/ansible/module_utils/common/sys_info.py index 206b36c764f..98dc3d631af 100644 --- a/lib/ansible/module_utils/common/sys_info.py +++ b/lib/ansible/module_utils/common/sys_info.py @@ -2,8 +2,7 @@ # Copyright (c), Toshio Kuratomi 2016 # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import platform @@ -15,7 +14,7 @@ __all__ = ('get_distribution', 'get_distribution_version', 'get_platform_subclas def get_distribution(): - ''' + """ Return the name of the distribution the module is running on. :rtype: NativeString or None @@ -24,7 +23,7 @@ def get_distribution(): This function attempts to determine what distribution the code is running on and return a string representing that value. If the platform is Linux and the distribution cannot be determined, it returns ``OtherLinux``. - ''' + """ distribution = distro.id().capitalize() if platform.system() == 'Linux': @@ -39,14 +38,14 @@ def get_distribution(): def get_distribution_version(): - ''' + """ Get the version of the distribution the code is running on :rtype: NativeString or None :returns: A string representation of the version of the distribution. If it cannot determine the version, it returns an empty string. If this is not run on a Linux machine it returns None. - ''' + """ version = None needs_best_version = frozenset(( @@ -80,12 +79,12 @@ def get_distribution_version(): def get_distribution_codename(): - ''' + """ Return the code name for this Linux Distribution :rtype: NativeString or None :returns: A string representation of the distribution's codename or None if not a Linux distro - ''' + """ codename = None if platform.system() == 'Linux': # Until this gets merged and we update our bundled copy of distro: @@ -110,7 +109,7 @@ def get_distribution_codename(): def get_platform_subclass(cls): - ''' + """ Finds a subclass implementing desired functionality on the platform the code is running on :arg cls: Class to find an appropriate subclass for @@ -136,7 +135,7 @@ def get_platform_subclass(cls): def __new__(cls, *args, **kwargs): new_cls = get_platform_subclass(User) return super(cls, new_cls).__new__(new_cls) - ''' + """ this_platform = platform.system() distribution = get_distribution() diff --git a/lib/ansible/module_utils/common/text/converters.py b/lib/ansible/module_utils/common/text/converters.py index 5b41315bc8d..6bfa8470b69 100644 --- a/lib/ansible/module_utils/common/text/converters.py +++ b/lib/ansible/module_utils/common/text/converters.py @@ -3,8 +3,7 @@ # (c) 2016 Toshio Kuratomi # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import codecs import datetime @@ -279,11 +278,11 @@ def jsonify(data, **kwargs): def container_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'): - ''' Recursively convert dict keys and values to byte str + """ Recursively convert dict keys and values to byte str Specialized for json 
return because this only handles lists, tuples, and dict container types (the containers that the json module returns) - ''' + """ if isinstance(d, text_type): return to_bytes(d, encoding=encoding, errors=errors) diff --git a/lib/ansible/module_utils/common/text/formatters.py b/lib/ansible/module_utils/common/text/formatters.py index 0c3d4951103..d548085c57f 100644 --- a/lib/ansible/module_utils/common/text/formatters.py +++ b/lib/ansible/module_utils/common/text/formatters.py @@ -2,8 +2,7 @@ # Copyright (c) 2019 Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import re @@ -21,6 +20,18 @@ SIZE_RANGES = { 'B': 1, } +VALID_UNITS = { + 'B': (('byte', 'B'), ('bit', 'b')), + 'K': (('kilobyte', 'KB'), ('kilobit', 'Kb')), + 'M': (('megabyte', 'MB'), ('megabit', 'Mb')), + 'G': (('gigabyte', 'GB'), ('gigabit', 'Gb')), + 'T': (('terabyte', 'TB'), ('terabit', 'Tb')), + 'P': (('petabyte', 'PB'), ('petabit', 'Pb')), + 'E': (('exabyte', 'EB'), ('exabit', 'Eb')), + 'Z': (('zetabyte', 'ZB'), ('zetabit', 'Zb')), + 'Y': (('yottabyte', 'YB'), ('yottabit', 'Yb')), +} + def lenient_lowercase(lst): """Lowercase elements of a list. @@ -54,7 +65,8 @@ def human_to_bytes(number, default_unit=None, isbits=False): The function expects 'b' (lowercase) as a bit identifier, e.g. 'Mb'/'Kb'/etc. if 'MB'/'KB'/... is passed, the ValueError will be raised. """ - m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE) + m = re.search(r'^([0-9]*\.?[0-9]+)(?:\s*([A-Za-z]+))?\s*$', str(number)) + if m is None: raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number)) try: @@ -87,10 +99,13 @@ def human_to_bytes(number, default_unit=None, isbits=False): expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key) if range_key == 'B': expect_message = 'expect %s or %s' % (unit_class, unit_class_name) - - if unit_class_name in unit.lower(): + unit_group = VALID_UNITS.get(range_key, None) + if unit_group is None: + raise ValueError(f"human_to_bytes() can't interpret a valid unit for {range_key}") + isbits_flag = 1 if isbits else 0 + if unit.lower() == unit_group[isbits_flag][0]: pass - elif unit[1] != unit_class: + elif unit != unit_group[isbits_flag][1]: raise ValueError("human_to_bytes() failed to convert %s.
Value is not a valid string (%s)" % (number, expect_message)) return int(round(num * limit)) diff --git a/lib/ansible/module_utils/common/validation.py b/lib/ansible/module_utils/common/validation.py index cc5478999c7..1098f27336e 100644 --- a/lib/ansible/module_utils/common/validation.py +++ b/lib/ansible/module_utils/common/validation.py @@ -2,22 +2,22 @@ # Copyright (c) 2019 Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations +import decimal +import json import os import re from ast import literal_eval from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.common._json_compat import json from ansible.module_utils.common.collections import is_iterable from ansible.module_utils.common.text.converters import jsonify from ansible.module_utils.common.text.formatters import human_to_bytes +from ansible.module_utils.common.warnings import deprecate from ansible.module_utils.parsing.convert_bool import boolean from ansible.module_utils.six import ( binary_type, - integer_types, string_types, text_type, ) @@ -40,6 +40,10 @@ def count_terms(terms, parameters): def safe_eval(value, locals=None, include_exceptions=False): + deprecate( + "The safe_eval function should not be used.", + version="2.21", + ) # do not allow method calls to modules if not isinstance(value, string_types): # already templated to a datavaluestructure, perhaps? @@ -181,7 +185,7 @@ def check_required_by(requirements, parameters, options_context=None): :kwarg options_context: List of strings of parent key names if ``requirements`` are in a sub spec. - :returns: Empty dictionary or raises :class:`TypeError` if the + :returns: Empty dictionary or raises :class:`TypeError` if the check fails. """ result = {} @@ -191,22 +195,15 @@ def check_required_by(requirements, parameters, options_context=None): for (key, value) in requirements.items(): if key not in parameters or parameters[key] is None: continue - result[key] = [] # Support strings (single-item lists) if isinstance(value, string_types): value = [value] - for required in value: - if required not in parameters or parameters[required] is None: - result[key].append(required) - - if result: - for key, missing in result.items(): - if len(missing) > 0: - msg = "missing parameter(s) required by '%s': %s" % (key, ', '.join(missing)) - if options_context: - msg = "{0} found in {1}".format(msg, " -> ".join(options_context)) - raise TypeError(to_native(msg)) + if missing := [required for required in value if required not in parameters or parameters[required] is None]: + msg = f"missing parameter(s) required by '{key}': {', '.join(missing)}" + if options_context: + msg = f"{msg} found in {' -> '.join(options_context)}" + raise TypeError(to_native(msg)) return result @@ -416,7 +413,7 @@ def check_type_dict(value): Raises :class:`TypeError` if unable to convert to a dict - :arg value: Dict or string to convert to a dict. Accepts ``k1=v2, k2=v2``. + :arg value: Dict or string to convert to a dict. Accepts ``k1=v2, k2=v2`` or ``k1=v2 k2=v2``. 
:returns: value converted to a dictionary """ @@ -428,10 +425,14 @@ def check_type_dict(value): try: return json.loads(value) except Exception: - (result, exc) = safe_eval(value, dict(), include_exceptions=True) - if exc is not None: - raise TypeError('unable to evaluate string as dictionary') - return result + try: + result = literal_eval(value) + except Exception: + pass + else: + if isinstance(result, dict): + return result + raise TypeError('unable to evaluate string as dictionary') elif '=' in value: fields = [] field_buffer = [] @@ -458,7 +459,11 @@ def check_type_dict(value): field = ''.join(field_buffer) if field: fields.append(field) - return dict(x.split("=", 1) for x in fields) + try: + return dict(x.split("=", 1) for x in fields) + except ValueError: + # no "=" to split on: "k1=v1, k2" + raise TypeError('unable to evaluate string in the "key=value" format as dictionary') else: raise TypeError("dictionary requested, could not parse JSON or key=value") @@ -494,16 +499,15 @@ def check_type_int(value): :return: int of given value """ - if isinstance(value, integer_types): - return value - - if isinstance(value, string_types): + if not isinstance(value, int): try: - return int(value) - except ValueError: - pass - - raise TypeError('%s cannot be converted to an int' % type(value)) + if (decimal_value := decimal.Decimal(value)) != (int_value := int(decimal_value)): + raise ValueError("Significant decimal part found") + else: + value = int_value + except (decimal.DecimalException, TypeError, ValueError) as e: + raise TypeError(f'"{value!r}" cannot be converted to an int') from e + return value def check_type_float(value): @@ -515,16 +519,12 @@ def check_type_float(value): :returns: float of given value. """ - if isinstance(value, float): - return value - - if isinstance(value, (binary_type, text_type, int)): + if not isinstance(value, float): try: - return float(value) - except ValueError: - pass - - raise TypeError('%s cannot be converted to a float' % type(value)) + value = float(value) + except (TypeError, ValueError) as e: + raise TypeError(f'{type(value)} cannot be converted to a float') + return value def check_type_path(value,): @@ -543,7 +543,7 @@ def check_type_raw(value): def check_type_bytes(value): """Convert a human-readable string value to bytes - Raises :class:`TypeError` if unable to covert the value + Raises :class:`TypeError` if unable to convert the value """ try: return human_to_bytes(value) @@ -556,7 +556,7 @@ def check_type_bits(value): Example: ``check_type_bits('1Mb')`` returns integer 1048576. - Raises :class:`TypeError` if unable to covert the value. + Raises :class:`TypeError` if unable to convert the value. """ try: return human_to_bytes(value, isbits=True) @@ -568,7 +568,7 @@ def check_type_jsonarg(value): """Return a jsonified string. 
Sometimes the controller turns a json string into a dict/list so transform it back into json here - Raises :class:`TypeError` if unable to covert the value + Raises :class:`TypeError` if unable to convert the value """ if isinstance(value, (text_type, binary_type)): diff --git a/lib/ansible/module_utils/common/warnings.py b/lib/ansible/module_utils/common/warnings.py index 9423e6a429b..14fe516cf5b 100644 --- a/lib/ansible/module_utils/common/warnings.py +++ b/lib/ansible/module_utils/common/warnings.py @@ -2,8 +2,7 @@ # Copyright (c) 2019 Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.six import string_types diff --git a/lib/ansible/module_utils/common/yaml.py b/lib/ansible/module_utils/common/yaml.py index b4d766bbee2..2e1ee52dc0b 100644 --- a/lib/ansible/module_utils/common/yaml.py +++ b/lib/ansible/module_utils/common/yaml.py @@ -6,8 +6,7 @@ This file provides ease of use shortcuts for loading and dumping YAML, preferring the YAML compiled C extensions to reduce duplicated code. """ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from functools import partial as _partial diff --git a/lib/ansible/module_utils/compat/_selectors2.py b/lib/ansible/module_utils/compat/_selectors2.py deleted file mode 100644 index 4a4fcc3212b..00000000000 --- a/lib/ansible/module_utils/compat/_selectors2.py +++ /dev/null @@ -1,655 +0,0 @@ -# This file is from the selectors2.py package. It backports the PSF Licensed -# selectors module from the Python-3.5 stdlib to older versions of Python. -# The author, Seth Michael Larson, dual licenses his modifications under the -# PSF License and MIT License: -# https://github.com/SethMichaelLarson/selectors2#license -# -# Copyright (c) 2016 Seth Michael Larson -# -# PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0) -# MIT License (see licenses/MIT-license.txt or https://opensource.org/licenses/MIT) -# - - -# Backport of selectors.py from Python 3.5+ to support Python < 3.4 -# Also has the behavior specified in PEP 475 which is to retry syscalls -# in the case of an EINTR error. This module is required because selectors34 -# does not follow this behavior and instead returns that no file descriptor -# events have occurred rather than retry the syscall. The decision to drop -# support for select.devpoll is made to maintain 100% test coverage. - -import errno -import math -import select -import socket -import sys -import time -from collections import namedtuple -from ansible.module_utils.six.moves.collections_abc import Mapping - -try: - monotonic = time.monotonic -except (AttributeError, ImportError): # Python 3.3< - monotonic = time.time - -__author__ = 'Seth Michael Larson' -__email__ = 'sethmichaellarson@protonmail.com' -__version__ = '1.1.1' -__license__ = 'MIT' - -__all__ = [ - 'EVENT_READ', - 'EVENT_WRITE', - 'SelectorError', - 'SelectorKey', - 'DefaultSelector' -] - -EVENT_READ = (1 << 0) -EVENT_WRITE = (1 << 1) - -HAS_SELECT = True # Variable that shows whether the platform has a selector. -_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None. 
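For reference, the stdlib selectors module that replaces this vendored backport already retries syscalls on EINTR per PEP 475; a minimal usage sketch (the socketpair is illustrative):

import selectors
import socket

sel = selectors.DefaultSelector()  # picks epoll/kqueue/poll/select per platform
r, w = socket.socketpair()
sel.register(r, selectors.EVENT_READ)
w.send(b'ping')
for key, events in sel.select(timeout=1):
    print(key.fileobj.recv(16), events)  # b'ping', EVENT_READ
sel.close()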
- - class SelectorError(Exception): - def __init__(self, errcode): - super(SelectorError, self).__init__() - self.errno = errcode - - def __repr__(self): - return "<SelectorError errno={0}>".format(self.errno) - - def __str__(self): - return self.__repr__() - - def _fileobj_to_fd(fileobj): - """ Return a file descriptor from a file object. If - given an integer will simply return that integer back. """ - if isinstance(fileobj, int): - fd = fileobj - else: - try: - fd = int(fileobj.fileno()) - except (AttributeError, TypeError, ValueError): - raise ValueError("Invalid file object: {0!r}".format(fileobj)) - if fd < 0: - raise ValueError("Invalid file descriptor: {0}".format(fd)) - return fd - - # Python 3.5 uses a more direct route to wrap system calls to increase speed. -if sys.version_info >= (3, 5): - def _syscall_wrapper(func, dummy, *args, **kwargs): - """ This is the short-circuit version of the below logic - because in Python 3.5+ all selectors restart system calls. """ - try: - return func(*args, **kwargs) - except (OSError, IOError, select.error) as e: - errcode = None - if hasattr(e, "errno"): - errcode = e.errno - elif hasattr(e, "args"): - errcode = e.args[0] - raise SelectorError(errcode) -else: - def _syscall_wrapper(func, recalc_timeout, *args, **kwargs): - """ Wrapper function for syscalls that could fail due to EINTR. - All functions should be retried if there is time left in the timeout - in accordance with PEP 475. """ - timeout = kwargs.get("timeout", None) - if timeout is None: - expires = None - recalc_timeout = False - else: - timeout = float(timeout) - if timeout < 0.0: # Timeout less than 0 treated as no timeout. - expires = None - else: - expires = monotonic() + timeout - - args = list(args) - if recalc_timeout and "timeout" not in kwargs: - raise ValueError( - "Timeout must be in args or kwargs to be recalculated") - - result = _SYSCALL_SENTINEL - while result is _SYSCALL_SENTINEL: - try: - result = func(*args, **kwargs) - # OSError is thrown by select.select - # IOError is thrown by select.epoll.poll - # select.error is thrown by select.poll.poll - # Aren't we thankful for Python 3.x rework for exceptions? - except (OSError, IOError, select.error) as e: - # select.error wasn't a subclass of OSError in the past. - errcode = None - if hasattr(e, "errno"): - errcode = e.errno - elif hasattr(e, "args"): - errcode = e.args[0] - - # Also test for the Windows equivalent of EINTR.
- is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and - errcode == errno.WSAEINTR)) - - if is_interrupt: - if expires is not None: - current_time = monotonic() - if current_time > expires: - raise OSError(errno.ETIMEDOUT) - if recalc_timeout: - if "timeout" in kwargs: - kwargs["timeout"] = expires - current_time - continue - if errcode: - raise SelectorError(errcode) - else: - raise - return result - - -SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data']) - - -class _SelectorMapping(Mapping): - """ Mapping of file objects to selector keys """ - - def __init__(self, selector): - self._selector = selector - - def __len__(self): - return len(self._selector._fd_to_key) - - def __getitem__(self, fileobj): - try: - fd = self._selector._fileobj_lookup(fileobj) - return self._selector._fd_to_key[fd] - except KeyError: - raise KeyError("{0!r} is not registered.".format(fileobj)) - - def __iter__(self): - return iter(self._selector._fd_to_key) - - -class BaseSelector(object): - """ Abstract Selector class - - A selector supports registering file objects to be monitored - for specific I/O events. - - A file object is a file descriptor or any object with a - `fileno()` method. An arbitrary object can be attached to the - file object which can be used for example to store context info, - a callback, etc. - - A selector can use various implementations (select(), poll(), epoll(), - and kqueue()) depending on the platform. The 'DefaultSelector' class uses - the most efficient implementation for the current platform. - """ - def __init__(self): - # Maps file descriptors to keys. - self._fd_to_key = {} - - # Read-only mapping returned by get_map() - self._map = _SelectorMapping(self) - - def _fileobj_lookup(self, fileobj): - """ Return a file descriptor from a file object. - This wraps _fileobj_to_fd() to do an exhaustive - search in case the object is invalid but we still - have it in our map. Used by unregister() so we can - unregister an object that was previously registered - even if it is closed. It is also used by _SelectorMapping - """ - try: - return _fileobj_to_fd(fileobj) - except ValueError: - - # Search through all our mapped keys. - for key in self._fd_to_key.values(): - if key.fileobj is fileobj: - return key.fd - - # Raise ValueError after all. - raise - - def register(self, fileobj, events, data=None): - """ Register a file object for a set of events to monitor. """ - if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)): - raise ValueError("Invalid events: {0!r}".format(events)) - - key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data) - - if key.fd in self._fd_to_key: - raise KeyError("{0!r} (FD {1}) is already registered" - .format(fileobj, key.fd)) - - self._fd_to_key[key.fd] = key - return key - - def unregister(self, fileobj): - """ Unregister a file object from being monitored. """ - try: - key = self._fd_to_key.pop(self._fileobj_lookup(fileobj)) - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - - # Getting the fileno of a closed socket on Windows errors with EBADF. - except socket.error as err: - if err.errno != errno.EBADF: - raise - else: - for key in self._fd_to_key.values(): - if key.fileobj is fileobj: - self._fd_to_key.pop(key.fd) - break - else: - raise KeyError("{0!r} is not registered".format(fileobj)) - return key - - def modify(self, fileobj, events, data=None): - """ Change a registered file object monitored events and data. 
""" - # NOTE: Some subclasses optimize this operation even further. - try: - key = self._fd_to_key[self._fileobj_lookup(fileobj)] - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - - if events != key.events: - self.unregister(fileobj) - key = self.register(fileobj, events, data) - - elif data != key.data: - # Use a shortcut to update the data. - key = key._replace(data=data) - self._fd_to_key[key.fd] = key - - return key - - def select(self, timeout=None): - """ Perform the actual selection until some monitored file objects - are ready or the timeout expires. """ - raise NotImplementedError() - - def close(self): - """ Close the selector. This must be called to ensure that all - underlying resources are freed. """ - self._fd_to_key.clear() - self._map = None - - def get_key(self, fileobj): - """ Return the key associated with a registered file object. """ - mapping = self.get_map() - if mapping is None: - raise RuntimeError("Selector is closed") - try: - return mapping[fileobj] - except KeyError: - raise KeyError("{0!r} is not registered".format(fileobj)) - - def get_map(self): - """ Return a mapping of file objects to selector keys """ - return self._map - - def _key_from_fd(self, fd): - """ Return the key associated to a given file descriptor - Return None if it is not found. """ - try: - return self._fd_to_key[fd] - except KeyError: - return None - - def __enter__(self): - return self - - def __exit__(self, *args): - self.close() - - -# Almost all platforms have select.select() -if hasattr(select, "select"): - class SelectSelector(BaseSelector): - """ Select-based selector. """ - def __init__(self): - super(SelectSelector, self).__init__() - self._readers = set() - self._writers = set() - - def register(self, fileobj, events, data=None): - key = super(SelectSelector, self).register(fileobj, events, data) - if events & EVENT_READ: - self._readers.add(key.fd) - if events & EVENT_WRITE: - self._writers.add(key.fd) - return key - - def unregister(self, fileobj): - key = super(SelectSelector, self).unregister(fileobj) - self._readers.discard(key.fd) - self._writers.discard(key.fd) - return key - - def _select(self, r, w, timeout=None): - """ Wrapper for select.select because timeout is a positional arg """ - return select.select(r, w, [], timeout) - - def select(self, timeout=None): - # Selecting on empty lists on Windows errors out. 
- if not len(self._readers) and not len(self._writers): - return [] - - timeout = None if timeout is None else max(timeout, 0.0) - ready = [] - r, w, dummy = _syscall_wrapper(self._select, True, self._readers, - self._writers, timeout=timeout) - r = set(r) - w = set(w) - for fd in r | w: - events = 0 - if fd in r: - events |= EVENT_READ - if fd in w: - events |= EVENT_WRITE - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - return ready - - __all__.append('SelectSelector') - - -if hasattr(select, "poll"): - class PollSelector(BaseSelector): - """ Poll-based selector """ - def __init__(self): - super(PollSelector, self).__init__() - self._poll = select.poll() - - def register(self, fileobj, events, data=None): - key = super(PollSelector, self).register(fileobj, events, data) - event_mask = 0 - if events & EVENT_READ: - event_mask |= select.POLLIN - if events & EVENT_WRITE: - event_mask |= select.POLLOUT - self._poll.register(key.fd, event_mask) - return key - - def unregister(self, fileobj): - key = super(PollSelector, self).unregister(fileobj) - self._poll.unregister(key.fd) - return key - - def _wrap_poll(self, timeout=None): - """ Wrapper function for select.poll.poll() so that - _syscall_wrapper can work with only seconds. """ - if timeout is not None: - if timeout <= 0: - timeout = 0 - else: - # select.poll.poll() has a resolution of 1 millisecond, - # round away from zero to wait *at least* timeout seconds. - timeout = math.ceil(timeout * 1e3) - - result = self._poll.poll(timeout) - return result - - def select(self, timeout=None): - ready = [] - fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout) - for fd, event_mask in fd_events: - events = 0 - if event_mask & ~select.POLLIN: - events |= EVENT_WRITE - if event_mask & ~select.POLLOUT: - events |= EVENT_READ - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - - return ready - - __all__.append('PollSelector') - -if hasattr(select, "epoll"): - class EpollSelector(BaseSelector): - """ Epoll-based selector """ - def __init__(self): - super(EpollSelector, self).__init__() - self._epoll = select.epoll() - - def fileno(self): - return self._epoll.fileno() - - def register(self, fileobj, events, data=None): - key = super(EpollSelector, self).register(fileobj, events, data) - events_mask = 0 - if events & EVENT_READ: - events_mask |= select.EPOLLIN - if events & EVENT_WRITE: - events_mask |= select.EPOLLOUT - _syscall_wrapper(self._epoll.register, False, key.fd, events_mask) - return key - - def unregister(self, fileobj): - key = super(EpollSelector, self).unregister(fileobj) - try: - _syscall_wrapper(self._epoll.unregister, False, key.fd) - except SelectorError: - # This can occur when the fd was closed since registry. - pass - return key - - def select(self, timeout=None): - if timeout is not None: - if timeout <= 0: - timeout = 0.0 - else: - # select.epoll.poll() has a resolution of 1 millisecond - # but luckily takes seconds so we don't need a wrapper - # like PollSelector. Just for better rounding. - timeout = math.ceil(timeout * 1e3) * 1e-3 - timeout = float(timeout) - else: - timeout = -1.0 # epoll.poll() must have a float. - - # We always want at least 1 to ensure that select can be called - # with no file descriptors registered. Otherwise will fail. 
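One subtlety in the poll-based translation above: readiness is tested with "& ~POLLIN" / "& ~POLLOUT", so error bits such as POLLERR or POLLHUP mark the descriptor as both readable and writable and the caller discovers the failure on its next read or write. A POSIX-only sketch of that mapping:

    import select

    EVENT_READ = 1 << 0
    EVENT_WRITE = 1 << 1


    def events_from_poll_mask(event_mask):
        """Translate a poll() revents mask the way the deleted selector does:
        any bit besides POLLIN marks the fd writable, any bit besides POLLOUT
        marks it readable, so POLLERR/POLLHUP surface as both."""
        events = 0
        if event_mask & ~select.POLLIN:
            events |= EVENT_WRITE
        if event_mask & ~select.POLLOUT:
            events |= EVENT_READ
        return events


    assert events_from_poll_mask(select.POLLIN) == EVENT_READ
    assert events_from_poll_mask(select.POLLOUT) == EVENT_WRITE
    assert events_from_poll_mask(select.POLLERR) == EVENT_READ | EVENT_WRITE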
- max_events = max(len(self._fd_to_key), 1) - - ready = [] - fd_events = _syscall_wrapper(self._epoll.poll, True, - timeout=timeout, - maxevents=max_events) - for fd, event_mask in fd_events: - events = 0 - if event_mask & ~select.EPOLLIN: - events |= EVENT_WRITE - if event_mask & ~select.EPOLLOUT: - events |= EVENT_READ - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - return ready - - def close(self): - self._epoll.close() - super(EpollSelector, self).close() - - __all__.append('EpollSelector') - - -if hasattr(select, "devpoll"): - class DevpollSelector(BaseSelector): - """Solaris /dev/poll selector.""" - - def __init__(self): - super(DevpollSelector, self).__init__() - self._devpoll = select.devpoll() - - def fileno(self): - return self._devpoll.fileno() - - def register(self, fileobj, events, data=None): - key = super(DevpollSelector, self).register(fileobj, events, data) - poll_events = 0 - if events & EVENT_READ: - poll_events |= select.POLLIN - if events & EVENT_WRITE: - poll_events |= select.POLLOUT - self._devpoll.register(key.fd, poll_events) - return key - - def unregister(self, fileobj): - key = super(DevpollSelector, self).unregister(fileobj) - self._devpoll.unregister(key.fd) - return key - - def _wrap_poll(self, timeout=None): - """ Wrapper function for select.poll.poll() so that - _syscall_wrapper can work with only seconds. """ - if timeout is not None: - if timeout <= 0: - timeout = 0 - else: - # select.devpoll.poll() has a resolution of 1 millisecond, - # round away from zero to wait *at least* timeout seconds. - timeout = math.ceil(timeout * 1e3) - - result = self._devpoll.poll(timeout) - return result - - def select(self, timeout=None): - ready = [] - fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout) - for fd, event_mask in fd_events: - events = 0 - if event_mask & ~select.POLLIN: - events |= EVENT_WRITE - if event_mask & ~select.POLLOUT: - events |= EVENT_READ - - key = self._key_from_fd(fd) - if key: - ready.append((key, events & key.events)) - - return ready - - def close(self): - self._devpoll.close() - super(DevpollSelector, self).close() - - __all__.append('DevpollSelector') - - -if hasattr(select, "kqueue"): - class KqueueSelector(BaseSelector): - """ Kqueue / Kevent-based selector """ - def __init__(self): - super(KqueueSelector, self).__init__() - self._kqueue = select.kqueue() - - def fileno(self): - return self._kqueue.fileno() - - def register(self, fileobj, events, data=None): - key = super(KqueueSelector, self).register(fileobj, events, data) - if events & EVENT_READ: - kevent = select.kevent(key.fd, - select.KQ_FILTER_READ, - select.KQ_EV_ADD) - - _syscall_wrapper(self._wrap_control, False, [kevent], 0, 0) - - if events & EVENT_WRITE: - kevent = select.kevent(key.fd, - select.KQ_FILTER_WRITE, - select.KQ_EV_ADD) - - _syscall_wrapper(self._wrap_control, False, [kevent], 0, 0) - - return key - - def unregister(self, fileobj): - key = super(KqueueSelector, self).unregister(fileobj) - if key.events & EVENT_READ: - kevent = select.kevent(key.fd, - select.KQ_FILTER_READ, - select.KQ_EV_DELETE) - try: - _syscall_wrapper(self._wrap_control, False, [kevent], 0, 0) - except SelectorError: - pass - if key.events & EVENT_WRITE: - kevent = select.kevent(key.fd, - select.KQ_FILTER_WRITE, - select.KQ_EV_DELETE) - try: - _syscall_wrapper(self._wrap_control, False, [kevent], 0, 0) - except SelectorError: - pass - - return key - - def select(self, timeout=None): - if timeout is not None: - timeout = max(timeout, 0) - 
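Both poll-style selectors above round the float timeout up to whole milliseconds with math.ceil so the kernel waits at least as long as requested; epoll gets the same rounding, converted back to seconds. A tiny illustrative helper showing the conversion:

    import math


    def poll_timeout_ms(timeout):
        """Convert a float timeout in seconds to poll()-style milliseconds.
        None means wait forever; negative values collapse to 0 (no wait);
        math.ceil rounds up so we never wait *less* than requested."""
        if timeout is None:
            return None
        if timeout <= 0:
            return 0
        return int(math.ceil(timeout * 1e3))


    assert poll_timeout_ms(None) is None
    assert poll_timeout_ms(-1) == 0
    assert poll_timeout_ms(0.0101) == 11  # rounded up, not truncated to 10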
- max_events = len(self._fd_to_key) * 2 - ready_fds = {} - - kevent_list = _syscall_wrapper(self._wrap_control, True, - None, max_events, timeout=timeout) - - for kevent in kevent_list: - fd = kevent.ident - event_mask = kevent.filter - events = 0 - if event_mask == select.KQ_FILTER_READ: - events |= EVENT_READ - if event_mask == select.KQ_FILTER_WRITE: - events |= EVENT_WRITE - - key = self._key_from_fd(fd) - if key: - if key.fd not in ready_fds: - ready_fds[key.fd] = (key, events & key.events) - else: - old_events = ready_fds[key.fd][1] - ready_fds[key.fd] = (key, (events | old_events) & key.events) - - return list(ready_fds.values()) - - def close(self): - self._kqueue.close() - super(KqueueSelector, self).close() - - def _wrap_control(self, changelist, max_events, timeout): - return self._kqueue.control(changelist, max_events, timeout) - - __all__.append('KqueueSelector') - - -# Choose the best implementation, roughly: -# kqueue == epoll == devpoll > poll > select. -# select() also can't accept a FD > FD_SETSIZE (usually around 1024) -if 'KqueueSelector' in globals(): # Platform-specific: Mac OS and BSD - DefaultSelector = KqueueSelector -elif 'DevpollSelector' in globals(): - DefaultSelector = DevpollSelector -elif 'EpollSelector' in globals(): # Platform-specific: Linux - DefaultSelector = EpollSelector -elif 'PollSelector' in globals(): # Platform-specific: Linux - DefaultSelector = PollSelector -elif 'SelectSelector' in globals(): # Platform-specific: Windows - DefaultSelector = SelectSelector -else: # Platform-specific: AppEngine - def no_selector(dummy): - raise ValueError("Platform does not have a selector") - DefaultSelector = no_selector - HAS_SELECT = False diff --git a/lib/ansible/module_utils/compat/datetime.py b/lib/ansible/module_utils/compat/datetime.py new file mode 100644 index 00000000000..d3cdc0d3d38 --- /dev/null +++ b/lib/ansible/module_utils/compat/datetime.py @@ -0,0 +1,38 @@ +# Copyright (c) 2023 Ansible +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import annotations + +from ansible.module_utils.six import PY3 + +import datetime + + +if PY3: + UTC = datetime.timezone.utc +else: + _ZERO = datetime.timedelta(0) + + class _UTC(datetime.tzinfo): + __slots__ = () + + def utcoffset(self, dt): + return _ZERO + + def dst(self, dt): + return _ZERO + + def tzname(self, dt): + return "UTC" + + UTC = _UTC() + + +def utcfromtimestamp(timestamp): # type: (float) -> datetime.datetime + """Construct an aware UTC datetime from a POSIX timestamp.""" + return datetime.datetime.fromtimestamp(timestamp, UTC) + + +def utcnow(): # type: () -> datetime.datetime + """Construct an aware UTC datetime from time.time().""" + return datetime.datetime.now(UTC) diff --git a/lib/ansible/module_utils/compat/importlib.py b/lib/ansible/module_utils/compat/importlib.py deleted file mode 100644 index a3dca6b21c8..00000000000 --- a/lib/ansible/module_utils/compat/importlib.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) 2020 Matt Martz -# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import sys - -try: - from importlib import import_module # pylint: disable=unused-import -except ImportError: - # importlib.import_module returns the tail - # whereas __import__ returns the head - # compat to work like importlib.import_module - def 
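The new compat/datetime.py above replaces naive datetime.datetime.utcnow()/utcfromtimestamp(), which are deprecated as of Python 3.12, with timezone-aware equivalents. Typical use of the added helpers:

    from ansible.module_utils.compat.datetime import UTC, utcfromtimestamp, utcnow

    now = utcnow()
    assert now.tzinfo is UTC  # aware, unlike the deprecated datetime.utcnow()

    epoch = utcfromtimestamp(0)
    assert epoch.isoformat() == "1970-01-01T00:00:00+00:00"

    # Aware datetimes subtract safely along the DST-free UTC timeline.
    age = utcnow() - epoch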
import_module(name): # type: ignore[misc] - __import__(name) - return sys.modules[name] diff --git a/lib/ansible/module_utils/compat/paramiko.py b/lib/ansible/module_utils/compat/paramiko.py index 095dfa50dde..302309cdaa8 100644 --- a/lib/ansible/module_utils/compat/paramiko.py +++ b/lib/ansible/module_utils/compat/paramiko.py @@ -2,8 +2,7 @@ # Copyright (c) 2019 Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import types # pylint: disable=unused-import import warnings @@ -12,7 +11,12 @@ PARAMIKO_IMPORT_ERR = None try: with warnings.catch_warnings(): - warnings.filterwarnings('ignore', message='Blowfish has been deprecated', category=UserWarning) + # Blowfish has been moved, but the deprecated import is used by paramiko versions older than 2.9.5. + # See: https://github.com/paramiko/paramiko/pull/2039 + warnings.filterwarnings('ignore', message='Blowfish has been ', category=UserWarning) + # TripleDES has been moved, but the deprecated import is used by paramiko versions older than 3.3.2 and 3.4.1. + # See: https://github.com/paramiko/paramiko/pull/2421 + warnings.filterwarnings('ignore', message='TripleDES has been ', category=UserWarning) import paramiko # pylint: disable=unused-import # paramiko and gssapi are incompatible and raise AttributeError not ImportError # When running in FIPS mode, cryptography raises InternalError diff --git a/lib/ansible/module_utils/compat/selectors.py b/lib/ansible/module_utils/compat/selectors.py deleted file mode 100644 index 0c4adc9f0f5..00000000000 --- a/lib/ansible/module_utils/compat/selectors.py +++ /dev/null @@ -1,56 +0,0 @@ -# (c) 2014, 2017 Toshio Kuratomi -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -''' -Compat selectors library. Python-3.5 has this builtin. The selectors2 -package exists on pypi to backport the functionality as far as python-2.6. 
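The paramiko hunk above trims each filtered message down to a bare prefix ("Blowfish has been ") because warnings.filterwarnings compiles the message argument as a regex matched at the start of the warning text, so one prefix covers both the old and new cryptography wordings. A self-contained demonstration:

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        # 'message' is compiled as a regex and matched with re.match, i.e.
        # anchored at the start, so one prefix covers several wordings.
        warnings.filterwarnings("ignore", message="Blowfish has been ", category=UserWarning)
        warnings.warn("Blowfish has been deprecated", UserWarning)
        warnings.warn("Blowfish has been moved", UserWarning)
        warnings.warn("TripleDES has been moved", UserWarning)

    assert [str(w.message) for w in caught] == ["TripleDES has been moved"]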
-''' -# The following makes it easier for us to script updates of the bundled code -_BUNDLED_METADATA = {"pypi_name": "selectors2", "version": "1.1.1", "version_constraints": ">1.0,<2.0"} - -# Added these bugfix commits from 2.1.0: -# * https://github.com/SethMichaelLarson/selectors2/commit/3bd74f2033363b606e1e849528ccaa76f5067590 -# Wrap kqueue.control so that timeout is a keyword arg -# * https://github.com/SethMichaelLarson/selectors2/commit/6f6a26f42086d8aab273b30be492beecb373646b -# Fix formatting of the kqueue.control patch for pylint -# * https://github.com/SethMichaelLarson/selectors2/commit/f0c2c6c66cfa7662bc52beaf4e2d65adfa25e189 -# Fix use of OSError exception for py3 and use the wrapper of kqueue.control so retries of -# interrupted syscalls work with kqueue - -import sys -import types # pylint: disable=unused-import - -try: - # Python 3.4+ - import selectors as _system_selectors -except ImportError: - try: - # backport package installed in the system - import selectors2 as _system_selectors # type: ignore[no-redef] - except ImportError: - _system_selectors = None # type: types.ModuleType | None # type: ignore[no-redef] - -if _system_selectors: - selectors = _system_selectors -else: - # Our bundled copy - from ansible.module_utils.compat import _selectors2 as selectors # type: ignore[no-redef] -sys.modules['ansible.module_utils.compat.selectors'] = selectors diff --git a/lib/ansible/module_utils/compat/selinux.py b/lib/ansible/module_utils/compat/selinux.py index 7191713c157..a7a19cfd63f 100644 --- a/lib/ansible/module_utils/compat/selinux.py +++ b/lib/ansible/module_utils/compat/selinux.py @@ -1,8 +1,7 @@ # Copyright: (c) 2021, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import sys @@ -12,8 +11,8 @@ from ctypes import CDLL, c_char_p, c_int, byref, POINTER, get_errno try: _selinux_lib = CDLL('libselinux.so.1', use_errno=True) -except OSError: - raise ImportError('unable to load libselinux.so') +except OSError as ex: + raise ImportError('unable to load libselinux.so') from ex def _module_setup(): @@ -62,7 +61,7 @@ def _module_setup(): fn.restype = cfg.get('restype', c_int) # just patch simple directly callable functions directly onto the module - if not fn.argtypes or not any(argtype for argtype in fn.argtypes if type(argtype) == base_ptr_type): + if not fn.argtypes or not any(argtype for argtype in fn.argtypes if type(argtype) is base_ptr_type): setattr(_thismod, fname, fn) continue diff --git a/lib/ansible/module_utils/compat/typing.py b/lib/ansible/module_utils/compat/typing.py index 94b1dee7cf7..d753f72b25e 100644 --- a/lib/ansible/module_utils/compat/typing.py +++ b/lib/ansible/module_utils/compat/typing.py @@ -1,6 +1,5 @@ """Compatibility layer for the `typing` module, providing all Python versions access to the newest type-hinting features.""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # pylint: disable=wildcard-import,unused-wildcard-import diff --git a/lib/ansible/module_utils/compat/version.py b/lib/ansible/module_utils/compat/version.py index f4db1ef3d52..61a39dfa2d9 100644 --- a/lib/ansible/module_utils/compat/version.py +++ b/lib/ansible/module_utils/compat/version.py @@ -25,8 +25,7 @@ Every version number class implements the following interface: of the same class, thus must follow the 
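The selinux hunk above chains the ImportError to the original OSError with "raise ... from ex", so a failed dlopen stays visible in the traceback instead of being swallowed. A sketch of the pattern, assuming libselinux may be absent on the running host:

    import ctypes


    def load_selinux():
        try:
            return ctypes.CDLL("libselinux.so.1", use_errno=True)
        except OSError as ex:
            # `from ex` records the OSError as __cause__, so tracebacks show
            # both the ImportError and the underlying loader failure.
            raise ImportError("unable to load libselinux.so") from ex


    try:
        load_selinux()
    except ImportError as err:
        print(err, "| caused by:", err.__cause__)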
same rules) """ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re diff --git a/lib/ansible/module_utils/connection.py b/lib/ansible/module_utils/connection.py index e4e507dbb3b..b6720125855 100644 --- a/lib/ansible/module_utils/connection.py +++ b/lib/ansible/module_utils/connection.py @@ -26,12 +26,11 @@ # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os -import hashlib import json +import pickle import socket import struct import traceback @@ -41,30 +40,14 @@ from functools import partial from ansible.module_utils.common.text.converters import to_bytes, to_text from ansible.module_utils.common.json import AnsibleJSONEncoder from ansible.module_utils.six import iteritems -from ansible.module_utils.six.moves import cPickle -def write_to_file_descriptor(fd, obj): - """Handles making sure all data is properly written to file descriptor fd. +def write_to_stream(stream, obj): + """Write a length+newline-prefixed pickled object to a stream.""" + src = pickle.dumps(obj) - In particular, that data is encoded in a character stream-friendly way and - that all data gets written before returning. - """ - # Need to force a protocol that is compatible with both py2 and py3. - # That would be protocol=2 or less. - # Also need to force a protocol that excludes certain control chars as - # stdin in this case is a pty and control chars will cause problems. - # that means only protocol=0 will work. - src = cPickle.dumps(obj, protocol=0) - - # raw \r characters will not survive pty round-trip - # They should be rehydrated on the receiving end - src = src.replace(b'\r', br'\r') - data_hash = to_bytes(hashlib.sha1(src).hexdigest()) - - os.write(fd, b'%d\n' % len(src)) - os.write(fd, src) - os.write(fd, b'%s\n' % data_hash) + stream.write(b'%d\n' % len(src)) + stream.write(src) def send_data(s, data): @@ -147,7 +130,7 @@ class Connection(object): data = json.dumps(req, cls=AnsibleJSONEncoder, vault_to_text=True) except TypeError as exc: raise ConnectionError( - "Failed to encode some variables as JSON for communication with ansible-connection. " + "Failed to encode some variables as JSON for communication with the persistent connection helper. 
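write_to_stream above frames each object as an ASCII byte count plus a newline, followed by the raw pickle, dropping the old protocol-0/pty escaping and the SHA-1 trailer. A matching reader for that framing might look like this (illustrative; not the receiving code in this diff):

    import io
    import pickle


    def write_to_stream(stream, obj):
        # Mirrors the framing in the diff: b"<len>\n" followed by the pickle.
        src = pickle.dumps(obj)
        stream.write(b"%d\n" % len(src))
        stream.write(src)


    def read_from_stream(stream):
        """Read one length-prefixed pickled object back off the stream."""
        length = int(stream.readline().decode("ascii"))  # the b"<len>\n" header
        payload = stream.read(length)                    # exactly the pickled bytes
        return pickle.loads(payload)                     # trusted peer only


    buf = io.BytesIO()
    write_to_stream(buf, {"changed": True})
    buf.seek(0)
    assert read_from_stream(buf) == {"changed": True}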
" "The original exception was: %s" % to_text(exc) ) @@ -177,7 +160,7 @@ class Connection(object): if response['id'] != reqid: raise ConnectionError('invalid json-rpc id received') if "result_type" in response: - response["result"] = cPickle.loads(to_bytes(response["result"])) + response["result"] = pickle.loads(to_bytes(response["result"], errors="surrogateescape")) return response diff --git a/lib/ansible/module_utils/csharp/Ansible.AccessToken.cs b/lib/ansible/module_utils/csharp/Ansible.AccessToken.cs index 48c4a197ae6..a7959efb305 100644 --- a/lib/ansible/module_utils/csharp/Ansible.AccessToken.cs +++ b/lib/ansible/module_utils/csharp/Ansible.AccessToken.cs @@ -2,7 +2,6 @@ using Microsoft.Win32.SafeHandles; using System; using System.Collections.Generic; using System.Linq; -using System.Runtime.ConstrainedExecution; using System.Runtime.InteropServices; using System.Security.Principal; using System.Text; @@ -123,7 +122,6 @@ namespace Ansible.AccessToken base.SetHandle(handle); } - [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)] protected override bool ReleaseHandle() { Marshal.FreeHGlobal(handle); @@ -247,7 +245,6 @@ namespace Ansible.AccessToken public SafeNativeHandle() : base(true) { } public SafeNativeHandle(IntPtr handle) : base(true) { this.handle = handle; } - [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)] protected override bool ReleaseHandle() { return NativeMethods.CloseHandle(handle); @@ -342,19 +339,47 @@ namespace Ansible.AccessToken public static IEnumerable EnumerateUserTokens(SecurityIdentifier sid, TokenAccessLevels access = TokenAccessLevels.Query) { + return EnumerateUserTokens(sid, access, (p, h) => true); + } + + public static IEnumerable EnumerateUserTokens( + SecurityIdentifier sid, + TokenAccessLevels access, + Func processFilter) + { + // We always need the Query access level so we can query the TokenUser + access |= TokenAccessLevels.Query; + foreach (System.Diagnostics.Process process in System.Diagnostics.Process.GetProcesses()) { - // We always need the Query access level so we can query the TokenUser using (process) - using (SafeNativeHandle hToken = TryOpenAccessToken(process, access | TokenAccessLevels.Query)) + using (SafeNativeHandle processHandle = NativeMethods.OpenProcess(ProcessAccessFlags.QueryInformation, false, (UInt32)process.Id)) { - if (hToken == null) + if (processHandle.IsInvalid) + { continue; + } - if (!sid.Equals(GetTokenUser(hToken))) + if (!processFilter(process, processHandle)) + { continue; + } + + SafeNativeHandle accessToken; + if (!NativeMethods.OpenProcessToken(processHandle, access, out accessToken)) + { + continue; + } + + using (accessToken) + { + if (!sid.Equals(GetTokenUser(accessToken))) + { + continue; + } - yield return hToken; + yield return accessToken; + } } } } @@ -443,18 +468,5 @@ namespace Ansible.AccessToken for (int i = 0; i < array.Length; i++, ptrOffset = IntPtr.Add(ptrOffset, Marshal.SizeOf(typeof(T)))) array[i] = (T)Marshal.PtrToStructure(ptrOffset, typeof(T)); } - - private static SafeNativeHandle TryOpenAccessToken(System.Diagnostics.Process process, TokenAccessLevels access) - { - try - { - using (SafeNativeHandle hProcess = OpenProcess(process.Id, ProcessAccessFlags.QueryInformation, false)) - return OpenProcessToken(hProcess, access); - } - catch (Win32Exception) - { - return null; - } - } } } diff --git a/lib/ansible/module_utils/csharp/Ansible.Basic.cs b/lib/ansible/module_utils/csharp/Ansible.Basic.cs index 97f5f3e2d78..1095042fe17 100644 --- 
a/lib/ansible/module_utils/csharp/Ansible.Basic.cs +++ b/lib/ansible/module_utils/csharp/Ansible.Basic.cs @@ -49,6 +49,7 @@ namespace Ansible.Basic private static List BOOLEANS_TRUE = new List() { "y", "yes", "on", "1", "true", "t", "1.0" }; private static List BOOLEANS_FALSE = new List() { "n", "no", "off", "0", "false", "f", "0.0" }; + private bool ignoreUnknownOpts = false; private string remoteTmp = Path.GetTempPath(); private string tmpdir = null; private HashSet noLogValues = new HashSet(); @@ -60,23 +61,25 @@ namespace Ansible.Basic private Dictionary passVars = new Dictionary() { // null values means no mapping, not used in Ansible.Basic.AnsibleModule + // keep in sync with python counterpart in lib/ansible/module_utils/common/parameters.py { "check_mode", "CheckMode" }, { "debug", "DebugMode" }, { "diff", "DiffMode" }, { "keep_remote_files", "KeepRemoteFiles" }, + { "ignore_unknown_opts", "ignoreUnknownOpts" }, { "module_name", "ModuleName" }, { "no_log", "NoLog" }, { "remote_tmp", "remoteTmp" }, { "selinux_special_fs", null }, { "shell_executable", null }, { "socket", null }, - { "string_conversion_action", null }, { "syslog_facility", null }, + { "target_log_info", "TargetLogInfo"}, { "tmpdir", "tmpdir" }, { "verbosity", "Verbosity" }, { "version", "AnsibleVersion" }, }; - private List passBools = new List() { "check_mode", "debug", "diff", "keep_remote_files", "no_log" }; + private List passBools = new List() { "check_mode", "debug", "diff", "keep_remote_files", "ignore_unknown_opts", "no_log" }; private List passInts = new List() { "verbosity" }; private Dictionary> specDefaults = new Dictionary>() { @@ -125,6 +128,7 @@ namespace Ansible.Basic public bool KeepRemoteFiles { get; private set; } public string ModuleName { get; private set; } public bool NoLog { get; private set; } + public string TargetLogInfo { get; private set; } public int Verbosity { get; private set; } public string AnsibleVersion { get; private set; } @@ -257,6 +261,7 @@ namespace Ansible.Basic DiffMode = false; KeepRemoteFiles = false; ModuleName = "undefined win module"; + TargetLogInfo = ""; NoLog = (bool)argumentSpec["no_log"]; Verbosity = 0; AppDomain.CurrentDomain.ProcessExit += CleanupFiles; @@ -372,9 +377,20 @@ namespace Ansible.Basic logSource = "Application"; } } + + if (String.IsNullOrWhiteSpace(TargetLogInfo)) + { + message = String.Format("{0} - {1}", ModuleName, message); + } + else + { + message = String.Format("{0} {1} - {2}", ModuleName, TargetLogInfo, message); + } + if (sanitise) + { message = (string)RemoveNoLogValues(message, noLogValues); - message = String.Format("{0} - {1}", ModuleName, message); + } using (EventLog eventLog = new EventLog("Application")) { @@ -1008,7 +1024,16 @@ namespace Ansible.Basic foreach (DictionaryEntry entry in param) { string paramKey = (string)entry.Key; - if (!legalInputs.Contains(paramKey, StringComparer.OrdinalIgnoreCase)) + if (paramKey == "_ansible_exec_wrapper_warnings") + { + // Special key used in module_powershell_wrapper to pass + // along any warnings that should be returned back to + // Ansible. 
+ removedParameters.Add(paramKey); + foreach (string warning in (IList)entry.Value) + Warn(warning); + } + else if (!legalInputs.Contains(paramKey, StringComparer.OrdinalIgnoreCase)) unsupportedParameters.Add(paramKey); else if (!legalInputs.Contains(paramKey)) // For backwards compatibility we do not care about the case but we need to warn the users as this will @@ -1043,7 +1068,7 @@ namespace Ansible.Basic foreach (string parameter in removedParameters) param.Remove(parameter); - if (unsupportedParameters.Count > 0) + if (unsupportedParameters.Count > 0 && !ignoreUnknownOpts) { legalInputs.RemoveAll(x => passVars.Keys.Contains(x.Replace("_ansible_", ""))); string msg = String.Format("Unsupported parameters for ({0}) module: {1}", ModuleName, String.Join(", ", unsupportedParameters)); diff --git a/lib/ansible/module_utils/csharp/Ansible.Become.cs b/lib/ansible/module_utils/csharp/Ansible.Become.cs index a6f645cabd3..08b73d404bf 100644 --- a/lib/ansible/module_utils/csharp/Ansible.Become.cs +++ b/lib/ansible/module_utils/csharp/Ansible.Become.cs @@ -4,7 +4,6 @@ using System.Collections; using System.Collections.Generic; using System.IO; using System.Linq; -using System.Runtime.ConstrainedExecution; using System.Runtime.InteropServices; using System.Security.AccessControl; using System.Security.Principal; @@ -94,10 +93,21 @@ namespace Ansible.Become CachedRemoteInteractive, CachedUnlock } + + [Flags] + public enum ProcessChildProcessPolicyFlags + { + None = 0x0, + NoChildProcessCreation = 0x1, + AuditNoChildProcessCreation = 0x2, + AllowSecureProcessCreation = 0x4, + } } internal class NativeMethods { + public const int ProcessChildProcessPolicy = 13; + [DllImport("advapi32.dll", SetLastError = true)] public static extern bool AllocateLocallyUniqueId( out Luid Luid); @@ -117,6 +127,13 @@ namespace Ansible.Become [DllImport("kernel32.dll")] public static extern UInt32 GetCurrentThreadId(); + [DllImport("kernel32.dll", SetLastError = true)] + public static extern bool GetProcessMitigationPolicy( + SafeNativeHandle hProcess, + int MitigationPolicy, + ref NativeHelpers.ProcessChildProcessPolicyFlags lpBuffer, + IntPtr dwLength); + [DllImport("user32.dll", SetLastError = true)] public static extern NoopSafeHandle GetProcessWindowStation(); @@ -175,7 +192,6 @@ namespace Ansible.Become { public SafeLsaHandle() : base(true) { } - [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)] protected override bool ReleaseHandle() { UInt32 res = NativeMethods.LsaDeregisterLogonProcess(handle); @@ -187,7 +203,6 @@ namespace Ansible.Become { public SafeLsaMemoryBuffer() : base(true) { } - [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)] protected override bool ReleaseHandle() { UInt32 res = NativeMethods.LsaFreeReturnBuffer(handle); @@ -200,7 +215,6 @@ namespace Ansible.Become public NoopSafeHandle() : base(IntPtr.Zero, false) { } public override bool IsInvalid { get { return false; } } - [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)] protected override bool ReleaseHandle() { return true; } } @@ -221,6 +235,7 @@ namespace Ansible.Become }; private static int WINDOWS_STATION_ALL_ACCESS = 0x000F037F; private static int DESKTOP_RIGHTS_ALL_ACCESS = 0x000F01FF; + private static bool _getProcessMitigationPolicySupported = true; public static Result CreateProcessAsUser(string username, string password, string command) { @@ -341,9 +356,9 @@ namespace Ansible.Become // account or have administrative rights on the become access token. 
// If we ultimately are becoming the SYSTEM account we want the token with the most privileges available. // https://github.com/ansible/ansible/issues/71453 - bool mostPrivileges = becomeSid == "S-1-5-18"; + bool usedForProcess = becomeSid == "S-1-5-18"; systemToken = GetPrimaryTokenForUser(new SecurityIdentifier("S-1-5-18"), - new List() { "SeTcbPrivilege" }, mostPrivileges); + new List() { "SeTcbPrivilege" }, usedForProcess); if (systemToken != null) { try @@ -429,8 +444,10 @@ namespace Ansible.Become return userTokens; } - private static SafeNativeHandle GetPrimaryTokenForUser(SecurityIdentifier sid, - List requiredPrivileges = null, bool mostPrivileges = false) + private static SafeNativeHandle GetPrimaryTokenForUser( + SecurityIdentifier sid, + List requiredPrivileges = null, + bool usedForProcess = false) { // According to CreateProcessWithTokenW we require a token with // TOKEN_QUERY, TOKEN_DUPLICATE and TOKEN_ASSIGN_PRIMARY @@ -443,7 +460,16 @@ namespace Ansible.Become SafeNativeHandle userToken = null; int privilegeCount = 0; - foreach (SafeNativeHandle hToken in TokenUtil.EnumerateUserTokens(sid, dwAccess)) + // If we are using this token for the process, we need to check the + // process mitigation policy allows child processes to be created. + var processFilter = usedForProcess + ? (Func)((p, t) => + { + return GetProcessChildProcessPolicyFlags(t) == NativeHelpers.ProcessChildProcessPolicyFlags.None; + }) + : ((p, t) => true); + + foreach (SafeNativeHandle hToken in TokenUtil.EnumerateUserTokens(sid, dwAccess, processFilter)) { // Filter out any Network logon tokens, using become with that is useless when S4U // can give us a Batch logon @@ -454,7 +480,7 @@ namespace Ansible.Become List actualPrivileges = TokenUtil.GetTokenPrivileges(hToken).Select(x => x.Name).ToList(); // If the token has less or the same number of privileges than the current token, skip it. - if (mostPrivileges && privilegeCount >= actualPrivileges.Count) + if (usedForProcess && privilegeCount >= actualPrivileges.Count) continue; // Check that the required privileges are on the token @@ -479,7 +505,7 @@ namespace Ansible.Become // If we don't care about getting the token with the most privileges, escape the loop as we already // have a token. - if (!mostPrivileges) + if (!usedForProcess) break; } @@ -596,6 +622,35 @@ namespace Ansible.Become return null; } + private static NativeHelpers.ProcessChildProcessPolicyFlags GetProcessChildProcessPolicyFlags(SafeNativeHandle processHandle) + { + // Because this is only used to check the policy, we ignore any + // errors and pretend that the policy is None. 
+ NativeHelpers.ProcessChildProcessPolicyFlags policy = NativeHelpers.ProcessChildProcessPolicyFlags.None; + + if (_getProcessMitigationPolicySupported) + { + try + { + if (NativeMethods.GetProcessMitigationPolicy( + processHandle, + NativeMethods.ProcessChildProcessPolicy, + ref policy, + (IntPtr)4)) + { + return policy; + } + } + catch (EntryPointNotFoundException) + { + // If the function is not available, we won't try to call it again + _getProcessMitigationPolicySupported = false; + } + } + + return policy; + } + private static NativeHelpers.SECURITY_LOGON_TYPE GetTokenLogonType(SafeNativeHandle hToken) { TokenStatistics stats = TokenUtil.GetTokenStatistics(hToken); @@ -652,4 +707,4 @@ namespace Ansible.Become { } } } -} +} \ No newline at end of file diff --git a/lib/ansible/module_utils/csharp/Ansible.Privilege.cs b/lib/ansible/module_utils/csharp/Ansible.Privilege.cs index 2c0b266bf71..9d5c0b17190 100644 --- a/lib/ansible/module_utils/csharp/Ansible.Privilege.cs +++ b/lib/ansible/module_utils/csharp/Ansible.Privilege.cs @@ -3,7 +3,6 @@ using System; using System.Collections; using System.Collections.Generic; using System.Linq; -using System.Runtime.ConstrainedExecution; using System.Runtime.InteropServices; using System.Security.Principal; using System.Text; @@ -92,7 +91,6 @@ namespace Ansible.Privilege { base.SetHandle(handle); } - [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)] protected override bool ReleaseHandle() { Marshal.FreeHGlobal(handle); @@ -104,7 +102,7 @@ namespace Ansible.Privilege { public SafeNativeHandle() : base(true) { } public SafeNativeHandle(IntPtr handle) : base(true) { this.handle = handle; } - [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)] + protected override bool ReleaseHandle() { return NativeMethods.CloseHandle(handle); diff --git a/lib/ansible/module_utils/csharp/Ansible.Process.cs b/lib/ansible/module_utils/csharp/Ansible.Process.cs index f4c68f0529e..a351dcd0493 100644 --- a/lib/ansible/module_utils/csharp/Ansible.Process.cs +++ b/lib/ansible/module_utils/csharp/Ansible.Process.cs @@ -3,7 +3,6 @@ using System; using System.Collections; using System.IO; using System.Linq; -using System.Runtime.ConstrainedExecution; using System.Runtime.InteropServices; using System.Text; using System.Threading; @@ -176,7 +175,6 @@ namespace Ansible.Process base.SetHandle(handle); } - [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)] protected override bool ReleaseHandle() { Marshal.FreeHGlobal(handle); @@ -399,7 +397,7 @@ namespace Ansible.Process internal static Result WaitProcess(SafeFileHandle stdoutRead, SafeFileHandle stdoutWrite, SafeFileHandle stderrRead, SafeFileHandle stderrWrite, FileStream stdinStream, byte[] stdin, IntPtr hProcess, string outputEncoding = null) { - // Default to using UTF-8 as the output encoding, this should be a sane default for most scenarios. + // Default to using UTF-8 as the output encoding, this should be a logical default for most scenarios. outputEncoding = String.IsNullOrEmpty(outputEncoding) ? "utf-8" : outputEncoding; Encoding encodingInstance = Encoding.GetEncoding(outputEncoding); diff --git a/lib/ansible/module_utils/distro/__init__.py b/lib/ansible/module_utils/distro/__init__.py index b70f29c9416..6cdb84ae505 100644 --- a/lib/ansible/module_utils/distro/__init__.py +++ b/lib/ansible/module_utils/distro/__init__.py @@ -15,15 +15,14 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type -''' +""" Compat distro library. -''' +""" +from __future__ import annotations + # The following makes it easier for us to script updates of the bundled code -_BUNDLED_METADATA = {"pypi_name": "distro", "version": "1.6.0"} +_BUNDLED_METADATA = {"pypi_name": "distro", "version": "1.9.0"} # The following additional changes have been made: # * Remove optparse since it is not needed for our use. diff --git a/lib/ansible/module_utils/distro/_distro.py b/lib/ansible/module_utils/distro/_distro.py index 19262a41db1..a67edae735c 100644 --- a/lib/ansible/module_utils/distro/_distro.py +++ b/lib/ansible/module_utils/distro/_distro.py @@ -1,4 +1,4 @@ -# Copyright 2015,2016,2017 Nir Cohen +# Copyright 2015-2021 Nir Cohen # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -30,6 +30,7 @@ Python 2.6 and removed in Python 3.8. Still, there are many cases in which access to OS distribution information is needed. See `Python issue 1322 `_ for more information. """ +from __future__ import annotations import argparse import json @@ -40,40 +41,39 @@ import shlex import subprocess import sys import warnings +from typing import ( + Any, + Callable, + Dict, + Iterable, + Optional, + Sequence, + TextIO, + Tuple, + Type, +) -__version__ = "1.6.0" - -# Use `if False` to avoid an ImportError on Python 2. After dropping Python 2 -# support, can use typing.TYPE_CHECKING instead. See: -# https://docs.python.org/3/library/typing.html#typing.TYPE_CHECKING -if False: # pragma: nocover - from typing import ( - Any, - Callable, - Dict, - Iterable, - Optional, - Sequence, - TextIO, - Tuple, - Type, - TypedDict, - Union, - ) +try: + from typing import TypedDict +except ImportError: + # Python 3.7 + TypedDict = dict - VersionDict = TypedDict( - "VersionDict", {"major": str, "minor": str, "build_number": str} - ) - InfoDict = TypedDict( - "InfoDict", - { - "id": str, - "version": str, - "version_parts": VersionDict, - "like": str, - "codename": str, - }, - ) +__version__ = "1.9.0" + + +class VersionDict(TypedDict): + major: str + minor: str + build_number: str + + +class InfoDict(TypedDict): + id: str + version: str + version_parts: VersionDict + like: str + codename: str _UNIXCONFDIR = os.environ.get("UNIXCONFDIR", "/etc") @@ -126,6 +126,27 @@ _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile( # Pattern for base file name of distro release file _DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$") +# Base file names to be looked up for if _UNIXCONFDIR is not readable. 
+_DISTRO_RELEASE_BASENAMES = [ + "SuSE-release", + "altlinux-release", + "arch-release", + "base-release", + "centos-release", + "fedora-release", + "gentoo-release", + "mageia-release", + "mandrake-release", + "mandriva-release", + "mandrivalinux-release", + "manjaro-release", + "oracle-release", + "redhat-release", + "rocky-release", + "sl-release", + "slackware-version", +] + # Base file names to be ignored when searching for distro release file _DISTRO_RELEASE_IGNORE_BASENAMES = ( "debian_version", @@ -135,11 +156,12 @@ _DISTRO_RELEASE_IGNORE_BASENAMES = ( "system-release", "plesk-release", "iredmail-release", + "board-release", + "ec2_version", ) -def linux_distribution(full_distribution_name=True): - # type: (bool) -> Tuple[str, str, str] +def linux_distribution(full_distribution_name: bool = True) -> Tuple[str, str, str]: """ .. deprecated:: 1.6.0 @@ -182,8 +204,7 @@ def linux_distribution(full_distribution_name=True): return _distro.linux_distribution(full_distribution_name) -def id(): - # type: () -> str +def id() -> str: """ Return the distro ID of the current distribution, as a machine-readable string. @@ -227,7 +248,9 @@ def id(): "freebsd" FreeBSD "midnightbsd" MidnightBSD "rocky" Rocky Linux + "aix" AIX "guix" Guix System + "altlinux" ALT Linux ============== ========================================= If you have a need to get distros for reliable IDs added into this set, @@ -265,8 +288,7 @@ def id(): return _distro.id() -def name(pretty=False): - # type: (bool) -> str +def name(pretty: bool = False) -> str: """ Return the name of the current OS distribution, as a human-readable string. @@ -305,8 +327,7 @@ def name(pretty=False): return _distro.name(pretty) -def version(pretty=False, best=False): - # type: (bool, bool) -> str +def version(pretty: bool = False, best: bool = False) -> str: """ Return the version of the current OS distribution, as a human-readable string. @@ -354,8 +375,7 @@ def version(pretty=False, best=False): return _distro.version(pretty, best) -def version_parts(best=False): - # type: (bool) -> Tuple[str, str, str] +def version_parts(best: bool = False) -> Tuple[str, str, str]: """ Return the version of the current OS distribution as a tuple ``(major, minor, build_number)`` with items as follows: @@ -372,8 +392,7 @@ def version_parts(best=False): return _distro.version_parts(best) -def major_version(best=False): - # type: (bool) -> str +def major_version(best: bool = False) -> str: """ Return the major version of the current OS distribution, as a string, if provided. @@ -386,8 +405,7 @@ def major_version(best=False): return _distro.major_version(best) -def minor_version(best=False): - # type: (bool) -> str +def minor_version(best: bool = False) -> str: """ Return the minor version of the current OS distribution, as a string, if provided. @@ -400,8 +418,7 @@ def minor_version(best=False): return _distro.minor_version(best) -def build_number(best=False): - # type: (bool) -> str +def build_number(best: bool = False) -> str: """ Return the build number of the current OS distribution, as a string, if provided. 
@@ -414,8 +431,7 @@ def build_number(best=False): return _distro.build_number(best) -def like(): - # type: () -> str +def like() -> str: """ Return a space-separated list of distro IDs of distributions that are closely related to the current OS distribution in regards to packaging @@ -432,8 +448,7 @@ def like(): return _distro.like() -def codename(): - # type: () -> str +def codename() -> str: """ Return the codename for the release of the current OS distribution, as a string. @@ -457,8 +472,7 @@ def codename(): return _distro.codename() -def info(pretty=False, best=False): - # type: (bool, bool) -> InfoDict +def info(pretty: bool = False, best: bool = False) -> InfoDict: """ Return certain machine-readable information items about the current OS distribution in a dictionary, as shown in the following example: @@ -502,8 +516,7 @@ def info(pretty=False, best=False): return _distro.info(pretty, best) -def os_release_info(): - # type: () -> Dict[str, str] +def os_release_info() -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the os-release file data source of the current OS distribution. @@ -513,8 +526,7 @@ def os_release_info(): return _distro.os_release_info() -def lsb_release_info(): - # type: () -> Dict[str, str] +def lsb_release_info() -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the lsb_release command data source of the current OS distribution. @@ -525,8 +537,7 @@ def lsb_release_info(): return _distro.lsb_release_info() -def distro_release_info(): - # type: () -> Dict[str, str] +def distro_release_info() -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the distro release file data source of the current OS distribution. @@ -536,8 +547,7 @@ def distro_release_info(): return _distro.distro_release_info() -def uname_info(): - # type: () -> Dict[str, str] +def uname_info() -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the distro release file data source of the current OS distribution. @@ -545,8 +555,7 @@ def uname_info(): return _distro.uname_info() -def os_release_attr(attribute): - # type: (str) -> str +def os_release_attr(attribute: str) -> str: """ Return a single named information item from the os-release file data source of the current OS distribution. @@ -565,8 +574,7 @@ def os_release_attr(attribute): return _distro.os_release_attr(attribute) -def lsb_release_attr(attribute): - # type: (str) -> str +def lsb_release_attr(attribute: str) -> str: """ Return a single named information item from the lsb_release command output data source of the current OS distribution. @@ -586,8 +594,7 @@ def lsb_release_attr(attribute): return _distro.lsb_release_attr(attribute) -def distro_release_attr(attribute): - # type: (str) -> str +def distro_release_attr(attribute: str) -> str: """ Return a single named information item from the distro release file data source of the current OS distribution. @@ -606,8 +613,7 @@ def distro_release_attr(attribute): return _distro.distro_release_attr(attribute) -def uname_attr(attribute): - # type: (str) -> str +def uname_attr(attribute: str) -> str: """ Return a single named information item from the distro release file data source of the current OS distribution. 
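The module-level accessors above all delegate to a lazily built global LinuxDistribution instance; typical consumption of the bundled package looks like:

    from ansible.module_utils import distro

    print(distro.id())                # e.g. "ubuntu" (machine-readable ID)
    print(distro.version(best=True))  # most precise version the sources offer
    print(distro.like())              # space-separated IDs of related distros

    info = distro.info(pretty=True)   # InfoDict with id/version/codename/...
    print(info["version_parts"]["major"])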
@@ -628,25 +634,23 @@ try: from functools import cached_property except ImportError: # Python < 3.8 - class cached_property(object): # type: ignore + class cached_property: # type: ignore """A version of @property which caches the value. On access, it calls the underlying function and sets the value in `__dict__` so future accesses will not re-call the property. """ - def __init__(self, f): - # type: (Callable[[Any], Any]) -> None + def __init__(self, f: Callable[[Any], Any]) -> None: self._fname = f.__name__ self._f = f - def __get__(self, obj, owner): - # type: (Any, Type[Any]) -> Any - assert obj is not None, "call {} on an instance".format(self._fname) + def __get__(self, obj: Any, owner: Type[Any]) -> Any: + assert obj is not None, f"call {self._fname} on an instance" ret = obj.__dict__[self._fname] = self._f(obj) return ret -class LinuxDistribution(object): +class LinuxDistribution: """ Provides information about a OS distribution. @@ -666,13 +670,13 @@ class LinuxDistribution(object): def __init__( self, - include_lsb=True, - os_release_file="", - distro_release_file="", - include_uname=True, - root_dir=None, - ): - # type: (bool, str, str, bool, Optional[str]) -> None + include_lsb: Optional[bool] = None, + os_release_file: str = "", + distro_release_file: str = "", + include_uname: Optional[bool] = None, + root_dir: Optional[str] = None, + include_oslevel: Optional[bool] = None, + ) -> None: """ The initialization method of this class gathers information from the available data sources, and stores that in private instance attributes. @@ -712,7 +716,13 @@ class LinuxDistribution(object): be empty. * ``root_dir`` (string): The absolute path to the root directory to use - to find distro-related information files. + to find distro-related information files. Note that ``include_*`` + parameters must not be enabled in combination with ``root_dir``. + + * ``include_oslevel`` (bool): Controls whether (AIX) oslevel command + output is included as a data source. If the oslevel command is not + available in the program execution path the data source will be + empty. Public instance attributes: @@ -731,9 +741,20 @@ class LinuxDistribution(object): parameter. This controls whether the uname information will be loaded. + * ``include_oslevel`` (bool): The result of the ``include_oslevel`` + parameter. This controls whether (AIX) oslevel information will be + loaded. + + * ``root_dir`` (string): The result of the ``root_dir`` parameter. + The absolute path to the root directory to use to find distro-related + information files. + Raises: - * :py:exc:`IOError`: Some I/O issue with an os-release file or distro + * :py:exc:`ValueError`: Initialization parameters combination is not + supported. + + * :py:exc:`OSError`: Some I/O issue with an os-release file or distro release file. 
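The cached_property fallback above works because a descriptor defining only __get__ is a non-data descriptor: writing the computed value into the instance __dict__ under the same name means later lookups never reach the descriptor again. A standalone demonstration of that mechanism, mirroring the fallback rather than importing it:

    class cached_property:  # the pre-3.8 fallback pattern from the diff
        def __init__(self, f):
            self._fname = f.__name__
            self._f = f

        def __get__(self, obj, owner):
            assert obj is not None, f"call {self._fname} on an instance"
            # Storing the result under the same name shadows the descriptor:
            # instance attributes win over non-data descriptors on lookup.
            ret = obj.__dict__[self._fname] = self._f(obj)
            return ret


    class Probe:
        calls = 0

        @cached_property
        def value(self):
            Probe.calls += 1
            return 42


    p = Probe()
    assert (p.value, p.value) == (42, 42)
    assert Probe.calls == 1  # computed once, then served from __dict__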
* :py:exc:`UnicodeError`: A data source has unexpected characters or @@ -763,11 +784,24 @@ class LinuxDistribution(object): self.os_release_file = usr_lib_os_release_file self.distro_release_file = distro_release_file or "" # updated later - self.include_lsb = include_lsb - self.include_uname = include_uname - def __repr__(self): - # type: () -> str + is_root_dir_defined = root_dir is not None + if is_root_dir_defined and (include_lsb or include_uname or include_oslevel): + raise ValueError( + "Including subprocess data sources from specific root_dir is disallowed" + " to prevent false information" + ) + self.include_lsb = ( + include_lsb if include_lsb is not None else not is_root_dir_defined + ) + self.include_uname = ( + include_uname if include_uname is not None else not is_root_dir_defined + ) + self.include_oslevel = ( + include_oslevel if include_oslevel is not None else not is_root_dir_defined + ) + + def __repr__(self) -> str: """Return repr of all info""" return ( "LinuxDistribution(" @@ -775,14 +809,18 @@ class LinuxDistribution(object): "distro_release_file={self.distro_release_file!r}, " "include_lsb={self.include_lsb!r}, " "include_uname={self.include_uname!r}, " + "include_oslevel={self.include_oslevel!r}, " + "root_dir={self.root_dir!r}, " "_os_release_info={self._os_release_info!r}, " "_lsb_release_info={self._lsb_release_info!r}, " "_distro_release_info={self._distro_release_info!r}, " - "_uname_info={self._uname_info!r})".format(self=self) + "_uname_info={self._uname_info!r}, " + "_oslevel_info={self._oslevel_info!r})".format(self=self) ) - def linux_distribution(self, full_distribution_name=True): - # type: (bool) -> Tuple[str, str, str] + def linux_distribution( + self, full_distribution_name: bool = True + ) -> Tuple[str, str, str]: """ Return information about the OS distribution that is compatible with Python's :func:`platform.linux_distribution`, supporting a subset @@ -796,15 +834,13 @@ class LinuxDistribution(object): self._os_release_info.get("release_codename") or self.codename(), ) - def id(self): - # type: () -> str + def id(self) -> str: """Return the distro ID of the OS distribution, as a string. For details, see :func:`distro.id`. """ - def normalize(distro_id, table): - # type: (str, Dict[str, str]) -> str + def normalize(distro_id: str, table: Dict[str, str]) -> str: distro_id = distro_id.lower().replace(" ", "_") return table.get(distro_id, distro_id) @@ -826,8 +862,7 @@ class LinuxDistribution(object): return "" - def name(self, pretty=False): - # type: (bool) -> str + def name(self, pretty: bool = False) -> str: """ Return the name of the OS distribution, as a string. @@ -847,11 +882,10 @@ class LinuxDistribution(object): name = self.distro_release_attr("name") or self.uname_attr("name") version = self.version(pretty=True) if version: - name = name + " " + version + name = f"{name} {version}" return name or "" - def version(self, pretty=False, best=False): - # type: (bool, bool) -> str + def version(self, pretty: bool = False, best: bool = False) -> str: """ Return the version of the OS distribution, as a string. @@ -869,7 +903,10 @@ class LinuxDistribution(object): ).get("version_id", ""), self.uname_attr("release"), ] - if self.id() == "debian" or "debian" in self.like().split(): + if self.uname_attr("id").startswith("aix"): + # On AIX platforms, prefer oslevel command output. 
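The constructor change above defaults every include_* flag to off when root_dir is supplied, and raises if a caller forces one on, because lsb_release/uname/oslevel would describe the running host rather than the tree under root_dir. Expected behaviour under the distro 1.9.0 semantics shown in this hunk (the mount point is illustrative):

    from ansible.module_utils.distro._distro import LinuxDistribution

    # Inspect an offline image mounted at /mnt/image: only file-based
    # sources (os-release, distro release files) are consulted.
    offline = LinuxDistribution(root_dir="/mnt/image")
    assert not (offline.include_lsb or offline.include_uname or offline.include_oslevel)

    try:
        LinuxDistribution(root_dir="/mnt/image", include_lsb=True)
    except ValueError as exc:
        print(exc)  # subprocess data sources are disallowed with root_dir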
+ versions.insert(0, self.oslevel_info()) + elif self.id() == "debian" or "debian" in self.like().split(): # On Debian-like, add debian_version file content to candidates list. versions.append(self._debian_version) version = "" @@ -887,11 +924,10 @@ class LinuxDistribution(object): version = v break if pretty and version and self.codename(): - version = "{0} ({1})".format(version, self.codename()) + version = f"{version} ({self.codename()})" return version - def version_parts(self, best=False): - # type: (bool) -> Tuple[str, str, str] + def version_parts(self, best: bool = False) -> Tuple[str, str, str]: """ Return the version of the OS distribution, as a tuple of version numbers. @@ -907,8 +943,7 @@ class LinuxDistribution(object): return major, minor or "", build_number or "" return "", "", "" - def major_version(self, best=False): - # type: (bool) -> str + def major_version(self, best: bool = False) -> str: """ Return the major version number of the current distribution. @@ -916,8 +951,7 @@ class LinuxDistribution(object): """ return self.version_parts(best)[0] - def minor_version(self, best=False): - # type: (bool) -> str + def minor_version(self, best: bool = False) -> str: """ Return the minor version number of the current distribution. @@ -925,8 +959,7 @@ class LinuxDistribution(object): """ return self.version_parts(best)[1] - def build_number(self, best=False): - # type: (bool) -> str + def build_number(self, best: bool = False) -> str: """ Return the build number of the current distribution. @@ -934,8 +967,7 @@ class LinuxDistribution(object): """ return self.version_parts(best)[2] - def like(self): - # type: () -> str + def like(self) -> str: """ Return the IDs of distributions that are like the OS distribution. @@ -943,8 +975,7 @@ class LinuxDistribution(object): """ return self.os_release_attr("id_like") or "" - def codename(self): - # type: () -> str + def codename(self) -> str: """ Return the codename of the OS distribution. @@ -961,18 +992,17 @@ class LinuxDistribution(object): or "" ) - def info(self, pretty=False, best=False): - # type: (bool, bool) -> InfoDict + def info(self, pretty: bool = False, best: bool = False) -> InfoDict: """ Return certain machine-readable information about the OS distribution. For details, see :func:`distro.info`. """ - return dict( + return InfoDict( id=self.id(), version=self.version(pretty, best), - version_parts=dict( + version_parts=VersionDict( major=self.major_version(best), minor=self.minor_version(best), build_number=self.build_number(best), @@ -981,8 +1011,7 @@ class LinuxDistribution(object): codename=self.codename(), ) - def os_release_info(self): - # type: () -> Dict[str, str] + def os_release_info(self) -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the os-release file data source of the OS distribution. 
@@ -991,8 +1020,7 @@ class LinuxDistribution(object): """ return self._os_release_info - def lsb_release_info(self): - # type: () -> Dict[str, str] + def lsb_release_info(self) -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the lsb_release command data source of the OS @@ -1002,8 +1030,7 @@ class LinuxDistribution(object): """ return self._lsb_release_info - def distro_release_info(self): - # type: () -> Dict[str, str] + def distro_release_info(self) -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the distro release file data source of the OS @@ -1013,8 +1040,7 @@ class LinuxDistribution(object): """ return self._distro_release_info - def uname_info(self): - # type: () -> Dict[str, str] + def uname_info(self) -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the uname command data source of the OS distribution. @@ -1023,8 +1049,13 @@ class LinuxDistribution(object): """ return self._uname_info - def os_release_attr(self, attribute): - # type: (str) -> str + def oslevel_info(self) -> str: + """ + Return AIX' oslevel command output. + """ + return self._oslevel_info + + def os_release_attr(self, attribute: str) -> str: """ Return a single named information item from the os-release file data source of the OS distribution. @@ -1033,8 +1064,7 @@ class LinuxDistribution(object): """ return self._os_release_info.get(attribute, "") - def lsb_release_attr(self, attribute): - # type: (str) -> str + def lsb_release_attr(self, attribute: str) -> str: """ Return a single named information item from the lsb_release command output data source of the OS distribution. @@ -1043,8 +1073,7 @@ class LinuxDistribution(object): """ return self._lsb_release_info.get(attribute, "") - def distro_release_attr(self, attribute): - # type: (str) -> str + def distro_release_attr(self, attribute: str) -> str: """ Return a single named information item from the distro release file data source of the OS distribution. @@ -1053,8 +1082,7 @@ class LinuxDistribution(object): """ return self._distro_release_info.get(attribute, "") - def uname_attr(self, attribute): - # type: (str) -> str + def uname_attr(self, attribute: str) -> str: """ Return a single named information item from the uname command output data source of the OS distribution. @@ -1064,8 +1092,7 @@ class LinuxDistribution(object): return self._uname_info.get(attribute, "") @cached_property - def _os_release_info(self): - # type: () -> Dict[str, str] + def _os_release_info(self) -> Dict[str, str]: """ Get the information items from the specified os-release file. @@ -1073,13 +1100,12 @@ class LinuxDistribution(object): A dictionary containing all information items. """ if os.path.isfile(self.os_release_file): - with open(self.os_release_file) as release_file: + with open(self.os_release_file, encoding="utf-8") as release_file: return self._parse_os_release_content(release_file) return {} @staticmethod - def _parse_os_release_content(lines): - # type: (TextIO) -> Dict[str, str] + def _parse_os_release_content(lines: TextIO) -> Dict[str, str]: """ Parse the lines of an os-release file. @@ -1096,16 +1122,6 @@ class LinuxDistribution(object): lexer = shlex.shlex(lines, posix=True) lexer.whitespace_split = True - # The shlex module defines its `wordchars` variable using literals, - # making it dependent on the encoding of the Python source file. 
- # In Python 2.6 and 2.7, the shlex source file is encoded in - # 'iso-8859-1', and the `wordchars` variable is defined as a byte - # string. This causes a UnicodeDecodeError to be raised when the - # parsed content is a unicode object. The following fix resolves that - # (... but it should be fixed in shlex...): - if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes): - lexer.wordchars = lexer.wordchars.decode("iso-8859-1") - tokens = list(lexer) for token in tokens: # At this point, all shell-like parsing has been done (i.e. @@ -1139,8 +1155,7 @@ class LinuxDistribution(object): return props @cached_property - def _lsb_release_info(self): - # type: () -> Dict[str, str] + def _lsb_release_info(self) -> Dict[str, str]: """ Get the information items from the lsb_release command output. @@ -1149,19 +1164,17 @@ class LinuxDistribution(object): """ if not self.include_lsb: return {} - with open(os.devnull, "wb") as devnull: - try: - cmd = ("lsb_release", "-a") - stdout = subprocess.check_output(cmd, stderr=devnull) - # Command not found or lsb_release returned error - except (OSError, subprocess.CalledProcessError): - return {} + try: + cmd = ("lsb_release", "-a") + stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL) + # Command not found or lsb_release returned error + except (OSError, subprocess.CalledProcessError): + return {} content = self._to_str(stdout).splitlines() return self._parse_lsb_release_content(content) @staticmethod - def _parse_lsb_release_content(lines): - # type: (Iterable[str]) -> Dict[str, str] + def _parse_lsb_release_content(lines: Iterable[str]) -> Dict[str, str]: """ Parse the output of the lsb_release command. @@ -1185,31 +1198,39 @@ class LinuxDistribution(object): return props @cached_property - def _uname_info(self): - # type: () -> Dict[str, str] + def _uname_info(self) -> Dict[str, str]: if not self.include_uname: return {} - with open(os.devnull, "wb") as devnull: - try: - cmd = ("uname", "-rs") - stdout = subprocess.check_output(cmd, stderr=devnull) - except OSError: - return {} + try: + cmd = ("uname", "-rs") + stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL) + except OSError: + return {} content = self._to_str(stdout).splitlines() return self._parse_uname_content(content) @cached_property - def _debian_version(self): - # type: () -> str + def _oslevel_info(self) -> str: + if not self.include_oslevel: + return "" + try: + stdout = subprocess.check_output("oslevel", stderr=subprocess.DEVNULL) + except (OSError, subprocess.CalledProcessError): + return "" + return self._to_str(stdout).strip() + + @cached_property + def _debian_version(self) -> str: try: - with open(os.path.join(self.etc_dir, "debian_version")) as fp: + with open( + os.path.join(self.etc_dir, "debian_version"), encoding="ascii" + ) as fp: return fp.readline().rstrip() - except (OSError, IOError): + except FileNotFoundError: return "" @staticmethod - def _parse_uname_content(lines): - # type: (Sequence[str]) -> Dict[str, str] + def _parse_uname_content(lines: Sequence[str]) -> Dict[str, str]: if not lines: return {} props = {} @@ -1228,23 +1249,12 @@ class LinuxDistribution(object): return props @staticmethod - def _to_str(text): - # type: (Union[bytes, str]) -> str + def _to_str(bytestring: bytes) -> str: encoding = sys.getfilesystemencoding() - encoding = "utf-8" if encoding == "ascii" else encoding - - if sys.version_info[0] >= 3: - if isinstance(text, bytes): - return text.decode(encoding) - else: - if isinstance(text, unicode): # noqa - return 
text.encode(encoding) - - return text + return bytestring.decode(encoding) @cached_property - def _distro_release_info(self): - # type: () -> Dict[str, str] + def _distro_release_info(self) -> Dict[str, str]: """ Get the information items from the specified distro release file. @@ -1261,14 +1271,14 @@ class LinuxDistribution(object): # file), because we want to use what was specified as best as # possible. match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) - if "name" in distro_info and "cloudlinux" in distro_info["name"].lower(): - distro_info["id"] = "cloudlinux" - elif match: - distro_info["id"] = match.group(1) - return distro_info else: try: - basenames = os.listdir(self.etc_dir) + basenames = [ + basename + for basename in os.listdir(self.etc_dir) + if basename not in _DISTRO_RELEASE_IGNORE_BASENAMES + and os.path.isfile(os.path.join(self.etc_dir, basename)) + ] # We sort for repeatability in cases where there are multiple # distro specific files; e.g. CentOS, Oracle, Enterprise all # containing `redhat-release` on top of their own. @@ -1278,42 +1288,31 @@ class LinuxDistribution(object): # sure about the *-release files. Check common entries of # /etc for information. If they turn out to not be there the # error is handled in `_parse_distro_release_file()`. - basenames = [ - "SuSE-release", - "arch-release", - "base-release", - "centos-release", - "fedora-release", - "gentoo-release", - "mageia-release", - "mandrake-release", - "mandriva-release", - "mandrivalinux-release", - "manjaro-release", - "oracle-release", - "redhat-release", - "rocky-release", - "sl-release", - "slackware-version", - ] + basenames = _DISTRO_RELEASE_BASENAMES for basename in basenames: - if basename in _DISTRO_RELEASE_IGNORE_BASENAMES: - continue match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) - if match: - filepath = os.path.join(self.etc_dir, basename) - distro_info = self._parse_distro_release_file(filepath) - if "name" in distro_info: - # The name is always present if the pattern matches - self.distro_release_file = filepath - distro_info["id"] = match.group(1) - if "cloudlinux" in distro_info["name"].lower(): - distro_info["id"] = "cloudlinux" - return distro_info - return {} + if match is None: + continue + filepath = os.path.join(self.etc_dir, basename) + distro_info = self._parse_distro_release_file(filepath) + # The name is always present if the pattern matches. + if "name" not in distro_info: + continue + self.distro_release_file = filepath + break + else: # the loop didn't "break": no candidate. + return {} + + if match is not None: + distro_info["id"] = match.group(1) + + # CloudLinux < 7: manually enrich info with proper id. + if "cloudlinux" in distro_info.get("name", "").lower(): + distro_info["id"] = "cloudlinux" + + return distro_info - def _parse_distro_release_file(self, filepath): - # type: (str) -> Dict[str, str] + def _parse_distro_release_file(self, filepath: str) -> Dict[str, str]: """ Parse a distro release file. @@ -1325,19 +1324,18 @@ class LinuxDistribution(object): A dictionary containing all information items. """ try: - with open(filepath) as fp: + with open(filepath, encoding="utf-8") as fp: # Only parse the first line. For instance, on SLES there # are multiple lines. We don't want them... return self._parse_distro_release_content(fp.readline()) - except (OSError, IOError): + except OSError: # Ignore not being able to read a specific, seemingly version # related file. 
# See https://github.com/python-distro/distro/issues/162 return {} @staticmethod - def _parse_distro_release_content(line): - # type: (str) -> Dict[str, str] + def _parse_distro_release_content(line: str) -> Dict[str, str]: """ Parse a line from a distro release file. @@ -1365,8 +1363,7 @@ class LinuxDistribution(object): _distro = LinuxDistribution() -def main(): - # type: () -> None +def main() -> None: logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) logger.addHandler(logging.StreamHandler(sys.stdout)) @@ -1388,7 +1385,10 @@ def main(): if args.root_dir: dist = LinuxDistribution( - include_lsb=False, include_uname=False, root_dir=args.root_dir + include_lsb=False, + include_uname=False, + include_oslevel=False, + root_dir=args.root_dir, ) else: dist = _distro diff --git a/lib/ansible/module_utils/errors.py b/lib/ansible/module_utils/errors.py index cbbd86c01cb..1196fac2b26 100644 --- a/lib/ansible/module_utils/errors.py +++ b/lib/ansible/module_utils/errors.py @@ -2,8 +2,7 @@ # Copyright (c) 2021 Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations class AnsibleFallbackNotFound(Exception): diff --git a/lib/ansible/module_utils/facts/__init__.py b/lib/ansible/module_utils/facts/__init__.py index 96ab778bf01..6d2469137fa 100644 --- a/lib/ansible/module_utils/facts/__init__.py +++ b/lib/ansible/module_utils/facts/__init__.py @@ -26,8 +26,7 @@ # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # import from the compat api because 2.0-2.3 had a module_utils.facts.ansible_facts # and get_all_facts in top level namespace diff --git a/lib/ansible/module_utils/facts/ansible_collector.py b/lib/ansible/module_utils/facts/ansible_collector.py index e9bafe297c5..9fe1c8a84ee 100644 --- a/lib/ansible/module_utils/facts/ansible_collector.py +++ b/lib/ansible/module_utils/facts/ansible_collector.py @@ -26,8 +26,7 @@ # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import fnmatch import sys @@ -40,13 +39,13 @@ from ansible.module_utils.common.collections import is_string class AnsibleFactCollector(collector.BaseFactCollector): - '''A FactCollector that returns results under 'ansible_facts' top level key. + """A FactCollector that returns results under 'ansible_facts' top level key. If a namespace if provided, facts will be collected under that namespace. 
For ex, a ansible.module_utils.facts.namespace.PrefixFactNamespace(prefix='ansible_') Has a 'from_gather_subset() constructor that populates collectors based on a - gather_subset specifier.''' + gather_subset specifier.""" def __init__(self, collectors=None, namespace=None, filter_spec=None): @@ -103,7 +102,7 @@ class AnsibleFactCollector(collector.BaseFactCollector): class CollectorMetaDataCollector(collector.BaseFactCollector): - '''Collector that provides a facts with the gather_subset metadata.''' + """Collector that provides a facts with the gather_subset metadata.""" name = 'gather_subset' _fact_ids = set() # type: t.Set[str] diff --git a/lib/ansible/module_utils/facts/collector.py b/lib/ansible/module_utils/facts/collector.py index ac52fe8b4d3..f3e144f7dda 100644 --- a/lib/ansible/module_utils/facts/collector.py +++ b/lib/ansible/module_utils/facts/collector.py @@ -26,8 +26,7 @@ # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from collections import defaultdict @@ -39,13 +38,13 @@ from ansible.module_utils.facts import timeout class CycleFoundInFactDeps(Exception): - '''Indicates there is a cycle in fact collector deps + """Indicates there is a cycle in fact collector deps If collector-B requires collector-A, and collector-A requires collector-B, that is a cycle. In that case, there is no ordering that will satisfy B before A and A and before B. That will cause this error to be raised. - ''' + """ pass @@ -65,9 +64,9 @@ class BaseFactCollector: required_facts = set() # type: t.Set[str] def __init__(self, collectors=None, namespace=None): - '''Base class for things that collect facts. + """Base class for things that collect facts. - 'collectors' is an optional list of other FactCollectors for composing.''' + 'collectors' is an optional list of other FactCollectors for composing.""" self.collectors = collectors or [] # self.namespace is a object with a 'transform' method that transforms @@ -89,8 +88,10 @@ class BaseFactCollector: return key_name def _transform_dict_keys(self, fact_dict): - '''update a dicts keys to use new names as transformed by self._transform_name''' + """update a dicts keys to use new names as transformed by self._transform_name""" + if fact_dict is None: + return {} for old_key in list(fact_dict.keys()): new_key = self._transform_name(old_key) # pop the item by old_key and replace it using new_key @@ -106,7 +107,7 @@ class BaseFactCollector: return facts_dict def collect(self, module=None, collected_facts=None): - '''do the fact collection + """do the fact collection 'collected_facts' is a object (a dict, likely) that holds all previously facts. This is intended to be used if a FactCollector needs to reference @@ -114,7 +115,7 @@ class BaseFactCollector: Returns a dict of facts. - ''' + """ facts_dict = {} return facts_dict @@ -124,12 +125,12 @@ def get_collector_names(valid_subsets=None, gather_subset=None, aliases_map=None, platform_info=None): - '''return a set of FactCollector names based on gather_subset spec. + """return a set of FactCollector names based on gather_subset spec. gather_subset is a spec describing which facts to gather. 
valid_subsets is a frozenset of potential matches for gather_subset ('all', 'network') etc minimal_gather_subsets is a frozenset of matches to always use, even for gather_subset='!all' - ''' + """ # Retrieve module parameters gather_subset = gather_subset or ['all'] @@ -266,11 +267,11 @@ def _get_requires_by_collector_name(collector_name, all_fact_subsets): def find_unresolved_requires(collector_names, all_fact_subsets): - '''Find any collector names that have unresolved requires + """Find any collector names that have unresolved requires Returns a list of collector names that correspond to collector classes whose .requires_facts() are not in collector_names. - ''' + """ unresolved = set() for collector_name in collector_names: @@ -350,7 +351,7 @@ def collector_classes_from_gather_subset(all_collector_classes=None, gather_subset=None, gather_timeout=None, platform_info=None): - '''return a list of collector classes that match the args''' + """return a list of collector classes that match the args""" # use gather_name etc to get the list of collectors diff --git a/lib/ansible/module_utils/facts/compat.py b/lib/ansible/module_utils/facts/compat.py index a69fee3729d..7d389cbc44e 100644 --- a/lib/ansible/module_utils/facts/compat.py +++ b/lib/ansible/module_utils/facts/compat.py @@ -26,8 +26,7 @@ # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.facts.namespace import PrefixFactNamespace from ansible.module_utils.facts import default_collectors @@ -35,19 +34,19 @@ from ansible.module_utils.facts import ansible_collector def get_all_facts(module): - '''compat api for ansible 2.2/2.3 module_utils.facts.get_all_facts method + """compat api for ansible 2.2/2.3 module_utils.facts.get_all_facts method Expects module to be an instance of AnsibleModule, with a 'gather_subset' param. returns a dict mapping the bare fact name ('default_ipv4' with no 'ansible_' namespace) to - the fact value.''' + the fact value.""" gather_subset = module.params['gather_subset'] return ansible_facts(module, gather_subset=gather_subset) def ansible_facts(module, gather_subset=None): - '''Compat api for ansible 2.0/2.2/2.3 module_utils.facts.ansible_facts method + """Compat api for ansible 2.0/2.2/2.3 module_utils.facts.ansible_facts method 2.3/2.3 expects a gather_subset arg. 2.0/2.1 does not except a gather_subset arg @@ -58,7 +57,7 @@ def ansible_facts(module, gather_subset=None): returns a dict mapping the bare fact name ('default_ipv4' with no 'ansible_' namespace) to the fact value. - ''' + """ gather_subset = gather_subset or module.params.get('gather_subset', ['all']) gather_timeout = module.params.get('gather_timeout', 10) diff --git a/lib/ansible/module_utils/facts/default_collectors.py b/lib/ansible/module_utils/facts/default_collectors.py index cf0ef23ef4f..af4391576c0 100644 --- a/lib/ansible/module_utils/facts/default_collectors.py +++ b/lib/ansible/module_utils/facts/default_collectors.py @@ -25,8 +25,7 @@ # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import ansible.module_utils.compat.typing as t @@ -54,6 +53,7 @@ from ansible.module_utils.facts.system.python import PythonFactCollector from ansible.module_utils.facts.system.selinux import SelinuxFactCollector from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector from ansible.module_utils.facts.system.ssh_pub_keys import SshPubKeyFactCollector +from ansible.module_utils.facts.system.systemd import SystemdFactCollector from ansible.module_utils.facts.system.user import UserFactCollector from ansible.module_utils.facts.hardware.base import HardwareCollector @@ -119,7 +119,8 @@ _general = [ EnvFactCollector, LoadAvgFactCollector, SshPubKeyFactCollector, - UserFactCollector + UserFactCollector, + SystemdFactCollector ] # type: t.List[t.Type[BaseFactCollector]] # virtual, this might also limit hardware/networking diff --git a/lib/ansible/module_utils/facts/hardware/aix.py b/lib/ansible/module_utils/facts/hardware/aix.py index dc37394f450..c2a074bf8ea 100644 --- a/lib/ansible/module_utils/facts/hardware/aix.py +++ b/lib/ansible/module_utils/facts/hardware/aix.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re @@ -196,34 +195,35 @@ class AIXHardware(Hardware): # AIX does not have mtab but mount command is only source of info (or to use # api calls to get same info) mount_path = self.module.get_bin_path('mount') - rc, mount_out, err = self.module.run_command(mount_path) - if mount_out: - for line in mount_out.split('\n'): - fields = line.split() - if len(fields) != 0 and fields[0] != 'node' and fields[0][0] != '-' and re.match('^/.*|^[a-zA-Z].*|^[0-9].*', fields[0]): - if re.match('^/', fields[0]): - # normal mount - mount = fields[1] - mount_info = {'mount': mount, - 'device': fields[0], - 'fstype': fields[2], - 'options': fields[6], - 'time': '%s %s %s' % (fields[3], fields[4], fields[5])} - mount_info.update(get_mount_size(mount)) - else: - # nfs or cifs based mount - # in case of nfs if no mount options are provided on command line - # add into fields empty string... - if len(fields) < 8: - fields.append("") - - mount_info = {'mount': fields[2], - 'device': '%s:%s' % (fields[0], fields[1]), - 'fstype': fields[3], - 'options': fields[7], - 'time': '%s %s %s' % (fields[4], fields[5], fields[6])} - - mounts.append(mount_info) + if mount_path: + rc, mount_out, err = self.module.run_command(mount_path) + if mount_out: + for line in mount_out.split('\n'): + fields = line.split() + if len(fields) != 0 and fields[0] != 'node' and fields[0][0] != '-' and re.match('^/.*|^[a-zA-Z].*|^[0-9].*', fields[0]): + if re.match('^/', fields[0]): + # normal mount + mount = fields[1] + mount_info = {'mount': mount, + 'device': fields[0], + 'fstype': fields[2], + 'options': fields[6], + 'time': '%s %s %s' % (fields[3], fields[4], fields[5])} + mount_info.update(get_mount_size(mount)) + else: + # nfs or cifs based mount + # in case of nfs if no mount options are provided on command line + # add into fields empty string... 
+ if len(fields) < 8: + fields.append("") + + mount_info = {'mount': fields[2], + 'device': '%s:%s' % (fields[0], fields[1]), + 'fstype': fields[3], + 'options': fields[7], + 'time': '%s %s %s' % (fields[4], fields[5], fields[6])} + + mounts.append(mount_info) mount_facts['mounts'] = mounts @@ -233,30 +233,31 @@ class AIXHardware(Hardware): device_facts = {} device_facts['devices'] = {} - lsdev_cmd = self.module.get_bin_path('lsdev', True) - lsattr_cmd = self.module.get_bin_path('lsattr', True) - rc, out_lsdev, err = self.module.run_command(lsdev_cmd) - - for line in out_lsdev.splitlines(): - field = line.split() - - device_attrs = {} - device_name = field[0] - device_state = field[1] - device_type = field[2:] - lsattr_cmd_args = [lsattr_cmd, '-E', '-l', device_name] - rc, out_lsattr, err = self.module.run_command(lsattr_cmd_args) - for attr in out_lsattr.splitlines(): - attr_fields = attr.split() - attr_name = attr_fields[0] - attr_parameter = attr_fields[1] - device_attrs[attr_name] = attr_parameter - - device_facts['devices'][device_name] = { - 'state': device_state, - 'type': ' '.join(device_type), - 'attributes': device_attrs - } + lsdev_cmd = self.module.get_bin_path('lsdev') + lsattr_cmd = self.module.get_bin_path('lsattr') + if lsdev_cmd and lsattr_cmd: + rc, out_lsdev, err = self.module.run_command(lsdev_cmd) + + for line in out_lsdev.splitlines(): + field = line.split() + + device_attrs = {} + device_name = field[0] + device_state = field[1] + device_type = field[2:] + lsattr_cmd_args = [lsattr_cmd, '-E', '-l', device_name] + rc, out_lsattr, err = self.module.run_command(lsattr_cmd_args) + for attr in out_lsattr.splitlines(): + attr_fields = attr.split() + attr_name = attr_fields[0] + attr_parameter = attr_fields[1] + device_attrs[attr_name] = attr_parameter + + device_facts['devices'][device_name] = { + 'state': device_state, + 'type': ' '.join(device_type), + 'attributes': device_attrs + } return device_facts diff --git a/lib/ansible/module_utils/facts/hardware/base.py b/lib/ansible/module_utils/facts/hardware/base.py index 846bb302320..8710ed57fcc 100644 --- a/lib/ansible/module_utils/facts/hardware/base.py +++ b/lib/ansible/module_utils/facts/hardware/base.py @@ -26,8 +26,7 @@ # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import ansible.module_utils.compat.typing as t diff --git a/lib/ansible/module_utils/facts/hardware/darwin.py b/lib/ansible/module_utils/facts/hardware/darwin.py index d6a8e11e191..ac159d5fd2b 100644 --- a/lib/ansible/module_utils/facts/hardware/darwin.py +++ b/lib/ansible/module_utils/facts/hardware/darwin.py @@ -14,13 +14,11 @@ # along with Ansible. If not, see . 
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import struct import time -from ansible.module_utils.common.process import get_bin_path from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector from ansible.module_utils.facts.sysctl import get_sysctl @@ -42,7 +40,7 @@ class DarwinHardware(Hardware): def populate(self, collected_facts=None): hardware_facts = {} - self.sysctl = get_sysctl(self.module, ['hw', 'machdep', 'kern']) + self.sysctl = get_sysctl(self.module, ['hw', 'machdep', 'kern', 'hw.model']) mac_facts = self.get_mac_facts() cpu_facts = self.get_cpu_facts() memory_facts = self.get_memory_facts() @@ -68,9 +66,8 @@ class DarwinHardware(Hardware): def get_mac_facts(self): mac_facts = {} - rc, out, err = self.module.run_command("sysctl hw.model") - if rc == 0: - mac_facts['model'] = mac_facts['product_name'] = out.splitlines()[-1].split()[1] + if 'hw.model' in self.sysctl: + mac_facts['model'] = mac_facts['product_name'] = self.sysctl['hw.model'] mac_facts['osversion'] = self.sysctl['kern.osversion'] mac_facts['osrevision'] = self.sysctl['kern.osrevision'] @@ -97,44 +94,49 @@ class DarwinHardware(Hardware): total_used = 0 page_size = 4096 - try: - vm_stat_command = get_bin_path('vm_stat') - except ValueError: + + vm_stat_command = self.module.get_bin_path('vm_stat') + if vm_stat_command is None: return memory_facts - rc, out, err = self.module.run_command(vm_stat_command) - if rc == 0: - # Free = Total - (Wired + active + inactive) - # Get a generator of tuples from the command output so we can later - # turn it into a dictionary - memory_stats = (line.rstrip('.').split(':', 1) for line in out.splitlines()) - - # Strip extra left spaces from the value - memory_stats = dict((k, v.lstrip()) for k, v in memory_stats) - - for k, v in memory_stats.items(): - try: - memory_stats[k] = int(v) - except ValueError: - # Most values convert cleanly to integer values but if the field does - # not convert to an integer, just leave it alone. - pass - - if memory_stats.get('Pages wired down'): - total_used += memory_stats['Pages wired down'] * page_size - if memory_stats.get('Pages active'): - total_used += memory_stats['Pages active'] * page_size - if memory_stats.get('Pages inactive'): - total_used += memory_stats['Pages inactive'] * page_size - - memory_facts['memfree_mb'] = memory_facts['memtotal_mb'] - (total_used // 1024 // 1024) + if vm_stat_command: + rc, out, err = self.module.run_command(vm_stat_command) + if rc == 0: + # Free = Total - (Wired + active + inactive) + # Get a generator of tuples from the command output so we can later + # turn it into a dictionary + memory_stats = (line.rstrip('.').split(':', 1) for line in out.splitlines()) + + # Strip extra left spaces from the value + memory_stats = dict((k, v.lstrip()) for k, v in memory_stats) + + for k, v in memory_stats.items(): + try: + memory_stats[k] = int(v) + except ValueError: + # Most values convert cleanly to integer values but if the field does + # not convert to an integer, just leave it alone. 
+ pass + + if memory_stats.get('Pages wired down'): + total_used += memory_stats['Pages wired down'] * page_size + if memory_stats.get('Pages active'): + total_used += memory_stats['Pages active'] * page_size + if memory_stats.get('Pages inactive'): + total_used += memory_stats['Pages inactive'] * page_size + + memory_facts['memfree_mb'] = memory_facts['memtotal_mb'] - (total_used // 1024 // 1024) return memory_facts def get_uptime_facts(self): + # On Darwin, the default format is annoying to parse. # Use -b to get the raw value and decode it. sysctl_cmd = self.module.get_bin_path('sysctl') + if not sysctl_cmd: + return {} + cmd = [sysctl_cmd, '-b', 'kern.boottime'] # We need to get raw bytes, not UTF-8. diff --git a/lib/ansible/module_utils/facts/hardware/dragonfly.py b/lib/ansible/module_utils/facts/hardware/dragonfly.py index ea24151fdb7..ffbde723101 100644 --- a/lib/ansible/module_utils/facts/hardware/dragonfly.py +++ b/lib/ansible/module_utils/facts/hardware/dragonfly.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.facts.hardware.base import HardwareCollector from ansible.module_utils.facts.hardware.freebsd import FreeBSDHardware diff --git a/lib/ansible/module_utils/facts/hardware/freebsd.py b/lib/ansible/module_utils/facts/hardware/freebsd.py index cce2ab268d8..2ae52239632 100644 --- a/lib/ansible/module_utils/facts/hardware/freebsd.py +++ b/lib/ansible/module_utils/facts/hardware/freebsd.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import json @@ -24,7 +23,6 @@ import time from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector from ansible.module_utils.facts.timeout import TimeoutError, timeout - from ansible.module_utils.facts.utils import get_file_content, get_mount_size @@ -174,13 +172,50 @@ class FreeBSDHardware(Hardware): sysdir = '/dev' device_facts['devices'] = {} - drives = re.compile(r'(ada?\d+|da\d+|a?cd\d+)') # TODO: rc, disks, err = self.module.run_command("/sbin/sysctl kern.disks") - slices = re.compile(r'(ada?\d+s\d+\w*|da\d+s\d+\w*)') + # TODO: rc, disks, err = self.module.run_command("/sbin/sysctl kern.disks") + drives = re.compile( + r"""(?x)( + (?: + ada? # ATA/SATA disk device + |da # SCSI disk device + |a?cd # SCSI CDROM drive + |amrd # AMI MegaRAID drive + |idad # Compaq RAID array + |ipsd # IBM ServeRAID RAID array + |md # md(4) disk device + |mfid # LSI MegaRAID SAS array + |mlxd # Mylex RAID disk + |twed # 3ware ATA RAID array + |vtbd # VirtIO Block Device + )\d+ + ) + """ + ) + + slices = re.compile( + r"""(?x)( + (?: + ada? 
# ATA/SATA disk device + |a?cd # SCSI CDROM drive + |amrd # AMI MegaRAID drive + |da # SCSI disk device + |idad # Compaq RAID array + |ipsd # IBM ServeRAID RAID array + |md # md(4) disk device + |mfid # LSI MegaRAID SAS array + |mlxd # Mylex RAID disk + |twed # 3ware ATA RAID array + |vtbd # VirtIO Block Device + )\d+[ps]\d+\w* + ) + """ + ) + if os.path.isdir(sysdir): dirlist = sorted(os.listdir(sysdir)) for device in dirlist: d = drives.match(device) - if d: + if d and d.group(1) not in device_facts['devices']: device_facts['devices'][d.group(1)] = [] s = slices.match(device) if s: @@ -189,9 +224,9 @@ class FreeBSDHardware(Hardware): return device_facts def get_dmi_facts(self): - ''' learn dmi facts from system + """ learn dmi facts from system - Use dmidecode executable if available''' + Use dmidecode executable if available""" dmi_facts = {} @@ -217,18 +252,22 @@ class FreeBSDHardware(Hardware): 'product_version': 'system-version', 'system_vendor': 'system-manufacturer', } + if dmi_bin is None: + dmi_facts = dict.fromkeys( + DMI_DICT.keys(), + 'NA' + ) + return dmi_facts + for (k, v) in DMI_DICT.items(): - if dmi_bin is not None: - (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v)) - if rc == 0: - # Strip out commented lines (specific dmidecode output) - # FIXME: why add the fact and then test if it is json? - dmi_facts[k] = ''.join([line for line in out.splitlines() if not line.startswith('#')]) - try: - json.dumps(dmi_facts[k]) - except UnicodeDecodeError: - dmi_facts[k] = 'NA' - else: + (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v)) + if rc == 0: + # Strip out commented lines (specific dmidecode output) + # FIXME: why add the fact and then test if it is json? + dmi_facts[k] = ''.join([line for line in out.splitlines() if not line.startswith('#')]) + try: + json.dumps(dmi_facts[k]) + except UnicodeDecodeError: dmi_facts[k] = 'NA' else: dmi_facts[k] = 'NA' diff --git a/lib/ansible/module_utils/facts/hardware/hpux.py b/lib/ansible/module_utils/facts/hardware/hpux.py index ae72ed8e486..efb63a98c2e 100644 --- a/lib/ansible/module_utils/facts/hardware/hpux.py +++ b/lib/ansible/module_utils/facts/hardware/hpux.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import re @@ -41,6 +40,9 @@ class HPUXHardware(Hardware): def populate(self, collected_facts=None): hardware_facts = {} + # TODO: very inefficient calls to machinfo, + # should just make one and then deal with finding the data (see facts/sysctl) + # but not going to change unless there is hp/ux for testing cpu_facts = self.get_cpu_facts(collected_facts=collected_facts) memory_facts = self.get_memory_facts() hw_facts = self.get_hw_facts() diff --git a/lib/ansible/module_utils/facts/hardware/hurd.py b/lib/ansible/module_utils/facts/hardware/hurd.py index 306e13c1354..491670c56b1 100644 --- a/lib/ansible/module_utils/facts/hardware/hurd.py +++ b/lib/ansible/module_utils/facts/hardware/hurd.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.facts.timeout import TimeoutError from ansible.module_utils.facts.hardware.base import HardwareCollector diff --git a/lib/ansible/module_utils/facts/hardware/linux.py b/lib/ansible/module_utils/facts/hardware/linux.py index 4e6305cb185..f431c4e1f8c 100644 --- a/lib/ansible/module_utils/facts/hardware/linux.py +++ b/lib/ansible/module_utils/facts/hardware/linux.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import collections import errno @@ -25,12 +24,9 @@ import re import sys import time -from multiprocessing import cpu_count -from multiprocessing.pool import ThreadPool - -from ansible.module_utils.common.text.converters import to_text +from ansible.module_utils._internal._concurrent import _futures from ansible.module_utils.common.locale import get_best_parsable_locale -from ansible.module_utils.common.process import get_bin_path +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.common.text.formatters import bytes_to_human from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector from ansible.module_utils.facts.utils import get_file_content, get_file_lines, get_mount_size @@ -92,6 +88,7 @@ class LinuxHardware(Hardware): cpu_facts = self.get_cpu_facts(collected_facts=collected_facts) memory_facts = self.get_memory_facts() dmi_facts = self.get_dmi_facts() + sysinfo_facts = self.get_sysinfo_facts() device_facts = self.get_device_facts() uptime_facts = self.get_uptime_facts() lvm_facts = self.get_lvm_facts() @@ -105,6 +102,7 @@ class LinuxHardware(Hardware): hardware_facts.update(cpu_facts) hardware_facts.update(memory_facts) hardware_facts.update(dmi_facts) + hardware_facts.update(sysinfo_facts) hardware_facts.update(device_facts) hardware_facts.update(uptime_facts) hardware_facts.update(lvm_facts) @@ -209,6 +207,9 @@ class LinuxHardware(Hardware): if 'vme' not in val: xen_paravirt = True + if key == "flags": + cpu_facts['flags'] = val.split() + # model name is for Intel arch, Processor (mind the uppercase P) # works for some ARM devices, like the Sheevaplug. 
if key in ['model name', 'Processor', 'vendor_id', 'cpu', 'Vendor', 'processor']: @@ -258,7 +259,7 @@ class LinuxHardware(Hardware): if collected_facts.get('ansible_architecture') == 's390x': # getting sockets would require 5.7+ with CONFIG_SCHED_TOPOLOGY cpu_facts['processor_count'] = 1 - cpu_facts['processor_cores'] = zp // zmt + cpu_facts['processor_cores'] = round(zp / zmt) cpu_facts['processor_threads_per_core'] = zmt cpu_facts['processor_vcpus'] = zp cpu_facts['processor_nproc'] = zp @@ -283,9 +284,9 @@ class LinuxHardware(Hardware): core_values = list(cores.values()) if core_values: - cpu_facts['processor_threads_per_core'] = core_values[0] // cpu_facts['processor_cores'] + cpu_facts['processor_threads_per_core'] = round(core_values[0] / cpu_facts['processor_cores']) else: - cpu_facts['processor_threads_per_core'] = 1 // cpu_facts['processor_cores'] + cpu_facts['processor_threads_per_core'] = round(1 / cpu_facts['processor_cores']) cpu_facts['processor_vcpus'] = (cpu_facts['processor_threads_per_core'] * cpu_facts['processor_count'] * cpu_facts['processor_cores']) @@ -301,22 +302,19 @@ class LinuxHardware(Hardware): ) except AttributeError: # In Python < 3.3, os.sched_getaffinity() is not available - try: - cmd = get_bin_path('nproc') - except ValueError: - pass - else: - rc, out, _err = self.module.run_command(cmd) + nproc_cmd = self.module.get_bin_path('nproc') + if nproc_cmd is not None: + rc, out, _err = self.module.run_command(nproc_cmd) if rc == 0: cpu_facts['processor_nproc'] = int(out) return cpu_facts def get_dmi_facts(self): - ''' learn dmi facts from system + """ learn dmi facts from system Try /sys first for dmi related facts. - If that is not available, fall back to dmidecode executable ''' + If that is not available, fall back to dmidecode executable """ dmi_facts = {} @@ -371,7 +369,6 @@ class LinuxHardware(Hardware): else: # Fall back to using dmidecode, if available - dmi_bin = self.module.get_bin_path('dmidecode') DMI_DICT = { 'bios_date': 'bios-release-date', 'bios_vendor': 'bios-vendor', @@ -392,25 +389,54 @@ class LinuxHardware(Hardware): 'product_version': 'system-version', 'system_vendor': 'system-manufacturer', } + dmi_bin = self.module.get_bin_path('dmidecode') + if dmi_bin is None: + dmi_facts = dict.fromkeys( + DMI_DICT.keys(), + 'NA' + ) + return dmi_facts + for (k, v) in DMI_DICT.items(): - if dmi_bin is not None: - (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v)) - if rc == 0: - # Strip out commented lines (specific dmidecode output) - thisvalue = ''.join([line for line in out.splitlines() if not line.startswith('#')]) - try: - json.dumps(thisvalue) - except UnicodeDecodeError: - thisvalue = "NA" + (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v)) + if rc == 0: + # Strip out commented lines (specific dmidecode output) + thisvalue = ''.join([line for line in out.splitlines() if not line.startswith('#')]) + try: + json.dumps(thisvalue) + except UnicodeDecodeError: + thisvalue = "NA" - dmi_facts[k] = thisvalue - else: - dmi_facts[k] = 'NA' + dmi_facts[k] = thisvalue else: dmi_facts[k] = 'NA' return dmi_facts + def get_sysinfo_facts(self): + """Fetch /proc/sysinfo facts from s390 Linux on IBM Z""" + if not os.path.exists('/proc/sysinfo'): + return {} + + sysinfo_facts = dict.fromkeys( + ('system_vendor', 'product_version', 'product_serial', 'product_name', 'product_uuid'), + 'NA' + ) + sysinfo_re = re.compile( + r""" + ^ + (?:Manufacturer:\s+(?P.+))| + (?:Type:\s+(?P.+))| + (?:Sequence\ Code:\s+0+(?P.+)) + $ + """, + 
re.VERBOSE | re.MULTILINE + ) + data = get_file_content('/proc/sysinfo') + for match in sysinfo_re.finditer(data): + sysinfo_facts.update({k: v for k, v in match.groupdict().items() if v is not None}) + return sysinfo_facts + def _run_lsblk(self, lsblk_path): # call lsblk and collect all uuids # --exclude 2 makes lsblk ignore floppy disks, which are slower to answer than typical timeouts @@ -549,13 +575,14 @@ class LinuxHardware(Hardware): # start threads to query each mount results = {} - pool = ThreadPool(processes=min(len(mtab_entries), cpu_count())) + executor = _futures.DaemonThreadPoolExecutor() maxtime = timeout.GATHER_TIMEOUT or timeout.DEFAULT_GATHER_TIMEOUT for fields in mtab_entries: # Transform octal escape sequences fields = [self._replace_octal_escapes(field) for field in fields] device, mount, fstype, options = fields[0], fields[1], fields[2], fields[3] + dump, passno = int(fields[4]), int(fields[5]) if not device.startswith(('/', '\\')) and ':/' not in device or fstype == 'none': continue @@ -563,37 +590,38 @@ class LinuxHardware(Hardware): mount_info = {'mount': mount, 'device': device, 'fstype': fstype, - 'options': options} + 'options': options, + 'dump': dump, + 'passno': passno} if mount in bind_mounts: # only add if not already there, we might have a plain /etc/mtab if not self.MTAB_BIND_MOUNT_RE.match(options): mount_info['options'] += ",bind" - results[mount] = {'info': mount_info, - 'extra': pool.apply_async(self.get_mount_info, (mount, device, uuids)), - 'timelimit': time.time() + maxtime} + results[mount] = {'info': mount_info, 'timelimit': time.monotonic() + maxtime} + results[mount]['extra'] = executor.submit(self.get_mount_info, mount, device, uuids) - pool.close() # done with new workers, start gc + # done with spawning new workers, start gc + executor.shutdown() - # wait for workers and get results - while results: + while results: # wait for workers and get results for mount in list(results): done = False res = results[mount]['extra'] try: - if res.ready(): + if res.done(): done = True - if res.successful(): - mount_size, uuid = res.get() + if res.exception() is None: + mount_size, uuid = res.result() if mount_size: results[mount]['info'].update(mount_size) results[mount]['info']['uuid'] = uuid or 'N/A' else: # failed, try to find out why, if 'res.successful' we know there are no exceptions - results[mount]['info']['note'] = 'Could not get extra information: %s.' % (to_text(res.get())) + results[mount]['info']['note'] = f'Could not get extra information: {res.exception()}' - elif time.time() > results[mount]['timelimit']: + elif time.monotonic() > results[mount]['timelimit']: done = True self.module.warn("Timeout exceeded when getting mount info for %s" % mount) results[mount]['info']['note'] = 'Could not get extra information due to timeout' @@ -742,10 +770,24 @@ class LinuxHardware(Hardware): if serial: d['serial'] = serial - for key, test in [('removable', '/removable'), - ('support_discard', '/queue/discard_granularity'), - ]: - d[key] = get_file_content(sysdir + test) + d['removable'] = get_file_content(sysdir + '/removable') + + # Historically, `support_discard` simply returned the value of + # `/sys/block/{device}/queue/discard_granularity`. When its value + # is `0`, then the block device doesn't support discards; + # _however_, it being greater than zero doesn't necessarily mean + # that the block device _does_ support discards. 
+ # + # Another indication that a block device doesn't support discards + # is `/sys/block/{device}/queue/discard_max_hw_bytes` being equal + # to `0` (with the same caveat as above). So if either of those are + # `0`, set `support_discard` to zero, otherwise set it to the value + # of `discard_granularity` for backwards compatibility. + d['support_discard'] = ( + '0' + if get_file_content(sysdir + '/queue/discard_max_hw_bytes') == '0' + else get_file_content(sysdir + '/queue/discard_granularity') + ) if diskname in devs_wwn: d['wwn'] = devs_wwn[diskname] @@ -763,12 +805,12 @@ class LinuxHardware(Hardware): part['links'][link_type] = link_values.get(partname, []) part['start'] = get_file_content(part_sysdir + "/start", 0) - part['sectors'] = get_file_content(part_sysdir + "/size", 0) - part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size") if not part['sectorsize']: part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size", 512) - part['size'] = bytes_to_human((float(part['sectors']) * 512.0)) + # sysfs sectorcount assumes 512 blocksize. Convert using the correct sectorsize + part['sectors'] = int(get_file_content(part_sysdir + "/size", 0)) * 512 // int(part['sectorsize']) + part['size'] = bytes_to_human(float(part['sectors']) * float(part['sectorsize'])) part['uuid'] = get_partition_uuid(partname) self.get_holders(part, part_sysdir) @@ -782,13 +824,14 @@ class LinuxHardware(Hardware): if m: d['scheduler_mode'] = m.group(2) - d['sectors'] = get_file_content(sysdir + "/size") - if not d['sectors']: - d['sectors'] = 0 d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size") if not d['sectorsize']: d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size", 512) - d['size'] = bytes_to_human(float(d['sectors']) * 512.0) + # sysfs sectorcount assumes 512 blocksize. 
Convert using the correct sectorsize + d['sectors'] = int(get_file_content(sysdir + "/size")) * 512 // int(d['sectorsize']) + if not d['sectors']: + d['sectors'] = 0 + d['size'] = bytes_to_human(float(d['sectors']) * float(d['sectorsize'])) d['host'] = "" @@ -831,21 +874,24 @@ class LinuxHardware(Hardware): """ Get LVM Facts if running as root and lvm utils are available """ lvm_facts = {'lvm': 'N/A'} + vgs_cmd = self.module.get_bin_path('vgs') + if vgs_cmd is None: + return lvm_facts - if os.getuid() == 0 and self.module.get_bin_path('vgs'): + if os.getuid() == 0: lvm_util_options = '--noheadings --nosuffix --units g --separator ,' - vgs_path = self.module.get_bin_path('vgs') # vgs fields: VG #PV #LV #SN Attr VSize VFree vgs = {} - if vgs_path: - rc, vg_lines, err = self.module.run_command('%s %s' % (vgs_path, lvm_util_options)) - for vg_line in vg_lines.splitlines(): - items = vg_line.strip().split(',') - vgs[items[0]] = {'size_g': items[-2], - 'free_g': items[-1], - 'num_lvs': items[2], - 'num_pvs': items[1]} + rc, vg_lines, err = self.module.run_command('%s %s' % (vgs_cmd, lvm_util_options)) + for vg_line in vg_lines.splitlines(): + items = vg_line.strip().split(',') + vgs[items[0]] = { + 'size_g': items[-2], + 'free_g': items[-1], + 'num_lvs': items[2], + 'num_pvs': items[1] + } lvs_path = self.module.get_bin_path('lvs') # lvs fields: diff --git a/lib/ansible/module_utils/facts/hardware/netbsd.py b/lib/ansible/module_utils/facts/hardware/netbsd.py index c6557aa67da..69ac583df64 100644 --- a/lib/ansible/module_utils/facts/hardware/netbsd.py +++ b/lib/ansible/module_utils/facts/hardware/netbsd.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import re @@ -163,6 +162,9 @@ class NetBSDHardware(Hardware): def get_uptime_facts(self): # On NetBSD, we need to call sysctl with -n to get this value as an int. sysctl_cmd = self.module.get_bin_path('sysctl') + if sysctl_cmd is None: + return {} + cmd = [sysctl_cmd, '-n', 'kern.boottime'] rc, out, err = self.module.run_command(cmd) diff --git a/lib/ansible/module_utils/facts/hardware/openbsd.py b/lib/ansible/module_utils/facts/hardware/openbsd.py index cd5e21e9613..b5f08c0092b 100644 --- a/lib/ansible/module_utils/facts/hardware/openbsd.py +++ b/lib/ansible/module_utils/facts/hardware/openbsd.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import time @@ -55,7 +54,7 @@ class OpenBSDHardware(Hardware): hardware_facts.update(self.get_dmi_facts()) hardware_facts.update(self.get_uptime_facts()) - # storage devices notorioslly prone to hang/block so they are under a timeout + # storage devices notoriously prone to hang/block so they are under a timeout try: hardware_facts.update(self.get_mount_facts()) except timeout.TimeoutError: @@ -114,6 +113,9 @@ class OpenBSDHardware(Hardware): def get_uptime_facts(self): # On openbsd, we need to call it with -n to get this value as an int. 
sysctl_cmd = self.module.get_bin_path('sysctl') + if sysctl_cmd is None: + return {} + cmd = [sysctl_cmd, '-n', 'kern.boottime'] rc, out, err = self.module.run_command(cmd) diff --git a/lib/ansible/module_utils/facts/hardware/sunos.py b/lib/ansible/module_utils/facts/hardware/sunos.py index 54850fe3bd8..134e59a8c2c 100644 --- a/lib/ansible/module_utils/facts/hardware/sunos.py +++ b/lib/ansible/module_utils/facts/hardware/sunos.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import time @@ -108,7 +107,7 @@ class SunOSHardware(Hardware): # Counting cores on Solaris can be complicated. # https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu # Treat 'processor_count' as physical sockets and 'processor_cores' as - # virtual CPUs visisble to Solaris. Not a true count of cores for modern SPARC as + # virtual CPUs visible to Solaris. Not a true count of cores for modern SPARC as # these processors have: sockets -> cores -> threads/virtual CPU. if len(sockets) > 0: cpu_facts['processor_count'] = len(sockets) @@ -173,7 +172,13 @@ class SunOSHardware(Hardware): rc, platform, err = self.module.run_command('/usr/bin/uname -i') platform_sbin = '/usr/platform/' + platform.rstrip() + '/sbin' - prtdiag_path = self.module.get_bin_path("prtdiag", opt_dirs=[platform_sbin]) + prtdiag_path = self.module.get_bin_path( + "prtdiag", + opt_dirs=[platform_sbin] + ) + if prtdiag_path is None: + return dmi_facts + rc, out, err = self.module.run_command(prtdiag_path) # rc returns 1 if out: diff --git a/lib/ansible/module_utils/facts/namespace.py b/lib/ansible/module_utils/facts/namespace.py index 2d6bf8a5e91..af195b21a15 100644 --- a/lib/ansible/module_utils/facts/namespace.py +++ b/lib/ansible/module_utils/facts/namespace.py @@ -25,8 +25,7 @@ # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class FactNamespace: @@ -34,7 +33,7 @@ class FactNamespace: self.namespace_name = namespace_name def transform(self, name): - '''Take a text name, and transforms it as needed (add a namespace prefix, etc)''' + """Take a text name, and transforms it as needed (add a namespace prefix, etc)""" return name def _underscore(self, name): diff --git a/lib/ansible/module_utils/facts/network/aix.py b/lib/ansible/module_utils/facts/network/aix.py index e9c90c64139..17516d927d8 100644 --- a/lib/ansible/module_utils/facts/network/aix.py +++ b/lib/ansible/module_utils/facts/network/aix.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re @@ -33,20 +32,21 @@ class AIXNetwork(GenericBsdIfconfigNetwork): interface = dict(v4={}, v6={}) netstat_path = self.module.get_bin_path('netstat') - - if netstat_path: - rc, out, err = self.module.run_command([netstat_path, '-nr']) - - lines = out.splitlines() - for line in lines: - words = line.split() - if len(words) > 1 and words[0] == 'default': - if '.' 
in words[1]: - interface['v4']['gateway'] = words[1] - interface['v4']['interface'] = words[5] - elif ':' in words[1]: - interface['v6']['gateway'] = words[1] - interface['v6']['interface'] = words[5] + if netstat_path is None: + return interface['v4'], interface['v6'] + + rc, out, err = self.module.run_command([netstat_path, '-nr']) + + lines = out.splitlines() + for line in lines: + words = line.split() + if len(words) > 1 and words[0] == 'default': + if '.' in words[1]: + interface['v4']['gateway'] = words[1] + interface['v4']['interface'] = words[5] + elif ':' in words[1]: + interface['v6']['gateway'] = words[1] + interface['v6']['interface'] = words[5] return interface['v4'], interface['v6'] @@ -59,9 +59,7 @@ class AIXNetwork(GenericBsdIfconfigNetwork): all_ipv6_addresses=[], ) - uname_rc = None - uname_out = None - uname_err = None + uname_rc = uname_out = uname_err = None uname_path = self.module.get_bin_path('uname') if uname_path: uname_rc, uname_out, uname_err = self.module.run_command([uname_path, '-W']) diff --git a/lib/ansible/module_utils/facts/network/base.py b/lib/ansible/module_utils/facts/network/base.py index 8243f06ccfe..7e13e168b32 100644 --- a/lib/ansible/module_utils/facts/network/base.py +++ b/lib/ansible/module_utils/facts/network/base.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import ansible.module_utils.compat.typing as t diff --git a/lib/ansible/module_utils/facts/network/darwin.py b/lib/ansible/module_utils/facts/network/darwin.py index 90117e5360c..775d40719d9 100644 --- a/lib/ansible/module_utils/facts/network/darwin.py +++ b/lib/ansible/module_utils/facts/network/darwin.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.facts.network.base import NetworkCollector from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork diff --git a/lib/ansible/module_utils/facts/network/dragonfly.py b/lib/ansible/module_utils/facts/network/dragonfly.py index e43bbb28ec9..8a3424594e8 100644 --- a/lib/ansible/module_utils/facts/network/dragonfly.py +++ b/lib/ansible/module_utils/facts/network/dragonfly.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.facts.network.base import NetworkCollector from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork diff --git a/lib/ansible/module_utils/facts/network/fc_wwn.py b/lib/ansible/module_utils/facts/network/fc_wwn.py index dc2e3d6cf9f..fb846cc08a8 100644 --- a/lib/ansible/module_utils/facts/network/fc_wwn.py +++ b/lib/ansible/module_utils/facts/network/fc_wwn.py @@ -15,8 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import sys import glob @@ -83,7 +82,10 @@ class FcWwnInitiatorFactCollector(BaseFactCollector): fc_facts['fibre_channel_wwn'].append(data[-1].rstrip()) elif sys.platform.startswith('hp-ux'): cmd = module.get_bin_path('ioscan') - fcmsu_cmd = module.get_bin_path('fcmsutil', opt_dirs=['/opt/fcms/bin']) + fcmsu_cmd = module.get_bin_path( + 'fcmsutil', + opt_dirs=['/opt/fcms/bin'], + ) # go ahead if we have both commands available if cmd and fcmsu_cmd: # ioscan / get list of available fibre-channel devices (fcd) diff --git a/lib/ansible/module_utils/facts/network/freebsd.py b/lib/ansible/module_utils/facts/network/freebsd.py index 36f6eec7c43..4497010925a 100644 --- a/lib/ansible/module_utils/facts/network/freebsd.py +++ b/lib/ansible/module_utils/facts/network/freebsd.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.facts.network.base import NetworkCollector from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork diff --git a/lib/ansible/module_utils/facts/network/generic_bsd.py b/lib/ansible/module_utils/facts/network/generic_bsd.py index 8d640f2152f..54188638c60 100644 --- a/lib/ansible/module_utils/facts/network/generic_bsd.py +++ b/lib/ansible/module_utils/facts/network/generic_bsd.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import socket diff --git a/lib/ansible/module_utils/facts/network/hpux.py b/lib/ansible/module_utils/facts/network/hpux.py index add57be8d3f..2f01825bb24 100644 --- a/lib/ansible/module_utils/facts/network/hpux.py +++ b/lib/ansible/module_utils/facts/network/hpux.py @@ -13,15 +13,14 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.facts.network.base import Network, NetworkCollector class HPUXNetwork(Network): """ - HP-UX-specifig subclass of Network. Defines networking facts: + HP-UX-specific subclass of Network. Defines networking facts: - default_interface - interfaces (a list of interface names) - interface_ dictionary of ipv4 address information. 
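The HP-UX network hunks that follow apply the convention this patch rolls out across the fact collectors: resolve the external tool with `module.get_bin_path()`, which returns `None` when the binary is absent (unlike the `ValueError`-raising `common.process.get_bin_path` helper that the `iscsi.py` hunk below drops), and return an empty result instead of invoking a missing command. A condensed sketch of the pattern under those assumptions, with `module` standing in for the usual `AnsibleModule` instance and the route parsing kept illustrative:

    def get_default_interfaces(module):
        # get_bin_path() yields None for a missing tool; guard before handing
        # the path to run_command() so fact gathering degrades gracefully on
        # minimal hosts instead of crashing.
        netstat_path = module.get_bin_path('netstat', opt_dirs=['/usr/bin'])
        if netstat_path is None:
            return {}

        rc, out, err = module.run_command([netstat_path, '-nr'])
        interfaces = {}
        for line in out.splitlines():
            words = line.split()
            # A routing-table line such as "default 10.0.0.1 UG ... lan0".
            if len(words) > 1 and words[0] == 'default':
                interfaces['gateway'] = words[1]
                interfaces['interface'] = words[-1]
        return interfaces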
@@ -30,7 +29,10 @@ class HPUXNetwork(Network): def populate(self, collected_facts=None): network_facts = {} - netstat_path = self.module.get_bin_path('netstat') + netstat_path = self.module.get_bin_path( + 'netstat', + opt_dirs=['/usr/bin'] + ) if netstat_path is None: return network_facts @@ -47,7 +49,14 @@ class HPUXNetwork(Network): def get_default_interfaces(self): default_interfaces = {} - rc, out, err = self.module.run_command("/usr/bin/netstat -nr") + netstat_path = self.module.get_bin_path( + 'netstat', + opt_dirs=['/usr/bin'] + ) + + if netstat_path is None: + return default_interfaces + rc, out, err = self.module.run_command("%s -nr" % netstat_path) lines = out.splitlines() for line in lines: words = line.split() @@ -60,7 +69,14 @@ class HPUXNetwork(Network): def get_interfaces_info(self): interfaces = {} - rc, out, err = self.module.run_command("/usr/bin/netstat -niw") + netstat_path = self.module.get_bin_path( + 'netstat', + opt_dirs=['/usr/bin'] + ) + + if netstat_path is None: + return interfaces + rc, out, err = self.module.run_command("%s -niw" % netstat_path) lines = out.splitlines() for line in lines: words = line.split() diff --git a/lib/ansible/module_utils/facts/network/hurd.py b/lib/ansible/module_utils/facts/network/hurd.py index 518df3900a7..05f23e5f445 100644 --- a/lib/ansible/module_utils/facts/network/hurd.py +++ b/lib/ansible/module_utils/facts/network/hurd.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os diff --git a/lib/ansible/module_utils/facts/network/iscsi.py b/lib/ansible/module_utils/facts/network/iscsi.py index ef5ac398a98..48f98a682bd 100644 --- a/lib/ansible/module_utils/facts/network/iscsi.py +++ b/lib/ansible/module_utils/facts/network/iscsi.py @@ -15,14 +15,12 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import sys import ansible.module_utils.compat.typing as t -from ansible.module_utils.common.process import get_bin_path from ansible.module_utils.facts.utils import get_file_content from ansible.module_utils.facts.network.base import NetworkCollector @@ -81,9 +79,8 @@ class IscsiInitiatorNetworkCollector(NetworkCollector): iscsi_facts['iscsi_iqn'] = line.split('=', 1)[1] break elif sys.platform.startswith('aix'): - try: - cmd = get_bin_path('lsattr') - except ValueError: + cmd = module.get_bin_path('lsattr') + if cmd is None: return iscsi_facts cmd += " -E -l iscsi0" @@ -93,10 +90,11 @@ class IscsiInitiatorNetworkCollector(NetworkCollector): iscsi_facts['iscsi_iqn'] = line.split()[1].rstrip() elif sys.platform.startswith('hp-ux'): - # try to find it in the default PATH and opt_dirs - try: - cmd = get_bin_path('iscsiutil', opt_dirs=['/opt/iscsi/bin']) - except ValueError: + cmd = module.get_bin_path( + 'iscsiutil', + opt_dirs=['/opt/iscsi/bin'] + ) + if cmd is None: return iscsi_facts cmd += " -l" diff --git a/lib/ansible/module_utils/facts/network/linux.py b/lib/ansible/module_utils/facts/network/linux.py index a189f387384..d199d5a6ae3 100644 --- a/lib/ansible/module_utils/facts/network/linux.py +++ b/lib/ansible/module_utils/facts/network/linux.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. 
If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import glob import os @@ -296,8 +295,6 @@ class LinuxNetwork(Network): if not address == '::1': ips['all_ipv6_addresses'].append(address) - ip_path = self.module.get_bin_path("ip") - args = [ip_path, 'addr', 'show', 'primary', 'dev', device] rc, primary_data, stderr = self.module.run_command(args, errors='surrogate_then_replace') if rc == 0: diff --git a/lib/ansible/module_utils/facts/network/netbsd.py b/lib/ansible/module_utils/facts/network/netbsd.py index de8ceff60c3..dde9e6c2169 100644 --- a/lib/ansible/module_utils/facts/network/netbsd.py +++ b/lib/ansible/module_utils/facts/network/netbsd.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.facts.network.base import NetworkCollector from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork diff --git a/lib/ansible/module_utils/facts/network/nvme.py b/lib/ansible/module_utils/facts/network/nvme.py index 1d759566c99..7eb070dcf5d 100644 --- a/lib/ansible/module_utils/facts/network/nvme.py +++ b/lib/ansible/module_utils/facts/network/nvme.py @@ -15,8 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import sys diff --git a/lib/ansible/module_utils/facts/network/openbsd.py b/lib/ansible/module_utils/facts/network/openbsd.py index 9e11d82f372..691e6241848 100644 --- a/lib/ansible/module_utils/facts/network/openbsd.py +++ b/lib/ansible/module_utils/facts/network/openbsd.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.facts.network.base import NetworkCollector from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork diff --git a/lib/ansible/module_utils/facts/network/sunos.py b/lib/ansible/module_utils/facts/network/sunos.py index adba14c684c..f2f064cc61d 100644 --- a/lib/ansible/module_utils/facts/network/sunos.py +++ b/lib/ansible/module_utils/facts/network/sunos.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re diff --git a/lib/ansible/module_utils/facts/other/facter.py b/lib/ansible/module_utils/facts/other/facter.py index 063065251dd..41b3cea7c92 100644 --- a/lib/ansible/module_utils/facts/other/facter.py +++ b/lib/ansible/module_utils/facts/other/facter.py @@ -1,8 +1,7 @@ # Copyright (c) 2023 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json @@ -23,8 +22,14 @@ class FacterFactCollector(BaseFactCollector): namespace=namespace) def find_facter(self, module): - facter_path = module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin']) - cfacter_path = module.get_bin_path('cfacter', opt_dirs=['/opt/puppetlabs/bin']) + facter_path = module.get_bin_path( + 'facter', + opt_dirs=['/opt/puppetlabs/bin'] + ) + cfacter_path = module.get_bin_path( + 'cfacter', + opt_dirs=['/opt/puppetlabs/bin'] + ) # Prefer to use cfacter if available if cfacter_path is not None: @@ -74,7 +79,6 @@ class FacterFactCollector(BaseFactCollector): try: facter_dict = json.loads(facter_output) except Exception: - # FIXME: maybe raise a FactCollectorError with some info attrs? - pass + module.warn("Failed to parse facter facts") return facter_dict diff --git a/lib/ansible/module_utils/facts/other/ohai.py b/lib/ansible/module_utils/facts/other/ohai.py index 90c553980b9..db62fe4d73e 100644 --- a/lib/ansible/module_utils/facts/other/ohai.py +++ b/lib/ansible/module_utils/facts/other/ohai.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json @@ -26,7 +25,7 @@ from ansible.module_utils.facts.collector import BaseFactCollector class OhaiFactCollector(BaseFactCollector): - '''This is a subclass of Facts for including information gathered from Ohai.''' + """This is a subclass of Facts for including information gathered from Ohai.""" name = 'ohai' _fact_ids = set() # type: t.Set[str] @@ -37,10 +36,11 @@ class OhaiFactCollector(BaseFactCollector): namespace=namespace) def find_ohai(self, module): - ohai_path = module.get_bin_path('ohai') - return ohai_path + return module.get_bin_path( + 'ohai' + ) - def run_ohai(self, module, ohai_path,): + def run_ohai(self, module, ohai_path): rc, out, err = module.run_command(ohai_path) return rc, out, err @@ -68,7 +68,6 @@ class OhaiFactCollector(BaseFactCollector): try: ohai_facts = json.loads(ohai_output) except Exception: - # FIXME: useful error, logging, something... 
- pass + module.warn("Failed to gather ohai facts") return ohai_facts diff --git a/lib/ansible/module_utils/facts/packages.py b/lib/ansible/module_utils/facts/packages.py index 53f74a16620..b5b9bcb35ef 100644 --- a/lib/ansible/module_utils/facts/packages.py +++ b/lib/ansible/module_utils/facts/packages.py @@ -1,27 +1,31 @@ # (c) 2018, Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations + +import ansible.module_utils.compat.typing as t from abc import ABCMeta, abstractmethod from ansible.module_utils.six import with_metaclass +from ansible.module_utils.basic import missing_required_lib from ansible.module_utils.common.process import get_bin_path +from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module from ansible.module_utils.common._utils import get_all_subclasses def get_all_pkg_managers(): - return {obj.__name__.lower(): obj for obj in get_all_subclasses(PkgMgr) if obj not in (CLIMgr, LibMgr)} + return {obj.__name__.lower(): obj for obj in get_all_subclasses(PkgMgr) if obj not in (CLIMgr, LibMgr, RespawningLibMgr)} class PkgMgr(with_metaclass(ABCMeta, object)): # type: ignore[misc] @abstractmethod - def is_available(self): + def is_available(self, handle_exceptions): # This method is supposed to return True/False if the package manager is currently installed/usable # It can also 'prep' the required systems in the process of detecting availability + # If handle_exceptions is false it should raise exceptions related to manager discovery instead of handling them. pass @abstractmethod @@ -59,16 +63,50 @@ class LibMgr(PkgMgr): self._lib = None super(LibMgr, self).__init__() - def is_available(self): + def is_available(self, handle_exceptions=True): found = False try: self._lib = __import__(self.LIB) found = True except ImportError: - pass + if not handle_exceptions: + raise Exception(missing_required_lib(self.LIB)) return found +class RespawningLibMgr(LibMgr): + + CLI_BINARIES = [] # type: t.List[str] + INTERPRETERS = ['/usr/bin/python3'] + + def is_available(self, handle_exceptions=True): + if super(RespawningLibMgr, self).is_available(): + return True + + for binary in self.CLI_BINARIES: + try: + bin_path = get_bin_path(binary) + except ValueError: + # Not an interesting exception to raise, just a speculative probe + continue + else: + # It looks like this package manager is installed + if not has_respawned(): + # See if respawning will help + interpreter_path = probe_interpreters_for_module(self.INTERPRETERS, self.LIB) + if interpreter_path: + respawn_module(interpreter_path) + # The module will exit when the respawned copy completes + + if not handle_exceptions: + raise Exception(f'Found executable at {bin_path}. 
{missing_required_lib(self.LIB)}') + + if not handle_exceptions: + raise Exception(missing_required_lib(self.LIB)) + + return False + + class CLIMgr(PkgMgr): CLI = None # type: str | None @@ -78,9 +116,12 @@ class CLIMgr(PkgMgr): self._cli = None super(CLIMgr, self).__init__() - def is_available(self): + def is_available(self, handle_exceptions=True): + found = False try: self._cli = get_bin_path(self.CLI) + found = True except ValueError: - return False - return True + if not handle_exceptions: + raise + return found diff --git a/lib/ansible/module_utils/facts/sysctl.py b/lib/ansible/module_utils/facts/sysctl.py index d7bcc8a1056..639e77c41f0 100644 --- a/lib/ansible/module_utils/facts/sysctl.py +++ b/lib/ansible/module_utils/facts/sysctl.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re @@ -22,41 +21,43 @@ from ansible.module_utils.common.text.converters import to_text def get_sysctl(module, prefixes): - sysctl_cmd = module.get_bin_path('sysctl') - cmd = [sysctl_cmd] - cmd.extend(prefixes) sysctl = dict() - - try: - rc, out, err = module.run_command(cmd) - except (IOError, OSError) as e: - module.warn('Unable to read sysctl: %s' % to_text(e)) - rc = 1 - - if rc == 0: - key = '' - value = '' - for line in out.splitlines(): - if not line.strip(): - continue - - if line.startswith(' '): - # handle multiline values, they will not have a starting key - # Add the newline back in so people can split on it to parse - # lines if they need to. - value += '\n' + line - continue + sysctl_cmd = module.get_bin_path('sysctl') + if sysctl_cmd is not None: + + cmd = [sysctl_cmd] + cmd.extend(prefixes) + + try: + rc, out, err = module.run_command(cmd) + except (IOError, OSError) as e: + module.warn('Unable to read sysctl: %s' % to_text(e)) + rc = 1 + + if rc == 0: + key = '' + value = '' + for line in out.splitlines(): + if not line.strip(): + continue + + if line.startswith(' '): + # handle multiline values, they will not have a starting key + # Add the newline back in so people can split on it to parse + # lines if they need to. + value += '\n' + line + continue + + if key: + sysctl[key] = value.strip() + + try: + (key, value) = re.split(r'\s?=\s?|: ', line, maxsplit=1) + except Exception as e: + module.warn('Unable to split sysctl line (%s): %s' % (to_text(line), to_text(e))) if key: sysctl[key] = value.strip() - try: - (key, value) = re.split(r'\s?=\s?|: ', line, maxsplit=1) - except Exception as e: - module.warn('Unable to split sysctl line (%s): %s' % (to_text(line), to_text(e))) - - if key: - sysctl[key] = value.strip() - return sysctl diff --git a/lib/ansible/module_utils/facts/system/apparmor.py b/lib/ansible/module_utils/facts/system/apparmor.py index 3b702f9d323..ec29e883e09 100644 --- a/lib/ansible/module_utils/facts/system/apparmor.py +++ b/lib/ansible/module_utils/facts/system/apparmor.py @@ -15,8 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
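The get_sysctl() rewrite above nests the whole parse under a successful get_bin_path() lookup; the parsing itself handles continuation lines (multiline values arrive indented) and accepts both "key = value" and "key: value" separators. A standalone sketch of the same parse, assuming sysctl-style text is already in hand (the warn callback is a stand-in for module.warn):

    import re

    def parse_sysctl_output(out, warn=print):
        """Parse sysctl output into a dict, keeping multiline values intact."""
        sysctl = {}
        key = ''
        value = ''
        for line in out.splitlines():
            if not line.strip():
                continue
            if line.startswith(' '):
                # Continuation of the previous value; keep the newline so
                # callers can split the value back into lines.
                value += '\n' + line
                continue
            if key:
                sysctl[key] = value.strip()
            try:
                key, value = re.split(r'\s?=\s?|: ', line, maxsplit=1)
            except ValueError:
                warn('Unable to split sysctl line: %s' % line)
                key, value = '', ''
        if key:
            sysctl[key] = value.strip()
        return sysctl

    # parse_sysctl_output("kern.ostype = FreeBSD\nnet.ipv4.ip_forward = 0")
    # -> {'kern.ostype': 'FreeBSD', 'net.ipv4.ip_forward': '0'}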
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os diff --git a/lib/ansible/module_utils/facts/system/caps.py b/lib/ansible/module_utils/facts/system/caps.py index 3692f2079a0..365a04592ac 100644 --- a/lib/ansible/module_utils/facts/system/caps.py +++ b/lib/ansible/module_utils/facts/system/caps.py @@ -15,8 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import ansible.module_utils.compat.typing as t diff --git a/lib/ansible/module_utils/facts/system/chroot.py b/lib/ansible/module_utils/facts/system/chroot.py index 94138a004f5..bbf4b39dd3e 100644 --- a/lib/ansible/module_utils/facts/system/chroot.py +++ b/lib/ansible/module_utils/facts/system/chroot.py @@ -1,7 +1,6 @@ # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os diff --git a/lib/ansible/module_utils/facts/system/cmdline.py b/lib/ansible/module_utils/facts/system/cmdline.py index 782186dcaf8..12376dc0ba1 100644 --- a/lib/ansible/module_utils/facts/system/cmdline.py +++ b/lib/ansible/module_utils/facts/system/cmdline.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import shlex diff --git a/lib/ansible/module_utils/facts/system/date_time.py b/lib/ansible/module_utils/facts/system/date_time.py index 481bef42bfd..908d00aa163 100644 --- a/lib/ansible/module_utils/facts/system/date_time.py +++ b/lib/ansible/module_utils/facts/system/date_time.py @@ -15,15 +15,14 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
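Nearly every hunk in this patch swaps the Python 2 compatibility preamble for a single `from __future__ import annotations`. Beyond dropping the py2 markers, that import defers evaluation of type hints (PEP 563): annotations are stored as strings and never evaluated at import time, so modern annotation syntax remains importable on the oldest interpreters ansible-core still supports. A small illustration (the function itself is hypothetical):

    from __future__ import annotations

    # With deferred evaluation, builtin generics and the | union syntax in
    # annotations import cleanly even on Python 3.8.
    def first_line(paths: list[str]) -> str | None:
        for path in paths:
            with open(path) as f:
                line = f.readline().strip()
                if line:
                    return line
        return None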
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import datetime import time import ansible.module_utils.compat.typing as t - from ansible.module_utils.facts.collector import BaseFactCollector +from ansible.module_utils.compat.datetime import utcfromtimestamp class DateTimeFactCollector(BaseFactCollector): @@ -37,7 +36,7 @@ class DateTimeFactCollector(BaseFactCollector): # Store the timestamp once, then get local and UTC versions from that epoch_ts = time.time() now = datetime.datetime.fromtimestamp(epoch_ts) - utcnow = datetime.datetime.utcfromtimestamp(epoch_ts) + utcnow = utcfromtimestamp(epoch_ts).replace(tzinfo=None) date_time_facts['year'] = now.strftime('%Y') date_time_facts['month'] = now.strftime('%m') diff --git a/lib/ansible/module_utils/facts/system/distribution.py b/lib/ansible/module_utils/facts/system/distribution.py index 6feece2a47e..66c768a126f 100644 --- a/lib/ansible/module_utils/facts/system/distribution.py +++ b/lib/ansible/module_utils/facts/system/distribution.py @@ -3,8 +3,7 @@ # Copyright: (c) Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import platform @@ -31,7 +30,7 @@ def get_uname(module, flags=('-v')): def _file_exists(path, allow_empty=False): # not finding the file, exit early - if not os.path.exists(path): + if not os.path.isfile(path): return False # if just the path needs to exists (ie, it can be empty) we are done @@ -47,7 +46,7 @@ def _file_exists(path, allow_empty=False): class DistributionFiles: - '''has-a various distro file parsers (os-release, etc) and logic for finding the right one.''' + """has-a various distro file parsers (os-release, etc) and logic for finding the right one.""" # every distribution name mentioned here, must have one of # - allowempty == True # - be listed in SEARCH_STRING @@ -511,14 +510,14 @@ class Distribution(object): # keep keys in sync with Conditionals page of docs OS_FAMILY_MAP = {'RedHat': ['RedHat', 'RHEL', 'Fedora', 'CentOS', 'Scientific', 'SLC', 'Ascendos', 'CloudLinux', 'PSBM', 'OracleLinux', 'OVS', - 'OEL', 'Amazon', 'Virtuozzo', 'XenServer', 'Alibaba', + 'OEL', 'Amazon', 'Amzn', 'Virtuozzo', 'XenServer', 'Alibaba', 'EulerOS', 'openEuler', 'AlmaLinux', 'Rocky', 'TencentOS', - 'EuroLinux', 'Kylin Linux Advanced Server'], + 'EuroLinux', 'Kylin Linux Advanced Server', 'MIRACLE'], 'Debian': ['Debian', 'Ubuntu', 'Raspbian', 'Neon', 'KDE neon', 'Linux Mint', 'SteamOS', 'Devuan', 'Kali', 'Cumulus Linux', 'Pop!_OS', 'Parrot', 'Pardus GNU/Linux', 'Uos', 'Deepin', 'OSMC'], 'Suse': ['SuSE', 'SLES', 'SLED', 'openSUSE', 'openSUSE Tumbleweed', - 'SLES_SAP', 'SUSE_LINUX', 'openSUSE Leap'], + 'SLES_SAP', 'SUSE_LINUX', 'openSUSE Leap', 'ALP-Dolomite', 'SL-Micro'], 'Archlinux': ['Archlinux', 'Antergos', 'Manjaro'], 'Mandrake': ['Mandrake', 'Mandriva'], 'Solaris': ['Solaris', 'Nexenta', 'OmniOS', 'OpenIndiana', 'SmartOS'], diff --git a/lib/ansible/module_utils/facts/system/dns.py b/lib/ansible/module_utils/facts/system/dns.py index d913f4a30d4..7ef69d136fc 100644 --- a/lib/ansible/module_utils/facts/system/dns.py +++ b/lib/ansible/module_utils/facts/system/dns.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
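For context on the date_time.py hunk above: datetime.datetime.utcfromtimestamp() is deprecated as of Python 3.12, which is why the collector now routes through the compat helper and then strips tzinfo to preserve the naive-datetime shape existing fact consumers expect. The equivalent stdlib-only pattern, as a sketch:

    import time
    from datetime import datetime, timezone

    epoch_ts = time.time()
    now = datetime.fromtimestamp(epoch_ts)
    # Build an aware UTC datetime, then drop tzinfo to keep the legacy
    # naive value that the date_time facts have always exposed.
    utcnow = datetime.fromtimestamp(epoch_ts, tz=timezone.utc).replace(tzinfo=None)
    print(now.strftime('%Y-%m-%d'), utcnow.strftime('%H:%M:%S'))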
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import ansible.module_utils.compat.typing as t diff --git a/lib/ansible/module_utils/facts/system/env.py b/lib/ansible/module_utils/facts/system/env.py index 605443fa39d..4547924532e 100644 --- a/lib/ansible/module_utils/facts/system/env.py +++ b/lib/ansible/module_utils/facts/system/env.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os diff --git a/lib/ansible/module_utils/facts/system/fips.py b/lib/ansible/module_utils/facts/system/fips.py index 7e56610e1b4..131434157d4 100644 --- a/lib/ansible/module_utils/facts/system/fips.py +++ b/lib/ansible/module_utils/facts/system/fips.py @@ -1,22 +1,8 @@ +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Determine if a system is in 'fips' mode -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import ansible.module_utils.compat.typing as t @@ -31,9 +17,9 @@ class FipsFactCollector(BaseFactCollector): def collect(self, module=None, collected_facts=None): # NOTE: this is populated even if it is not set - fips_facts = {} - fips_facts['fips'] = False - data = get_file_content('/proc/sys/crypto/fips_enabled') - if data and data == '1': + fips_facts = { + 'fips': False + } + if get_file_content('/proc/sys/crypto/fips_enabled') == '1': fips_facts['fips'] = True return fips_facts diff --git a/lib/ansible/module_utils/facts/system/loadavg.py b/lib/ansible/module_utils/facts/system/loadavg.py index 8475f2ae113..37cb554434f 100644 --- a/lib/ansible/module_utils/facts/system/loadavg.py +++ b/lib/ansible/module_utils/facts/system/loadavg.py @@ -1,8 +1,7 @@ # (c) 2021 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os diff --git a/lib/ansible/module_utils/facts/system/local.py b/lib/ansible/module_utils/facts/system/local.py index 3fb75c94302..66ec58a2e7d 100644 --- a/lib/ansible/module_utils/facts/system/local.py +++ b/lib/ansible/module_utils/facts/system/local.py @@ -1,20 +1,7 @@ -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import annotations import glob import json @@ -91,9 +78,9 @@ class LocalFactCollector(BaseFactCollector): # if that fails read it with ConfigParser cp = configparser.ConfigParser() try: - cp.readfp(StringIO(out)) + cp.read_file(StringIO(out)) except configparser.Error: - fact = "error loading facts as JSON or ini - please check content: %s" % fn + fact = f"error loading facts as JSON or ini - please check content: {fn}" module.warn(fact) else: fact = {} @@ -101,8 +88,14 @@ class LocalFactCollector(BaseFactCollector): if sect not in fact: fact[sect] = {} for opt in cp.options(sect): - val = cp.get(sect, opt) - fact[sect][opt] = val + try: + val = cp.get(sect, opt) + except configparser.Error as ex: + fact = f"error loading facts as ini - please check content: {fn} ({ex})" + module.warn(fact) + continue + else: + fact[sect][opt] = val except Exception as e: fact = "Failed to convert (%s) to JSON: %s" % (fn, to_text(e)) module.warn(fact) diff --git a/lib/ansible/module_utils/facts/system/lsb.py b/lib/ansible/module_utils/facts/system/lsb.py index 2dc1433fd06..5767536b1d7 100644 --- a/lib/ansible/module_utils/facts/system/lsb.py +++ b/lib/ansible/module_utils/facts/system/lsb.py @@ -15,8 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os diff --git a/lib/ansible/module_utils/facts/system/pkg_mgr.py b/lib/ansible/module_utils/facts/system/pkg_mgr.py index 1555c3ec4d2..e9da18647b8 100644 --- a/lib/ansible/module_utils/facts/system/pkg_mgr.py +++ b/lib/ansible/module_utils/facts/system/pkg_mgr.py @@ -2,8 +2,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import subprocess @@ -16,11 +15,11 @@ from ansible.module_utils.facts.collector import BaseFactCollector # package manager, put the preferred one last. If there is an # ansible module, use that as the value for the 'name' key. 
PKG_MGRS = [{'path': '/usr/bin/rpm-ostree', 'name': 'atomic_container'}, - {'path': '/usr/bin/yum', 'name': 'yum'}, # NOTE the `path` key for dnf/dnf5 is effectively discarded when matched for Red Hat OS family, # special logic to infer the default `pkg_mgr` is used in `PkgMgrFactCollector._check_rh_versions()` # leaving them here so a list of package modules can be constructed by iterating over `name` keys + {'path': '/usr/bin/yum', 'name': 'dnf'}, {'path': '/usr/bin/dnf-3', 'name': 'dnf'}, {'path': '/usr/bin/dnf5', 'name': 'dnf5'}, @@ -46,7 +45,6 @@ PKG_MGRS = [{'path': '/usr/bin/rpm-ostree', 'name': 'atomic_container'}, {'path': '/usr/bin/swupd', 'name': 'swupd'}, {'path': '/usr/sbin/sorcery', 'name': 'sorcery'}, {'path': '/usr/bin/installp', 'name': 'installp'}, - {'path': '/QOpenSys/pkgs/bin/yum', 'name': 'yum'}, ] @@ -70,35 +68,18 @@ class PkgMgrFactCollector(BaseFactCollector): super(PkgMgrFactCollector, self).__init__(*args, **kwargs) self._default_unknown_pkg_mgr = 'unknown' - def _check_rh_versions(self, pkg_mgr_name, collected_facts): + def _check_rh_versions(self): if os.path.exists('/run/ostree-booted'): return "atomic_container" - # Reset whatever was matched from PKG_MGRS, infer the default pkg_mgr below - pkg_mgr_name = self._default_unknown_pkg_mgr # Since /usr/bin/dnf and /usr/bin/microdnf can point to different versions of dnf in different distributions # the only way to infer the default package manager is to look at the binary they are pointing to. # /usr/bin/microdnf is likely used only in fedora minimal container so /usr/bin/dnf takes precedence for bin_path in ('/usr/bin/dnf', '/usr/bin/microdnf'): if os.path.exists(bin_path): - pkg_mgr_name = 'dnf5' if os.path.realpath(bin_path) == '/usr/bin/dnf5' else 'dnf' - break - - try: - distro_major_ver = int(collected_facts['ansible_distribution_major_version']) - except ValueError: - # a non integer magical future version - return self._default_unknown_pkg_mgr - - if ( - (collected_facts['ansible_distribution'] == 'Fedora' and distro_major_ver < 23) - or (collected_facts['ansible_distribution'] == 'Amazon' and distro_major_ver < 2022) - or (collected_facts['ansible_distribution'] == 'TencentOS' and distro_major_ver < 3) - or distro_major_ver < 8 # assume RHEL or a clone - ) and any(pm for pm in PKG_MGRS if pm['name'] == 'yum' and os.path.exists(pm['path'])): - pkg_mgr_name = 'yum' + return 'dnf5' if os.path.realpath(bin_path) == '/usr/bin/dnf5' else 'dnf' - return pkg_mgr_name + return self._default_unknown_pkg_mgr def _check_apt_flavor(self, pkg_mgr_name): # Check if '/usr/bin/apt' is APT-RPM or an ordinary (dpkg-based) APT. @@ -139,9 +120,9 @@ class PkgMgrFactCollector(BaseFactCollector): # installed or available to the distro, the ansible_fact entry should be # the default package manager officially supported by the distro. if collected_facts['ansible_os_family'] == "RedHat": - pkg_mgr_name = self._check_rh_versions(pkg_mgr_name, collected_facts) + pkg_mgr_name = self._check_rh_versions() elif collected_facts['ansible_os_family'] == 'Debian' and pkg_mgr_name != 'apt': - # It's possible to install yum, dnf, zypper, rpm, etc inside of + # It's possible to install dnf, zypper, rpm, etc inside of # Debian. Doing so does not mean the system wants to use them. 
pkg_mgr_name = 'apt' elif collected_facts['ansible_os_family'] == 'Altlinux': diff --git a/lib/ansible/module_utils/facts/system/platform.py b/lib/ansible/module_utils/facts/system/platform.py index b9478015a72..94819861b4b 100644 --- a/lib/ansible/module_utils/facts/system/platform.py +++ b/lib/ansible/module_utils/facts/system/platform.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import socket diff --git a/lib/ansible/module_utils/facts/system/python.py b/lib/ansible/module_utils/facts/system/python.py index 50b66dde3e4..0252c0c96a7 100644 --- a/lib/ansible/module_utils/facts/system/python.py +++ b/lib/ansible/module_utils/facts/system/python.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import sys diff --git a/lib/ansible/module_utils/facts/system/selinux.py b/lib/ansible/module_utils/facts/system/selinux.py index 5c6b012bb30..c110f17e720 100644 --- a/lib/ansible/module_utils/facts/system/selinux.py +++ b/lib/ansible/module_utils/facts/system/selinux.py @@ -15,8 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import ansible.module_utils.compat.typing as t diff --git a/lib/ansible/module_utils/facts/system/service_mgr.py b/lib/ansible/module_utils/facts/system/service_mgr.py index 701def99c0b..20257967c1e 100644 --- a/lib/ansible/module_utils/facts/system/service_mgr.py +++ b/lib/ansible/module_utils/facts/system/service_mgr.py @@ -15,8 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import platform @@ -107,7 +106,7 @@ class ServiceMgrFactCollector(BaseFactCollector): proc_1 = proc_1.strip() if proc_1 is not None and (proc_1 == 'init' or proc_1.endswith('sh')): - # many systems return init, so this cannot be trusted, if it ends in 'sh' it probalby is a shell in a container + # many systems return init, so this cannot be trusted, if it ends in 'sh' it probably is a shell in a container proc_1 = None # if not init/None it should be an identifiable or custom init, so we are done! @@ -145,6 +144,8 @@ class ServiceMgrFactCollector(BaseFactCollector): service_mgr_name = 'systemd' elif os.path.exists('/etc/init.d/'): service_mgr_name = 'sysvinit' + elif os.path.exists('/etc/dinit.d/'): + service_mgr_name = 'dinit' if not service_mgr_name: # if we cannot detect, fallback to generic 'service' diff --git a/lib/ansible/module_utils/facts/system/ssh_pub_keys.py b/lib/ansible/module_utils/facts/system/ssh_pub_keys.py index 85691c73c85..7214dea3de6 100644 --- a/lib/ansible/module_utils/facts/system/ssh_pub_keys.py +++ b/lib/ansible/module_utils/facts/system/ssh_pub_keys.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
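The simplified _check_rh_versions() earlier in this patch drops the distro-version table for yum entirely and instead inspects what /usr/bin/dnf actually resolves to, since the symlink target is the only reliable signal across Fedora/RHEL variants. A sketch of that resolution, with the paths taken from the diff:

    import os

    def default_rh_pkg_mgr(unknown='unknown'):
        if os.path.exists('/run/ostree-booted'):
            return 'atomic_container'
        # /usr/bin/microdnf only matters in minimal containers, so the
        # plain dnf path is probed first.
        for bin_path in ('/usr/bin/dnf', '/usr/bin/microdnf'):
            if os.path.exists(bin_path):
                # dnf5 installs itself behind the same entry point; the
                # realpath target is what distinguishes it from dnf4.
                return 'dnf5' if os.path.realpath(bin_path) == '/usr/bin/dnf5' else 'dnf'
        return unknown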
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import ansible.module_utils.compat.typing as t diff --git a/lib/ansible/module_utils/facts/system/systemd.py b/lib/ansible/module_utils/facts/system/systemd.py new file mode 100644 index 00000000000..3ba2bbfcbdf --- /dev/null +++ b/lib/ansible/module_utils/facts/system/systemd.py @@ -0,0 +1,47 @@ +# Get systemd version and features +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import annotations + +import ansible.module_utils.compat.typing as t + +from ansible.module_utils.facts.collector import BaseFactCollector +from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector + + +class SystemdFactCollector(BaseFactCollector): + name = "systemd" + _fact_ids = set() # type: t.Set[str] + + def collect(self, module=None, collected_facts=None): + systemctl_bin = module.get_bin_path("systemctl") + systemd_facts = {} + if systemctl_bin and ServiceMgrFactCollector.is_systemd_managed(module): + rc, stdout, dummy = module.run_command( + [systemctl_bin, "--version"], + check_rc=False, + ) + + if rc != 0: + return systemd_facts + + systemd_facts["systemd"] = { + "features": str(stdout.split("\n")[1]), + "version": int(stdout.split(" ")[1]), + } + + return systemd_facts diff --git a/lib/ansible/module_utils/facts/system/user.py b/lib/ansible/module_utils/facts/system/user.py index 2efa9935c4f..64b8fef8be6 100644 --- a/lib/ansible/module_utils/facts/system/user.py +++ b/lib/ansible/module_utils/facts/system/user.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import getpass import os diff --git a/lib/ansible/module_utils/facts/timeout.py b/lib/ansible/module_utils/facts/timeout.py index ebb71cc6986..3b0476245b8 100644 --- a/lib/ansible/module_utils/facts/timeout.py +++ b/lib/ansible/module_utils/facts/timeout.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
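The new SystemdFactCollector above derives both of its fields from the first two lines of `systemctl --version`: the first line carries the version as its second token, the second line carries the compile-time feature flags. A sketch of just the parse, assuming that output shape (the sample string below is illustrative):

    def parse_systemctl_version(stdout):
        """Split systemctl --version output into version and features."""
        lines = stdout.split("\n")
        return {
            # Second whitespace-separated token of the first line, e.g. 252.
            "version": int(stdout.split(" ")[1]),
            # Second line is the feature string.
            "features": str(lines[1]),
        }

    # parse_systemctl_version("systemd 252 (252.22)\n+PAM +AUDIT +SELINUX")
    # -> {"version": 252, "features": "+PAM +AUDIT +SELINUX"}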
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import multiprocessing import multiprocessing.pool as mp @@ -49,7 +48,7 @@ def timeout(seconds=None, error_message="Timer expired"): return res.get(timeout_value) except multiprocessing.TimeoutError: # This is an ansible.module_utils.common.facts.timeout.TimeoutError - raise TimeoutError('Timer expired after %s seconds' % timeout_value) + raise TimeoutError(f'{error_message} after {timeout_value} seconds') finally: pool.terminate() diff --git a/lib/ansible/module_utils/facts/utils.py b/lib/ansible/module_utils/facts/utils.py index a6027ab5dbc..9131cd1c965 100644 --- a/lib/ansible/module_utils/facts/utils.py +++ b/lib/ansible/module_utils/facts/utils.py @@ -13,15 +13,14 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import fcntl import os def get_file_content(path, default=None, strip=True): - ''' + """ Return the contents of a given file path :args path: path to file to return contents from @@ -29,7 +28,7 @@ def get_file_content(path, default=None, strip=True): :args strip: controls if we strip whitespace from the result or not :returns: String with file contents (optionally stripped) or 'default' value - ''' + """ data = default if os.path.exists(path) and os.access(path, os.R_OK): datafile = None @@ -63,7 +62,7 @@ def get_file_content(path, default=None, strip=True): def get_file_lines(path, strip=True, line_sep=None): - '''get list of lines from file''' + """get list of lines from file""" data = get_file_content(path, strip=strip) if data: if line_sep is None: diff --git a/lib/ansible/module_utils/facts/virtual/base.py b/lib/ansible/module_utils/facts/virtual/base.py index 67b59a5503a..943ce406d86 100644 --- a/lib/ansible/module_utils/facts/virtual/base.py +++ b/lib/ansible/module_utils/facts/virtual/base.py @@ -16,8 +16,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import ansible.module_utils.compat.typing as t diff --git a/lib/ansible/module_utils/facts/virtual/dragonfly.py b/lib/ansible/module_utils/facts/virtual/dragonfly.py index b176f8bf53c..8e1aa0dcd28 100644 --- a/lib/ansible/module_utils/facts/virtual/dragonfly.py +++ b/lib/ansible/module_utils/facts/virtual/dragonfly.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.facts.virtual.freebsd import FreeBSDVirtual, VirtualCollector diff --git a/lib/ansible/module_utils/facts/virtual/freebsd.py b/lib/ansible/module_utils/facts/virtual/freebsd.py index 7062d019843..819aa029ddc 100644 --- a/lib/ansible/module_utils/facts/virtual/freebsd.py +++ b/lib/ansible/module_utils/facts/virtual/freebsd.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
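On the timeout.py fix above: the decorator runs the wrapped call in a one-thread pool and converts multiprocessing.TimeoutError into the module's own TimeoutError, and the change makes the caller-supplied error_message actually appear in that exception instead of a hardcoded string. A condensed sketch of the mechanism, under the assumption that a fixed default timeout is acceptable:

    import multiprocessing
    import multiprocessing.pool as mp
    from functools import wraps

    class TimeoutError(Exception):
        pass

    def timeout(seconds=10, error_message="Timer expired"):
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                pool = mp.ThreadPool(processes=1)
                res = pool.apply_async(func, args, kwargs)
                pool.close()
                try:
                    return res.get(seconds)
                except multiprocessing.TimeoutError:
                    # Surface the configured message, per the fix above.
                    raise TimeoutError(f'{error_message} after {seconds} seconds')
                finally:
                    pool.terminate()
            return wrapper
        return decorator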
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os diff --git a/lib/ansible/module_utils/facts/virtual/hpux.py b/lib/ansible/module_utils/facts/virtual/hpux.py index 10574827e44..5164aab835b 100644 --- a/lib/ansible/module_utils/facts/virtual/hpux.py +++ b/lib/ansible/module_utils/facts/virtual/hpux.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import re diff --git a/lib/ansible/module_utils/facts/virtual/linux.py b/lib/ansible/module_utils/facts/virtual/linux.py index 31fa061749a..57b047b11a1 100644 --- a/lib/ansible/module_utils/facts/virtual/linux.py +++ b/lib/ansible/module_utils/facts/virtual/linux.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import glob import os @@ -176,7 +175,7 @@ class LinuxVirtual(Virtual): virtual_facts['virtualization_type'] = 'RHEV' found_virt = True - if product_name in ('VMware Virtual Platform', 'VMware7,1'): + if product_name and product_name.startswith(("VMware",)): guest_tech.add('VMware') if not found_virt: virtual_facts['virtualization_type'] = 'VMware' diff --git a/lib/ansible/module_utils/facts/virtual/netbsd.py b/lib/ansible/module_utils/facts/virtual/netbsd.py index b4ef14ed046..1689ac30414 100644 --- a/lib/ansible/module_utils/facts/virtual/netbsd.py +++ b/lib/ansible/module_utils/facts/virtual/netbsd.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os diff --git a/lib/ansible/module_utils/facts/virtual/openbsd.py b/lib/ansible/module_utils/facts/virtual/openbsd.py index c449028d42d..5c12df809fa 100644 --- a/lib/ansible/module_utils/facts/virtual/openbsd.py +++ b/lib/ansible/module_utils/facts/virtual/openbsd.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re diff --git a/lib/ansible/module_utils/facts/virtual/sunos.py b/lib/ansible/module_utils/facts/virtual/sunos.py index 1e92677e4ce..7a595f701a5 100644 --- a/lib/ansible/module_utils/facts/virtual/sunos.py +++ b/lib/ansible/module_utils/facts/virtual/sunos.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os diff --git a/lib/ansible/module_utils/facts/virtual/sysctl.py b/lib/ansible/module_utils/facts/virtual/sysctl.py index 1c7b2b34848..649f335ad72 100644 --- a/lib/ansible/module_utils/facts/virtual/sysctl.py +++ b/lib/ansible/module_utils/facts/virtual/sysctl.py @@ -13,8 +13,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
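The VMware hunk in virtual/linux.py above replaces an exact product-name whitelist ('VMware Virtual Platform', 'VMware7,1') with a startswith() prefix check, so newer DMI product strings keep matching. The guard ordering matters, sketched here with hypothetical surrounding names:

    def detect_vmware(product_name, guest_tech, virtual_facts, found_virt):
        # product_name may be None when DMI data is unavailable, so the
        # truthiness check must come before startswith().
        if product_name and product_name.startswith(("VMware",)):
            guest_tech.add('VMware')
            if not found_virt:
                virtual_facts['virtualization_type'] = 'VMware'
                found_virt = True
        return found_virt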
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re diff --git a/lib/ansible/module_utils/json_utils.py b/lib/ansible/module_utils/json_utils.py index 1ec971ccd2b..01fd2661d72 100644 --- a/lib/ansible/module_utils/json_utils.py +++ b/lib/ansible/module_utils/json_utils.py @@ -24,8 +24,7 @@ # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json # pylint: disable=unused-import @@ -33,13 +32,13 @@ import json # pylint: disable=unused-import # NB: a copy of this function exists in ../../modules/core/async_wrapper.py. Ensure any # changes are propagated there. def _filter_non_json_lines(data, objects_only=False): - ''' + """ Used to filter unrelated output around module JSON output, like messages from tcagetattr, or where dropbear spews MOTD on every single command (which is nuts). Filters leading lines before first line-starting occurrence of '{' or '[', and filter all trailing lines after matching close character (working from the bottom of output). - ''' + """ warnings = [] # Filter initial junk diff --git a/lib/ansible/module_utils/parsing/convert_bool.py b/lib/ansible/module_utils/parsing/convert_bool.py index fb331d89cf8..3367b2a09fa 100644 --- a/lib/ansible/module_utils/parsing/convert_bool.py +++ b/lib/ansible/module_utils/parsing/convert_bool.py @@ -1,8 +1,7 @@ # Copyright: 2017, Ansible Project # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause ) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.six import binary_type, text_type from ansible.module_utils.common.text.converters import to_text diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1 index f40c3384cbc..3a1a317ec66 100644 --- a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1 +++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1 @@ -37,7 +37,7 @@ Function Add-CSharpType { .PARAMETER CompileSymbols [String[]] A list of symbols to be defined during compile time. These are added to the existing symbols, 'CORECLR', 'WINDOWS', 'UNIX' that are set - conditionalls in this cmdlet. + conditionally in this cmdlet. 
.NOTES The following features were added to control the compiling options from the @@ -75,7 +75,7 @@ Function Add-CSharpType { [Switch]$IgnoreWarnings, [Switch]$PassThru, [Parameter(Mandatory = $true, ParameterSetName = "Module")][Object]$AnsibleModule, - [Parameter(ParameterSetName = "Manual")][String]$TempPath = $env:TMP, + [Parameter(ParameterSetName = "Manual")][String]$TempPath, [Parameter(ParameterSetName = "Manual")][Switch]$IncludeDebugInfo, [String[]]$CompileSymbols = @() ) @@ -280,9 +280,11 @@ Function Add-CSharpType { $include_debug = $AnsibleModule.Verbosity -ge 3 } else { - $temp_path = $TempPath + $temp_path = [System.IO.Path]::GetTempPath() $include_debug = $IncludeDebugInfo.IsPresent } + $temp_path = Join-Path -Path $temp_path -ChildPath ([Guid]::NewGuid().Guid) + $compiler_options = [System.Collections.ArrayList]@("/optimize") if ($defined_symbols.Count -gt 0) { $compiler_options.Add("/define:" + ([String]::Join(";", $defined_symbols.ToArray()))) > $null @@ -304,8 +306,12 @@ Function Add-CSharpType { ) # create a code snippet for each reference and check if we need - # to reference any extra assemblies - $ignore_warnings = [System.Collections.ArrayList]@() + # to reference any extra assemblies. + # CS1610 is a warning when csc.exe failed to delete temporary files. + # We use our own temp dir deletion mechanism so this doesn't become a + # fatal error. + # https://github.com/ansible-collections/ansible.windows/issues/598 + $ignore_warnings = [System.Collections.ArrayList]@('1610') $compile_units = [System.Collections.Generic.List`1[System.CodeDom.CodeSnippetCompileUnit]]@() foreach ($reference in $References) { # scan through code and add any assemblies that match @@ -373,7 +379,26 @@ Function Add-CSharpType { } } - $compile = $provider.CompileAssemblyFromDom($compile_parameters, $compile_units) + $null = New-Item -Path $temp_path -ItemType Directory -Force + try { + $compile = $provider.CompileAssemblyFromDom($compile_parameters, $compile_units) + } + finally { + # Try to delete the temp path, if this fails and we are running + # with a module object write a warning instead of failing. + try { + [System.IO.Directory]::Delete($temp_path, $true) + } + catch { + $msg = "Failed to cleanup temporary directory '$temp_path' used for compiling C# code." + if ($AnsibleModule) { + $AnsibleModule.Warn("$msg Files may still be present after the task is complete. 
Error: $_") + } + else { + throw "$msg Error: $_" + } + } + } } finally { foreach ($kvp in $originalEnv.GetEnumerator()) { diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1 index 9b86f84188a..fb9fb11c490 100644 --- a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1 +++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1 @@ -4,7 +4,7 @@ # used by Convert-DictToSnakeCase to convert a string in camelCase # format to snake_case Function Convert-StringToSnakeCase($string) { - # cope with pluralized abbreaviations such as TargetGroupARNs + # cope with pluralized abbreviations such as TargetGroupARNs if ($string -cmatch "[A-Z]{3,}s") { $replacement_string = $string -creplace $matches[0], "_$($matches[0].ToLower())" diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1 index 4aea98b24f8..a716c3a5590 100644 --- a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1 +++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1 @@ -372,8 +372,11 @@ Function Get-PendingRebootStatus { <# .SYNOPSIS Check if reboot is required, if so notify CA. - Function returns true if computer has a pending reboot -#> + Function returns true if computer has a pending reboot. + + People should not be using this function, it is kept + just for backwards compatibility. + #> $featureData = Invoke-CimMethod -EA Ignore -Name GetServerFeature -Namespace root\microsoft\windows\servermanager -Class MSFT_ServerManagerTasks $regData = Get-ItemProperty "HKLM:\SYSTEM\CurrentControlSet\Control\Session Manager" "PendingFileRenameOperations" -EA Ignore $CBSRebootStatus = Get-ChildItem "HKLM:\\SOFTWARE\Microsoft\Windows\CurrentVersion\Component Based Servicing" -ErrorAction SilentlyContinue | diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.WebRequest.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.WebRequest.psm1 index b59ba72f23f..29e5be1673e 100644 --- a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.WebRequest.psm1 +++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.WebRequest.psm1 @@ -355,7 +355,7 @@ Function Invoke-WithWebRequest { .PARAMETER Module The Ansible.Basic module to set the return values for. This will set the following return values; elapsed - The total time, in seconds, that it took to send the web request and process the response - msg - The human readable description of the response status code + msg - The human-readable description of the response status code status_code - An int that is the response status code .PARAMETER Request diff --git a/lib/ansible/module_utils/pycompat24.py b/lib/ansible/module_utils/pycompat24.py deleted file mode 100644 index d57f968a8c7..00000000000 --- a/lib/ansible/module_utils/pycompat24.py +++ /dev/null @@ -1,53 +0,0 @@ -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. 
-# -# Copyright (c) 2016, Toshio Kuratomi -# Copyright (c) 2015, Marius Gedminas -# -# Redistribution and use in source and binary forms, with or without modification, -# are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import sys - - -def get_exception(): - """Get the current exception. - - This code needs to work on Python 2.4 through 3.x, so we cannot use - "except Exception, e:" (SyntaxError on Python 3.x) nor - "except Exception as e:" (SyntaxError on Python 2.4-2.5). - Instead we must use :: - - except Exception: - e = get_exception() - - """ - return sys.exc_info()[1] - - -from ast import literal_eval - - -__all__ = ('get_exception', 'literal_eval') diff --git a/lib/ansible/module_utils/service.py b/lib/ansible/module_utils/service.py index 075adfd9e8e..6d3ecea4b8d 100644 --- a/lib/ansible/module_utils/service.py +++ b/lib/ansible/module_utils/service.py @@ -26,8 +26,7 @@ # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import glob import os @@ -43,13 +42,13 @@ from ansible.module_utils.common.text.converters import to_bytes, to_text def sysv_is_enabled(name, runlevel=None): - ''' + """ This function will check if the service name supplied is enabled in any of the sysv runlevels :arg name: name of the service to test for :kw runlevel: runlevel to check (default: None) - ''' + """ if runlevel: if not os.path.isdir('/etc/rc0.d/'): return bool(glob.glob('/etc/init.d/rc%s.d/S??%s' % (runlevel, name))) @@ -61,12 +60,12 @@ def sysv_is_enabled(name, runlevel=None): def get_sysv_script(name): - ''' + """ This function will return the expected path for an init script corresponding to the service name supplied. :arg name: name or path of the service to test for - ''' + """ if name.startswith('/'): result = name else: @@ -76,19 +75,19 @@ def get_sysv_script(name): def sysv_exists(name): - ''' + """ This function will return True or False depending on the existence of an init script corresponding to the service name supplied. 
:arg name: name of the service to test for - ''' + """ return os.path.exists(get_sysv_script(name)) def get_ps(module, pattern): - ''' + """ Last resort to find a service by trying to match pattern to programs in memory - ''' + """ found = False if platform.system() == 'SunOS': flags = '-ef' @@ -107,24 +106,24 @@ def get_ps(module, pattern): def fail_if_missing(module, found, service, msg=''): - ''' + """ This function will return an error or exit gracefully depending on check mode status and if the service is missing or not. - :arg module: is an AnsibleModule object, used for it's utility methods - :arg found: boolean indicating if services was found or not + :arg module: is an AnsibleModule object, used for its utility methods + :arg found: boolean indicating if services were found or not :arg service: name of service :kw msg: extra info to append to error/success msg when missing - ''' + """ if not found: module.fail_json(msg='Could not find the requested service %s: %s' % (service, msg)) def fork_process(): - ''' + """ This function performs the double fork process to detach from the parent process and execute. - ''' + """ pid = os.fork() if pid == 0: @@ -148,9 +147,7 @@ def fork_process(): os._exit(0) # get new process session and detach - sid = os.setsid() - if sid == -1: - raise Exception("Unable to detach session while daemonizing") + os.setsid() # avoid possible problems with cwd being removed os.chdir("/") @@ -163,16 +160,16 @@ def fork_process(): def daemonize(module, cmd): - ''' + """ Execute a command while detaching as a daemon, returns rc, stdout, and stderr. - :arg module: is an AnsibleModule object, used for it's utility methods + :arg module: is an AnsibleModule object, used for its utility methods :arg cmd: is a list or string representing the command and options to run This is complex because daemonization is hard for people. What we do is daemonize a part of this module, the daemon runs the command, picks up the return code and output, and returns it to the main process. - ''' + """ # init some vars chunk = 4096 # FIXME: pass in as arg? @@ -182,10 +179,8 @@ def daemonize(module, cmd): try: pipe = os.pipe() pid = fork_process() - except OSError: + except (OSError, RuntimeError): module.fail_json(msg="Error while attempting to fork: %s", exception=traceback.format_exc()) - except Exception as exc: - module.fail_json(msg=to_text(exc), exception=traceback.format_exc()) # we don't do any locking as this should be a unique module/process if pid == 0: @@ -215,9 +210,10 @@ def daemonize(module, cmd): for out in list(fds): if out in rfd: data = os.read(out.fileno(), chunk) - if not data: + if data: + output[out] += to_bytes(data, errors=errors) + else: fds.remove(out) - output[out] += data else: break @@ -248,7 +244,7 @@ def daemonize(module, cmd): data = os.read(pipe[0], chunk) if not data: break - return_data += data + return_data += to_bytes(data, errors=errors) # Note: no need to specify encoding on py3 as this module sends the # pickle to itself (thus same python interpreter so we aren't mixing @@ -274,3 +270,30 @@ def check_ps(module, pattern): if pattern in line: return True return False + + +def is_systemd_managed(module): + """ + Find out if the machine supports systemd or not + :arg module: is an AnsibleModule object, used for its utility methods + + Returns True if the system supports systemd, False if not. 
+ """ + # tools must be installed + if module.get_bin_path('systemctl'): + # This should show if systemd is the boot init system, if checking init failed to mark as systemd + # these mirror systemd's own sd_boot test http://www.freedesktop.org/software/systemd/man/sd_booted.html + for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]: + if os.path.exists(canary): + return True + + # If all else fails, check if init is the systemd command, using comm as cmdline could be symlink + try: + with open('/proc/1/comm', 'r') as init_proc: + init = init_proc.readline().strip() + return init == 'systemd' + except IOError: + # If comm doesn't exist, old kernel, no systemd + return False + + return False diff --git a/lib/ansible/module_utils/six/__init__.py b/lib/ansible/module_utils/six/__init__.py index f2d41c83517..4e74af7c00e 100644 --- a/lib/ansible/module_utils/six/__init__.py +++ b/lib/ansible/module_utils/six/__init__.py @@ -25,7 +25,7 @@ """Utilities for writing code that runs on Python 2 and 3""" -from __future__ import absolute_import +from __future__ import annotations import functools import itertools diff --git a/lib/ansible/module_utils/splitter.py b/lib/ansible/module_utils/splitter.py index c170b1cf7c8..5ae3393fd60 100644 --- a/lib/ansible/module_utils/splitter.py +++ b/lib/ansible/module_utils/splitter.py @@ -26,15 +26,14 @@ # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations def _get_quote_state(token, quote_char): - ''' + """ the goal of this block is to determine if the quoted string is unterminated in which case it needs to be put back together - ''' + """ # the char before the current one, used to see if # the current character is escaped prev_char = None @@ -51,11 +50,11 @@ def _get_quote_state(token, quote_char): def _count_jinja2_blocks(token, cur_depth, open_token, close_token): - ''' + """ this function counts the number of opening/closing blocks for a given opening/closing type and adjusts the current depth for that block based on the difference - ''' + """ num_open = token.count(open_token) num_close = token.count(close_token) if num_open != num_close: @@ -66,7 +65,7 @@ def _count_jinja2_blocks(token, cur_depth, open_token, close_token): def split_args(args): - ''' + """ Splits args on whitespace, but intelligently reassembles those that may have been split over a jinja2 block or quotes. @@ -79,10 +78,10 @@ def split_args(args): Basically this is a variation shlex that has some more intelligence for how Ansible needs to use it. 
- ''' + """ # the list of params parsed out of the arg string - # this is going to be the result value when we are donei + # this is going to be the result value when we are done params = [] # here we encode the args, so we have a uniform charset to @@ -213,7 +212,7 @@ def is_quoted(data): def unquote(data): - ''' removes first and last quotes from a string, if the string starts and ends with the same quotes ''' + """ removes first and last quotes from a string, if the string starts and ends with the same quotes """ if is_quoted(data): return data[1:-1] return data diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 1c904e0f98f..c90f0b78fd4 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -6,56 +7,51 @@ # # Copyright (c), Michael DeHaan , 2012-2013 # Copyright (c), Toshio Kuratomi , 2015 +# Copyright: Contributors to the Ansible project # # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) -# -# The match_hostname function and supporting code is under the terms and -# conditions of the Python Software Foundation License. They were taken from -# the Python3 standard library and adapted for use in Python2. See comments in the -# source for which code precisely is under this License. -# -# PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0) -''' -The **urls** utils module offers a replacement for the urllib2 python library. +""" +The **urls** utils module offers a replacement for the urllib python library. -urllib2 is the python stdlib way to retrieve files from the Internet but it +urllib is the python stdlib way to retrieve files from the Internet but it lacks some security features (around verifying SSL certificates) that users should care about in most situations. Using the functions in this module corrects -deficiencies in the urllib2 module wherever possible. +deficiencies in the urllib module wherever possible. There are also third-party libraries (for instance, requests) which can be used -to replace urllib2 with a more secure library. However, all third party libraries +to replace urllib with a more secure library. However, all third party libraries require that the library be installed on the managed machine. That is an extra step for users making use of a module. If possible, avoid third party libraries by using this code instead. 
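
# A minimal sketch of the replacement API this docstring describes, assuming only
# the documented open_url interface; the URL is illustrative:
#
#     from ansible.module_utils.urls import open_url
#
#     resp = open_url('https://example.com/api', validate_certs=True, timeout=10)
#     body = resp.read()
#     content_type = resp.headers.get('content-type')
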
-''' +""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -import atexit import base64 +import email.mime.application import email.mime.multipart import email.mime.nonmultipart -import email.mime.application import email.parser +import email.policy import email.utils -import functools -import io +import http.client import mimetypes import netrc import os import platform import re import socket -import sys import tempfile import traceback import types # pylint: disable=unused-import - +import urllib.error +import urllib.request from contextlib import contextmanager +from http import cookiejar +from urllib.parse import unquote, urlparse, urlunparse +from urllib.request import BaseHandler try: import gzip @@ -68,123 +64,16 @@ except ImportError: else: GzipFile = gzip.GzipFile # type: ignore[assignment,misc] -try: - import email.policy -except ImportError: - # Py2 - import email.generator - -try: - import httplib -except ImportError: - # Python 3 - import http.client as httplib # type: ignore[no-redef] - -import ansible.module_utils.compat.typing as t -import ansible.module_utils.six.moves.http_cookiejar as cookiejar -import ansible.module_utils.six.moves.urllib.error as urllib_error - +from ansible.module_utils.basic import missing_required_lib from ansible.module_utils.common.collections import Mapping, is_sequence -from ansible.module_utils.six import PY2, PY3, string_types -from ansible.module_utils.six.moves import cStringIO -from ansible.module_utils.basic import get_distribution, missing_required_lib from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text -try: - # python3 - import urllib.request as urllib_request - from urllib.request import AbstractHTTPHandler, BaseHandler -except ImportError: - # python2 - import urllib2 as urllib_request # type: ignore[no-redef] - from urllib2 import AbstractHTTPHandler, BaseHandler # type: ignore[no-redef] - -urllib_request.HTTPRedirectHandler.http_error_308 = urllib_request.HTTPRedirectHandler.http_error_307 # type: ignore[attr-defined,assignment] - -try: - from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse, unquote - HAS_URLPARSE = True -except Exception: - HAS_URLPARSE = False - try: import ssl HAS_SSL = True except Exception: HAS_SSL = False -try: - # SNI Handling needs python2.7.9's SSLContext - from ssl import create_default_context, SSLContext # pylint: disable=unused-import - HAS_SSLCONTEXT = True -except ImportError: - HAS_SSLCONTEXT = False - -# SNI Handling for python < 2.7.9 with urllib3 support -HAS_URLLIB3_PYOPENSSLCONTEXT = False -HAS_URLLIB3_SSL_WRAP_SOCKET = False -if not HAS_SSLCONTEXT: - try: - # urllib3>=1.15 - try: - from urllib3.contrib.pyopenssl import PyOpenSSLContext - except Exception: - from requests.packages.urllib3.contrib.pyopenssl import PyOpenSSLContext - HAS_URLLIB3_PYOPENSSLCONTEXT = True - except Exception: - # urllib3<1.15,>=1.6 - try: - try: - from urllib3.contrib.pyopenssl import ssl_wrap_socket - except Exception: - from requests.packages.urllib3.contrib.pyopenssl import ssl_wrap_socket - HAS_URLLIB3_SSL_WRAP_SOCKET = True - except Exception: - pass - -# Select a protocol that includes all secure tls protocols -# Exclude insecure ssl protocols if possible - -if HAS_SSL: - # If we can't find extra tls methods, ssl.PROTOCOL_TLSv1 is sufficient - PROTOCOL = ssl.PROTOCOL_TLSv1 -if not HAS_SSLCONTEXT and HAS_SSL: - try: - import ctypes - import ctypes.util - except ImportError: - # 
python 2.4 (likely rhel5 which doesn't have tls1.1 support in its openssl) - pass - else: - libssl_name = ctypes.util.find_library('ssl') - libssl = ctypes.CDLL(libssl_name) - for method in ('TLSv1_1_method', 'TLSv1_2_method'): - try: - libssl[method] # pylint: disable=pointless-statement - # Found something - we'll let openssl autonegotiate and hope - # the server has disabled sslv2 and 3. best we can do. - PROTOCOL = ssl.PROTOCOL_SSLv23 - break - except AttributeError: - pass - del libssl - - -# The following makes it easier for us to script updates of the bundled backports.ssl_match_hostname -# The bundled backports.ssl_match_hostname should really be moved into its own file for processing -_BUNDLED_METADATA = {"pypi_name": "backports.ssl_match_hostname", "version": "3.7.0.1"} - -LOADED_VERIFY_LOCATIONS = set() # type: t.Set[str] - -HAS_MATCH_HOSTNAME = True -try: - from ssl import match_hostname, CertificateError -except ImportError: - try: - from backports.ssl_match_hostname import match_hostname, CertificateError # type: ignore[assignment] - except ImportError: - HAS_MATCH_HOSTNAME = False - HAS_CRYPTOGRAPHY = True try: from cryptography import x509 @@ -226,7 +115,7 @@ try: if self._context: return - parsed = generic_urlparse(urlparse(req.get_full_url())) + parsed = urlparse(req.get_full_url()) auth_header = self.get_auth_value(headers) if not auth_header: @@ -259,7 +148,7 @@ try: cbt = gssapi.raw.ChannelBindings(application_data=b"tls-server-end-point:" + cert_hash) # TODO: We could add another option that is set to include the port in the SPN if desired in the future. - target = gssapi.Name("HTTP@%s" % parsed['hostname'], gssapi.NameType.hostbased_service) + target = gssapi.Name("HTTP@%s" % parsed.hostname, gssapi.NameType.hostbased_service) self._context = gssapi.SecurityContext(usage="initiate", name=target, creds=cred, channel_bindings=cbt) resp = None @@ -284,213 +173,9 @@ except ImportError: GSSAPI_IMP_ERR = traceback.format_exc() HTTPGSSAPIAuthHandler = None # type: types.ModuleType | None # type: ignore[no-redef] -if not HAS_MATCH_HOSTNAME: - # The following block of code is under the terms and conditions of the - # Python Software Foundation License - - # The match_hostname() function from Python 3.4, essential when using SSL. - - try: - # Divergence: Python-3.7+'s _ssl has this exception type but older Pythons do not - from _ssl import SSLCertVerificationError - CertificateError = SSLCertVerificationError # type: ignore[misc] - except ImportError: - class CertificateError(ValueError): # type: ignore[no-redef] - pass - - def _dnsname_match(dn, hostname): - """Matching according to RFC 6125, section 6.4.3 - - - Hostnames are compared lower case. - - For IDNA, both dn and hostname must be encoded as IDN A-label (ACE). - - Partial wildcards like 'www*.example.org', multiple wildcards, sole - wildcard or wildcards in labels other then the left-most label are not - supported and a CertificateError is raised. - - A wildcard must match at least one character. - """ - if not dn: - return False - - wildcards = dn.count('*') - # speed up common case w/o wildcards - if not wildcards: - return dn.lower() == hostname.lower() - - if wildcards > 1: - # Divergence .format() to percent formatting for Python < 2.6 - raise CertificateError( - "too many wildcards in certificate DNS name: %s" % repr(dn)) - - dn_leftmost, sep, dn_remainder = dn.partition('.') - - if '*' in dn_remainder: - # Only match wildcard in leftmost segment. 
- # Divergence .format() to percent formatting for Python < 2.6 - raise CertificateError( - "wildcard can only be present in the leftmost label: " - "%s." % repr(dn)) - - if not sep: - # no right side - # Divergence .format() to percent formatting for Python < 2.6 - raise CertificateError( - "sole wildcard without additional labels are not support: " - "%s." % repr(dn)) - - if dn_leftmost != '*': - # no partial wildcard matching - # Divergence .format() to percent formatting for Python < 2.6 - raise CertificateError( - "partial wildcards in leftmost label are not supported: " - "%s." % repr(dn)) - - hostname_leftmost, sep, hostname_remainder = hostname.partition('.') - if not hostname_leftmost or not sep: - # wildcard must match at least one char - return False - return dn_remainder.lower() == hostname_remainder.lower() - - def _inet_paton(ipname): - """Try to convert an IP address to packed binary form - - Supports IPv4 addresses on all platforms and IPv6 on platforms with IPv6 - support. - """ - # inet_aton() also accepts strings like '1' - # Divergence: We make sure we have native string type for all python versions - try: - b_ipname = to_bytes(ipname, errors='strict') - except UnicodeError: - raise ValueError("%s must be an all-ascii string." % repr(ipname)) - - # Set ipname in native string format - if sys.version_info < (3,): - n_ipname = b_ipname - else: - n_ipname = ipname - - if n_ipname.count('.') == 3: - try: - return socket.inet_aton(n_ipname) - # Divergence: OSError on late python3. socket.error earlier. - # Null bytes generate ValueError on python3(we want to raise - # ValueError anyway), TypeError # earlier - except (OSError, socket.error, TypeError): - pass - - try: - return socket.inet_pton(socket.AF_INET6, n_ipname) - # Divergence: OSError on late python3. socket.error earlier. - # Null bytes generate ValueError on python3(we want to raise - # ValueError anyway), TypeError # earlier - except (OSError, socket.error, TypeError): - # Divergence .format() to percent formatting for Python < 2.6 - raise ValueError("%s is neither an IPv4 nor an IP6 " - "address." % repr(ipname)) - except AttributeError: - # AF_INET6 not available - pass - - # Divergence .format() to percent formatting for Python < 2.6 - raise ValueError("%s is not an IPv4 address." % repr(ipname)) - - def _ipaddress_match(ipname, host_ip): - """Exact matching of IP addresses. - - RFC 6125 explicitly doesn't define an algorithm for this - (section 1.7.2 - "Out of Scope"). - """ - # OpenSSL may add a trailing newline to a subjectAltName's IP address - ip = _inet_paton(ipname.rstrip()) - return ip == host_ip - - def match_hostname(cert, hostname): # type: ignore[misc] - """Verify that *cert* (in decoded format as returned by - SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 - rules are followed. - - The function matches IP addresses rather than dNSNames if hostname is a - valid ipaddress string. IPv4 addresses are supported on all platforms. - IPv6 addresses are supported on platforms with IPv6 support (AF_INET6 - and inet_pton). - - CertificateError is raised on failure. On success, the function - returns nothing. - """ - if not cert: - raise ValueError("empty or no certificate, match_hostname needs a " - "SSL socket or SSL context with either " - "CERT_OPTIONAL or CERT_REQUIRED") - try: - # Divergence: Deal with hostname as bytes - host_ip = _inet_paton(to_text(hostname, errors='strict')) - except UnicodeError: - # Divergence: Deal with hostname as byte strings. 
- # IP addresses should be all ascii, so we consider it not - # an IP address if this fails - host_ip = None - except ValueError: - # Not an IP address (common case) - host_ip = None - dnsnames = [] - san = cert.get('subjectAltName', ()) - for key, value in san: - if key == 'DNS': - if host_ip is None and _dnsname_match(value, hostname): - return - dnsnames.append(value) - elif key == 'IP Address': - if host_ip is not None and _ipaddress_match(value, host_ip): - return - dnsnames.append(value) - if not dnsnames: - # The subject is only checked when there is no dNSName entry - # in subjectAltName - for sub in cert.get('subject', ()): - for key, value in sub: - # XXX according to RFC 2818, the most specific Common Name - # must be used. - if key == 'commonName': - if _dnsname_match(value, hostname): - return - dnsnames.append(value) - if len(dnsnames) > 1: - raise CertificateError("hostname %r doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames)))) - elif len(dnsnames) == 1: - raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0])) - else: - raise CertificateError("no appropriate commonName or subjectAltName fields were found") - - # End of Python Software Foundation Licensed code - - HAS_MATCH_HOSTNAME = True - - -# This is a dummy cacert provided for macOS since you need at least 1 -# ca cert, regardless of validity, for Python on macOS to use the -# keychain functionality in OpenSSL for validating SSL certificates. -# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher -b_DUMMY_CA_CERT = b"""-----BEGIN CERTIFICATE----- -MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV -BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt -MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy -MlowSTELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYD -VQQHEwZEdXJoYW0xEDAOBgNVBAoTB0Fuc2libGUwgZ8wDQYJKoZIhvcNAQEBBQAD -gY0AMIGJAoGBANtvpPq3IlNlRbCHhZAcP6WCzhc5RbsDqyh1zrkmLi0GwcQ3z/r9 -gaWfQBYhHpobK2Tiq11TfraHeNB3/VfNImjZcGpN8Fl3MWwu7LfVkJy3gNNnxkA1 -4Go0/LmIvRFHhbzgfuo9NFgjPmmab9eqXJceqZIlz2C8xA7EeG7ku0+vAgMBAAGj -gaswgagwHQYDVR0OBBYEFPnN1nPRqNDXGlCqCvdZchRNi/FaMHkGA1UdIwRyMHCA -FPnN1nPRqNDXGlCqCvdZchRNi/FaoU2kSzBJMQswCQYDVQQGEwJVUzEXMBUGA1UE -CBMOTm9ydGggQ2Fyb2xpbmExDzANBgNVBAcTBkR1cmhhbTEQMA4GA1UEChMHQW5z -aWJsZYIJAO8E12S7/qEpMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA -MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH -qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV -zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg= ------END CERTIFICATE----- -""" -b_PEM_CERT_RE = re.compile( - br'^-----BEGIN CERTIFICATE-----\n.+?-----END CERTIFICATE-----$', +PEM_CERT_RE = re.compile( + r'^-----BEGIN CERTIFICATE-----\n.+?-----END CERTIFICATE-----$', flags=re.M | re.S ) @@ -510,143 +195,82 @@ class ProxyError(ConnectionError): class SSLValidationError(ConnectionError): - """Failure to connect due to SSL validation failing""" + """Failure to connect due to SSL validation failing + + No longer used, but kept for backwards compatibility + """ pass class NoSSLError(SSLValidationError): - """Needed to connect to an HTTPS url but no ssl library available to verify the certificate""" + """Needed to connect to an HTTPS url but no ssl library available to verify the certificate + + No longer used, but kept for backwards compatibility + """ pass class MissingModuleError(Exception): """Failed to import 3rd party module required by the caller""" def __init__(self, message, import_traceback, 
module=None): - super(MissingModuleError, self).__init__(message) + super().__init__(message) self.import_traceback = import_traceback self.module = module -# Some environments (Google Compute Engine's CoreOS deploys) do not compile -# against openssl and thus do not have any HTTPS support. -CustomHTTPSConnection = None -CustomHTTPSHandler = None -HTTPSClientAuthHandler = None +UnixHTTPSHandler = None UnixHTTPSConnection = None -if hasattr(httplib, 'HTTPSConnection') and hasattr(urllib_request, 'HTTPSHandler'): - class CustomHTTPSConnection(httplib.HTTPSConnection): # type: ignore[no-redef] - def __init__(self, client_cert=None, client_key=None, *args, **kwargs): - httplib.HTTPSConnection.__init__(self, *args, **kwargs) - self.context = None - if HAS_SSLCONTEXT: - self.context = self._context - elif HAS_URLLIB3_PYOPENSSLCONTEXT: - self.context = self._context = PyOpenSSLContext(PROTOCOL) - - self._client_cert = client_cert - self._client_key = client_key - if self.context and self._client_cert: - self.context.load_cert_chain(self._client_cert, self._client_key) - - def connect(self): - "Connect to a host on a given (SSL) port." - - if hasattr(self, 'source_address'): - sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address) - else: - sock = socket.create_connection((self.host, self.port), self.timeout) - - server_hostname = self.host - # Note: self._tunnel_host is not available on py < 2.6 but this code - # isn't used on py < 2.6 (lack of create_connection) - if self._tunnel_host: - self.sock = sock - self._tunnel() - server_hostname = self._tunnel_host - - if HAS_SSLCONTEXT or HAS_URLLIB3_PYOPENSSLCONTEXT: - self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname) - elif HAS_URLLIB3_SSL_WRAP_SOCKET: - self.sock = ssl_wrap_socket(sock, keyfile=self._client_key, cert_reqs=ssl.CERT_NONE, # pylint: disable=used-before-assignment - certfile=self._client_cert, ssl_version=PROTOCOL, server_hostname=server_hostname) - else: - self.sock = ssl.wrap_socket(sock, keyfile=self._client_key, certfile=self._client_cert, ssl_version=PROTOCOL) - - class CustomHTTPSHandler(urllib_request.HTTPSHandler): # type: ignore[no-redef] - - def https_open(self, req): - kwargs = {} - if HAS_SSLCONTEXT: - kwargs['context'] = self._context - return self.do_open( - functools.partial( - CustomHTTPSConnection, - **kwargs - ), - req - ) - - https_request = AbstractHTTPHandler.do_request_ - - class HTTPSClientAuthHandler(urllib_request.HTTPSHandler): # type: ignore[no-redef] - '''Handles client authentication via cert/key - - This is a fairly lightweight extension on HTTPSHandler, and can be used - in place of HTTPSHandler - ''' - - def __init__(self, client_cert=None, client_key=None, unix_socket=None, **kwargs): - urllib_request.HTTPSHandler.__init__(self, **kwargs) - self.client_cert = client_cert - self.client_key = client_key - self._unix_socket = unix_socket - - def https_open(self, req): - return self.do_open(self._build_https_connection, req) - - def _build_https_connection(self, host, **kwargs): - try: - kwargs['context'] = self._context - except AttributeError: - pass - if self._unix_socket: - return UnixHTTPSConnection(self._unix_socket)(host, **kwargs) - if not HAS_SSLCONTEXT: - return CustomHTTPSConnection(host, client_cert=self.client_cert, client_key=self.client_key, **kwargs) - return httplib.HTTPSConnection(host, **kwargs) - +if HAS_SSL: @contextmanager def unix_socket_patch_httpconnection_connect(): - '''Monkey patch ``httplib.HTTPConnection.connect`` to 
be ``UnixHTTPConnection.connect`` + """Monkey patch ``http.client.HTTPConnection.connect`` to be ``UnixHTTPConnection.connect`` so that when calling ``super(UnixHTTPSConnection, self).connect()`` we get the correct behavior of creating self.sock for the unix socket - ''' - _connect = httplib.HTTPConnection.connect - httplib.HTTPConnection.connect = UnixHTTPConnection.connect + """ + _connect = http.client.HTTPConnection.connect + http.client.HTTPConnection.connect = UnixHTTPConnection.connect yield - httplib.HTTPConnection.connect = _connect + http.client.HTTPConnection.connect = _connect - class UnixHTTPSConnection(httplib.HTTPSConnection): # type: ignore[no-redef] + class UnixHTTPSConnection(http.client.HTTPSConnection): # type: ignore[no-redef] def __init__(self, unix_socket): self._unix_socket = unix_socket def connect(self): # This method exists simply to ensure we monkeypatch - # httplib.HTTPConnection.connect to call UnixHTTPConnection.connect + # http.client.HTTPConnection.connect to call UnixHTTPConnection.connect with unix_socket_patch_httpconnection_connect(): # Disable pylint check for the super() call. It complains about UnixHTTPSConnection # being a NoneType because of the initial definition above, but it won't actually # be a NoneType when this code runs - # pylint: disable=bad-super-call - super(UnixHTTPSConnection, self).connect() + super().connect() def __call__(self, *args, **kwargs): - httplib.HTTPSConnection.__init__(self, *args, **kwargs) + super().__init__(*args, **kwargs) return self + class UnixHTTPSHandler(urllib.request.HTTPSHandler): # type: ignore[no-redef] + def __init__(self, unix_socket, **kwargs): + super().__init__(**kwargs) + self._unix_socket = unix_socket + + def https_open(self, req): + kwargs = {} + try: + # deprecated: description='deprecated check_hostname' python_version='3.12' + kwargs['check_hostname'] = self._check_hostname + except AttributeError: + pass + return self.do_open( + UnixHTTPSConnection(self._unix_socket), + req, + context=self._context, + **kwargs + ) -class UnixHTTPConnection(httplib.HTTPConnection): - '''Handles http requests to a unix socket file''' + +class UnixHTTPConnection(http.client.HTTPConnection): + """Handles http requests to a unix socket file""" def __init__(self, unix_socket): self._unix_socket = unix_socket @@ -661,15 +285,15 @@ class UnixHTTPConnection(httplib.HTTPConnection): self.sock.settimeout(self.timeout) def __call__(self, *args, **kwargs): - httplib.HTTPConnection.__init__(self, *args, **kwargs) + super().__init__(*args, **kwargs) return self -class UnixHTTPHandler(urllib_request.HTTPHandler): - '''Handler for Unix urls''' +class UnixHTTPHandler(urllib.request.HTTPHandler): + """Handler for Unix urls""" def __init__(self, unix_socket, **kwargs): - urllib_request.HTTPHandler.__init__(self, **kwargs) + super().__init__(**kwargs) self._unix_socket = unix_socket def http_open(self, req): @@ -677,118 +301,60 @@ class UnixHTTPHandler(urllib_request.HTTPHandler): class ParseResultDottedDict(dict): - ''' + """ A dict that acts similarly to the ParseResult named tuple from urllib - ''' + """ def __init__(self, *args, **kwargs): - super(ParseResultDottedDict, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.__dict__ = self def as_list(self): - ''' + """ Generate a list from this dict, that looks like the ParseResult named tuple - ''' + """ return [self.get(k, None) for k in ('scheme', 'netloc', 'path', 'params', 'query', 'fragment')] def generic_urlparse(parts): - ''' + """ Returns a 
dictionary of url parts as parsed by urlparse, but accounts for the fact that older versions of that library do not support named attributes (ie. .netloc) - ''' - generic_parts = ParseResultDottedDict() - if hasattr(parts, 'netloc'): - # urlparse is newer, just read the fields straight - # from the parts object - generic_parts['scheme'] = parts.scheme - generic_parts['netloc'] = parts.netloc - generic_parts['path'] = parts.path - generic_parts['params'] = parts.params - generic_parts['query'] = parts.query - generic_parts['fragment'] = parts.fragment - generic_parts['username'] = parts.username - generic_parts['password'] = parts.password - hostname = parts.hostname - if hostname and hostname[0] == '[' and '[' in parts.netloc and ']' in parts.netloc: - # Py2.6 doesn't parse IPv6 addresses correctly - hostname = parts.netloc.split(']')[0][1:].lower() - generic_parts['hostname'] = hostname - - try: - port = parts.port - except ValueError: - # Py2.6 doesn't parse IPv6 addresses correctly - netloc = parts.netloc.split('@')[-1].split(']')[-1] - if ':' in netloc: - port = netloc.split(':')[1] - if port: - port = int(port) - else: - port = None - generic_parts['port'] = port - else: - # we have to use indexes, and then parse out - # the other parts not supported by indexing - generic_parts['scheme'] = parts[0] - generic_parts['netloc'] = parts[1] - generic_parts['path'] = parts[2] - generic_parts['params'] = parts[3] - generic_parts['query'] = parts[4] - generic_parts['fragment'] = parts[5] - # get the username, password, etc. - try: - netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$') - match = netloc_re.match(parts[1]) - auth = match.group(1) - hostname = match.group(2) - port = match.group(3) - if port: - # the capture group for the port will include the ':', - # so remove it and convert the port to an integer - port = int(port[1:]) - if auth: - # the capture group above includes the @, so remove it - # and then split it up based on the first ':' found - auth = auth[:-1] - username, password = auth.split(':', 1) - else: - username = password = None - generic_parts['username'] = username - generic_parts['password'] = password - generic_parts['hostname'] = hostname - generic_parts['port'] = port - except Exception: - generic_parts['username'] = None - generic_parts['password'] = None - generic_parts['hostname'] = parts[1] - generic_parts['port'] = None - return generic_parts - -def extract_pem_certs(b_data): - for match in b_PEM_CERT_RE.finditer(b_data): + This method isn't of much use any longer, but is kept + in a minimal state for backwards compat. 
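
# A small sketch of what the slimmed-down generic_urlparse now returns, assuming
# only the stdlib urlparse contract; the URL is illustrative:
#
#     from urllib.parse import urlparse
#
#     parts = generic_urlparse(urlparse('https://user:pw@host.example:8443/p'))
#     parts.hostname   # 'host.example'
#     parts.port       # 8443
#     parts['scheme']  # 'https'
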
+ """ + result = ParseResultDottedDict(parts._asdict()) + result.update({ + 'username': parts.username, + 'password': parts.password, + 'hostname': parts.hostname, + 'port': parts.port, + }) + return result + + +def extract_pem_certs(data): + for match in PEM_CERT_RE.finditer(data): yield match.group(0) def get_response_filename(response): - url = response.geturl() - path = urlparse(url)[2] - filename = os.path.basename(path.rstrip('/')) or None - if filename: - filename = unquote(filename) + if filename := response.headers.get_param('filename', header='content-disposition'): + filename = os.path.basename(filename) + else: + url = response.geturl() + path = urlparse(url)[2] + filename = os.path.basename(path.rstrip('/')) or None + if filename: + filename = unquote(filename) - return response.headers.get_param('filename', header='content-disposition') or filename + return filename def parse_content_type(response): - if PY2: - get_type = response.headers.gettype - get_param = response.headers.getparam - else: - get_type = response.headers.get_content_type - get_param = response.headers.get_param - + get_type = response.headers.get_content_type + get_param = response.headers.get_param content_type = (get_type() or 'application/octet-stream').split(',')[0] main_type, sub_type = content_type.split('/') charset = (get_param('charset') or 'utf-8').split(',')[0] @@ -805,17 +371,8 @@ class GzipDecodedReader(GzipFile): if not HAS_GZIP: raise MissingModuleError(self.missing_gzip_error(), import_traceback=GZIP_IMP_ERR) - if PY3: - self._io = fp - else: - # Py2 ``HTTPResponse``/``addinfourl`` doesn't support all of the file object - # functionality GzipFile requires - self._io = io.BytesIO() - for block in iter(functools.partial(fp.read, 65536), b''): - self._io.write(block) - self._io.seek(0) - fp.close() - gzip.GzipFile.__init__(self, mode='rb', fileobj=self._io) # pylint: disable=non-parent-init-called + self._io = fp + super().__init__(mode='rb', fileobj=self._io) def close(self): try: @@ -832,432 +389,206 @@ class GzipDecodedReader(GzipFile): ) -class RequestWithMethod(urllib_request.Request): - ''' - Workaround for using DELETE/PUT/etc with urllib2 - Originally contained in library/net_infrastructure/dnsmadeeasy - ''' +class HTTPRedirectHandler(urllib.request.HTTPRedirectHandler): + """This is an implementation of a RedirectHandler to match the + functionality provided by httplib2. It will utilize the value of + ``follow_redirects`` to determine how redirects should be handled in + urllib. 
+ """ + + def __init__(self, follow_redirects=None): + self.follow_redirects = follow_redirects - def __init__(self, url, method, data=None, headers=None, origin_req_host=None, unverifiable=True): - if headers is None: - headers = {} - self._method = method.upper() - urllib_request.Request.__init__(self, url, data, headers, origin_req_host, unverifiable) + def __call__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + return self - def get_method(self): - if self._method: - return self._method - else: - return urllib_request.Request.get_method(self) + try: + urllib.request.HTTPRedirectHandler.http_error_308 # type: ignore[attr-defined] + except AttributeError: + # deprecated: description='urllib http 308 support' python_version='3.11' + http_error_308 = urllib.request.HTTPRedirectHandler.http_error_302 + def redirect_request(self, req, fp, code, msg, headers, newurl): + follow_redirects = self.follow_redirects -def RedirectHandlerFactory(follow_redirects=None, validate_certs=True, ca_path=None, ciphers=None): - """This is a class factory that closes over the value of - ``follow_redirects`` so that the RedirectHandler class has access to - that value without having to use globals, and potentially cause problems - where ``open_url`` or ``fetch_url`` are used multiple times in a module. - """ + # Preserve urllib2 compatibility + if follow_redirects in ('urllib2', 'urllib'): + return urllib.request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, headers, newurl) - class RedirectHandler(urllib_request.HTTPRedirectHandler): - """This is an implementation of a RedirectHandler to match the - functionality provided by httplib2. It will utilize the value of - ``follow_redirects`` that is passed into ``RedirectHandlerFactory`` - to determine how redirects should be handled in urllib2. 
- """ + # Handle disabled redirects + elif follow_redirects in ('no', 'none', False): + raise urllib.error.HTTPError(newurl, code, msg, headers, fp) - def redirect_request(self, req, fp, code, msg, headers, newurl): - if not any((HAS_SSLCONTEXT, HAS_URLLIB3_PYOPENSSLCONTEXT)): - handler = maybe_add_ssl_handler(newurl, validate_certs, ca_path=ca_path, ciphers=ciphers) - if handler: - urllib_request._opener.add_handler(handler) - - # Preserve urllib2 compatibility - if follow_redirects == 'urllib2': - return urllib_request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, headers, newurl) - - # Handle disabled redirects - elif follow_redirects in ['no', 'none', False]: - raise urllib_error.HTTPError(newurl, code, msg, headers, fp) - - method = req.get_method() - - # Handle non-redirect HTTP status or invalid follow_redirects - if follow_redirects in ['all', 'yes', True]: - if code < 300 or code >= 400: - raise urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp) - elif follow_redirects == 'safe': - if code < 300 or code >= 400 or method not in ('GET', 'HEAD'): - raise urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp) - else: - raise urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp) + method = req.get_method() - try: - # Python 2-3.3 - data = req.get_data() - origin_req_host = req.get_origin_req_host() - except AttributeError: - # Python 3.4+ - data = req.data - origin_req_host = req.origin_req_host + # Handle non-redirect HTTP status or invalid follow_redirects + if follow_redirects in ('all', 'yes', True): + if code < 300 or code >= 400: + raise urllib.error.HTTPError(req.get_full_url(), code, msg, headers, fp) + elif follow_redirects == 'safe': + if code < 300 or code >= 400 or method not in ('GET', 'HEAD'): + raise urllib.error.HTTPError(req.get_full_url(), code, msg, headers, fp) + else: + raise urllib.error.HTTPError(req.get_full_url(), code, msg, headers, fp) - # Be conciliant with URIs containing a space - newurl = newurl.replace(' ', '%20') + data = req.data + origin_req_host = req.origin_req_host - # Support redirect with payload and original headers - if code in (307, 308): - # Preserve payload and headers - req_headers = req.headers - else: - # Do not preserve payload and filter headers - data = None - req_headers = dict((k, v) for k, v in req.headers.items() - if k.lower() not in ("content-length", "content-type", "transfer-encoding")) - - # http://tools.ietf.org/html/rfc7231#section-6.4.4 - if code == 303 and method != 'HEAD': - method = 'GET' - - # Do what the browsers do, despite standards... - # First, turn 302s into GETs. - if code == 302 and method != 'HEAD': - method = 'GET' - - # Second, if a POST is responded to with a 301, turn it into a GET. - if code == 301 and method == 'POST': - method = 'GET' - - return RequestWithMethod(newurl, - method=method, - headers=req_headers, - data=data, - origin_req_host=origin_req_host, - unverifiable=True, - ) - - return RedirectHandler - - -def build_ssl_validation_error(hostname, port, paths, exc=None): - '''Inteligently build out the SSLValidationError based on what support - you have installed - ''' - - msg = [ - ('Failed to validate the SSL certificate for %s:%s.' 
- ' Make sure your managed systems have a valid CA' - ' certificate installed.') - ] - if not HAS_SSLCONTEXT: - msg.append('If the website serving the url uses SNI you need' - ' python >= 2.7.9 on your managed machine') - msg.append(' (the python executable used (%s) is version: %s)' % - (sys.executable, ''.join(sys.version.splitlines()))) - if not HAS_URLLIB3_PYOPENSSLCONTEXT and not HAS_URLLIB3_SSL_WRAP_SOCKET: - msg.append('or you can install the `urllib3`, `pyOpenSSL`,' - ' `ndg-httpsclient`, and `pyasn1` python modules') - - msg.append('to perform SNI verification in python >= 2.6.') - - msg.append('You can use validate_certs=False if you do' - ' not need to confirm the servers identity but this is' - ' unsafe and not recommended.' - ' Paths checked for this platform: %s.') - - if exc: - msg.append('The exception msg was: %s.' % to_native(exc)) - - raise SSLValidationError(' '.join(msg) % (hostname, port, ", ".join(paths))) - - -def atexit_remove_file(filename): - if os.path.exists(filename): - try: - os.unlink(filename) - except Exception: - # just ignore if we cannot delete, things should be ok - pass + # Be conciliant with URIs containing a space + newurl = newurl.replace(' ', '%20') + + # Support redirect with payload and original headers + if code in (307, 308): + # Preserve payload and headers + req_headers = req.headers + else: + # Do not preserve payload and filter headers + data = None + req_headers = {k: v for k, v in req.headers.items() + if k.lower() not in ("content-length", "content-type", "transfer-encoding")} + + # http://tools.ietf.org/html/rfc7231#section-6.4.4 + if code == 303 and method != 'HEAD': + method = 'GET' + + # Do what the browsers do, despite standards... + # First, turn 302s into GETs. + if code == 302 and method != 'HEAD': + method = 'GET' + + # Second, if a POST is responded to with a 301, turn it into a GET. + if code == 301 and method == 'POST': + method = 'GET' + + return urllib.request.Request( + newurl, + data=data, + headers=req_headers, + origin_req_host=origin_req_host, + unverifiable=True, + method=method.upper(), + ) -def make_context(cafile=None, cadata=None, ciphers=None, validate_certs=True, client_cert=None, client_key=None): +def make_context(cafile=None, cadata=None, capath=None, ciphers=None, validate_certs=True, client_cert=None, + client_key=None): if ciphers is None: ciphers = [] if not is_sequence(ciphers): raise TypeError('Ciphers must be a list. Got %s.' % ciphers.__class__.__name__) - if HAS_SSLCONTEXT: - context = create_default_context(cafile=cafile) - elif HAS_URLLIB3_PYOPENSSLCONTEXT: - context = PyOpenSSLContext(PROTOCOL) - else: - raise NotImplementedError('Host libraries are too old to support creating an sslcontext') + context = ssl.create_default_context(cafile=cafile) if not validate_certs: - if ssl.OP_NO_SSLv2: - context.options |= ssl.OP_NO_SSLv2 context.options |= ssl.OP_NO_SSLv3 context.check_hostname = False context.verify_mode = ssl.CERT_NONE - if validate_certs and any((cafile, cadata)): - context.load_verify_locations(cafile=cafile, cadata=cadata) + # If cafile is passed, we are only using that for verification, + # don't add additional ca certs + if validate_certs and not cafile: + if not cadata: + cadata = bytearray() + cadata.extend(get_ca_certs(capath=capath)[0]) + if cadata: + context.load_verify_locations(cadata=cadata) if ciphers: context.set_ciphers(':'.join(map(to_native, ciphers))) if client_cert: + # TLS 1.3 needs this to be set to True to allow post handshake cert + # authentication. 
This functionality was added in Python 3.8 and was
+        # backported to 3.6.7 and 3.7.1, so it needs a check for now.
+        if hasattr(context, "post_handshake_auth"):
+            context.post_handshake_auth = True
+
         context.load_cert_chain(client_cert, keyfile=client_key)
 
     return context
 
 
-def get_ca_certs(cafile=None):
+def get_ca_certs(cafile=None, capath=None):
     # tries to find a valid CA cert in one of the
     # standard locations for the current distribution
-    cadata = bytearray()
-    paths_checked = []
+    # Using a dict instead of a set to preserve insertion order; the values are meaningless, always None
+    # Not using a bytearray directly, so duplicate certs can be skipped with a fast lookup
+    cadata = {}
 
+    # If cafile is passed, we are only using that for verification,
+    # don't add additional ca certs
     if cafile:
         paths_checked = [cafile]
-        with open(to_bytes(cafile, errors='surrogate_or_strict'), 'rb') as f:
-            if HAS_SSLCONTEXT:
-                for b_pem in extract_pem_certs(f.read()):
-                    cadata.extend(
-                        ssl.PEM_cert_to_DER_cert(
-                            to_native(b_pem, errors='surrogate_or_strict')
-                        )
-                    )
-        return cafile, cadata, paths_checked
-
-    if not HAS_SSLCONTEXT:
-        paths_checked.append('/etc/ssl/certs')
+        with open(to_bytes(cafile, errors='surrogate_or_strict'), 'r', errors='surrogateescape') as f:
+            for pem in extract_pem_certs(f.read()):
+                b_der = ssl.PEM_cert_to_DER_cert(pem)
+                cadata[b_der] = None
+        return bytearray().join(cadata), paths_checked
+
+    default_verify_paths = ssl.get_default_verify_paths()
+    default_capath = default_verify_paths.capath
+    paths_checked = {default_capath or default_verify_paths.cafile}
+
+    if capath:
+        paths_checked.add(capath)
 
     system = to_text(platform.system(), errors='surrogate_or_strict')
     # build a list of paths to check for .crt/.pem files
     # based on the platform type
     if system == u'Linux':
-        paths_checked.append('/etc/pki/ca-trust/extracted/pem')
-        paths_checked.append('/etc/pki/tls/certs')
-        paths_checked.append('/usr/share/ca-certificates/cacert.org')
+        paths_checked.add('/etc/pki/ca-trust/extracted/pem')
+        paths_checked.add('/etc/pki/tls/certs')
+        paths_checked.add('/usr/share/ca-certificates/cacert.org')
     elif system == u'FreeBSD':
-        paths_checked.append('/usr/local/share/certs')
+        paths_checked.add('/usr/local/share/certs')
     elif system == u'OpenBSD':
-        paths_checked.append('/etc/ssl')
+        paths_checked.add('/etc/ssl')
     elif system == u'NetBSD':
-        paths_checked.append('/etc/openssl/certs')
+        paths_checked.add('/etc/openssl/certs')
     elif system == u'SunOS':
-        paths_checked.append('/opt/local/etc/openssl/certs')
+        paths_checked.add('/opt/local/etc/openssl/certs')
     elif system == u'AIX':
-        paths_checked.append('/var/ssl/certs')
-        paths_checked.append('/opt/freeware/etc/ssl/certs')
+        paths_checked.add('/var/ssl/certs')
+        paths_checked.add('/opt/freeware/etc/ssl/certs')
+    elif system == u'Darwin':
+        paths_checked.add('/usr/local/etc/openssl')
 
     # fall back to a user-deployed cert in a standard
     # location if the OS platform one is not available
-    paths_checked.append('/etc/ansible')
-
-    tmp_path = None
-    if not HAS_SSLCONTEXT:
-        tmp_fd, tmp_path = tempfile.mkstemp()
-        atexit.register(atexit_remove_file, tmp_path)
-
-    # Write the dummy ca cert if we are running on macOS
-    if system == u'Darwin':
-        if HAS_SSLCONTEXT:
-            cadata.extend(
-                ssl.PEM_cert_to_DER_cert(
-                    to_native(b_DUMMY_CA_CERT, errors='surrogate_or_strict')
-                )
-            )
-        else:
-            os.write(tmp_fd, b_DUMMY_CA_CERT)
-        # Default Homebrew path for OpenSSL certs
-        paths_checked.append('/usr/local/etc/openssl')
+    paths_checked.add('/etc/ansible')
 
     # for all of the paths, find any .crt or .pem
files # and compile them into single temp file for use # in the ssl check to speed up the test for path in paths_checked: - if not os.path.isdir(path): + if not path or path == default_capath or not os.path.isdir(path): continue - dir_contents = os.listdir(path) - for f in dir_contents: + for f in os.listdir(path): full_path = os.path.join(path, f) - if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt', '.pem'): + if os.path.isfile(full_path) and os.path.splitext(f)[1] in {'.pem', '.cer', '.crt'}: try: - if full_path not in LOADED_VERIFY_LOCATIONS: - with open(full_path, 'rb') as cert_file: - b_cert = cert_file.read() - if HAS_SSLCONTEXT: - try: - for b_pem in extract_pem_certs(b_cert): - cadata.extend( - ssl.PEM_cert_to_DER_cert( - to_native(b_pem, errors='surrogate_or_strict') - ) - ) - except Exception: - continue - else: - os.write(tmp_fd, b_cert) - os.write(tmp_fd, b'\n') + with open(full_path, 'r', errors='surrogateescape') as cert_file: + cert = cert_file.read() + try: + for pem in extract_pem_certs(cert): + b_der = ssl.PEM_cert_to_DER_cert(pem) + cadata[b_der] = None + except Exception: + continue except (OSError, IOError): pass - if HAS_SSLCONTEXT: - default_verify_paths = ssl.get_default_verify_paths() - paths_checked[:0] = [default_verify_paths.capath] - else: - os.close(tmp_fd) - - return (tmp_path, cadata, paths_checked) - - -class SSLValidationHandler(urllib_request.BaseHandler): - ''' - A custom handler class for SSL validation. - - Based on: - http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python - http://techknack.net/python-urllib2-handlers/ - ''' - CONNECT_COMMAND = "CONNECT %s:%s HTTP/1.0\r\n" - - def __init__(self, hostname, port, ca_path=None, ciphers=None, validate_certs=True): - self.hostname = hostname - self.port = port - self.ca_path = ca_path - self.ciphers = ciphers - self.validate_certs = validate_certs - - def get_ca_certs(self): - return get_ca_certs(self.ca_path) - - def validate_proxy_response(self, response, valid_codes=None): - ''' - make sure we get back a valid code from the proxy - ''' - valid_codes = [200] if valid_codes is None else valid_codes - - try: - (http_version, resp_code, msg) = re.match(br'(HTTP/\d\.\d) (\d\d\d) (.*)', response).groups() - if int(resp_code) not in valid_codes: - raise Exception - except Exception: - raise ProxyError('Connection to proxy failed') - - def detect_no_proxy(self, url): - ''' - Detect if the 'no_proxy' environment variable is set and honor those locations. 
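
# A compact sketch of the new verification setup, assuming the make_context
# signature above; the cafile path is illustrative:
#
#     ctx = make_context(validate_certs=True)                     # default CAs plus the distro dirs scanned by get_ca_certs()
#     ctx = make_context(cafile='/etc/pki/tls/certs/bundle.pem')  # trust only this bundle
#     ctx = make_context(validate_certs=False)                    # no verification, hostname checking disabled
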
- ''' - env_no_proxy = os.environ.get('no_proxy') - if env_no_proxy: - env_no_proxy = env_no_proxy.split(',') - netloc = urlparse(url).netloc - - for host in env_no_proxy: - if netloc.endswith(host) or netloc.split(':')[0].endswith(host): - # Our requested URL matches something in no_proxy, so don't - # use the proxy for this - return False - return True - - def make_context(self, cafile, cadata, ciphers=None, validate_certs=True): - cafile = self.ca_path or cafile - if self.ca_path: - cadata = None - else: - cadata = cadata or None - - return make_context(cafile=cafile, cadata=cadata, ciphers=ciphers, validate_certs=validate_certs) - - def http_request(self, req): - tmp_ca_cert_path, cadata, paths_checked = self.get_ca_certs() - - # Detect if 'no_proxy' environment variable is set and if our URL is included - use_proxy = self.detect_no_proxy(req.get_full_url()) - https_proxy = os.environ.get('https_proxy') - - context = None - try: - context = self.make_context(tmp_ca_cert_path, cadata, ciphers=self.ciphers, validate_certs=self.validate_certs) - except NotImplementedError: - # We'll make do with no context below - pass - - try: - if use_proxy and https_proxy: - proxy_parts = generic_urlparse(urlparse(https_proxy)) - port = proxy_parts.get('port') or 443 - proxy_hostname = proxy_parts.get('hostname', None) - if proxy_hostname is None or proxy_parts.get('scheme') == '': - raise ProxyError("Failed to parse https_proxy environment variable." - " Please make sure you export https proxy as 'https_proxy=://:'") - - s = socket.create_connection((proxy_hostname, port)) - if proxy_parts.get('scheme') == 'http': - s.sendall(to_bytes(self.CONNECT_COMMAND % (self.hostname, self.port), errors='surrogate_or_strict')) - if proxy_parts.get('username'): - credentials = "%s:%s" % (proxy_parts.get('username', ''), proxy_parts.get('password', '')) - s.sendall(b'Proxy-Authorization: Basic %s\r\n' % base64.b64encode(to_bytes(credentials, errors='surrogate_or_strict')).strip()) - s.sendall(b'\r\n') - connect_result = b"" - while connect_result.find(b"\r\n\r\n") <= 0: - connect_result += s.recv(4096) - # 128 kilobytes of headers should be enough for everyone. - if len(connect_result) > 131072: - raise ProxyError('Proxy sent too verbose headers. Only 128KiB allowed.') - self.validate_proxy_response(connect_result) - if context: - ssl_s = context.wrap_socket(s, server_hostname=self.hostname) - elif HAS_URLLIB3_SSL_WRAP_SOCKET: - ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname) - else: - ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL) - match_hostname(ssl_s.getpeercert(), self.hostname) - else: - raise ProxyError('Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' 
% proxy_parts.get('scheme')) - else: - s = socket.create_connection((self.hostname, self.port)) - if context: - ssl_s = context.wrap_socket(s, server_hostname=self.hostname) - elif HAS_URLLIB3_SSL_WRAP_SOCKET: - ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname) - else: - ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL) - match_hostname(ssl_s.getpeercert(), self.hostname) - # close the ssl connection - # ssl_s.unwrap() - s.close() - except (ssl.SSLError, CertificateError) as e: - build_ssl_validation_error(self.hostname, self.port, paths_checked, e) - except socket.error as e: - raise ConnectionError('Failed to connect to %s at port %s: %s' % (self.hostname, self.port, to_native(e))) - - return req - - https_request = http_request - - -def maybe_add_ssl_handler(url, validate_certs, ca_path=None, ciphers=None): - parsed = generic_urlparse(urlparse(url)) - if parsed.scheme == 'https' and validate_certs: - if not HAS_SSL: - raise NoSSLError('SSL validation is not available in your version of python. You can use validate_certs=False,' - ' however this is unsafe and not recommended') - - # create the SSL validation handler - return SSLValidationHandler(parsed.hostname, parsed.port or 443, ca_path=ca_path, ciphers=ciphers, validate_certs=validate_certs) + # paths_checked isn't used any more, but is kept just for ease of debugging + return bytearray().join(cadata), list(paths_checked) def getpeercert(response, binary_form=False): """ Attempt to get the peer certificate of the response from urlopen. """ - # The response from urllib2.open() is different across Python 2 and 3 - if PY3: - socket = response.fp.raw._sock - else: - socket = response.fp._sock.fp._sock + socket = response.fp.raw._sock try: return socket.getpeercert(binary_form) @@ -1280,7 +611,7 @@ def get_channel_binding_cert_hash(certificate_der): pass # If the signature hash algorithm is unknown/unsupported or md5/sha1 we must use SHA256. 
- if not hash_algorithm or hash_algorithm.name in ['md5', 'sha1']: + if not hash_algorithm or hash_algorithm.name in ('md5', 'sha1'): hash_algorithm = hashes.SHA256() digest = hashes.Hash(hash_algorithm, default_backend()) @@ -1305,11 +636,80 @@ def rfc2822_date_string(timetuple, zone='-0000'): zone) +def _configure_auth(url, url_username, url_password, use_gssapi, force_basic_auth, use_netrc): + headers = {} + handlers = [] + + parsed = urlparse(url) + if parsed.scheme == 'ftp': + return url, headers, handlers + + username = url_username + password = url_password + + if username: + netloc = parsed.netloc + elif '@' in parsed.netloc: + credentials, netloc = parsed.netloc.split('@', 1) + if ':' in credentials: + username, password = credentials.split(':', 1) + else: + username = credentials + password = '' + username = unquote(username) + password = unquote(password) + + # reconstruct url without credentials + url = urlunparse(parsed._replace(netloc=netloc)) + + if use_gssapi: + if HTTPGSSAPIAuthHandler: # type: ignore[truthy-function] + handlers.append(HTTPGSSAPIAuthHandler(username, password)) + else: + imp_err_msg = missing_required_lib('gssapi', reason='for use_gssapi=True', + url='https://pypi.org/project/gssapi/') + raise MissingModuleError(imp_err_msg, import_traceback=GSSAPI_IMP_ERR) + + elif username and not force_basic_auth: + passman = urllib.request.HTTPPasswordMgrWithDefaultRealm() + + # this creates a password manager + passman.add_password(None, netloc, username, password) + + # because we have put None at the start it will always + # use this username/password combination for urls + # for which `theurl` is a super-url + authhandler = urllib.request.HTTPBasicAuthHandler(passman) + digest_authhandler = urllib.request.HTTPDigestAuthHandler(passman) + + # create the AuthHandler + handlers.append(authhandler) + handlers.append(digest_authhandler) + + elif username and force_basic_auth: + headers["Authorization"] = basic_auth_header(username, password) + + elif use_netrc: + try: + rc = netrc.netrc(os.environ.get('NETRC')) + login = rc.authenticators(parsed.hostname) + except IOError: + login = None + + if login: + username, dummy, password = login + if username and password: + headers["Authorization"] = basic_auth_header(username, password) + + return url, headers, handlers + + class Request: def __init__(self, headers=None, use_proxy=True, force=False, timeout=10, validate_certs=True, url_username=None, url_password=None, http_agent=None, force_basic_auth=False, follow_redirects='urllib2', client_cert=None, client_key=None, cookies=None, unix_socket=None, - ca_path=None, unredirected_headers=None, decompress=True, ciphers=None, use_netrc=True): + ca_path=None, unredirected_headers=None, decompress=True, ciphers=None, use_netrc=True, + context=None): """This class works somewhat similarly to the ``Session`` class of from requests by defining a cookiejar that can be used across requests as well as cascaded defaults that can apply to repeated requests @@ -1348,6 +748,7 @@ class Request: self.decompress = decompress self.ciphers = ciphers self.use_netrc = use_netrc + self.context = context if isinstance(cookies, cookiejar.CookieJar): self.cookies = cookies else: @@ -1364,9 +765,9 @@ class Request: force_basic_auth=None, follow_redirects=None, client_cert=None, client_key=None, cookies=None, use_gssapi=False, unix_socket=None, ca_path=None, unredirected_headers=None, decompress=None, - ciphers=None, use_netrc=None): + ciphers=None, use_netrc=None, context=None): """ - Sends a request 
via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3)
+        Sends a request via HTTP(S) or FTP using urllib (Python3)
 
         Does not require the module environment
@@ -1391,7 +792,7 @@ class Request:
         :kwarg http_agent: (optional) String of the User-Agent to use in the request
         :kwarg force_basic_auth: (optional) Boolean determining if auth header should be sent in the initial request
         :kwarg follow_redirects: (optional) String of urllib2, all/yes, safe, none to determine how redirects are
-            followed, see RedirectHandlerFactory for more information
+            followed, see HTTPRedirectHandler for more information
         :kwarg client_cert: (optional) PEM formatted certificate chain file to be used for SSL client
             authentication. This file can also include the key as well, and if the key is included, client_key is not required
         :kwarg client_key: (optional) PEM formatted file that contains your private key to be used for SSL client
@@ -1406,11 +807,11 @@ class Request:
         :kwarg decompress: (optional) Whether to attempt to decompress gzip content-encoded responses
         :kwarg ciphers: (optional) List of ciphers to use
         :kwarg use_netrc: (optional) Boolean determining whether to use credentials from ~/.netrc file
+        :kwarg context: (optional) ssl.SSLContext object for SSL validation. When provided, all other SSL related
+            arguments are ignored. See make_context.
         :returns: HTTPResponse. Added in Ansible 2.9
         """
 
-        method = method.upper()
-
         if headers is None:
             headers = {}
         elif not isinstance(headers, dict):
@@ -1435,106 +836,46 @@ class Request:
         decompress = self._fallback(decompress, self.decompress)
         ciphers = self._fallback(ciphers, self.ciphers)
         use_netrc = self._fallback(use_netrc, self.use_netrc)
+        context = self._fallback(context, self.context)
 
         handlers = []
 
         if unix_socket:
             handlers.append(UnixHTTPHandler(unix_socket))
 
-        parsed = generic_urlparse(urlparse(url))
-        if parsed.scheme != 'ftp':
-            username = url_username
-            password = url_password
-
-            if username:
-                netloc = parsed.netloc
-            elif '@' in parsed.netloc:
-                credentials, netloc = parsed.netloc.split('@', 1)
-                if ':' in credentials:
-                    username, password = credentials.split(':', 1)
-                else:
-                    username = credentials
-                    password = ''
-
-                parsed_list = parsed.as_list()
-                parsed_list[1] = netloc
-
-                # reconstruct url without credentials
-                url = urlunparse(parsed_list)
-
-            if use_gssapi:
-                if HTTPGSSAPIAuthHandler:  # type: ignore[truthy-function]
-                    handlers.append(HTTPGSSAPIAuthHandler(username, password))
-                else:
-                    imp_err_msg = missing_required_lib('gssapi', reason='for use_gssapi=True',
-                                                       url='https://pypi.org/project/gssapi/')
-                    raise MissingModuleError(imp_err_msg, import_traceback=GSSAPI_IMP_ERR)
-
-            elif username and not force_basic_auth:
-                passman = urllib_request.HTTPPasswordMgrWithDefaultRealm()
-
-                # this creates a password manager
-                passman.add_password(None, netloc, username, password)
-
-                # because we have put None at the start it will always
-                # use this username/password combination for urls
-                # for which `theurl` is a super-url
-                authhandler = urllib_request.HTTPBasicAuthHandler(passman)
-                digest_authhandler = urllib_request.HTTPDigestAuthHandler(passman)
-
-                # create the AuthHandler
-                handlers.append(authhandler)
-                handlers.append(digest_authhandler)
-
-            elif username and force_basic_auth:
-                headers["Authorization"] = basic_auth_header(username, password)
-
-            elif use_netrc:
-                try:
-                    rc = netrc.netrc(os.environ.get('NETRC'))
-                    login = rc.authenticators(parsed.hostname)
-                except IOError:
-                    login = None
-
-                if login:
-                    username, dummy, password = login
-                    if
username and password: - headers["Authorization"] = basic_auth_header(username, password) + url, auth_headers, auth_handlers = _configure_auth(url, url_username, url_password, use_gssapi, force_basic_auth, use_netrc) + headers.update(auth_headers) + handlers.extend(auth_handlers) if not use_proxy: - proxyhandler = urllib_request.ProxyHandler({}) + proxyhandler = urllib.request.ProxyHandler({}) handlers.append(proxyhandler) - if not any((HAS_SSLCONTEXT, HAS_URLLIB3_PYOPENSSLCONTEXT)): - ssl_handler = maybe_add_ssl_handler(url, validate_certs, ca_path=ca_path, ciphers=ciphers) - if ssl_handler: - handlers.append(ssl_handler) - else: - tmp_ca_path, cadata, paths_checked = get_ca_certs(ca_path) + if not context: context = make_context( - cafile=tmp_ca_path, - cadata=cadata, + cafile=ca_path, ciphers=ciphers, validate_certs=validate_certs, client_cert=client_cert, client_key=client_key, ) - handlers.append(HTTPSClientAuthHandler(client_cert=client_cert, - client_key=client_key, - unix_socket=unix_socket, - context=context)) + if unix_socket: + ssl_handler = UnixHTTPSHandler(unix_socket=unix_socket, context=context) + else: + ssl_handler = urllib.request.HTTPSHandler(context=context) + handlers.append(ssl_handler) - handlers.append(RedirectHandlerFactory(follow_redirects, validate_certs, ca_path=ca_path, ciphers=ciphers)) + handlers.append(HTTPRedirectHandler(follow_redirects)) # add some nicer cookie handling if cookies is not None: - handlers.append(urllib_request.HTTPCookieProcessor(cookies)) + handlers.append(urllib.request.HTTPCookieProcessor(cookies)) - opener = urllib_request.build_opener(*handlers) - urllib_request.install_opener(opener) + opener = urllib.request.build_opener(*handlers) + urllib.request.install_opener(opener) data = to_bytes(data, nonstring='passthru') - request = RequestWithMethod(url, method, data) + request = urllib.request.Request(url, data=data, method=method.upper()) # add the custom agent header, to help prevent issues # with sites that block the default urllib agent string @@ -1558,25 +899,13 @@ class Request: else: request.add_header(header, headers[header]) - r = urllib_request.urlopen(request, None, timeout) + r = urllib.request.urlopen(request, None, timeout) if decompress and r.headers.get('content-encoding', '').lower() == 'gzip': fp = GzipDecodedReader(r.fp) - if PY3: - r.fp = fp - # Content-Length does not match gzip decoded length - # Prevent ``r.read`` from stopping at Content-Length - r.length = None - else: - # Py2 maps ``r.read`` to ``fp.read``, create new ``addinfourl`` - # object to compensate - msg = r.msg - r = urllib_request.addinfourl( - fp, - r.info(), - r.geturl(), - r.getcode() - ) - r.msg = msg + r.fp = fp + # Content-Length does not match gzip decoded length + # Prevent ``r.read`` from stopping at Content-Length + r.length = None return r def get(self, url, **kwargs): @@ -1660,11 +989,11 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True, client_cert=None, client_key=None, cookies=None, use_gssapi=False, unix_socket=None, ca_path=None, unredirected_headers=None, decompress=True, ciphers=None, use_netrc=True): - ''' - Sends a request via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3) + """ + Sends a request via HTTP(S) or FTP using urllib (Python3) Does not require the module environment - ''' + """ method = method or ('POST' if data else 'GET') return Request().open(method, url, data=data, headers=headers, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, 
validate_certs=validate_certs, @@ -1709,7 +1038,7 @@ def prepare_multipart(fields): m = email.mime.multipart.MIMEMultipart('form-data') for field, value in sorted(fields.items()): - if isinstance(value, string_types): + if isinstance(value, str): main_type = 'text' sub_type = 'plain' content = value @@ -1757,30 +1086,15 @@ def prepare_multipart(fields): m.attach(part) - if PY3: - # Ensure headers are not split over multiple lines - # The HTTP policy also uses CRLF by default - b_data = m.as_bytes(policy=email.policy.HTTP) - else: - # Py2 - # We cannot just call ``as_string`` since it provides no way - # to specify ``maxheaderlen`` - fp = cStringIO() # cStringIO seems to be required here - # Ensure headers are not split over multiple lines - g = email.generator.Generator(fp, maxheaderlen=0) - g.flatten(m) - # ``fix_eols`` switches from ``\n`` to ``\r\n`` - b_data = email.utils.fix_eols(fp.getvalue()) + # Ensure headers are not split over multiple lines + # The HTTP policy also uses CRLF by default + b_data = m.as_bytes(policy=email.policy.HTTP) del m headers, sep, b_content = b_data.partition(b'\r\n\r\n') del b_data - if PY3: - parser = email.parser.BytesHeaderParser().parsebytes - else: - # Py2 - parser = email.parser.HeaderParser().parsestr + parser = email.parser.BytesHeaderParser().parsebytes return ( parser(headers)['content-type'], # Message converts to native strings @@ -1803,10 +1117,10 @@ def basic_auth_header(username, password): def url_argument_spec(): - ''' + """ Creates an argument spec that can be used with any module that will be requesting content via urllib/urllib2 - ''' + """ return dict( url=dict(type='str'), force=dict(type='bool', default=False), @@ -1866,9 +1180,6 @@ def fetch_url(module, url, data=None, headers=None, method=None, body = info['body'] """ - if not HAS_URLPARSE: - module.fail_json(msg='urlparse is not installed') - if not HAS_GZIP: module.fail_json(msg=GzipDecodedReader.missing_gzip_error()) @@ -1894,7 +1205,7 @@ def fetch_url(module, url, data=None, headers=None, method=None, use_gssapi = module.params.get('use_gssapi', use_gssapi) if not isinstance(cookies, cookiejar.CookieJar): - cookies = cookiejar.LWPCookieJar() + cookies = cookiejar.CookieJar() r = None info = dict(url=url, status=-1) @@ -1907,25 +1218,23 @@ def fetch_url(module, url, data=None, headers=None, method=None, client_key=client_key, cookies=cookies, use_gssapi=use_gssapi, unix_socket=unix_socket, ca_path=ca_path, unredirected_headers=unredirected_headers, decompress=decompress, ciphers=ciphers, use_netrc=use_netrc) - # Lowercase keys, to conform to py2 behavior, so that py3 and py2 are predictable - info.update(dict((k.lower(), v) for k, v in r.info().items())) + # Lowercase keys, to conform to py2 behavior + info.update({k.lower(): v for k, v in r.info().items()}) # Don't be lossy, append header values for duplicate headers - # In Py2 there is nothing that needs done, py2 does this for us - if PY3: - temp_headers = {} - for name, value in r.headers.items(): - # The same as above, lower case keys to match py2 behavior, and create more consistent results - name = name.lower() - if name in temp_headers: - temp_headers[name] = ', '.join((temp_headers[name], value)) - else: - temp_headers[name] = value - info.update(temp_headers) + temp_headers = {} + for name, value in r.headers.items(): + # The same as above, lower case keys to match py2 behavior, and create more consistent results + name = name.lower() + if name in temp_headers: + temp_headers[name] = ', '.join((temp_headers[name], 
value)) + else: + temp_headers[name] = value + info.update(temp_headers) # parse the cookies into a nice dictionary cookie_list = [] - cookie_dict = dict() + cookie_dict = {} # Python sorts cookies in order of most specific (ie. longest) path first. See ``CookieJar._cookie_attrs`` # Cookies with the same path are reversed from response order. # This code makes no assumptions about that, and accepts the order given by python @@ -1937,17 +1246,11 @@ def fetch_url(module, url, data=None, headers=None, method=None, info['cookies'] = cookie_dict # finally update the result with a message about the fetch info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), url=r.geturl(), status=r.code)) - except NoSSLError as e: - distribution = get_distribution() - if distribution is not None and distribution.lower() == 'redhat': - module.fail_json(msg='%s. You can also install python-ssl from EPEL' % to_native(e), **info) - else: - module.fail_json(msg='%s' % to_native(e), **info) except (ConnectionError, ValueError) as e: module.fail_json(msg=to_native(e), **info) except MissingModuleError as e: module.fail_json(msg=to_text(e), exception=e.import_traceback) - except urllib_error.HTTPError as e: + except urllib.error.HTTPError as e: r = e try: if e.fp is None: @@ -1964,18 +1267,18 @@ def fetch_url(module, url, data=None, headers=None, method=None, # Try to add exception info to the output but don't fail if we can't try: # Lowercase keys, to conform to py2 behavior, so that py3 and py2 are predictable - info.update(dict((k.lower(), v) for k, v in e.info().items())) + info.update({k.lower(): v for k, v in e.info().items()}) except Exception: pass info.update({'msg': to_native(e), 'body': body, 'status': e.code}) - except urllib_error.URLError as e: + except urllib.error.URLError as e: code = int(getattr(e, 'code', -1)) info.update(dict(msg="Request failed: %s" % to_native(e), status=code)) except socket.error as e: info.update(dict(msg="Connection failure: %s" % to_native(e), status=-1)) - except httplib.BadStatusLine as e: + except http.client.BadStatusLine as e: info.update(dict(msg="Connection failure: connection was closed before a valid response was received: %s" % to_native(e.line), status=-1)) except Exception as e: info.update(dict(msg="An unknown error occurred: %s" % to_native(e), status=-1), @@ -2030,7 +1333,7 @@ def _split_multiext(name, min=3, max=4, count=2): def fetch_file(module, url, data=None, headers=None, method=None, use_proxy=True, force=False, last_mod_time=None, timeout=10, unredirected_headers=None, decompress=True, ciphers=None): - '''Download and save a file via HTTP(S) or FTP (needs the module as parameter). + """Download and save a file via HTTP(S) or FTP (needs the module as parameter). This is basically a wrapper around fetch_url(). :arg module: The AnsibleModule (used to get username, password etc. (s.b.). @@ -2048,7 +1351,7 @@ def fetch_file(module, url, data=None, headers=None, method=None, :kwarg ciphers: (optional) List of ciphers to use :returns: A string, the path to the downloaded file. 
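# A minimal sketch of the new `context` kwarg documented in Request.open()
# earlier in this file: when a prebuilt ssl.SSLContext is passed, the other
# SSL-related arguments (validate_certs, ca_path, ciphers, client_cert, ...)
# are ignored rather than fed to make_context(). The CA bundle path and URL
# are made-up examples.
import ssl

from ansible.module_utils.urls import Request

ctx = ssl.create_default_context(cafile='/etc/pki/tls/certs/internal-ca.pem')
response = Request().open('GET', 'https://internal.example.com/api', context=ctx)
print(response.status, response.headers.get('content-type'))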
- ''' + """ # download file bufsize = 65536 parts = urlparse(url) @@ -2058,7 +1361,7 @@ def fetch_file(module, url, data=None, headers=None, method=None, try: rsp, info = fetch_url(module, url, data, headers, method, use_proxy, force, last_mod_time, timeout, unredirected_headers=unredirected_headers, decompress=decompress, ciphers=ciphers) - if not rsp: + if not rsp or (rsp.code and rsp.code >= 400): module.fail_json(msg="Failure downloading %s, %s" % (url, info['msg'])) data = rsp.read(bufsize) while data: diff --git a/lib/ansible/module_utils/yumdnf.py b/lib/ansible/module_utils/yumdnf.py index 7eb9d5fc2b7..b2cbba3fde2 100644 --- a/lib/ansible/module_utils/yumdnf.py +++ b/lib/ansible/module_utils/yumdnf.py @@ -9,20 +9,16 @@ # - Abhijeet Kasurde (@Akasurde) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -import os -import time -import glob from abc import ABCMeta, abstractmethod -from ansible.module_utils.six import with_metaclass - yumdnf_argument_spec = dict( argument_spec=dict( allow_downgrade=dict(type='bool', default=False), + allowerasing=dict(default=False, type="bool"), autoremove=dict(type='bool', default=False), + best=dict(type="bool"), bugfix=dict(required=False, type='bool', default=False), cacheonly=dict(type='bool', default=False), conf_file=dict(type='str'), @@ -36,10 +32,14 @@ yumdnf_argument_spec = dict( enablerepo=dict(type='list', elements='str', default=[]), exclude=dict(type='list', elements='str', default=[]), installroot=dict(type='str', default="/"), - install_repoquery=dict(type='bool', default=True), + install_repoquery=dict( + type='bool', default=True, + removed_in_version='2.20', removed_from_collection='ansible.builtin', + ), install_weak_deps=dict(type='bool', default=True), list=dict(type='str'), name=dict(type='list', elements='str', aliases=['pkg'], default=[]), + nobest=dict(type="bool"), releasever=dict(default=None), security=dict(type='bool', default=False), skip_broken=dict(type='bool', default=False), @@ -52,12 +52,12 @@ yumdnf_argument_spec = dict( lock_timeout=dict(type='int', default=30), ), required_one_of=[['name', 'list', 'update_cache']], - mutually_exclusive=[['name', 'list']], + mutually_exclusive=[['name', 'list'], ['best', 'nobest']], supports_check_mode=True, ) -class YumDnf(with_metaclass(ABCMeta, object)): # type: ignore[misc] +class YumDnf(metaclass=ABCMeta): """ Abstract class that handles the population of instance variables that should be identical between both YUM and DNF modules because of the feature parity @@ -69,7 +69,9 @@ class YumDnf(with_metaclass(ABCMeta, object)): # type: ignore[misc] self.module = module self.allow_downgrade = self.module.params['allow_downgrade'] + self.allowerasing = self.module.params['allowerasing'] self.autoremove = self.module.params['autoremove'] + self.best = self.module.params['best'] self.bugfix = self.module.params['bugfix'] self.cacheonly = self.module.params['cacheonly'] self.conf_file = self.module.params['conf_file'] @@ -87,6 +89,7 @@ class YumDnf(with_metaclass(ABCMeta, object)): # type: ignore[misc] self.install_weak_deps = self.module.params['install_weak_deps'] self.list = self.module.params['list'] self.names = [p.strip() for p in self.module.params['name']] + self.nobest = self.module.params['nobest'] self.releasever = self.module.params['releasever'] self.security = self.module.params['security'] self.skip_broken 
= self.module.params['skip_broken'] @@ -127,31 +130,6 @@ class YumDnf(with_metaclass(ABCMeta, object)): # type: ignore[misc] results=[], ) - # This should really be redefined by both the yum and dnf module but a - # default isn't a bad idea - self.lockfile = '/var/run/yum.pid' - - @abstractmethod - def is_lockfile_pid_valid(self): - return - - def _is_lockfile_present(self): - return (os.path.isfile(self.lockfile) or glob.glob(self.lockfile)) and self.is_lockfile_pid_valid() - - def wait_for_lock(self): - '''Poll until the lock is removed if timeout is a positive number''' - - if not self._is_lockfile_present(): - return - - if self.lock_timeout > 0: - for iteration in range(0, self.lock_timeout): - time.sleep(1) - if not self._is_lockfile_present(): - return - - self.module.fail_json(msg='{0} lockfile is held by another process'.format(self.pkg_mgr_name)) - def listify_comma_sep_strings_in_list(self, some_list): """ method to accept a list of strings as the parameter, find any strings diff --git a/lib/ansible/modules/add_host.py b/lib/ansible/modules/add_host.py index eb9d55979a2..80a2d0aef8f 100644 --- a/lib/ansible/modules/add_host.py +++ b/lib/ansible/modules/add_host.py @@ -4,11 +4,10 @@ # Copyright: Ansible Team # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: add_host short_description: Add a host (and alternatively a group) to the ansible-playbook in-memory inventory @@ -62,7 +61,7 @@ notes: - The alias O(host) of the parameter O(name) is only available on Ansible 2.4 and newer. - Since Ansible 2.4, the C(inventory_dir) variable is now set to V(None) instead of the 'global inventory source', because you can now have multiple sources. An example was added that shows how to partially restore the previous behaviour. -- Though this module does not change the remote host, we do provide 'changed' status as it can be useful for those trying to track inventory changes. +- Though this module does not change the remote host, we do provide C(changed) status as it can be useful for those trying to track inventory changes. - The hosts added will not bypass the C(--limit) from the command line, so both of those need to be in agreement to make them available as play targets. They are still available from hostvars and for delegation as a normal part of the inventory. seealso: @@ -70,9 +69,9 @@ seealso: author: - Ansible Core Team - Seth Vidal (@skvidal) -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add host to group 'just_created' with variable foo=42 ansible.builtin.add_host: name: '{{ ip_from_ec2 }}' @@ -112,4 +111,4 @@ EXAMPLES = r''' name: '{{ item }}' groups: done loop: "{{ ansible_play_hosts }}" -''' +""" diff --git a/lib/ansible/modules/apt.py b/lib/ansible/modules/apt.py index 5c7e283c890..266165f22a2 100644 --- a/lib/ansible/modules/apt.py +++ b/lib/ansible/modules/apt.py @@ -6,11 +6,10 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: apt short_description: Manages apt-packages @@ -22,6 +21,7 @@ options: description: - A list of package names, like V(foo), or package specifier with version, like V(foo=1.0) or V(foo>=1.0). 
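# An aside on the YumDnf change in module_utils/yumdnf.py above: with six's
# with_metaclass() shim gone, the abstract base is declared natively. A
# minimal, self-contained sketch of the same pattern (class and method names
# here are invented, not the real YumDnf surface):
from abc import ABCMeta, abstractmethod

class PkgMgr(metaclass=ABCMeta):
    @abstractmethod
    def run(self):
        ...

class Dnf(PkgMgr):
    def run(self):
        return 'ran dnf'

print(Dnf().run())   # works
# PkgMgr()           # would raise TypeError: can't instantiate abstract class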
Name wildcards (fnmatch) like V(apt*) and version wildcards like V(foo=1.0*) are also supported. + - Do not use single or double quotes around the version when referring to the package name with a specific version, such as V(foo=1.0) or V(foo>=1.0). aliases: [ package, pkg ] type: list elements: str @@ -63,21 +63,20 @@ options: default: 'no' default_release: description: - - Corresponds to the C(-t) option for I(apt) and sets pin priorities + - Corresponds to the C(-t) option for I(apt) and sets pin priorities. aliases: [ default-release ] type: str install_recommends: description: - - Corresponds to the C(--no-install-recommends) option for I(apt). V(true) installs recommended packages. V(false) does not install + - Corresponds to the C(--no-install-recommends) option for C(apt). V(true) installs recommended packages. V(false) does not install recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed. aliases: [ install-recommends ] type: bool force: description: - - 'Corresponds to the C(--force-yes) to I(apt-get) and implies O(allow_unauthenticated=yes) and O(allow_downgrade=yes)' - - "This option will disable checking both the packages' signatures and the certificates of the - web servers they are downloaded from." - - 'This option *is not* the equivalent of passing the C(-f) flag to I(apt-get) on the command line' + - 'Corresponds to the C(--force-yes) to C(apt-get) and implies O(allow_unauthenticated=yes) and O(allow_downgrade=yes).' + - "This option will disable checking both the packages' signatures and the certificates of the web servers they are downloaded from." + - 'This option *is not* the equivalent of passing the C(-f) flag to C(apt-get) on the command line.' - '**This is a destructive operation with the potential to destroy your system, and it should almost never be used.** Please also see C(man apt-get) for more information.' type: bool @@ -85,7 +84,7 @@ options: clean: description: - Run the equivalent of C(apt-get clean) to clear out the local repository of retrieved package files. It removes everything but - the lock file from /var/cache/apt/archives/ and /var/cache/apt/archives/partial/. + the lock file from C(/var/cache/apt/archives/) and C(/var/cache/apt/archives/partial/). - Can be run as part of the package installation (clean runs before install) or as a separate step. type: bool default: 'no' @@ -93,7 +92,7 @@ options: allow_unauthenticated: description: - Ignore if packages cannot be authenticated. This is useful for bootstrapping environments that manage their own apt-key setup. - - 'O(allow_unauthenticated) is only supported with O(state): V(install)/V(present)' + - 'O(allow_unauthenticated) is only supported with O(state): V(install)/V(present).' aliases: [ allow-unauthenticated ] type: bool default: 'no' @@ -104,13 +103,14 @@ options: - This option enables the named package and version to replace an already installed higher version of that package. - Note that setting O(allow_downgrade=true) can make this module behave in a non-idempotent way. - (The task could end up with a set of packages that does not match the complete list of specified packages to install). + - 'O(allow_downgrade) is only supported by C(apt) and will be ignored if C(aptitude) is detected or specified.' 
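# The name and version wildcards described above are fnmatch patterns, not
# regexes. A quick stdlib illustration (the package list is invented):
import fnmatch

names = ['apt', 'apt-utils', 'aptitude', 'libapt-pkg6.0']
print(fnmatch.filter(names, 'apt*'))       # ['apt', 'apt-utils', 'aptitude']
print(fnmatch.fnmatch('1.0.5-1', '1.0*'))  # version wildcard -> True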
aliases: [ allow-downgrade, allow_downgrades, allow-downgrades ] type: bool default: 'no' version_added: "2.12" allow_change_held_packages: description: - - Allows changing the version of a package which is on the apt hold list + - Allows changing the version of a package which is on the apt hold list. type: bool default: 'no' version_added: '2.13' @@ -127,14 +127,14 @@ options: type: str dpkg_options: description: - - Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"' - - Options should be supplied as comma separated list + - Add C(dpkg) options to C(apt) command. Defaults to C(-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"). + - Options should be supplied as comma separated list. default: force-confdef,force-confold type: str deb: description: - Path to a .deb package on the remote machine. - - If :// in the path, ansible will attempt to download deb before installing. (Version added 2.1) + - If C(://) in the path, ansible will attempt to download deb before installing. (Version added 2.1) - Requires the C(xz-utils) package to extract the control file of the deb package to install. type: path required: false @@ -142,7 +142,8 @@ options: autoremove: description: - If V(true), remove unused dependency packages for all module states except V(build-dep). It can also be used as the only option. - - Previous to version 2.4, autoclean was also an alias for autoremove, now it is its own separate command. See documentation for further information. + - Previous to version 2.4, O(autoclean) was also an alias for O(autoremove), now it is its own separate command. + See documentation for further information. type: bool default: 'no' version_added: "2.1" @@ -154,10 +155,10 @@ options: version_added: "2.4" policy_rc_d: description: - - Force the exit code of /usr/sbin/policy-rc.d. - - For example, if I(policy_rc_d=101) the installed package will not trigger a service start. - - If /usr/sbin/policy-rc.d already exists, it is backed up and restored after the package installation. - - If V(null), the /usr/sbin/policy-rc.d isn't created/changed. + - Force the exit code of C(/usr/sbin/policy-rc.d). + - For example, if O(policy_rc_d=101) the installed package will not trigger a service start. + - If C(/usr/sbin/policy-rc.d) already exists, it is backed up and restored after the package installation. + - If V(null), the C(/usr/sbin/policy-rc.d) is not created/changed. type: int default: null version_added: "2.8" @@ -171,13 +172,14 @@ options: description: - 'Corresponds to the C(--no-remove) option for C(apt).' - 'If V(true), it is ensured that no packages will be removed or the task will fail.' - - 'O(fail_on_autoremove) is only supported with O(state) except V(absent)' + - 'O(fail_on_autoremove) is only supported with O(state) except V(absent).' + - 'O(fail_on_autoremove) is only supported by C(apt) and will be ignored if C(aptitude) is detected or specified.' type: bool default: 'no' version_added: "2.11" force_apt_get: description: - - Force usage of apt-get instead of aptitude + - Force usage of apt-get instead of aptitude. type: bool default: 'no' version_added: "2.4" @@ -203,19 +205,22 @@ attributes: platforms: debian notes: - Three of the upgrade modes (V(full), V(safe) and its alias V(true)) required C(aptitude) up to 2.3, since 2.4 C(apt-get) is used as a fall-back. - - In most cases, packages installed with apt will start newly installed services by default. 
Most distributions have mechanisms to avoid this. - For example when installing Postgresql-9.5 in Debian 9, creating an excutable shell script (/usr/sbin/policy-rc.d) that throws - a return code of 101 will stop Postgresql 9.5 starting up after install. Remove the file or remove its execute permission afterwards. - - The apt-get commandline supports implicit regex matches here but we do not because it can let typos through easier + - In most cases, packages installed with I(apt) will start newly installed services by default. Most distributions have mechanisms to avoid this. + For example when installing Postgresql-9.5 in Debian 9, creating an executable shell script (/usr/sbin/policy-rc.d) that throws + a return code of 101 will stop Postgresql 9.5 starting up after install. Remove the file or its execute permission afterward. + - The C(apt-get) commandline supports implicit regex matches here but we do not because it can let typos through easier (If you typo C(foo) as C(fo) apt-get would install packages that have "fo" in their name with a warning and a prompt for the user. - Since we don't have warnings and prompts before installing we disallow this.Use an explicit fnmatch pattern if you want wildcarding) + Since there are no warnings and prompts before installing, we disallow this. Use an explicit fnmatch pattern if you want wildcarding). - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name) option. - When O(default_release) is used, an implicit priority of 990 is used. This is the same behavior as C(apt-get -t). - When an exact version is specified, an implicit priority of 1001 is used. -''' + - If the interpreter can't import C(python-apt)/C(python3-apt) the module will check for it in system-owned interpreters as well. + If the dependency can't be found, the module will attempt to install it. + If the dependency is found or installed, the module will be respawned under the correct interpreter. +""" -EXAMPLES = ''' -- name: Install apache httpd (state=present is optional) +EXAMPLES = """ +- name: Install apache httpd (state=present is optional) ansible.builtin.apt: name: apache2 state: present @@ -320,11 +325,11 @@ EXAMPLES = ''' purge: true - name: Run the equivalent of "apt-get clean" as a separate step - apt: + ansible.builtin.apt: clean: yes -''' +""" -RETURN = ''' +RETURN = """ cache_updated: description: if the cache was updated or not returned: success, in some cases @@ -350,7 +355,7 @@ stderr: returned: success, when needed type: str sample: "AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 127.0.1.1. Set the 'ServerName' directive globally to ..." 
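# How the comma-separated O(dpkg_options) value documented above maps onto
# apt-get arguments. The module's own helper is expand_dpkg_options(); this
# sketch only mirrors the documented default, it is not the real source:
def expand_dpkg_options_sketch(compressed):
    opts = [o.strip() for o in compressed.split(',') if o.strip()]
    return ' '.join('-o "Dpkg::Options::=--%s"' % o for o in opts)

print(expand_dpkg_options_sketch('force-confdef,force-confold'))
# -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"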
-''' # NOQA +""" # NOQA # added to stave off future warnings about apt api import warnings @@ -360,23 +365,24 @@ import datetime import fnmatch import locale as locale_module import os -import random import re +import secrets import shutil import sys import tempfile import time from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.file import S_IRWXU_RXG_RXO from ansible.module_utils.common.locale import get_best_parsable_locale from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module from ansible.module_utils.common.text.converters import to_native, to_text -from ansible.module_utils.six import PY3, string_types +from ansible.module_utils.six import string_types from ansible.module_utils.urls import fetch_file DPKG_OPTIONS = 'force-confdef,force-confold' -APT_GET_ZERO = "\n0 upgraded, 0 newly installed" -APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed" +APT_GET_ZERO = "\n0 upgraded, 0 newly installed, 0 to remove" +APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed, 0 to remove" APT_LISTS_PATH = "/var/lib/apt/lists" APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp" APT_MARK_INVALID_OP = 'Invalid operation' @@ -444,7 +450,7 @@ class PolicyRcD(object): with open('/usr/sbin/policy-rc.d', 'w') as policy_rc_d: policy_rc_d.write('#!/bin/sh\nexit %d\n' % self.m.params['policy_rc_d']) - os.chmod('/usr/sbin/policy-rc.d', 0o0755) + os.chmod('/usr/sbin/policy-rc.d', S_IRWXU_RXG_RXO) except Exception: self.m.fail_json(msg="Failed to create or chmod /usr/sbin/policy-rc.d") @@ -501,7 +507,7 @@ def package_best_match(pkgname, version_cmp, version, release, cache): policy.create_pin('Release', pkgname, release, 990) if version_cmp == "=": # Installing a specific version from command line overrides all pinning - # We don't mimmic this exactly, but instead set a priority which is higher than all APT built-in pin priorities. + # We don't mimic this exactly, but instead set a priority which is higher than all APT built-in pin priorities. 
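# A standalone sketch of the pinning scheme package_best_match() uses here:
# 990 mirrors `apt-get -t <release>`, while 1001 outranks every built-in pin
# so an exact version always wins. Assumes python3-apt is installed; the
# package name, release, and version pattern are examples only.
import apt_pkg

apt_pkg.init()
cache = apt_pkg.Cache(None)  # None: suppress progress output
policy = apt_pkg.Policy(cache)
policy.create_pin('Release', 'nginx', 'stable', 990)    # default_release behaviour
policy.create_pin('Version', 'nginx', '1.22.*', 1001)   # exact version wins
candidate = policy.get_candidate_ver(cache['nginx'])
print(candidate.ver_str if candidate else 'no candidate')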
policy.create_pin('Version', pkgname, version, 1001) pkg = cache[pkgname] pkgver = policy.get_candidate_ver(pkg) @@ -881,6 +887,11 @@ def install_deb( except Exception as e: m.fail_json(msg="Unable to install package: %s" % to_native(e)) + # Install 'Recommends' of this deb file + if install_recommends: + pkg_recommends = get_field_of_deb(m, deb_file, "Recommends") + deps_to_install.extend([pkg_name.strip() for pkg_name in pkg_recommends.split()]) + # and add this deb to the list of packages to install pkgs_to_install.append(deb_file) @@ -1091,13 +1102,24 @@ def upgrade(m, mode="yes", force=False, default_release=None, force_yes = '' if fail_on_autoremove: - fail_on_autoremove = '--no-remove' + if apt_cmd == APT_GET_CMD: + fail_on_autoremove = '--no-remove' + else: + m.warn("APTITUDE does not support '--no-remove', ignoring the 'fail_on_autoremove' parameter.") + fail_on_autoremove = '' else: fail_on_autoremove = '' allow_unauthenticated = '--allow-unauthenticated' if allow_unauthenticated else '' - allow_downgrade = '--allow-downgrades' if allow_downgrade else '' + if allow_downgrade: + if apt_cmd == APT_GET_CMD: + allow_downgrade = '--allow-downgrades' + else: + m.warn("APTITUDE does not support '--allow-downgrades', ignoring the 'allow_downgrade' parameter.") + allow_downgrade = '' + else: + allow_downgrade = '' if apt_cmd is None: if use_apt_get: @@ -1162,7 +1184,7 @@ def get_updated_cache_time(): # https://github.com/ansible/ansible-modules-core/issues/2951 def get_cache(module): - '''Attempt to get the cache object and update till it works''' + """Attempt to get the cache object and update till it works""" cache = None try: cache = apt.Cache() @@ -1230,9 +1252,19 @@ def main(): LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale, + LANGUAGE=locale, ) module.run_command_environ_update = APT_ENV_VARS + global APTITUDE_CMD + APTITUDE_CMD = module.get_bin_path("aptitude", False) + global APT_GET_CMD + APT_GET_CMD = module.get_bin_path("apt-get") + + p = module.params + install_recommends = p['install_recommends'] + dpkg_options = expand_dpkg_options(p['dpkg_options']) + if not HAS_PYTHON_APT: # This interpreter can't see the apt Python library- we'll do the following to try and fix that: # 1) look in common locations for system-owned interpreters that can see it; if we find one, respawn under it @@ -1245,13 +1277,13 @@ def main(): # made any more complex than it already is to try and cover more, eg, custom interpreters taking over # system locations) - apt_pkg_name = 'python3-apt' if PY3 else 'python-apt' + apt_pkg_name = 'python3-apt' if has_respawned(): # this shouldn't be possible; short-circuit early if it happens... 
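# The shape of the two guards added to upgrade() above, isolated: flags that
# only apt-get understands are dropped with a warning when aptitude is in
# use. warn is a stand-in for module.warn:
def backend_flags(apt_cmd, fail_on_autoremove, allow_downgrade, warn=print):
    flags = []
    wanted = [
        (fail_on_autoremove, '--no-remove', 'fail_on_autoremove'),
        (allow_downgrade, '--allow-downgrades', 'allow_downgrade'),
    ]
    for enabled, flag, param in wanted:
        if not enabled:
            continue
        if apt_cmd == 'apt-get':
            flags.append(flag)
        else:
            warn("APTITUDE does not support '%s', ignoring the '%s' parameter." % (flag, param))
    return flags

print(backend_flags('apt-get', True, True))   # ['--no-remove', '--allow-downgrades']
print(backend_flags('aptitude', True, True))  # [] after two warnings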
module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable)) - interpreters = ['/usr/bin/python3', '/usr/bin/python2', '/usr/bin/python'] + interpreters = ['/usr/bin/python3', '/usr/bin/python'] interpreter = probe_interpreters_for_module(interpreters, 'apt') @@ -1271,10 +1303,18 @@ def main(): module.warn("Auto-installing missing dependency without updating cache: %s" % apt_pkg_name) else: module.warn("Updating cache and auto-installing missing dependency: %s" % apt_pkg_name) - module.run_command(['apt-get', 'update'], check_rc=True) + module.run_command([APT_GET_CMD, 'update'], check_rc=True) # try to install the apt Python binding - module.run_command(['apt-get', 'install', '--no-install-recommends', apt_pkg_name, '-y', '-q'], check_rc=True) + apt_pkg_cmd = [APT_GET_CMD, 'install', apt_pkg_name, '-y', '-q', dpkg_options] + + if install_recommends is False: + apt_pkg_cmd.extend(["-o", "APT::Install-Recommends=no"]) + elif install_recommends is True: + apt_pkg_cmd.extend(["-o", "APT::Install-Recommends=yes"]) + # install_recommends is None uses the OS default + + module.run_command(apt_pkg_cmd, check_rc=True) # try again to find the bindings in common places interpreter = probe_interpreters_for_module(interpreters, 'apt') @@ -1288,18 +1328,11 @@ def main(): # we've done all we can do; just tell the user it's busted and get out module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable)) - global APTITUDE_CMD - APTITUDE_CMD = module.get_bin_path("aptitude", False) - global APT_GET_CMD - APT_GET_CMD = module.get_bin_path("apt-get") - - p = module.params - if p['clean'] is True: aptclean_stdout, aptclean_stderr, aptclean_diff = aptclean(module) # If there is nothing else to do exit. This will set state as # changed based on if the cache was updated. 
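# A simplified analogue of the probe-and-respawn logic above: find a system
# interpreter that can already import the apt bindings before respawning
# under it. The real helper is probe_interpreters_for_module(); this sketch
# only shows the idea:
import subprocess

def probe_sketch(interpreters, module_name):
    for python in interpreters:
        try:
            rc = subprocess.call(
                [python, '-c', 'import %s' % module_name],
                stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
            )
        except FileNotFoundError:
            continue  # this interpreter is not present on the host
        if rc == 0:
            return python
    return None

print(probe_sketch(['/usr/bin/python3', '/usr/bin/python'], 'apt'))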
- if not p['package'] and not p['upgrade'] and not p['deb']: + if not p['package'] and p['upgrade'] == 'no' and not p['deb']: module.exit_json( changed=True, msg=aptclean_stdout, @@ -1318,11 +1351,9 @@ def main(): updated_cache = False updated_cache_time = 0 - install_recommends = p['install_recommends'] allow_unauthenticated = p['allow_unauthenticated'] allow_downgrade = p['allow_downgrade'] allow_change_held_packages = p['allow_change_held_packages'] - dpkg_options = expand_dpkg_options(p['dpkg_options']) autoremove = p['autoremove'] fail_on_autoremove = p['fail_on_autoremove'] autoclean = p['autoclean'] @@ -1357,23 +1388,32 @@ def main(): err = '' update_cache_retries = module.params.get('update_cache_retries') update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay') - randomize = random.randint(0, 1000) / 1000.0 + randomize = secrets.randbelow(1000) / 1000.0 for retry in range(update_cache_retries): try: if not module.check_mode: cache.update() break - except apt.cache.FetchFailedException as e: - err = to_native(e) + except apt.cache.FetchFailedException as fetch_failed_exc: + err = fetch_failed_exc + module.warn( + f"Failed to update cache after {retry + 1} retries due " + f"to {to_native(fetch_failed_exc)}, retrying" + ) # Use exponential backoff plus a little bit of randomness delay = 2 ** retry + randomize if delay > update_cache_retry_max_delay: delay = update_cache_retry_max_delay + randomize time.sleep(delay) + module.warn(f"Sleeping for {int(round(delay))} seconds, before attempting to refresh the cache again") else: - module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason')) + msg = ( + f"Failed to update apt cache after {update_cache_retries} retries: " + f"{err if err else 'unknown reason'}" + ) + module.fail_json(msg=msg) cache.open(progress=None) mtimestamp, post_cache_update_time = get_updated_cache_time() diff --git a/lib/ansible/modules/apt_key.py b/lib/ansible/modules/apt_key.py index 295dc262a3a..3828f9a882b 100644 --- a/lib/ansible/modules/apt_key.py +++ b/lib/ansible/modules/apt_key.py @@ -5,11 +5,10 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: apt_key author: @@ -27,13 +26,13 @@ attributes: platform: platforms: debian notes: - - The apt-key command used by this module has been deprecated. See the L(Debian wiki,https://wiki.debian.org/DebianRepository/UseThirdParty) for details. - This module is kept for backwards compatibility for systems that still use apt-key as the main way to manage apt repository keys. + - The C(apt-key) command used by this module has been deprecated. See the L(Debian wiki,https://wiki.debian.org/DebianRepository/UseThirdParty) for details. + This module is kept for backwards compatibility for systems that still use C(apt-key) as the main way to manage apt repository keys. - As a sanity check, downloaded key id must match the one specified. - "Use full fingerprint (40 characters) key ids to avoid key collisions. To generate a full-fingerprint imported key: C(apt-key adv --list-public-keys --with-fingerprint --with-colons)." - - If you specify both the key id and the URL with O(state=present), the task can verify or add the key as needed. - - Adding a new key requires an apt cache update (e.g. using the M(ansible.builtin.apt) module's update_cache option). 
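# The retry schedule used by the cache-update loop above, isolated:
# exponential backoff capped at a maximum, with sub-second jitter drawn once
# via secrets.randbelow() instead of the random module:
import secrets

def backoff_delays(retries, max_delay):
    randomize = secrets.randbelow(1000) / 1000.0
    for retry in range(retries):
        delay = 2 ** retry + randomize
        if delay > max_delay:
            delay = max_delay + randomize
        yield delay

print(list(backoff_delays(4, 12)))  # e.g. [1.417, 2.417, 4.417, 8.417]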
+ - If you specify both the key O(id) and the O(url) with O(state=present), the task can verify or add the key as needed. + - Adding a new key requires an apt cache update (e.g. using the M(ansible.builtin.apt) module's C(update_cache) option). requirements: - gpg seealso: @@ -43,7 +42,7 @@ options: description: - The identifier of the key. - Including this allows check mode to correctly report the changed state. - - If specifying a subkey's id be aware that apt-key does not understand how to remove keys via a subkey id. Specify the primary key's id instead. + - If specifying a subkey's id be aware that apt-key does not understand how to remove keys via a subkey id. Specify the primary key's id instead. - This parameter is required when O(state) is set to V(absent). type: str data: @@ -80,9 +79,9 @@ options: on personally controlled sites using self-signed certificates. type: bool default: 'yes' -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: One way to avoid apt_key once it is removed from your distro, armored keys should use .asc extension, binary should use .gpg block: - name: somerepo | no apt key @@ -134,9 +133,9 @@ EXAMPLES = ''' id: 9FED2BCBDCD29CDF762678CBAED4B06F473041FA file: /tmp/apt.gpg state: present -''' +""" -RETURN = ''' +RETURN = """ after: description: List of apt key ids or fingerprints after any modification returned: on change @@ -167,7 +166,7 @@ short_id: returned: always type: str sample: "A88D21E9" -''' +""" import os @@ -189,7 +188,7 @@ def lang_env(module): if not hasattr(lang_env, 'result'): locale = get_best_parsable_locale(module) - lang_env.result = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale) + lang_env.result = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LANGUAGE=locale) return lang_env.result diff --git a/lib/ansible/modules/apt_repository.py b/lib/ansible/modules/apt_repository.py index 158913a1204..b17801f5f89 100644 --- a/lib/ansible/modules/apt_repository.py +++ b/lib/ansible/modules/apt_repository.py @@ -6,11 +6,10 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: apt_repository short_description: Add and remove APT repositories @@ -42,13 +41,13 @@ options: default: "present" mode: description: - - The octal mode for newly created files in sources.list.d. + - The octal mode for newly created files in C(sources.list.d). - Default is what system uses (probably 0644). type: raw version_added: "1.6" update_cache: description: - - Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes. + - Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes. type: bool default: "yes" aliases: [ update-cache ] @@ -73,9 +72,9 @@ options: version_added: '1.8' filename: description: - - Sets the name of the source list file in sources.list.d. + - Sets the name of the source list file in C(sources.list.d). Defaults to a file name based on the repository source url. - The .list extension will be automatically added. + The C(.list) extension will be automatically added. type: str version_added: '2.1' codename: @@ -91,8 +90,8 @@ options: Without this library, the module does not work. - Runs C(apt-get install python-apt) for Python 2, and C(apt-get install python3-apt) for Python 3. - Only works with the system Python 2 or Python 3. 
If you are using a Python on the remote that is not - the system Python, set O(install_python_apt=false) and ensure that the Python apt library - for your Python version is installed some other way. + the system Python, set O(install_python_apt=false) and ensure that the Python apt library + for your Python version is installed some other way. type: bool default: true author: @@ -102,9 +101,9 @@ requirements: - python-apt (python 2) - python3-apt (python 3) - apt-key or gpg -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Add specified repository into sources list ansible.builtin.apt_repository: repo: deb http://archive.canonical.com/ubuntu hardy partner @@ -146,9 +145,9 @@ EXAMPLES = ''' ansible.builtin.apt_repository: repo: "deb [arch=amd64 signed-by=/etc/apt/keyrings/myrepo.asc] https://download.example.com/linux/ubuntu {{ ansible_distribution_release }} stable" state: present -''' +""" -RETURN = ''' +RETURN = """ repo: description: A source string for the repository returned: always @@ -168,22 +167,22 @@ sources_removed: type: list sample: ["/etc/apt/sources.list.d/artifacts_elastic_co_packages_6_x_apt.list"] version_added: "2.15" -''' +""" import copy import glob import json import os import re +import secrets import sys import tempfile -import random import time from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.file import S_IRWU_RG_RO as DEFAULT_SOURCES_PERM from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.six import PY3 from ansible.module_utils.urls import fetch_url from ansible.module_utils.common.locale import get_best_parsable_locale @@ -202,7 +201,6 @@ except ImportError: HAVE_PYTHON_APT = False APT_KEY_DIRS = ['/etc/apt/keyrings', '/etc/apt/trusted.gpg.d', '/usr/share/keyrings'] -DEFAULT_SOURCES_PERM = 0o0644 VALID_SOURCE_TYPES = ('deb', 'deb-src') @@ -231,6 +229,7 @@ class SourcesList(object): def __init__(self, module): self.module = module self.files = {} # group sources by file + self.files_mapping = {} # internal DS for tracking symlinks # Repositories that we're adding -- used to implement mode param self.new_repos = set() self.default_file = self._apt_cfg_file('Dir::Etc::sourcelist') @@ -241,10 +240,12 @@ class SourcesList(object): # read sources.list.d for file in glob.iglob('%s/*.list' % self._apt_cfg_dir('Dir::Etc::sourceparts')): + if os.path.islink(file): + self.files_mapping[file] = os.readlink(file) self.load(file) def __iter__(self): - '''Simple iterator to go over all sources. Empty, non-source, and other not valid lines will be skipped.''' + """Simple iterator to go over all sources. 
Empty, non-source, and other not valid lines will be skipped.""" for file, sources in self.files.items(): for n, valid, enabled, source, comment in sources: if valid: @@ -314,9 +315,9 @@ class SourcesList(object): @staticmethod def _apt_cfg_file(filespec): - ''' + """ Wrapper for `apt_pkg` module for running with Python 2.5 - ''' + """ try: result = apt_pkg.config.find_file(filespec) except AttributeError: @@ -325,9 +326,9 @@ class SourcesList(object): @staticmethod def _apt_cfg_dir(dirspec): - ''' + """ Wrapper for `apt_pkg` module for running with Python 2.5 - ''' + """ try: result = apt_pkg.config.find_dir(dirspec) except AttributeError: @@ -373,7 +374,11 @@ class SourcesList(object): f.write(line) except IOError as ex: self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, to_native(ex))) - self.module.atomic_move(tmp_path, filename) + if filename in self.files_mapping: + # Write to symlink target instead of replacing symlink as a normal file + self.module.atomic_move(tmp_path, self.files_mapping[filename]) + else: + self.module.atomic_move(tmp_path, filename) # allow the user to override the default mode if filename in self.new_repos: @@ -408,17 +413,17 @@ class SourcesList(object): return new def modify(self, file, n, enabled=None, source=None, comment=None): - ''' + """ This function to be used with iterator, so we don't care of invalid sources. If source, enabled, or comment is None, original value from line ``n`` will be preserved. - ''' + """ valid, enabled_old, source_old, comment_old = self.files[file][n][1:] self.files[file][n] = (n, valid, self._choice(enabled, enabled_old), self._choice(source, source_old), self._choice(comment, comment_old)) def _add_valid_source(self, source_new, comment_new, file): # We'll try to reuse disabled source if we have it. # If we have more than one entry, we will enable them all - no advanced logic, remember. 
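# Why save() above consults files_mapping: an atomic rename onto a symlink
# replaces the link itself, so writes are redirected to the link's target.
# os.replace() stands in for the module's atomic_move(); note os.readlink()
# can return a relative path, which the real code passes through unchanged:
import os
import tempfile

def write_list_file(filename, contents):
    target = os.readlink(filename) if os.path.islink(filename) else filename
    fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(target) or '.')
    with os.fdopen(fd, 'w') as f:
        f.write(contents)
    os.replace(tmp_path, target)  # atomic on POSIX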
- self.module.log('ading source file: %s | %s | %s' % (source_new, comment_new, file)) + self.module.log('adding source file: %s | %s | %s' % (source_new, comment_new, file)) found = False for filename, n, enabled, source, comment in self: if source == source_new: @@ -457,7 +462,10 @@ class SourcesList(object): class UbuntuSourcesList(SourcesList): - LP_API = 'https://launchpad.net/api/1.0/~%s/+archive/%s' + # prefer api.launchpad.net over launchpad.net/api + # see: https://github.com/ansible/ansible/pull/81978#issuecomment-1767062178 + LP_API = 'https://api.launchpad.net/1.0/~%s/+archive/%s' + PPA_URI = 'https://ppa.launchpadcontent.net' def __init__(self, module): self.module = module @@ -489,14 +497,14 @@ class UbuntuSourcesList(SourcesList): except IndexError: ppa_name = 'ppa' - line = 'deb http://ppa.launchpad.net/%s/%s/ubuntu %s main' % (ppa_owner, ppa_name, self.codename) + line = 'deb %s/%s/%s/ubuntu %s main' % (self.PPA_URI, ppa_owner, ppa_name, self.codename) return line, ppa_owner, ppa_name def _key_already_exists(self, key_fingerprint): if self.apt_key_bin: locale = get_best_parsable_locale(self.module) - APT_ENV = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale) + APT_ENV = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale, LANGUAGE=locale) self.module.run_command_environ_update = APT_ENV rc, out, err = self.module.run_command([self.apt_key_bin, 'export', key_fingerprint], check_rc=True) found = bool(not err or 'nothing exported' not in err) @@ -608,7 +616,7 @@ class UbuntuSourcesList(SourcesList): def revert_sources_list(sources_before, sources_after, sourceslist_before): - '''Revert the sourcelist files to their previous state.''' + """Revert the sourcelist files to their previous state.""" # First remove any new files that were created: for filename in set(sources_after.keys()).difference(sources_before.keys()): @@ -656,13 +664,13 @@ def main(): # made any more complex than it already is to try and cover more, eg, custom interpreters taking over # system locations) - apt_pkg_name = 'python3-apt' if PY3 else 'python-apt' + apt_pkg_name = 'python3-apt' if has_respawned(): # this shouldn't be possible; short-circuit early if it happens... 
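# How a `ppa:owner/name` spec becomes a sources line after the switch to the
# PPA_URI above; a sketch of the string building only, with a made-up PPA:
PPA_URI = 'https://ppa.launchpadcontent.net'

def expand_ppa_sketch(spec, codename):
    owner, _, name = spec.split(':', 1)[1].partition('/')
    name = name or 'ppa'  # bare `ppa:owner` falls back to the default archive
    return 'deb %s/%s/%s/ubuntu %s main' % (PPA_URI, owner, name, codename)

print(expand_ppa_sketch('ppa:deadsnakes/ppa', 'noble'))
# deb https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu noble main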
module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable)) - interpreters = ['/usr/bin/python3', '/usr/bin/python2', '/usr/bin/python'] + interpreters = ['/usr/bin/python3', '/usr/bin/python'] interpreter = probe_interpreters_for_module(interpreters, 'apt') @@ -730,29 +738,38 @@ def main(): if changed and not module.check_mode: try: + err = '' sourceslist.save() if update_cache: - err = '' update_cache_retries = module.params.get('update_cache_retries') update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay') - randomize = random.randint(0, 1000) / 1000.0 + randomize = secrets.randbelow(1000) / 1000.0 + cache = apt.Cache() for retry in range(update_cache_retries): try: - cache = apt.Cache() cache.update() break - except apt.cache.FetchFailedException as e: - err = to_native(e) + except apt.cache.FetchFailedException as fetch_failed_exc: + err = fetch_failed_exc + module.warn( + f"Failed to update cache after {retry + 1} retries due " + f"to {to_native(fetch_failed_exc)}, retrying" + ) # Use exponential backoff with a max fail count, plus a little bit of randomness delay = 2 ** retry + randomize if delay > update_cache_retry_max_delay: delay = update_cache_retry_max_delay + randomize time.sleep(delay) + module.warn(f"Sleeping for {int(round(delay))} seconds, before attempting to update the cache again") else: revert_sources_list(sources_before, sources_after, sourceslist_before) - module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason')) + msg = ( + f"Failed to update apt cache after {update_cache_retries} retries: " + f"{err if err else 'unknown reason'}" + ) + module.fail_json(msg=msg) except (OSError, IOError) as ex: revert_sources_list(sources_before, sources_after, sourceslist_before) diff --git a/lib/ansible/modules/assemble.py b/lib/ansible/modules/assemble.py index c93b4ff67db..ff570aee1b9 100644 --- a/lib/ansible/modules/assemble.py +++ b/lib/ansible/modules/assemble.py @@ -5,11 +5,10 @@ # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: assemble short_description: Assemble configuration files from fragments @@ -62,14 +61,14 @@ options: type: str ignore_hidden: description: - - A boolean that controls if files that start with a '.' will be included or not. + - A boolean that controls if files that start with a C(.) will be included or not. type: bool default: no version_added: '2.0' validate: description: - The validation command to run before copying into place. - - The path to the file to validate is passed in via '%s' which must be present as in the sshd example below. + - The path to the file to validate is passed in by C(%s) which must be present as in the sshd example below. - The command is passed securely so shell features like expansion and pipes won't work. 
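# A condensed sketch of what assemble_from_fragments() further below does:
# walk the fragment directory in sorted order, optionally skip hidden files,
# and join the contents with the delimiter. Error handling, decryption, and
# the regexp filter are omitted:
import os

def assemble_sketch(src_dir, delimiter=None, ignore_hidden=False):
    chunks = []
    for name in sorted(os.listdir(src_dir)):
        if ignore_hidden and name.startswith('.'):
            continue
        path = os.path.join(src_dir, name)
        if os.path.isfile(path):
            with open(path) as f:
                chunks.append(f.read())
    sep = delimiter + '\n' if delimiter else ''
    return sep.join(chunks)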
type: str version_added: '2.0' @@ -103,9 +102,9 @@ extends_documentation_fragment: - action_common_attributes.files - decrypt - files -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Assemble from fragments from a directory ansible.builtin.assemble: src: /etc/someapp/fragments @@ -122,9 +121,9 @@ EXAMPLES = r''' src: /etc/ssh/conf.d/ dest: /etc/ssh/sshd_config validate: /usr/sbin/sshd -t -f %s -''' +""" -RETURN = r'''#''' +RETURN = r"""#""" import codecs import os @@ -137,7 +136,7 @@ from ansible.module_utils.common.text.converters import to_native def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, tmpdir=None): - ''' assemble a file from a directory of fragments ''' + """ assemble a file from a directory of fragments """ tmpfd, temp_path = tempfile.mkstemp(dir=tmpdir) tmp = os.fdopen(tmpfd, 'wb') delimit_me = False @@ -206,6 +205,11 @@ def main(): regexp=dict(type='str'), ignore_hidden=dict(type='bool', default=False), validate=dict(type='str'), + + # Options that are for the action plugin, but ignored by the module itself. + # We have them here so that the tests pass without ignores, which + # reduces the likelihood of further bugs added. + decrypt=dict(type='bool', default=True), ), add_file_common_args=True, ) diff --git a/lib/ansible/modules/assert.py b/lib/ansible/modules/assert.py index 0070f25671e..90eeacb305f 100644 --- a/lib/ansible/modules/assert.py +++ b/lib/ansible/modules/assert.py @@ -3,11 +3,10 @@ # Copyright: (c) 2012, Dag Wieers # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: assert short_description: Asserts given expressions are true @@ -18,14 +17,14 @@ version_added: "1.5" options: that: description: - - A list of string expressions of the same form that can be passed to the 'when' statement. + - A list of string expressions of the same form that can be passed to the C(when) statement. type: list elements: str required: true fail_msg: description: - The customized message used for a failing assertion. - - This argument was called 'msg' before Ansible 2.7, now it is renamed to 'fail_msg' with alias 'msg'. + - This argument was called O(msg) before Ansible 2.7, now it is renamed to O(fail_msg) with alias O(msg). 
type: str aliases: [ msg ] version_added: "2.7" @@ -71,17 +70,22 @@ seealso: author: - Ansible Core Team - Michael DeHaan -''' +""" -EXAMPLES = r''' -- ansible.builtin.assert: { that: "ansible_os_family != 'RedHat'" } +EXAMPLES = r""" +- name: A single condition can be supplied as string instead of list + ansible.builtin.assert: + that: "ansible_os_family != 'RedHat'" -- ansible.builtin.assert: +- name: Use yaml multiline strings to ease escaping + ansible.builtin.assert: that: - "'foo' in some_command_result.stdout" - number_of_the_counting == 3 + - > + "reject" not in some_command_result.stderr -- name: After version 2.7 both 'msg' and 'fail_msg' can customize failing assertion message +- name: After version 2.7 both O(msg) and O(fail_msg) can customize failing assertion message ansible.builtin.assert: that: - my_param <= 100 @@ -89,7 +93,7 @@ EXAMPLES = r''' fail_msg: "'my_param' must be between 0 and 100" success_msg: "'my_param' is between 0 and 100" -- name: Please use 'msg' when ansible version is smaller than 2.7 +- name: Please use O(msg) when ansible version is smaller than 2.7 ansible.builtin.assert: that: - my_param <= 100 @@ -102,4 +106,4 @@ EXAMPLES = r''' - my_param <= 100 - my_param >= 0 quiet: true -''' +""" diff --git a/lib/ansible/modules/async_status.py b/lib/ansible/modules/async_status.py index e0a5871bcdf..0a4eeb53ac2 100644 --- a/lib/ansible/modules/async_status.py +++ b/lib/ansible/modules/async_status.py @@ -3,11 +3,10 @@ # Copyright: (c) 2012, Michael DeHaan , and others # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: async_status short_description: Obtain status of asynchronous task @@ -24,7 +23,7 @@ options: mode: description: - If V(status), obtain the status. - - If V(cleanup), clean up the async job cache (by default in C(~/.ansible_async/)) for the specified job O(jid). + - If V(cleanup), clean up the async job cache (by default in C(~/.ansible_async/)) for the specified job O(jid), without waiting for it to finish. 
type: str choices: [ cleanup, status ] default: status @@ -37,7 +36,8 @@ attributes: async: support: none check_mode: - support: none + support: full + version_added: '2.17' diff_mode: support: none bypass_host_loop: @@ -51,28 +51,33 @@ seealso: author: - Ansible Core Team - Michael DeHaan -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" --- -- name: Asynchronous yum task - ansible.builtin.yum: +- name: Asynchronous dnf task + ansible.builtin.dnf: name: docker-io state: present async: 1000 poll: 0 - register: yum_sleeper + register: dnf_sleeper - name: Wait for asynchronous job to end ansible.builtin.async_status: - jid: '{{ yum_sleeper.ansible_job_id }}' + jid: '{{ dnf_sleeper.ansible_job_id }}' register: job_result until: job_result.finished retries: 100 delay: 10 -''' -RETURN = r''' +- name: Clean up async file + ansible.builtin.async_status: + jid: '{{ dnf_sleeper.ansible_job_id }}' + mode: cleanup +""" + +RETURN = r""" ansible_job_id: description: The asynchronous job id returned: success @@ -100,7 +105,7 @@ erased: description: Path to erased job file returned: when file is erased type: str -''' +""" import json import os @@ -112,12 +117,15 @@ from ansible.module_utils.common.text.converters import to_native def main(): - module = AnsibleModule(argument_spec=dict( - jid=dict(type='str', required=True), - mode=dict(type='str', default='status', choices=['cleanup', 'status']), - # passed in from the async_status action plugin - _async_dir=dict(type='path', required=True), - )) + module = AnsibleModule( + argument_spec=dict( + jid=dict(type="str", required=True), + mode=dict(type="str", default="status", choices=["cleanup", "status"]), + # passed in from the async_status action plugin + _async_dir=dict(type="path", required=True), + ), + supports_check_mode=True, + ) mode = module.params['mode'] jid = module.params['jid'] diff --git a/lib/ansible/modules/async_wrapper.py b/lib/ansible/modules/async_wrapper.py index b585396e45e..d33ebe196ed 100644 --- a/lib/ansible/modules/async_wrapper.py +++ b/lib/ansible/modules/async_wrapper.py @@ -3,8 +3,7 @@ # Copyright: (c) 2012, Michael DeHaan , and others # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import errno @@ -22,8 +21,6 @@ import multiprocessing from ansible.module_utils.common.text.converters import to_text, to_bytes -PY3 = sys.version_info[0] == 3 - syslog.openlog('ansible-%s' % os.path.basename(__file__)) syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:])) @@ -78,13 +75,13 @@ def daemonize_self(): # NB: this function copied from module_utils/json_utils.py. Ensure any changes are propagated there. # FUTURE: AnsibleModule-ify this module so it's Ansiballz-compatible and can use the module_utils copy of this function. def _filter_non_json_lines(data): - ''' + """ Used to filter unrelated output around module JSON output, like messages from tcagetattr, or where dropbear spews MOTD on every single command (which is nuts). Filters leading lines before first line-starting occurrence of '{', and filter all trailing lines after matching close character (working from the bottom of output). 
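# The filtering that the docstring above describes, in miniature: drop noise
# before the first line starting with '{' and after the last line ending
# with '}'. The real helper also validates that both markers exist:
def filter_non_json_sketch(data):
    lines = data.splitlines()
    start = next(i for i, line in enumerate(lines) if line.startswith('{'))
    end = next(i for i in range(len(lines) - 1, -1, -1) if lines[i].endswith('}'))
    return '\n'.join(lines[start:end + 1]), lines[:start] + lines[end + 1:]

body, warnings = filter_non_json_sketch('motd noise\n{"changed": false}\ntrailing noise')
print(body)      # {"changed": false}
print(warnings)  # ['motd noise', 'trailing noise']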
- ''' + """ warnings = [] # Filter initial junk @@ -169,13 +166,18 @@ def _run_module(wrapped_cmd, jid): interpreter = _get_interpreter(cmd[0]) if interpreter: cmd = interpreter + cmd - script = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + script = subprocess.Popen( + cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=False, + text=True, + encoding="utf-8", + errors="surrogateescape", + ) (outdata, stderr) = script.communicate() - if PY3: - outdata = outdata.decode('utf-8', 'surrogateescape') - stderr = stderr.decode('utf-8', 'surrogateescape') (filtered_outdata, json_warnings) = _filter_non_json_lines(outdata) diff --git a/lib/ansible/modules/blockinfile.py b/lib/ansible/modules/blockinfile.py index e9feb7eed6a..e5240a0cc4f 100644 --- a/lib/ansible/modules/blockinfile.py +++ b/lib/ansible/modules/blockinfile.py @@ -4,11 +4,10 @@ # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: blockinfile short_description: Insert/update/remove a text block surrounded by marker lines @@ -34,7 +33,7 @@ options: marker: description: - The marker line template. - - C({mark}) will be replaced with the values in O(marker_begin) (default="BEGIN") and O(marker_end) (default="END"). + - C({mark}) will be replaced with the values in O(marker_begin) (default=C(BEGIN)) and O(marker_end) (default=C(END)). - Using a custom marker without the C({mark}) variable may result in the block being repeatedly inserted on subsequent playbook runs. - Multi-line markers are not supported and will result in the block being repeatedly inserted on subsequent playbook runs. - A newline is automatically appended by the module to O(marker_begin) and O(marker_end). @@ -51,12 +50,10 @@ options: description: - If specified and no begin/ending O(marker) lines are found, the block will be inserted after the last match of specified regular expression. - A special value is available; V(EOF) for inserting the block at the end of the file. - - If specified regular expression has no matches, V(EOF) will be used instead. + - If specified regular expression has no matches or no value is passed, V(EOF) will be used instead. - The presence of the multiline flag (?m) in the regular expression controls whether the match is done line by line or with multiple lines. This behaviour was added in ansible-core 2.14. type: str - choices: [ EOF, '*regex*' ] - default: EOF insertbefore: description: - If specified and no begin/ending O(marker) lines are found, the block will be inserted before the last match of specified regular expression. @@ -65,7 +62,6 @@ options: - The presence of the multiline flag (?m) in the regular expression controls whether the match is done line by line or with multiple lines. This behaviour was added in ansible-core 2.14. type: str - choices: [ BOF, '*regex*' ] create: description: - Create a new file if it does not exist. @@ -90,12 +86,28 @@ options: type: str default: END version_added: '2.5' + append_newline: + required: false + description: + - Append a blank line to the inserted block, if this does not appear at the end of the file. 
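# What O(append_newline) above and the paired O(prepend_newline) described
# just below do, sketched on a plain list of already-terminated lines; the
# real logic in the block-insertion hunk further down works on bytes and
# honours os.linesep:
def insert_block(lines, n0, blocklines, prepend_newline, append_newline):
    blank = '\n'
    if prepend_newline and n0 != 0 and lines[n0 - 1] != blank:
        lines[n0:n0] = [blank]
        n0 += 1
    lines[n0:n0] = blocklines
    after = n0 + len(blocklines)
    if append_newline and after < len(lines) and lines[after] != blank:
        lines[after:after] = [blank]
    return lines

print(insert_block(['a\n', 'b\n'], 1, ['# BEGIN\n', 'x\n', '# END\n'], True, True))
# ['a\n', '\n', '# BEGIN\n', 'x\n', '# END\n', '\n', 'b\n']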
+ - Note that this attribute is not considered when C(state) is set to C(absent). + type: bool + default: no + version_added: '2.16' + prepend_newline: + required: false + description: + - Prepend a blank line to the inserted block, if this does not appear at the beginning of the file. + - Note that this attribute is not considered when C(state) is set to C(absent). + type: bool + default: no + version_added: '2.16' notes: - - When using 'with_*' loops be aware that if you do not set a unique mark the block will be overwritten on each iteration. + - When using C(with_*) loops, be aware that if you do not set a unique mark the block will be overwritten on each iteration. - As of Ansible 2.3, the O(dest) option has been changed to O(path) as default, but O(dest) still works as well. - Option O(ignore:follow) has been removed in Ansible 2.5, because this module modifies the contents of the file so O(ignore:follow=no) does not make sense. - - When more then one block should be handled in one file you must change the O(marker) per task. + - When more than one block should be handled in one file, you must change the O(marker) per task. extends_documentation_fragment: - action_common_attributes - action_common_attributes.files @@ -113,13 +125,15 @@ attributes: platforms: posix vault: support: none -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Before Ansible 2.3, option 'dest' or 'name' was used instead of 'path' -- name: Insert/Update "Match User" configuration block in /etc/ssh/sshd_config +- name: Insert/Update "Match User" configuration block in /etc/ssh/sshd_config, prepending and appending a new line ansible.builtin.blockinfile: path: /etc/ssh/sshd_config + append_newline: true + prepend_newline: true block: | Match User ansible-agent PasswordAuthentication no @@ -173,7 +187,7 @@ EXAMPLES = r''' insertafter: '(?m)SID_LIST_LISTENER_DG =\n.*\(SID_LIST =' marker: " " -''' +""" import re import os @@ -186,9 +200,8 @@ from ansible.module_utils.common.text.converters import to_bytes, to_native def write_changes(module, contents, path): tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir) - f = os.fdopen(tmpfd, 'wb') - f.write(contents) - f.close() + with os.fdopen(tmpfd, 'wb') as tf: + tf.write(contents) validate = module.params.get('validate', None) valid = not validate @@ -231,6 +244,8 @@ def main(): validate=dict(type='str'), marker_begin=dict(type='str', default='BEGIN'), marker_end=dict(type='str', default='END'), + append_newline=dict(type='bool', default=False), + prepend_newline=dict(type='bool', default=False), ), mutually_exclusive=[['insertbefore', 'insertafter']], add_file_common_args=True, @@ -249,11 +264,13 @@ def main(): module.fail_json(rc=257, msg='Path %s does not exist !'
% path) destpath = os.path.dirname(path) - if not os.path.exists(destpath) and not module.check_mode: + if destpath and not os.path.exists(destpath) and not module.check_mode: try: os.makedirs(destpath) + except OSError as e: + module.fail_json(msg='Error creating %s. Error code: %s. Error description: %s' % (destpath, e.errno, e.strerror)) except Exception as e: - module.fail_json(msg='Error creating %s Error code: %s Error description: %s' % (destpath, e[0], e[1])) + module.fail_json(msg='Error creating %s. Error: %s' % (destpath, to_native(e))) original = None lines = [] else: @@ -274,6 +291,7 @@ def main(): block = to_bytes(params['block']) marker = to_bytes(params['marker']) present = params['state'] == 'present' + blank_line = [b(os.linesep)] if not present and not path_exists: module.exit_json(changed=False, msg="File %s not present" % path) @@ -337,7 +355,26 @@ def main(): if not lines[n0 - 1].endswith(b(os.linesep)): lines[n0 - 1] += b(os.linesep) + # Before the block: prepend a blank line if we are not at the beginning of the file + # and the previous line is not already blank. + # Inserting the blank line shifts the block's insertion position down by one. + if params['prepend_newline'] and present: + if n0 != 0 and lines[n0 - 1] != b(os.linesep): + lines[n0:n0] = blank_line + n0 += 1 + + # Insert the block lines[n0:n0] = blocklines + + # After the block: append a blank line if we are not at the end of the file + # and the line right after the block is not already blank. + if params['append_newline'] and present: + line_after_block = n0 + len(blocklines) + if line_after_block < len(lines) and lines[line_after_block] != b(os.linesep): + lines[line_after_block:line_after_block] = blank_line + if lines: result = b''.join(lines) else: diff --git a/lib/ansible/modules/command.py b/lib/ansible/modules/command.py index 846ac36afaf..ed71342ab6b 100644 --- a/lib/ansible/modules/command.py +++ b/lib/ansible/modules/command.py @@ -4,11 +4,10 @@ # Copyright: (c) 2016, Toshio Kuratomi # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: command short_description: Execute commands on targets @@ -16,12 +15,11 @@ version_added: historical description: - The M(ansible.builtin.command) module takes the command name followed by a list of space-delimited arguments. - The given command will be executed on all selected nodes. - - The command(s) will not be - processed through the shell, so variables like C($HOSTNAME) and operations - like C("*"), C("<"), C(">"), C("|"), C(";") and C("&") will not work. + - The command(s) will not be processed through the shell, so operations like C("*"), C("<"), C(">"), C("|"), C(";") and C("&") will not work. + Also, environment variables are resolved by Python rather than the shell (see O(expand_argument_vars)) and are left unchanged if not matched.
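A minimal usage sketch of the O(expand_argument_vars) behaviour just described (an illustrative task, not part of this patch): with expansion disabled, C($HOME) reaches the command as a literal string instead of being resolved on the target.

- name: Print a literal $HOME by disabling variable expansion
  ansible.builtin.command: echo $HOME
  args:
    expand_argument_vars: false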
- - To create C(command) tasks that are easier to read than the ones using space-delimited - arguments, pass parameters using the C(args) L(task keyword,https://docs.ansible.com/ansible/latest/reference_appendices/playbooks_keywords.html#task) + - To create C(command) tasks that are easier to read than the ones using space-delimited arguments, + pass parameters using the C(args) L(task keyword,https://docs.ansible.com/ansible/latest/reference_appendices/playbooks_keywords.html#task) or use O(cmd) parameter. - Either a free form command or O(cmd) parameter is required, see the examples. - For Windows targets, use the M(ansible.windows.win_command) module instead. @@ -42,8 +40,8 @@ attributes: options: expand_argument_vars: description: - - Expands the arguments that are variables, for example C($HOME) will be expanded before being passed to the - command to run. + - Expands the arguments that are variables, for example C($HOME) will be expanded before being passed to the command to run. + - If a variable is not matched, it is left unchanged, unlike shell substitution which would remove it. - Set to V(false) to disable expansion and treat the value as a literal argument. type: bool default: true @@ -51,7 +49,7 @@ options: free_form: description: - The command module takes a free form string as a command to run. - - There is no actual parameter named 'free form'. + - There is no actual parameter named C(free_form). cmd: type: str description: @@ -119,9 +117,9 @@ seealso: author: - Ansible Core Team - Michael DeHaan -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Return motd to registered var ansible.builtin.command: cat /etc/motd register: mymotd @@ -175,9 +173,9 @@ EXAMPLES = r''' - name: Safely use templated variable to run command. Always use the quote filter to avoid injection issues ansible.builtin.command: cat {{ myfile|quote }} register: myoutput -''' +""" -RETURN = r''' +RETURN = r""" msg: description: changed returned: always @@ -230,7 +228,7 @@ stderr_lines: returned: always type: list sample: [u'ls cannot access foo: No such file or directory', u'ls …'] -''' +""" import datetime import glob @@ -276,7 +274,7 @@ def main(): strip = module.params['strip_empty_ends'] expand_argument_vars = module.params['expand_argument_vars'] - # we promissed these in 'always' ( _lines get autoaded on action plugin) + # we promised these in 'always' ( _lines get auto-added on action plugin) r = {'changed': False, 'stdout': '', 'stderr': '', 'rc': None, 'cmd': None, 'start': None, 'end': None, 'delta': None, 'msg': ''} if not shell and executable: diff --git a/lib/ansible/modules/copy.py b/lib/ansible/modules/copy.py index af3ab4a2b53..8a5297466f4 100644 --- a/lib/ansible/modules/copy.py +++ b/lib/ansible/modules/copy.py @@ -4,20 +4,23 @@ # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: copy version_added: historical short_description: Copy files to remote locations description: - - The M(ansible.builtin.copy) module copies a file from the local or remote machine to a location on the remote machine. + - The M(ansible.builtin.copy) module copies a file or a directory structure from the local or remote machine to a location on the remote machine. + File system meta-information (permissions, ownership, etc.) 
may be set, even when the file or directory already exists on the target system. Some meta-information may be copied on request. + - Get meta-information with the M(ansible.builtin.stat) module. + - Set meta-information with the M(ansible.builtin.file) module. - Use the M(ansible.builtin.fetch) module to copy files from remote locations to the local box. - If you need variable interpolation in copied files, use the M(ansible.builtin.template) module. - Using a variable in the O(content) field will result in unpredictable output. + Using a variable with the O(content) parameter produces unpredictable results. - For Windows targets, use the M(ansible.windows.win_copy) module instead. options: src: description: - Local path to a file to copy to the remote server. - This can be absolute or relative. - If path is a directory, it is copied recursively. In this case, if path ends - with "/", only inside contents of that directory are copied to destination. - Otherwise, if it does not end with "/", the directory itself with all contents + with C(/), only the contents of that directory are copied to the destination. + Otherwise, if it does not end with C(/), the directory itself with all contents is copied. This behavior is similar to the C(rsync) command line tool. type: path content: @@ -41,7 +44,7 @@ options: description: - Remote absolute path where the file should be copied to. - If O(src) is a directory, this must be a directory too. - - If O(dest) is a non-existent path and if either O(dest) ends with "/" or O(src) is a directory, O(dest) is created. + - If O(dest) is a non-existent path and if either O(dest) ends with C(/) or O(src) is a directory, O(dest) is created. - If O(dest) is a relative path, the starting directory is determined by the remote host. - If O(src) and O(dest) are files, the parent directory of O(dest) is not created and the task fails if it does not already exist. type: path @@ -79,19 +82,20 @@ options: See CVE-2020-1736 for further details. directory_mode: description: - - When doing a recursive copy set the mode for the directories. - - If this is not set we will use the system defaults. - - The mode is only set on directories which are newly created, and will not affect those that already existed. + - Set the access permissions of newly created directories to the given mode. + Permissions on existing directories do not change. + - See O(mode) for the syntax of accepted values. + - The target system's defaults determine permissions when this parameter is not set. type: raw version_added: '1.5' remote_src: description: - Influence whether O(src) needs to be transferred or already is present remotely. - If V(false), it will search for O(src) on the controller node. - - If V(true) it will search for O(src) on the managed (remote) node. + - If V(true), it will search for O(src) on the managed (remote) node. - O(remote_src) supports recursive copying as of version 2.8. - O(remote_src) only works with O(mode=preserve) as of version 2.6. - - Autodecryption of files does not work when O(remote_src=yes). + - Auto-decryption of files does not work when O(remote_src=yes). type: bool default: no version_added: '2.0' @@ -105,7 +109,6 @@ options: description: - This flag indicates that filesystem links in the source tree, if they exist, should be followed.
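A brief sketch of the trailing-slash semantics described above (illustrative tasks, not part of this patch): with a trailing C(/) only the contents of the source directory are copied, while without it the directory itself is created under O(dest).

- name: Copy only the contents of files/ into /etc/app/
  ansible.builtin.copy:
    src: files/
    dest: /etc/app/

- name: Copy the files directory itself into /etc/app/
  ansible.builtin.copy:
    src: files
    dest: /etc/app/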
type: bool - default: yes version_added: '2.4' checksum: description: @@ -151,9 +154,9 @@ attributes: vault: support: full version_added: '2.2' -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Copy file with owner and permissions ansible.builtin.copy: src: /srv/myfiles/foo.conf @@ -216,9 +219,9 @@ EXAMPLES = r''' src: /etc/foo.conf dest: /path/to/link # link to /path/to/file follow: no -''' +""" -RETURN = r''' +RETURN = r""" dest: description: Destination file/path. returned: success @@ -268,7 +271,7 @@ mode: description: Permissions of the target, after execution. returned: success type: str - sample: "0644" + sample: '0644' size: description: Size of the target, after execution. returned: success @@ -279,14 +282,13 @@ state: returned: success type: str sample: file -''' +""" import errno import filecmp import grp import os import os.path -import platform import pwd import shutil import stat @@ -295,13 +297,6 @@ import traceback from ansible.module_utils.common.text.converters import to_bytes, to_native from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.common.process import get_bin_path -from ansible.module_utils.common.locale import get_best_parsable_locale -from ansible.module_utils.six import PY3 - - -# The AnsibleModule object -module = None class AnsibleModuleError(Exception): @@ -309,25 +304,10 @@ class AnsibleModuleError(Exception): self.results = results -# Once we get run_command moved into common, we can move this into a common/files module. We can't -# until then because of the module.run_command() method. We may need to move it into -# basic::AnsibleModule() until then but if so, make it a private function so that we don't have to -# keep it for backwards compatibility later. -def clear_facls(path): - setfacl = get_bin_path('setfacl') - # FIXME "setfacl -b" is available on Linux and FreeBSD. There is "setfacl -D e" on z/OS. Others? - acl_command = [setfacl, '-b', path] - b_acl_command = [to_bytes(x) for x in acl_command] - locale = get_best_parsable_locale(module) - rc, out, err = module.run_command(b_acl_command, environ_update=dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)) - if rc != 0: - raise RuntimeError('Error running "{0}": stdout: "{1}"; stderr: "{2}"'.format(' '.join(b_acl_command), out, err)) - - def split_pre_existing_dir(dirname): - ''' + """ Return the first pre-existing directory and a list of the new directories that will be created. 
- ''' + """ head, tail = os.path.split(dirname) b_head = to_bytes(head, errors='surrogate_or_strict') if head == '': @@ -343,9 +323,9 @@ def split_pre_existing_dir(dirname): def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed): - ''' + """ Walk the new directories list and make sure that permissions are as we would expect - ''' + """ if new_directory_list: working_dir = os.path.join(pre_existing_dir, new_directory_list.pop(0)) @@ -524,8 +504,6 @@ def copy_common_dirs(src, dest, module): def main(): - global module - module = AnsibleModule( # not checking because of daisy chain to file module argument_spec=dict( @@ -537,7 +515,7 @@ def main(): force=dict(type='bool', default=True), validate=dict(type='str'), directory_mode=dict(type='raw'), - remote_src=dict(type='bool'), + remote_src=dict(type='bool', default=False), local_follow=dict(type='bool'), checksum=dict(type='str'), follow=dict(type='bool', default=False), @@ -700,54 +678,8 @@ def main(): else: raise - # might be needed below - if PY3 and hasattr(os, 'listxattr'): - try: - src_has_acls = 'system.posix_acl_access' in os.listxattr(src) - except Exception as e: - # assume unwanted ACLs by default - src_has_acls = True - # at this point we should always have tmp file - module.atomic_move(b_mysrc, dest, unsafe_writes=module.params['unsafe_writes']) - - if PY3 and hasattr(os, 'listxattr') and platform.system() == 'Linux' and not remote_src: - # atomic_move used above to copy src into dest might, in some cases, - # use shutil.copy2 which in turn uses shutil.copystat. - # Since Python 3.3, shutil.copystat copies file extended attributes: - # https://docs.python.org/3/library/shutil.html#shutil.copystat - # os.listxattr (along with others) was added to handle the operation. - - # This means that on Python 3 we are copying the extended attributes which includes - # the ACLs on some systems - further limited to Linux as the documentation above claims - # that the extended attributes are copied only on Linux. Also, os.listxattr is only - # available on Linux. - - # If not remote_src, then the file was copied from the controller. In that - # case, any filesystem ACLs are artifacts of the copy rather than preservation - # of existing attributes. Get rid of them: - - if src_has_acls: - # FIXME If dest has any default ACLs, there are not applied to src now because - # they were overridden by copystat. Should/can we do anything about this? - # 'system.posix_acl_default' in os.listxattr(os.path.dirname(b_dest)) - - try: - clear_facls(dest) - except ValueError as e: - if 'setfacl' in to_native(e): - # No setfacl so we're okay. The controller couldn't have set a facl - # without the setfacl command - pass - else: - raise - except RuntimeError as e: - # setfacl failed. - if 'Operation not supported' in to_native(e): - # The file system does not support ACLs. 
- pass - else: - raise + module.atomic_move(b_mysrc, dest, unsafe_writes=module.params['unsafe_writes'], keep_dest_attrs=not remote_src) except (IOError, OSError): module.fail_json(msg="failed to copy: %s to %s" % (src, dest), traceback=traceback.format_exc()) diff --git a/lib/ansible/modules/cron.py b/lib/ansible/modules/cron.py index d43c8133831..0382aa6b265 100644 --- a/lib/ansible/modules/cron.py +++ b/lib/ansible/modules/cron.py @@ -7,11 +7,10 @@ # Copyright: (c) 2015, Luca Berruti # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: cron short_description: Manage cron.d and crontab entries @@ -19,19 +18,19 @@ description: - Use this module to manage crontab and environment variables entries. This module allows you to create environment variables and named crontab entries, update, or delete them. - 'When crontab jobs are managed: the module includes one line with the description of the - crontab entry C("#Ansible: <name>") corresponding to the "name" passed to the module, - which is used by future ansible/module calls to find/check the state. The "name" - parameter should be unique, and changing the "name" value will result in a new cron + crontab entry C("#Ansible: <name>") corresponding to the O(name) passed to the module, + which is used by future ansible/module calls to find/check the state. The O(name) + parameter should be unique, and changing the O(name) value will result in a new cron task being created (or a different one being removed).' - When environment variables are managed, no comment line is added, but, when the module - needs to find/check the state, it uses the "name" parameter to find the environment + needs to find/check the state, it uses the O(name) parameter to find the environment variable definition line. - - When using symbols such as %, they must be properly escaped. + - When using symbols such as C(%), they must be properly escaped. version_added: "0.9" options: name: description: - - Description of a crontab entry or, if env is set, the name of environment variable. + - Description of a crontab entry or, if O(env) is set, the name of the environment variable. - This parameter is always required as of ansible-core 2.12. type: str required: yes @@ -42,7 +41,7 @@ options: type: str job: description: - - The command to execute or, if env is set, the value of environment variable. + - The command to execute or, if O(env) is set, the value of the environment variable. - The command should not contain line breaks. - Required if O(state=present). type: str @@ -59,10 +58,10 @@ options: The assumption is that this file is exclusively managed by the module, do not use if the file contains multiple entries, NEVER use for /etc/crontab. - If this is a relative path, it is interpreted with respect to C(/etc/cron.d). - - Many linux distros expect (and some require) the filename portion to consist solely + - Many Linux distros expect (and some require) the filename portion to consist solely of upper- and lower-case letters, digits, underscores, and hyphens. - - Using this parameter requires you to specify the O(user) as well, unless O(state) is not V(present). + - Using this parameter requires you to specify the O(user) as well, unless O(state=absent). - Either this parameter or O(name) is required.
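A short sketch of the O(cron_file)/O(user) pairing described above (illustrative only; the file name is hypothetical): a relative O(cron_file) is created under C(/etc/cron.d), and O(user) must be given explicitly.

- name: Manage a job in /etc/cron.d/ansible_check_dirs
  ansible.builtin.cron:
    name: check dirs
    minute: "0"
    hour: "5,2"
    job: ls -alh > /dev/null
    cron_file: ansible_check_dirs
    user: root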
type: path backup: description: @@ -132,6 +131,9 @@ options: version_added: "2.1" requirements: - cron (any 'vixie cron' conformant variant, like cronie) +notes: + - If you are experiencing permissions issues with cron and macOS, + see the official macOS documentation for further information. author: - Dane Summers (@dsummersl) - Mike Grozak (@rhaido) @@ -148,9 +150,9 @@ attributes: platform: support: full platforms: posix -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure a job that runs at 2 and 5 exists. Creates an entry like "0 5,2 * * ls -alh > /dev/null" ansible.builtin.cron: name: "check dirs" @@ -203,9 +205,9 @@ EXAMPLES = r''' name: APP_HOME env: yes state: absent -''' +""" -RETURN = r'''#''' +RETURN = r"""#""" import os import platform @@ -215,6 +217,7 @@ import sys import tempfile from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.file import S_IRWU_RWG_RWO from ansible.module_utils.common.text.converters import to_bytes, to_native from ansible.module_utils.six.moves import shlex_quote @@ -259,10 +262,9 @@ class CronTab(object): if self.cron_file: # read the cronfile try: - f = open(self.b_cron_file, 'rb') - self.n_existing = to_native(f.read(), errors='surrogate_or_strict') - self.lines = self.n_existing.splitlines() - f.close() + with open(self.b_cron_file, 'rb') as f: + self.n_existing = to_native(f.read(), errors='surrogate_or_strict') + self.lines = self.n_existing.splitlines() except IOError: # cron file does not exist return @@ -308,7 +310,7 @@ class CronTab(object): fileh = open(self.b_cron_file, 'wb') else: filed, path = tempfile.mkstemp(prefix='crontab') - os.chmod(path, int('0644', 8)) + os.chmod(path, S_IRWU_RWG_RWO) fileh = os.fdopen(filed, 'wb') fileh.write(to_bytes(self.render())) @@ -325,7 +327,7 @@ class CronTab(object): os.unlink(path) if rc != 0: - self.module.fail_json(msg=err) + self.module.fail_json(msg=f"Failed to install new cronfile: {path}", stderr=err, stdout=out, rc=rc) # set SELinux permissions if self.module.selinux_enabled() and self.cron_file: diff --git a/lib/ansible/modules/deb822_repository.py b/lib/ansible/modules/deb822_repository.py index 6b73cfe29e0..a27af10786c 100644 --- a/lib/ansible/modules/deb822_repository.py +++ b/lib/ansible/modules/deb822_repository.py @@ -2,35 +2,34 @@ # Copyright: Contributors to the Ansible project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = """ author: 'Ansible Core Team (@ansible)' short_description: 'Add and remove deb822 formatted repositories' description: -- 'Add and remove deb822 formatted repositories in Debian based distributions' +- 'Add and remove deb822 formatted repositories in Debian based distributions.' module: deb822_repository notes: -- This module will not automatically update caches, call the apt module based +- This module will not automatically update caches; call the M(ansible.builtin.apt) module based on the changed state. options: allow_downgrade_to_insecure: description: - Allow downgrading a package that was previously authenticated but - is no longer authenticated + is no longer authenticated. type: bool allow_insecure: description: - - Allow insecure repositories + - Allow insecure repositories.
type: bool allow_weak: description: - - Allow repositories signed with a key using a weak digest algorithm + - Allow repositories signed with a key using a weak digest algorithm. type: bool architectures: description: - - 'Architectures to search within repository' + - Architectures to search within repository. type: list elements: str by_hash: @@ -52,7 +51,7 @@ options: components: description: - Components specify different sections of one distribution version - present in a Suite. + present in a C(Suite). type: list elements: str date_max_future: @@ -65,8 +64,8 @@ options: type: bool inrelease_path: description: - - Determines the path to the InRelease file, relative to the normal - position of an InRelease file. + - Determines the path to the C(InRelease) file, relative to the normal + position of an C(InRelease) file. type: str languages: description: @@ -82,8 +81,8 @@ options: type: str pdiffs: description: - - Controls if APT should try to use PDiffs to update old indexes - instead of downloading the new indexes entirely + - Controls if APT should try to use C(PDiffs) to update old indexes + instead of downloading the new indexes entirely. type: bool signed_by: description: @@ -98,21 +97,20 @@ options: Suite can specify an exact path in relation to the URI(s) provided, in which case the Components: must be omitted and suite must end with a slash (C(/)). Alternatively, it may take the form of a - distribution version (e.g. a version codename like disco or artful). + distribution version (for example a version codename like C(disco) or C(artful)). If the suite does not specify a path, at least one component must be present. type: list elements: str targets: description: - - Defines which download targets apt will try to acquire from this - source. + - Defines which download targets apt will try to acquire from this source. type: list elements: str trusted: description: - Decides if a source is considered trusted or if warnings should be - raised before e.g. packages are installed from this source. + raised before, for example packages are installed from this source. type: bool types: choices: @@ -124,7 +122,7 @@ options: elements: str description: - Which types of packages to look for from a given source; either - binary V(deb) or source code V(deb-src) + binary V(deb) or source code V(deb-src). uris: description: - The URIs must specify the base of the Debian distribution archive, @@ -133,7 +131,7 @@ options: elements: str mode: description: - - The octal mode for newly created files in sources.list.d. + - The octal mode for newly created files in C(sources.list.d). 
type: raw default: '0644' state: @@ -147,9 +145,9 @@ options: requirements: - python3-debian / python-debian version_added: '2.15' -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Add debian repo deb822_repository: name: debian @@ -191,9 +189,9 @@ EXAMPLES = ''' components: stable architectures: amd64 signed_by: https://download.example.com/linux/ubuntu/gpg -''' +""" -RETURN = ''' +RETURN = """ repo: description: A source string for the repository returned: always @@ -226,7 +224,7 @@ key_filename: returned: always type: str sample: /etc/apt/keyrings/debian.gpg -''' +""" import os import re @@ -237,6 +235,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.basic import missing_required_lib from ansible.module_utils.common.collections import is_sequence +from ansible.module_utils.common.file import S_IRWXU_RXG_RXO, S_IRWU_RG_RO from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.six import raise_from # type: ignore[attr-defined] @@ -260,7 +259,7 @@ def ensure_keyrings_dir(module): changed = False if not os.path.isdir(KEYRINGS_DIR): if not module.check_mode: - os.mkdir(KEYRINGS_DIR, 0o755) + os.mkdir(KEYRINGS_DIR, S_IRWXU_RXG_RXO) changed |= True changed |= module.set_fs_attributes_if_different( @@ -354,7 +353,7 @@ def write_signed_by_key(module, v, slug): module.atomic_move(tmpfile, filename) changed |= True - changed |= module.set_mode_if_different(filename, 0o0644, False) + changed |= module.set_mode_if_different(filename, S_IRWU_RG_RO, False) return changed, filename, None @@ -501,7 +500,7 @@ def main(): deb822 = Deb822() signed_by_filename = None - for key, value in params.items(): + for key, value in sorted(params.items()): if value is None: continue diff --git a/lib/ansible/modules/debconf.py b/lib/ansible/modules/debconf.py index 0e7aad0036b..701c19dabb6 100644 --- a/lib/ansible/modules/debconf.py +++ b/lib/ansible/modules/debconf.py @@ -3,11 +3,10 @@ # Copyright: (c) 2014, Brian Coca # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: debconf short_description: Configure a .deb package @@ -27,27 +26,27 @@ attributes: platforms: debian notes: - This module requires the command line debconf tools. - - A number of questions have to be answered (depending on the package). + - Several questions have to be answered (depending on the package). Use 'debconf-show <package>' on any Debian or derivative with the package installed to see questions/settings available. - - Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords. - - It is highly recommended to add C(no_log=True) to task while handling sensitive information using this module. - - The debconf module does not reconfigure packages, it just updates the debconf database. + - Some distros will always record tasks involving the setting of passwords as changed. This is due to C(debconf-get-selections) masking passwords. + - It is highly recommended to add C(no_log=True) to the task while handling sensitive information using this module. + - The M(ansible.builtin.debconf) module does not reconfigure packages; it just updates the debconf database.
An additional step is needed (typically with C(notify) if debconf makes a change) to reconfigure the package and apply the changes. - debconf is extensively used for pre-seeding configuration prior to installation + C(debconf) is extensively used for pre-seeding configuration prior to installation rather than modifying configurations. - So, while dpkg-reconfigure does use debconf data, it is not always authoritative + So, while C(dpkg-reconfigure) does use debconf data, it is not always authoritative and you may need to check how your package is handled. - - Also note dpkg-reconfigure is a 3-phase process. It invokes the + - Also note C(dpkg-reconfigure) is a 3-phase process. It invokes the control scripts from the C(/var/lib/dpkg/info) directory with the C(<package>.prerm reconfigure <version>), C(<package>.config reconfigure <version>) and C(<package>.postinst control <version>) arguments. - The main issue is that the C(<package>.config reconfigure) step for many packages will first reset the debconf database (overriding changes made by this module) by checking the on-disk configuration. If this is the case for your package then - dpkg-reconfigure will effectively ignore changes made by debconf. - - However as dpkg-reconfigure only executes the C(.config) step if the file + C(dpkg-reconfigure) will effectively ignore changes made by debconf. - - However as C(dpkg-reconfigure) only executes the C(.config) step if the file exists, it is possible to rename it to C(/var/lib/dpkg/info/<package>.config.ignore) before executing C(dpkg-reconfigure -f noninteractive <package>) and then restore it. This seems to be compliant with Debian policy for the .config file. @@ -71,23 +70,25 @@ options: - The type of the value supplied. - It is highly recommended to add C(no_log=True) to task while specifying O(vtype=password). - V(seen) was added in Ansible 2.2. + - As of Ansible 2.17, the user can specify C(value) as a list if C(vtype) is set to V(multiselect). type: str choices: [ boolean, error, multiselect, note, password, seen, select, string, text, title ] value: description: - - Value to set the configuration to. - type: str + - Value to set the configuration to. + - As of Ansible 2.17, C(value) is of type C(raw). + type: raw aliases: [ answer ] unseen: description: - - Do not set 'seen' flag when pre-seeding. + - Do not set C(seen) flag when pre-seeding.
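A minimal sketch of the list form described above (the package and question names are hypothetical): with O(vtype=multiselect), O(value) may now be supplied as a YAML list.

- name: Pre-seed a multiselect answer as a list
  ansible.builtin.debconf:
    name: some-package
    question: some-package/enabled_components
    vtype: multiselect
    value:
      - option_a
      - option_b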
type: bool default: false author: - Brian Coca (@bcoca) -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Set default locale to fr_FR.UTF-8 ansible.builtin.debconf: name: locales @@ -120,14 +121,39 @@ EXAMPLES = r''' value: "{{ site_passphrase }}" vtype: password no_log: True -''' +""" -RETURN = r'''#''' +RETURN = r"""#""" -from ansible.module_utils.common.text.converters import to_text +from ansible.module_utils.common.text.converters import to_text, to_native from ansible.module_utils.basic import AnsibleModule +def get_password_value(module, pkg, question, vtype): + getsel = module.get_bin_path('debconf-get-selections', True) + cmd = [getsel] + rc, out, err = module.run_command(cmd) + if rc != 0: + module.fail_json(msg=f"Failed to get the value '{question}' from '{pkg}': {err}") + + for line in out.split("\n"): + if not line.startswith(pkg): + continue + + # line is a collection of tab separated values + fields = line.split('\t') + if len(fields) <= 3: + # No password found, return a blank password + return '' + try: + if fields[1] == question and fields[2] == vtype: + # If correct question and question type found, return password value + return fields[3] + except IndexError: + # Fail safe + return '' + + def get_selections(module, pkg): cmd = [module.get_bin_path('debconf-show', True), pkg] rc, out, err = module.run_command(' '.join(cmd)) @@ -150,11 +176,6 @@ def set_selection(module, pkg, question, vtype, value, unseen): if unseen: cmd.append('-u') - if vtype == 'boolean': - if value == 'True': - value = 'true' - elif value == 'False': - value = 'false' data = ' '.join([pkg, question, vtype, value]) return module.run_command(cmd, data=data) @@ -166,7 +187,7 @@ def main(): name=dict(type='str', required=True, aliases=['pkg']), question=dict(type='str', aliases=['selection', 'setting']), vtype=dict(type='str', choices=['boolean', 'error', 'multiselect', 'note', 'password', 'seen', 'select', 'string', 'text', 'title']), - value=dict(type='str', aliases=['answer']), + value=dict(type='raw', aliases=['answer']), unseen=dict(type='bool', default=False), ), required_together=(['question', 'vtype', 'value'],), @@ -189,23 +210,37 @@ def main(): if vtype is None or value is None: module.fail_json(msg="when supplying a question you must supply a valid vtype and value") + # ensure we compare booleans supplied to the way debconf sees them (true/false strings) + if vtype == 'boolean': + value = to_text(value).lower() + # if question doesn't exist, value cannot match if question not in prev: changed = True else: - existing = prev[question] - # ensure we compare booleans supplied to the way debconf sees them (true/false strings) if vtype == 'boolean': - value = to_text(value).lower() existing = to_text(prev[question]).lower() + elif vtype == 'password': + existing = get_password_value(module, pkg, question, vtype) + elif vtype == 'multiselect' and isinstance(value, list): + try: + value = sorted(value) + except TypeError as exc: + module.fail_json(msg="Invalid value provided for 'multiselect': %s" % to_native(exc)) + existing = sorted([i.strip() for i in existing.split(",")]) if value != existing: changed = True if changed: if not module.check_mode: + if vtype == 'multiselect' and isinstance(value, list): + try: + value = ", ".join(value) + except TypeError as exc: + module.fail_json(msg="Invalid value provided for 'multiselect': %s" % to_native(exc)) rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen) if rc: module.fail_json(msg=e) @@ -215,12 +250,12 @@ def main(): prev = 
{question: prev[question]} else: prev[question] = '' + + diff_dict = {} if module._diff: after = prev.copy() after.update(curr) diff_dict = {'before': prev, 'after': after} - else: - diff_dict = {} module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict) diff --git a/lib/ansible/modules/debug.py b/lib/ansible/modules/debug.py index 6e6301c8e6f..c90b1eea806 100644 --- a/lib/ansible/modules/debug.py +++ b/lib/ansible/modules/debug.py @@ -3,11 +3,10 @@ # Copyright: (c) 2012 Dag Wieers # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: debug short_description: Print statements during execution @@ -15,7 +14,7 @@ description: - This module prints statements during execution and can be useful for debugging variables or expressions without necessarily halting the playbook. -- Useful for debugging together with the 'when:' directive. +- Useful for debugging together with the C(when:) directive. - This module is also supported for Windows targets. version_added: '0.8' options: @@ -69,9 +68,9 @@ seealso: author: - Dag Wieers (@dagwieers) - Michael DeHaan -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Print the gateway for each host when defined ansible.builtin.debug: msg: System {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }} @@ -96,4 +95,4 @@ EXAMPLES = r''' msg: - "Provisioning based on YOUR_KEY which is: {{ lookup('ansible.builtin.env', 'YOUR_KEY') }}" - "These servers were built using the password of '{{ password_used }}'. Please retain this for later use." -''' +""" diff --git a/lib/ansible/modules/dnf.py b/lib/ansible/modules/dnf.py index f2a968f22d6..7ab874a941f 100644 --- a/lib/ansible/modules/dnf.py +++ b/lib/ansible/modules/dnf.py @@ -6,11 +6,10 @@ # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: dnf version_added: 1.9 @@ -20,16 +19,22 @@ description: options: use_backend: description: - - By default, this module will select the backend based on the C(ansible_pkg_mgr) fact. + - Backend module to use. default: "auto" - choices: [ auto, dnf4, dnf5 ] + choices: + auto: Automatically select the backend based on the C(ansible_facts.pkg_mgr) fact. + yum: Alias for V(auto) (see Notes) + dnf: M(ansible.builtin.dnf) + yum4: Alias for V(dnf) + dnf4: Alias for V(dnf) + dnf5: M(ansible.builtin.dnf5) type: str version_added: 2.15 name: description: - "A package name or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: dnf -y update. - You can also pass a url or a local path to a rpm file. + You can also pass a url or a local path to an rpm file. To operate on several packages this can accept a comma separated string of packages or a list of packages." - Comparison operators for package version are valid here C(>), C(<), C(>=), C(<=). Example - C(name >= 1.0). Spaces around the operator are required. @@ -50,14 +55,14 @@ options: state: description: - Whether to install (V(present), V(latest)), or remove (V(absent)) a package. 
- - Default is V(None), however in effect the default action is V(present) unless the O(autoremove) option is - enabled for this module, then V(absent) is inferred. + - Default is V(None), however in effect the default action is V(present) unless O(autoremove=true), + in which case V(absent) is inferred. choices: ['absent', 'present', 'installed', 'removed', 'latest'] type: str enablerepo: description: - - I(Repoid) of repositories to enable for the install/update operation. + - C(Repoid) of repositories to enable for the install/update operation. These repos will not persist beyond the transaction. When specifying multiple repos, separate them with a ",". type: list @@ -66,9 +71,9 @@ options: disablerepo: description: - - I(Repoid) of repositories to disable for the install/update operation. + - C(Repoid) of repositories to disable for the install/update operation. These repos will not persist beyond the transaction. - When specifying multiple repos, separate them with a ",". + When specifying multiple repos, separate them with a C(,). type: list elements: str default: [] @@ -81,7 +86,7 @@ options: disable_gpg_check: description: - Whether to disable the GPG checking of signatures of packages being - installed. Has an effect only if O(state) is V(present) or V(latest). + installed. Has an effect only if O(state=present) or O(state=latest). - This setting affects packages installed from a repository as well as "local" packages installed from the filesystem or a URL. type: bool @@ -106,13 +111,13 @@ options: description: - If V(true), removes all "leaf" packages from the system that were originally installed as dependencies of user-installed packages but which are no longer - required by any such package. Should be used alone or when O(state) is V(absent) + required by any such package. Should be used alone or when O(state=absent). type: bool default: "no" version_added: "2.4" exclude: description: - - Package name(s) to exclude when state=present, or latest. This can be a + - Package name(s) to exclude when O(state=present) or O(state=latest). This can be a list or a comma separated string. version_added: "2.7" type: list @@ -121,14 +126,14 @@ options: skip_broken: description: - Skip all unavailable packages or packages with broken dependencies - without raising an error. Equivalent to passing the --skip-broken option. + without raising an error. Equivalent to passing the C(--skip-broken) option. type: bool default: "no" version_added: "2.7" update_cache: description: - Force dnf to check if cache is out of date and redownload if needed. - Has an effect only if O(state) is V(present) or V(latest). + Has an effect only if O(state=present) or O(state=latest). type: bool default: "no" aliases: [ expire-cache ] @@ -136,7 +141,7 @@ options: update_only: description: - When using latest, only update installed packages. Do not install packages. - - Has an effect only if O(state) is V(latest) + - Has an effect only if O(state=latest). default: "no" type: bool version_added: "2.7" @@ -156,7 +161,7 @@ options: version_added: "2.7" enable_plugin: description: - - I(Plugin) name to enable for the install/update operation. + - C(Plugin) name to enable for the install/update operation. The enabled plugin will not persist beyond the transaction. version_added: "2.7" type: list elements: str default: [] disable_plugin: description: - - I(Plugin) name to disable for the install/update operation. + - C(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction. version_added: "2.7" type: list @@ -174,13 +179,14 @@ options: description: - Disable the excludes defined in DNF config files. - If set to V(all), disables all excludes. - - If set to V(main), disable excludes defined in [main] in dnf.conf. + - If set to V(main), disable excludes defined in C([main]) in C(dnf.conf). - If set to V(repoid), disable excludes defined for given repo id. version_added: "2.7" type: str validate_certs: description: - - This only applies if using a https url as the source of the rpm. e.g. for localinstall. If set to V(false), the SSL certificates will not be validated. + - This only applies if using an HTTPS URL as the source of the rpm, for example for localinstall. + If set to V(false), the SSL certificates will not be validated. - This should only be set to V(false) on personally controlled sites using self-signed certificates, as it avoids verifying the source site. type: bool default: "yes" @@ -196,7 +202,7 @@ options: description: - Specify if the named package and version is allowed to downgrade a maybe already installed higher version of that package. Note that setting O(allow_downgrade=true) can make this module behave in a non-idempotent way. The task could end up with a set of packages that does not match the complete list of specified packages to install (because dependencies between the downgraded @@ -207,8 +213,8 @@ options: version_added: "2.7" install_repoquery: description: - - This is effectively a no-op in DNF as it is not needed with DNF, but is an accepted parameter for feature - parity/compatibility with the M(ansible.builtin.yum) module. + - This is effectively a no-op in DNF as it is not needed with DNF. + - This option is deprecated and will be removed in ansible-core 2.20. type: bool default: "yes" version_added: "2.7" @@ -239,18 +245,26 @@ options: version_added: "2.8" allowerasing: description: - - If V(true) it allows erasing of installed packages to resolve dependencies. + - If V(true), it allows erasing of installed packages to resolve dependencies. required: false type: bool default: "no" version_added: "2.10" nobest: description: - - Set best option to False, so that transactions are not limited to best candidates only. + - This is the opposite of the O(best) option, kept for backwards compatibility. + - Since ansible-core 2.17, the default value is set by the operating system distribution. required: false type: bool - default: "no" version_added: "2.11" + best: + description: + - When set to V(true), either use a package with the highest version available or fail. + - When set to V(false), if the latest version cannot be installed, fall back to a lower version. + - Default is set by the operating system distribution. + required: false + type: bool + version_added: "2.17" cacheonly: description: - Tells dnf to run entirely from system cache; does not download or update metadata. @@ -262,7 +276,7 @@ extends_documentation_fragment: - action_common_attributes.flow attributes: action: - details: In the case of dnf, it has 2 action plugins that use it under the hood, M(ansible.builtin.yum) and M(ansible.builtin.package). + details: dnf has 2 action plugins that use it under the hood, M(ansible.builtin.dnf) and M(ansible.builtin.package).
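A short sketch of the new O(best) toggle described above (an illustrative task, not part of this patch): insist on the highest available candidate, failing rather than settling for a lower version.

- name: Upgrade httpd, allowing only the best candidate
  ansible.builtin.dnf:
    name: httpd
    state: latest
    best: true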
support: partial async: support: none @@ -275,23 +289,26 @@ attributes: platform: platforms: rhel notes: - - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the I(name) option. + - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the O(name) option. - Group removal doesn't work if the group was installed with Ansible because upstream dnf's API doesn't properly mark groups as installed, therefore upon removal the module is unable to detect that the group is installed - (https://bugzilla.redhat.com/show_bug.cgi?id=1620324) + U(https://bugzilla.redhat.com/show_bug.cgi?id=1620324). + - While O(use_backend=yum) and the ability to call the action plugin as + M(ansible.builtin.yum) are provided for syntax compatibility, the YUM + backend was removed in ansible-core 2.17 because the required libraries are + not available for any supported version of Python. If you rely on this + functionality, use an older version of Ansible. requirements: - - "python >= 2.6" - - python-dnf - - for the autoremove option you need dnf >= 2.0.1" + - python3-dnf author: - Igor Gnatenko (@ignatenkobrain) - Cristian van Ee (@DJMuggs) - Berend De Schouwer (@berenddeschouwer) - Adam Miller (@maxamillion) -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Install the latest version of Apache ansible.builtin.dnf: name: httpd @@ -377,16 +394,13 @@ EXAMPLES = ''' ansible.builtin.dnf: name: '@postgresql/client' state: present -''' +""" import os -import re import sys from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.urls import fetch_file -from ansible.module_utils.six import text_type -from ansible.module_utils.compat.version import LooseVersion from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.locale import get_best_parsable_locale @@ -411,22 +425,8 @@ class DnfModule(YumDnf): super(DnfModule, self).__init__(module) self._ensure_dnf() - self.lockfile = "/var/cache/dnf/*_lock.pid" self.pkg_mgr_name = "dnf" - - try: - self.with_modules = dnf.base.WITH_MODULES - except AttributeError: - self.with_modules = False - - # DNF specific args that are not part of YumDnf - self.allowerasing = self.module.params['allowerasing'] - self.nobest = self.module.params['nobest'] - - def is_lockfile_pid_valid(self): - # FIXME? it looks like DNF takes care of invalid lock files itself? - # https://github.com/ansible/ansible/issues/57189 - return True + self.with_modules = dnf.base.WITH_MODULES def _sanitize_dnf_error_msg_install(self, spec, error): """ @@ -441,22 +441,6 @@ class DnfModule(YumDnf): return error - def _sanitize_dnf_error_msg_remove(self, spec, error): - """ - For unhandled dnf.exceptions.Error scenarios, there are certain error - messages we want to ignore in a removal scenario as known benign - failures. Do that here. 
- """ - if ( - 'no package matched' in to_native(error) or - 'No match for argument:' in to_native(error) - ): - return (False, "{0} is not installed".format(spec)) - - # Return value is tuple of: - # ("Is this actually a failure?", "Error Message") - return (True, error) - def _package_dict(self, package): """Return a dictionary of information for the package.""" # NOTE: This no longer contains the 'dnfstate' field because it is @@ -469,7 +453,7 @@ class DnfModule(YumDnf): 'version': package.version, 'repo': package.repoid} - # envra format for alignment with the yum module + # envra format for backwards compat result['envra'] = '{epoch}:{name}-{version}-{release}.{arch}'.format(**result) # keep nevra key for backwards compat as it was previously @@ -483,94 +467,6 @@ class DnfModule(YumDnf): return result - def _split_package_arch(self, packagename): - # This list was auto generated on a Fedora 28 system with the following one-liner - # printf '[ '; for arch in $(ls /usr/lib/rpm/platform); do printf '"%s", ' ${arch%-linux}; done; printf ']\n' - redhat_rpm_arches = [ - "aarch64", "alphaev56", "alphaev5", "alphaev67", "alphaev6", "alpha", - "alphapca56", "amd64", "armv3l", "armv4b", "armv4l", "armv5tejl", "armv5tel", - "armv5tl", "armv6hl", "armv6l", "armv7hl", "armv7hnl", "armv7l", "athlon", - "geode", "i386", "i486", "i586", "i686", "ia32e", "ia64", "m68k", "mips64el", - "mips64", "mips64r6el", "mips64r6", "mipsel", "mips", "mipsr6el", "mipsr6", - "noarch", "pentium3", "pentium4", "ppc32dy4", "ppc64iseries", "ppc64le", "ppc64", - "ppc64p7", "ppc64pseries", "ppc8260", "ppc8560", "ppciseries", "ppc", "ppcpseries", - "riscv64", "s390", "s390x", "sh3", "sh4a", "sh4", "sh", "sparc64", "sparc64v", - "sparc", "sparcv8", "sparcv9", "sparcv9v", "x86_64" - ] - - name, delimiter, arch = packagename.rpartition('.') - if name and arch and arch in redhat_rpm_arches: - return name, arch - return packagename, None - - def _packagename_dict(self, packagename): - """ - Return a dictionary of information for a package name string or None - if the package name doesn't contain at least all NVR elements - """ - - if packagename[-4:] == '.rpm': - packagename = packagename[:-4] - - rpm_nevr_re = re.compile(r'(\S+)-(?:(\d*):)?(.*)-(~?\w+[\w.+]*)') - try: - arch = None - nevr, arch = self._split_package_arch(packagename) - if arch: - packagename = nevr - rpm_nevr_match = rpm_nevr_re.match(packagename) - if rpm_nevr_match: - name, epoch, version, release = rpm_nevr_re.match(packagename).groups() - if not version or not version.split('.')[0].isdigit(): - return None - else: - return None - except AttributeError as e: - self.module.fail_json( - msg='Error attempting to parse package: %s, %s' % (packagename, to_native(e)), - rc=1, - results=[] - ) - - if not epoch: - epoch = "0" - - if ':' in name: - epoch_name = name.split(":") - - epoch = epoch_name[0] - name = ''.join(epoch_name[1:]) - - result = { - 'name': name, - 'epoch': epoch, - 'release': release, - 'version': version, - } - - return result - - # Original implementation from yum.rpmUtils.miscutils (GPLv2+) - # http://yum.baseurl.org/gitweb?p=yum.git;a=blob;f=rpmUtils/miscutils.py - def _compare_evr(self, e1, v1, r1, e2, v2, r2): - # return 1: a is newer than b - # 0: a and b are the same version - # -1: b is newer than a - if e1 is None: - e1 = '0' - else: - e1 = str(e1) - v1 = str(v1) - r1 = str(r1) - if e2 is None: - e2 = '0' - else: - e2 = str(e2) - v2 = str(v2) - r2 = str(r2) - rc = dnf.rpm.rpm.labelCompare((e1, v1, r1), (e2, v2, r2)) - return rc - def 
_ensure_dnf(self): locale = get_best_parsable_locale(self.module) os.environ['LC_ALL'] = os.environ['LC_MESSAGES'] = locale @@ -579,9 +475,9 @@ class DnfModule(YumDnf): global dnf try: import dnf - import dnf.cli import dnf.const import dnf.exceptions + import dnf.package import dnf.subject import dnf.util HAS_DNF = True @@ -593,7 +489,6 @@ class DnfModule(YumDnf): system_interpreters = ['/usr/libexec/platform-python', '/usr/bin/python3', - '/usr/bin/python2', '/usr/bin/python'] if not has_respawned(): @@ -608,7 +503,7 @@ class DnfModule(YumDnf): # done all we can do, something is just broken (auto-install isn't useful anymore with respawn, so it was removed) self.module.fail_json( msg="Could not import the dnf python module using {0} ({1}). " - "Please install `python3-dnf` or `python2-dnf` package or ensure you have specified the " + "Please install `python3-dnf` package or ensure you have specified the " "correct ansible_python_interpreter. (attempted {2})" .format(sys.executable, sys.version.replace('\n', ''), system_interpreters), results=[] @@ -685,13 +580,20 @@ class DnfModule(YumDnf): # setting this to an empty string instead of None appears to mimic the DNF CLI behavior conf.substitutions['releasever'] = '' + # Honor installroot for dnf directories + # This will also perform variable substitutions in the paths + for opt in ('cachedir', 'logdir', 'persistdir'): + conf.prepend_installroot(opt) + # Set skip_broken (in dnf this is strict=0) if self.skip_broken: conf.strict = 0 - # Set best - if self.nobest: - conf.best = 0 + # best and nobest are mutually exclusive + if self.nobest is not None: + conf.best = not self.nobest + elif self.best is not None: + conf.best = self.best if self.download_only: conf.downloadonly = True @@ -724,26 +626,23 @@ class DnfModule(YumDnf): for repo in repos.get_matching(repo_pattern): repo.enable() + for repo in base.repos.iter_enabled(): + if self.disable_gpg_check: + repo.gpgcheck = False + repo.repo_gpgcheck = False + def _base(self, conf_file, disable_gpg_check, disablerepo, enablerepo, installroot, sslverify): """Return a fully configured dnf Base object.""" base = dnf.Base() self._configure_base(base, conf_file, disable_gpg_check, installroot, sslverify) - try: - # this method has been supported in dnf-4.2.17-6 or later - # https://bugzilla.redhat.com/show_bug.cgi?id=1788212 - base.setup_loggers() - except AttributeError: - pass - try: - base.init_plugins(set(self.disable_plugin), set(self.enable_plugin)) - base.pre_configure_plugins() - except AttributeError: - pass # older versions of dnf didn't require this and don't have these methods + + base.setup_loggers() + base.init_plugins(set(self.disable_plugin), set(self.enable_plugin)) + base.pre_configure_plugins() + self._specify_repositories(base, disablerepo, enablerepo) - try: - base.configure_plugins() - except AttributeError: - pass # older versions of dnf didn't require this and don't have these methods + + base.configure_plugins() try: if self.update_cache: @@ -809,48 +708,40 @@ class DnfModule(YumDnf): self.module.exit_json(msg="", results=results) def _is_installed(self, pkg): - installed = self.base.sack.query().installed() - - package_spec = {} - name, arch = self._split_package_arch(pkg) - if arch: - package_spec['arch'] = arch - - package_details = self._packagename_dict(pkg) - if package_details: - package_details['epoch'] = int(package_details['epoch']) - package_spec.update(package_details) - else: - package_spec['name'] = name - - return bool(installed.filter(**package_spec)) - - 
def _is_newer_version_installed(self, pkg_name): - candidate_pkg = self._packagename_dict(pkg_name) - if not candidate_pkg: - # The user didn't provide a versioned rpm, so version checking is - # not required - return False - - installed = self.base.sack.query().installed() - installed_pkg = installed.filter(name=candidate_pkg['name']).run() - if installed_pkg: - installed_pkg = installed_pkg[0] - - # this looks weird but one is a dict and the other is a dnf.Package - evr_cmp = self._compare_evr( - installed_pkg.epoch, installed_pkg.version, installed_pkg.release, - candidate_pkg['epoch'], candidate_pkg['version'], candidate_pkg['release'], + installed_query = dnf.subject.Subject(pkg).get_best_query(sack=self.base.sack).installed() + if dnf.util.is_glob_pattern(pkg): + available_query = dnf.subject.Subject(pkg).get_best_query(sack=self.base.sack).available() + return not ( + {p.name for p in available_query} - {p.name for p in installed_query} ) - - return evr_cmp == 1 else: + return bool(installed_query) + + def _is_newer_version_installed(self, pkg_spec): + try: + if isinstance(pkg_spec, dnf.package.Package): + installed = sorted(self.base.sack.query().installed().filter(name=pkg_spec.name, arch=pkg_spec.arch))[-1] + return installed.evr_gt(pkg_spec) + else: + available = dnf.subject.Subject(pkg_spec).get_best_query(sack=self.base.sack).available() + installed = self.base.sack.query().installed().filter(name=available[0].name) + for arch in sorted(set(p.arch for p in installed)): # select only from already-installed arches for this case + installed_pkg = sorted(installed.filter(arch=arch))[-1] + try: + available_pkg = sorted(available.filter(arch=arch))[-1] + except IndexError: + continue # nothing currently available for this arch; keep going + if installed_pkg.evr_gt(available_pkg): + return True + return False + except IndexError: return False def _mark_package_install(self, pkg_spec, upgrade=False): """Mark the package for install.""" is_newer_version_installed = self._is_newer_version_installed(pkg_spec) is_installed = self._is_installed(pkg_spec) + msg = '' try: if is_newer_version_installed: if self.allow_downgrade: @@ -884,49 +775,34 @@ class DnfModule(YumDnf): pass else: # Case 7, The package is not installed, simply install it self.base.install(pkg_spec, strict=self.base.conf.strict) - - return {'failed': False, 'msg': '', 'failure': '', 'rc': 0} - except dnf.exceptions.MarkingError as e: + msg = "No package {0} available.".format(pkg_spec) + if self.base.conf.strict: + return { + 'failed': True, + 'msg': msg, + 'failure': " ".join((pkg_spec, to_native(e))), + 'rc': 1, + "results": [] + } + except dnf.exceptions.DepsolveError as e: return { 'failed': True, - 'msg': "No package {0} available.".format(pkg_spec), + 'msg': "Depsolve Error occurred for package {0}.".format(pkg_spec), 'failure': " ".join((pkg_spec, to_native(e))), 'rc': 1, "results": [] } - - except dnf.exceptions.DepsolveError as e: + except dnf.exceptions.Error as e: return { 'failed': True, - 'msg': "Depsolve Error occurred for package {0}.".format(pkg_spec), + 'msg': "Unknown Error occurred for package {0}.".format(pkg_spec), 'failure': " ".join((pkg_spec, to_native(e))), 'rc': 1, "results": [] } - except dnf.exceptions.Error as e: - if to_text("already installed") in to_text(e): - return {'failed': False, 'msg': '', 'failure': ''} - else: - return { - 'failed': True, - 'msg': "Unknown Error occurred for package {0}.".format(pkg_spec), - 'failure': " ".join((pkg_spec, to_native(e))), - 'rc': 1, - "results": [] - 
} - - def _whatprovides(self, filepath): - self.base.read_all_repos() - available = self.base.sack.query().available() - # Search in file - files_filter = available.filter(file=filepath) - # And Search in provides - pkg_spec = files_filter.union(available.filter(provides=filepath)).run() - - if pkg_spec: - return pkg_spec[0].name + return {'failed': False, 'msg': msg, 'failure': '', 'rc': 0} def _parse_spec_group_file(self): pkg_specs, grp_specs, module_specs, filenames = [], [], [], [] @@ -939,11 +815,13 @@ class DnfModule(YumDnf): elif name.endswith(".rpm"): filenames.append(name) elif name.startswith('/'): - # like "dnf install /usr/bin/vi" - pkg_spec = self._whatprovides(name) - if pkg_spec: - pkg_specs.append(pkg_spec) - continue + # dnf install /usr/bin/vi + installed = self.base.sack.query().filter(provides=name, file=name).installed().run() + if installed: + pkg_specs.append(installed[0].name) # should be only one? + elif not self.update_only: + # not installed, pass the filename for dnf to process + pkg_specs.append(name) elif name.startswith("@") or ('/' in name): if not already_loaded_comps: self.base.read_comps() @@ -966,12 +844,14 @@ class DnfModule(YumDnf): def _update_only(self, pkgs): not_installed = [] for pkg in pkgs: - if self._is_installed(pkg): + if self._is_installed( + self._package_dict(pkg)["nevra"] if isinstance(pkg, dnf.package.Package) else pkg + ): try: - if isinstance(to_text(pkg), text_type): - self.base.upgrade(pkg) - else: + if isinstance(pkg, dnf.package.Package): self.base.package_upgrade(pkg) + else: + self.base.upgrade(pkg) except Exception as e: self.module.fail_json( msg="Error occurred attempting update_only operation: {0}".format(to_native(e)), @@ -984,36 +864,20 @@ class DnfModule(YumDnf): return not_installed def _install_remote_rpms(self, filenames): - if int(dnf.__version__.split(".")[0]) >= 2: - pkgs = list(sorted(self.base.add_remote_rpms(list(filenames)), reverse=True)) - else: - pkgs = [] - try: - for filename in filenames: - pkgs.append(self.base.add_remote_rpm(filename)) - except IOError as e: - if to_text("Can not load RPM file") in to_text(e): - self.module.fail_json( - msg="Error occurred attempting remote rpm install of package: {0}. 
{1}".format(filename, to_native(e)), - results=[], - rc=1, - ) - if self.update_only: - self._update_only(pkgs) - else: - for pkg in pkgs: - try: - if self._is_newer_version_installed(self._package_dict(pkg)['nevra']): - if self.allow_downgrade: - self.base.package_install(pkg, strict=self.base.conf.strict) - else: + try: + pkgs = self.base.add_remote_rpms(filenames) + if self.update_only: + self._update_only(pkgs) + else: + for pkg in pkgs: + if not (self._is_newer_version_installed(pkg) and not self.allow_downgrade): self.base.package_install(pkg, strict=self.base.conf.strict) - except Exception as e: - self.module.fail_json( - msg="Error occurred attempting remote rpm operation: {0}".format(to_native(e)), - results=[], - rc=1, - ) + except Exception as e: + self.module.fail_json( + msg="Error occurred attempting remote rpm operation: {0}".format(to_native(e)), + results=[], + rc=1, + ) def _is_module_installed(self, module_spec): if self.with_modules: @@ -1030,7 +894,7 @@ class DnfModule(YumDnf): else: return True # No stream provided, but module found - return False # seems like a sane default + return False # seems like a logical default def ensure(self): @@ -1199,13 +1063,6 @@ class DnfModule(YumDnf): response['results'].append("Packages providing %s not installed due to update_only specified" % spec) else: for pkg_spec in pkg_specs: - # Previously we forced base.conf.best=True here. - # However in 2.11+ there is a self.nobest option, so defer to that. - # Note, however, that just because nobest isn't set, doesn't mean that - # base.conf.best is actually true. We only force it false in - # _configure_base(), we never set it to true, and it can default to false. - # Thus, we still need to explicitly set it here. - self.base.conf.best = not self.nobest install_result = self._mark_package_install(pkg_spec, upgrade=True) if install_result['failed']: if install_result['msg']: @@ -1241,14 +1098,6 @@ class DnfModule(YumDnf): except dnf.exceptions.CompsError: # Group is already uninstalled. pass - except AttributeError: - # Group either isn't installed or wasn't marked installed at install time - # because of DNF bug - # - # This is necessary until the upstream dnf API bug is fixed where installing - # a group via the dnf API doesn't actually mark the group as installed - # https://bugzilla.redhat.com/show_bug.cgi?id=1620324 - pass for environment in environments: try: @@ -1257,25 +1106,11 @@ class DnfModule(YumDnf): # Environment is already uninstalled. 
pass - installed = self.base.sack.query().installed() for pkg_spec in pkg_specs: - # short-circuit installed check for wildcard matching - if '*' in pkg_spec: - try: - self.base.remove(pkg_spec) - except dnf.exceptions.MarkingError as e: - is_failure, handled_remove_error = self._sanitize_dnf_error_msg_remove(pkg_spec, to_native(e)) - if is_failure: - failure_response['failures'].append('{0} - {1}'.format(pkg_spec, to_native(e))) - else: - response['results'].append(handled_remove_error) - continue - - installed_pkg = dnf.subject.Subject(pkg_spec).get_best_query( - sack=self.base.sack).installed().run() - - for pkg in installed_pkg: - self.base.remove(str(pkg)) + try: + self.base.remove(pkg_spec) + except dnf.exceptions.MarkingError as e: + response['results'].append(f"{e.value}: {pkg_spec}") # Like the dnf CLI we want to allow recursive removal of dependent # packages @@ -1329,10 +1164,8 @@ class DnfModule(YumDnf): self.base.download_packages(self.base.transaction.install_set) except dnf.exceptions.DownloadError as e: - self.module.fail_json( - msg="Failed to download packages: {0}".format(to_text(e)), - results=[], - ) + failure_response['msg'] = "Failed to download packages: {0}".format(to_native(e)) + self.module.fail_json(**failure_response) # Validate GPG. This is NOT done in dnf.Base (it's done in the # upstream CLI subclass of dnf.Base) @@ -1373,33 +1206,10 @@ class DnfModule(YumDnf): failure_response['msg'] = "Depsolve Error occurred: {0}".format(to_native(e)) self.module.fail_json(**failure_response) except dnf.exceptions.Error as e: - if to_text("already installed") in to_text(e): - response['changed'] = False - response['results'].append("Package already installed: {0}".format(to_native(e))) - self.module.exit_json(**response) - else: - failure_response['msg'] = "Unknown Error occurred: {0}".format(to_native(e)) - self.module.fail_json(**failure_response) + failure_response['msg'] = "Unknown Error occurred: {0}".format(to_native(e)) + self.module.fail_json(**failure_response) def run(self): - """The main function.""" - - # Check if autoremove is called correctly - if self.autoremove: - if LooseVersion(dnf.__version__) < LooseVersion('2.0.1'): - self.module.fail_json( - msg="Autoremove requires dnf>=2.0.1. Current dnf version is %s" % dnf.__version__, - results=[], - ) - - # Check if download_dir is called correctly - if self.download_dir: - if LooseVersion(dnf.__version__) < LooseVersion('2.6.2'): - self.module.fail_json( - msg="download_dir requires dnf>=2.6.2. 
Current dnf version is %s" % dnf.__version__, - results=[], - ) - if self.update_cache and not self.names and not self.list: self.base = self._base( self.conf_file, self.disable_gpg_check, self.disablerepo, @@ -1439,8 +1249,10 @@ class DnfModule(YumDnf): if self.with_modules: self.module_base = dnf.module.module_base.ModuleBase(self.base) - - self.ensure() + try: + self.ensure() + finally: + self.base.close() def main(): @@ -1455,11 +1267,7 @@ def main(): # list=repos # list=pkgspec - # Extend yumdnf_argument_spec with dnf-specific features that will never be - # backported to yum because yum is now in "maintenance mode" upstream - yumdnf_argument_spec['argument_spec']['allowerasing'] = dict(default=False, type='bool') - yumdnf_argument_spec['argument_spec']['nobest'] = dict(default=False, type='bool') - yumdnf_argument_spec['argument_spec']['use_backend'] = dict(default='auto', choices=['auto', 'dnf4', 'dnf5']) + yumdnf_argument_spec['argument_spec']['use_backend'] = dict(default='auto', choices=['auto', 'dnf', 'yum', 'yum4', 'dnf4', 'dnf5']) module = AnsibleModule( **yumdnf_argument_spec diff --git a/lib/ansible/modules/dnf5.py b/lib/ansible/modules/dnf5.py index bf87cd4c0ce..df4ee206748 100644 --- a/lib/ansible/modules/dnf5.py +++ b/lib/ansible/modules/dnf5.py @@ -2,9 +2,8 @@ # Copyright 2023 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type DOCUMENTATION = """ module: dnf5 @@ -18,8 +17,8 @@ options: name: description: - "A package name or package specifier with version, like C(name-1.0). - When using state=latest, this can be '*' which means run: dnf -y update. - You can also pass a url or a local path to a rpm file. + When using O(state=latest), this can be C(*) which means run: C(dnf -y update). + You can also pass a url or a local path to an rpm file. To operate on several packages this can accept a comma separated string of packages or a list of packages." - Comparison operators for package version are valid here C(>), C(<), C(>=), C(<=). Example - C(name >= 1.0). Spaces around the operator are required. @@ -38,15 +37,15 @@ options: state: description: - Whether to install (V(present), V(latest)), or remove (V(absent)) a package. - - Default is V(None), however in effect the default action is V(present) unless the V(autoremove) option is - enabled for this module, then V(absent) is inferred. + - Default is V(None), however in effect the default action is V(present) unless the O(autoremove=true), + then V(absent) is inferred. choices: ['absent', 'present', 'installed', 'removed', 'latest'] type: str enablerepo: description: - I(Repoid) of repositories to enable for the install/update operation. These repos will not persist beyond the transaction. - When specifying multiple repos, separate them with a ",". + When specifying multiple repos, separate them with a C(,). type: list elements: str default: [] @@ -54,7 +53,7 @@ options: description: - I(Repoid) of repositories to disable for the install/update operation. These repos will not persist beyond the transaction. - When specifying multiple repos, separate them with a ",". + When specifying multiple repos, separate them with a C(,). 
type: list elements: str default: [] @@ -85,12 +84,12 @@ options: description: - If V(true), removes all "leaf" packages from the system that were originally installed as dependencies of user-installed packages but which are no longer - required by any such package. Should be used alone or when O(state) is V(absent) + required by any such package. Should be used alone or when O(state=absent). type: bool default: "no" exclude: description: - - Package name(s) to exclude when state=present, or latest. This can be a + - Package name(s) to exclude when O(state=present) or O(state=latest). This can be a list or a comma separated string. type: list elements: str @@ -98,20 +97,20 @@ options: skip_broken: description: - Skip all unavailable packages or packages with broken dependencies - without raising an error. Equivalent to passing the --skip-broken option. + without raising an error. Equivalent to passing the C(--skip-broken) option. type: bool default: "no" update_cache: description: - Force dnf to check if cache is out of date and redownload if needed. - Has an effect only if O(state) is V(present) or V(latest). + Has an effect only if O(state=present) or O(state=latest). type: bool default: "no" aliases: [ expire-cache ] update_only: description: - When using latest, only update installed packages. Do not install packages. - - Has an effect only if O(state) is V(latest) + - Has an effect only if O(state=present) or O(state=latest). default: "no" type: bool security: @@ -128,17 +127,19 @@ options: type: bool enable_plugin: description: - - This is currently a no-op as dnf5 itself does not implement this feature. - I(Plugin) name to enable for the install/update operation. The enabled plugin will not persist beyond the transaction. + - O(disable_plugin) takes precedence in case a plugin is listed in both O(enable_plugin) and O(disable_plugin). + - Requires python3-libdnf5 5.2.0.0+. type: list elements: str default: [] disable_plugin: description: - - This is currently a no-op as dnf5 itself does not implement this feature. - I(Plugin) name to disable for the install/update operation. The disabled plugins will not persist beyond the transaction. + - O(disable_plugin) takes precedence in case a plugin is listed in both O(enable_plugin) and O(disable_plugin). + - Requires python3-libdnf5 5.2.0.0+. type: list default: [] elements: str @@ -146,13 +147,13 @@ options: description: - Disable the excludes defined in DNF config files. - If set to V(all), disables all excludes. - - If set to V(main), disable excludes defined in [main] in dnf.conf. + - If set to V(main), disable excludes defined in C([main]) in C(dnf.conf). - If set to V(repoid), disable excludes defined for given repo id. type: str validate_certs: description: - This is effectively a no-op in the dnf5 module as dnf5 itself handles downloading a https url as the source of the rpm, - but is an accepted parameter for feature parity/compatibility with the M(ansible.builtin.yum) module. + but is an accepted parameter for feature parity/compatibility with the M(ansible.builtin.dnf) module. type: bool default: "yes" sslverify: @@ -165,7 +166,7 @@ options: description: - Specify if the named package and version is allowed to downgrade a maybe already installed higher version of that package. - Note that setting allow_downgrade=True can make this module + Note that setting O(allow_downgrade=true) can make this module behave in a non-idempotent way. 
The task could end up with a set of packages that does not match the complete list of specified packages to install (because dependencies between the downgraded @@ -175,8 +176,8 @@ options: default: "no" install_repoquery: description: - - This is effectively a no-op in DNF as it is not needed with DNF, but is an accepted parameter for feature - parity/compatibility with the M(ansible.builtin.yum) module. + - This is effectively a no-op in DNF as it is not needed with DNF. + - This option is deprecated and will be removed in ansible-core 2.20. type: bool default: "yes" download_only: @@ -209,10 +210,18 @@ options: default: "no" nobest: description: - - Set best option to False, so that transactions are not limited to best candidates only. + - This is the opposite of the O(best) option kept for backwards compatibility. + - Since ansible-core 2.17 the default value is set by the operating system distribution. required: false type: bool - default: "no" + best: + description: + - When set to V(true), either use a package with the highest version available or fail. + - When set to V(false), if the latest version cannot be installed go with the lower version. + - Default is set by the operating system distribution. + required: false + type: bool + version_added: "2.17" cacheonly: description: - Tells dnf to run entirely from system cache; does not download or update metadata. @@ -223,7 +232,7 @@ extends_documentation_fragment: - action_common_attributes.flow attributes: action: - details: In the case of dnf, it has 2 action plugins that use it under the hood, M(ansible.builtin.yum) and M(ansible.builtin.package). + details: dnf5 has 2 action plugins that use it under the hood, M(ansible.builtin.dnf) and M(ansible.builtin.package). support: partial async: support: none @@ -236,7 +245,6 @@ attributes: platform: platforms: rhel requirements: - - "python3" - "python3-libdnf5" version_added: 2.15 """ @@ -350,30 +358,67 @@ libdnf5 = None def is_installed(base, spec): settings = libdnf5.base.ResolveSpecSettings() - query = libdnf5.rpm.PackageQuery(base) - query.filter_installed() - match, nevra = query.resolve_pkg_spec(spec, settings, True) - return match + installed_query = libdnf5.rpm.PackageQuery(base) + installed_query.filter_installed() + match, nevra = installed_query.resolve_pkg_spec(spec, settings, True) + + # FIXME use `is_glob_pattern` function when available: + # https://github.com/rpm-software-management/dnf5/issues/1563 + glob_patterns = set("*[?") + if any(set(char) & glob_patterns for char in spec): + available_query = libdnf5.rpm.PackageQuery(base) + available_query.filter_available() + available_query.resolve_pkg_spec(spec, settings, True) + + return not ( + {p.get_name() for p in available_query} - {p.get_name() for p in installed_query} + ) + else: + return match def is_newer_version_installed(base, spec): + # FIXME investigate whether this function can be replaced by dnf5's allow_downgrade option + if "/" in spec: + spec = spec.split("/")[-1] + if spec.endswith(".rpm"): + spec = spec[:-4] + try: spec_nevra = next(iter(libdnf5.rpm.Nevra.parse(spec))) - except RuntimeError: + except (RuntimeError, StopIteration): return False - spec_name = spec_nevra.get_name() - v = spec_nevra.get_version() - r = spec_nevra.get_release() - if not v or not r: + + spec_version = spec_nevra.get_version() + if not spec_version: return False - spec_evr = "{}:{}-{}".format(spec_nevra.get_epoch() or "0", v, r) - query = libdnf5.rpm.PackageQuery(base) - query.filter_installed() - 
query.filter_name([spec_name]) - query.filter_evr([spec_evr], libdnf5.common.QueryCmp_GT) + installed = libdnf5.rpm.PackageQuery(base) + installed.filter_installed() + installed.filter_name([spec_nevra.get_name()]) + installed.filter_latest_evr() + try: + installed_package = list(installed)[-1] + except IndexError: + return False - return query.size() > 0 + target = libdnf5.rpm.PackageQuery(base) + target.filter_name([spec_nevra.get_name()]) + target.filter_version([spec_version]) + spec_release = spec_nevra.get_release() + if spec_release: + target.filter_release([spec_release]) + spec_epoch = spec_nevra.get_epoch() + if spec_epoch: + target.filter_epoch([spec_epoch]) + target.filter_latest_evr() + try: + target_package = list(target)[-1] + except IndexError: + return False + + # FIXME https://github.com/rpm-software-management/dnf5/issues/1104 + return libdnf5.rpm.rpmvercmp(installed_package.get_evr(), target_package.get_evr()) == 1 def package_to_dict(package): @@ -394,8 +439,7 @@ def get_unneeded_pkgs(base): query = libdnf5.rpm.PackageQuery(base) query.filter_installed() query.filter_unneeded() - for pkg in query: - yield pkg + yield from query class Dnf5Module(YumDnf): @@ -403,13 +447,30 @@ class Dnf5Module(YumDnf): super(Dnf5Module, self).__init__(module) self._ensure_dnf() - # FIXME https://github.com/rpm-software-management/dnf5/issues/402 - self.lockfile = "" self.pkg_mgr_name = "dnf5" - # DNF specific args that are not part of YumDnf - self.allowerasing = self.module.params["allowerasing"] - self.nobest = self.module.params["nobest"] + def fail_on_non_existing_plugins(self, base): + # https://github.com/rpm-software-management/dnf5/issues/1460 + try: + plugin_names = [p.get_name() for p in base.get_plugins_info()] + except AttributeError: + # plugins functionality requires python3-libdnf5 5.2.0.0+ + # silently ignore here, the module will fail later when + # base.enable_disable_plugins is attempted to be used if + # user specifies enable_plugin/disable_plugin + return + + msg = [] + if enable_unmatched := set(self.enable_plugin).difference(plugin_names): + msg.append( + f"No matches were found for the following plugin name patterns while enabling libdnf5 plugins: {', '.join(enable_unmatched)}." + ) + if disable_unmatched := set(self.disable_plugin).difference(plugin_names): + msg.append( + f"No matches were found for the following plugin name patterns while disabling libdnf5 plugins: {', '.join(disable_unmatched)}." 
+ ) + if msg: + self.module.fail_json(msg=" ".join(msg)) def _ensure_dnf(self): locale = get_best_parsable_locale(self.module) @@ -429,7 +490,6 @@ class Dnf5Module(YumDnf): system_interpreters = [ "/usr/libexec/platform-python", "/usr/bin/python3", - "/usr/bin/python2", "/usr/bin/python", ] @@ -452,17 +512,7 @@ class Dnf5Module(YumDnf): failures=[], ) - def is_lockfile_pid_valid(self): - # FIXME https://github.com/rpm-software-management/dnf5/issues/402 - return True - def run(self): - if sys.version_info.major < 3: - self.module.fail_json( - msg="The dnf5 module requires Python 3.", - failures=[], - rc=1, - ) if not self.list and not self.download_only and os.geteuid() != 0: self.module.fail_json( msg="This command has to be run under the root user.", @@ -470,13 +520,6 @@ class Dnf5Module(YumDnf): rc=1, ) - if self.enable_plugin or self.disable_plugin: - self.module.fail_json( - msg="enable_plugin and disable_plugin options are not yet implemented in DNF5", - failures=[], - rc=1, - ) - base = libdnf5.base.Base() conf = base.get_config() @@ -484,7 +527,7 @@ class Dnf5Module(YumDnf): conf.config_file_path = self.conf_file try: - base.load_config_from_file() + base.load_config() except RuntimeError as e: self.module.fail_json( msg=str(e), @@ -503,7 +546,11 @@ class Dnf5Module(YumDnf): self.disable_excludes = "*" conf.disable_excludes = self.disable_excludes conf.skip_broken = self.skip_broken - conf.best = not self.nobest + # best and nobest are mutually exclusive + if self.nobest is not None: + conf.best = not self.nobest + elif self.best is not None: + conf.best = self.best conf.install_weak_deps = self.install_weak_deps conf.gpgcheck = not self.disable_gpg_check conf.localpkg_gpgcheck = not self.disable_gpg_check @@ -515,12 +562,28 @@ class Dnf5Module(YumDnf): if self.download_dir: conf.destdir = self.download_dir + if self.enable_plugin: + try: + base.enable_disable_plugins(self.enable_plugin, True) + except AttributeError: + self.module.fail_json(msg="'enable_plugin' requires python3-libdnf5 5.2.0.0+") + + if self.disable_plugin: + try: + base.enable_disable_plugins(self.disable_plugin, False) + except AttributeError: + self.module.fail_json(msg="'disable_plugin' requires python3-libdnf5 5.2.0.0+") + base.setup() + # https://github.com/rpm-software-management/dnf5/issues/1460 + self.fail_on_non_existing_plugins(base) + log_router = base.get_logger() global_logger = libdnf5.logger.GlobalLogger() global_logger.set(log_router.get(), libdnf5.logger.Logger.Level_DEBUG) - logger = libdnf5.logger.create_file_logger(base) + # FIXME hardcoding the filename does not seem right, should libdnf5 expose the default file name? 
+ logger = libdnf5.logger.create_file_logger(base, "dnf5.log") log_router.add_logger(logger) if self.update_cache: @@ -545,7 +608,11 @@ class Dnf5Module(YumDnf): for repo in repo_query: repo.enable() - sack.update_and_load_enabled_repos(True) + try: + sack.load_repos() + except AttributeError: + # dnf5 < 5.2.0.0 + sack.update_and_load_enabled_repos(True) if self.update_cache and not self.names and not self.list: self.module.exit_json( @@ -577,7 +644,11 @@ class Dnf5Module(YumDnf): self.module.exit_json(msg="", results=results, rc=0) settings = libdnf5.base.GoalJobSettings() - settings.group_with_name = True + try: + settings.set_group_with_name(True) + except AttributeError: + # dnf5 < 5.2.0.0 + settings.group_with_name = True if self.bugfix or self.security: advisory_query = libdnf5.advisory.AdvisoryQuery(base) types = [] @@ -592,18 +663,12 @@ class Dnf5Module(YumDnf): results = [] if self.names == ["*"] and self.state == "latest": goal.add_rpm_upgrade(settings) - elif self.state in {"install", "present", "latest"}: + elif self.state in {"installed", "present", "latest"}: upgrade = self.state == "latest" for spec in self.names: if is_newer_version_installed(base, spec): if self.allow_downgrade: - if upgrade: - if is_installed(base, spec): - goal.add_upgrade(spec, settings) - else: - goal.add_install(spec, settings) - else: - goal.add_install(spec, settings) + goal.add_install(spec, settings) elif is_installed(base, spec): if upgrade: goal.add_upgrade(spec, settings) @@ -631,7 +696,7 @@ class Dnf5Module(YumDnf): if transaction.get_problems(): failures = [] for log_event in transaction.get_resolve_logs(): - if log_event.get_problem() == libdnf5.base.GoalProblem_NOT_FOUND and self.state in {"install", "present", "latest"}: + if log_event.get_problem() == libdnf5.base.GoalProblem_NOT_FOUND and self.state in {"installed", "present", "latest"}: # NOTE dnf module compat failures.append("No package {} available.".format(log_event.get_spec())) else: @@ -697,10 +762,6 @@ class Dnf5Module(YumDnf): def main(): - # Extend yumdnf_argument_spec with dnf-specific features that will never be - # backported to yum because yum is now in "maintenance mode" upstream - yumdnf_argument_spec["argument_spec"]["allowerasing"] = dict(default=False, type="bool") - yumdnf_argument_spec["argument_spec"]["nobest"] = dict(default=False, type="bool") Dnf5Module(AnsibleModule(**yumdnf_argument_spec)).run() diff --git a/lib/ansible/modules/dpkg_selections.py b/lib/ansible/modules/dpkg_selections.py index 71226f01b06..31841306d86 100644 --- a/lib/ansible/modules/dpkg_selections.py +++ b/lib/ansible/modules/dpkg_selections.py @@ -3,16 +3,15 @@ # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: dpkg_selections short_description: Dpkg package selection selections description: - - Change dpkg package selection state via --get-selections and --set-selections. + - Change dpkg package selection state via C(--get-selections) and C(--set-selections). version_added: "2.0" author: - Brian Brazil (@brian-brazil) @@ -40,8 +39,8 @@ attributes: platforms: debian notes: - This module will not cause any packages to be installed/removed/purged, use the M(ansible.builtin.apt) module for that. 
-''' -EXAMPLES = ''' +""" +EXAMPLES = """ - name: Prevent python from being upgraded ansible.builtin.dpkg_selections: name: python @@ -51,9 +50,10 @@ EXAMPLES = ''' ansible.builtin.dpkg_selections: name: python selection: install -''' +""" from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.locale import get_best_parsable_locale def main(): @@ -67,12 +67,18 @@ def main(): dpkg = module.get_bin_path('dpkg', True) + locale = get_best_parsable_locale(module) + DPKG_ENV = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale, LANGUAGE=locale) + module.run_command_environ_update = DPKG_ENV + name = module.params['name'] selection = module.params['selection'] # Get current settings. rc, out, err = module.run_command([dpkg, '--get-selections', name], check_rc=True) - if not out: + if 'no packages found matching' in err: + module.fail_json(msg="Failed to find package '%s' to perform selection '%s'." % (name, selection)) + elif not out: current = 'not present' else: current = out.split()[1] diff --git a/lib/ansible/modules/expect.py b/lib/ansible/modules/expect.py index 921aa70a447..90ece7d76f3 100644 --- a/lib/ansible/modules/expect.py +++ b/lib/ansible/modules/expect.py @@ -3,11 +3,10 @@ # (c) 2015, Matt Martz # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: expect version_added: '2.0' @@ -38,12 +37,14 @@ options: responses: type: dict description: - - Mapping of expected string/regex and string to respond with. If the - response is a list, successive matches return successive - responses. List functionality is new in 2.1. + - Mapping of prompt regular expressions and corresponding answer(s). + - Each key in O(responses) is a Python regex U(https://docs.python.org/3/library/re.html#regular-expression-syntax). + - The value of each key is a string or list of strings. + If the value is a string and the prompt is encountered multiple times, the answer will be repeated. + Provide the value as a list to give different answers for successive matches. required: true timeout: - type: int + type: raw description: - Amount of time in seconds to wait for the expected strings. Use V(null) to disable timeout. @@ -69,15 +70,10 @@ notes: - If you want to run a command through the shell (say you are using C(<), C(>), C(|), and so on), you must specify a shell in the command such as C(/bin/bash -c "/path/to/something | grep else"). - - The question, or key, under O(responses) is a python regex match. Case - insensitive searches are indicated with a prefix of C(?i). + - Case insensitive searches are indicated with a prefix of C((?i)). - The C(pexpect) library used by this module operates with a search window of 2000 bytes, and does not use a multiline regex match. To perform a - start of line bound match, use a pattern like ``(?m)^pattern`` - - By default, if a question is encountered multiple times, its string - response will be repeated. If you need different responses for successive - question matches, instead of a string response, use a list of strings as - the response. The list functionality is new in 2.1. + start of line bound match, use a pattern like C((?m)^pattern). - The M(ansible.builtin.expect) module is designed for simple scenarios. 
For more complex needs, consider the use of expect code with the M(ansible.builtin.shell) or M(ansible.builtin.script) modules. (An example is part of the M(ansible.builtin.shell) module documentation). @@ -87,9 +83,9 @@ seealso: - module: ansible.builtin.script - module: ansible.builtin.shell author: "Matt Martz (@sivel)" -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Case insensitive password string match ansible.builtin.expect: command: passwd username @@ -98,15 +94,29 @@ EXAMPLES = r''' # you don't want to show passwords in your logs no_log: true -- name: Generic question with multiple different responses +- name: Match multiple regular expressions and demonstrate individual and repeated responses ansible.builtin.expect: command: /path/to/custom/command responses: Question: + # give a unique response for each of the 3 hypothetical prompts matched - response1 - response2 - response3 -''' + # give the same response for every matching prompt + "^Match another prompt$": "response" + +- name: Multiple questions with responses + ansible.builtin.expect: + command: /path/to/custom/command + responses: + "Please provide your name": + - "Anna" + "Database user": + - "{{ db_username }}" + "Database password": + - "{{ db_password }}" +""" import datetime import os @@ -122,6 +132,7 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.common.text.converters import to_bytes, to_native +from ansible.module_utils.common.validation import check_type_int def response_closure(module, question, responses): @@ -147,7 +158,7 @@ def main(): creates=dict(type='path'), removes=dict(type='path'), responses=dict(type='dict', required=True), - timeout=dict(type='int', default=30), + timeout=dict(type='raw', default=30), echo=dict(type='bool', default=False), ) ) @@ -162,6 +173,11 @@ def main(): removes = module.params['removes'] responses = module.params['responses'] timeout = module.params['timeout'] + if timeout is not None: + try: + timeout = check_type_int(timeout) + except TypeError as te: + module.fail_json(msg=f"argument 'timeout' is of type {type(timeout)} and we were unable to convert to int: {te}") echo = module.params['echo'] events = dict() diff --git a/lib/ansible/modules/fail.py b/lib/ansible/modules/fail.py index 8d3fa153c02..7e68c77070f 100644 --- a/lib/ansible/modules/fail.py +++ b/lib/ansible/modules/fail.py @@ -3,11 +3,10 @@ # Copyright: (c) 2012, Dag Wieers # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: fail short_description: Fail with custom message @@ -53,11 +52,11 @@ seealso: - module: ansible.builtin.meta author: - Dag Wieers (@dagwieers) -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Example using fail and when together ansible.builtin.fail: msg: The system may not be provisioned according to the CMDB status. 
when: cmdb_status != "to-be-staged" -''' +""" diff --git a/lib/ansible/modules/fetch.py b/lib/ansible/modules/fetch.py index 77ebd190409..5886a82ce8c 100644 --- a/lib/ansible/modules/fetch.py +++ b/lib/ansible/modules/fetch.py @@ -5,11 +5,10 @@ # This is a virtual module that is entirely implemented as an action plugin and runs on the controller -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: fetch short_description: Fetch files from remote nodes @@ -29,8 +28,8 @@ options: dest: description: - A directory to save the file into. - - For example, if the O(dest) directory is C(/backup) a O(src) file named C(/etc/profile) on host - C(host.example.com), would be saved into C(/backup/host.example.com/etc/profile). + - For example, if O(dest=/backup), then O(src=/etc/profile) on host + C(host.example.com), would save the file into C(/backup/host.example.com/etc/profile). The host name is based on the inventory name. required: yes fail_on_missing: @@ -96,9 +95,9 @@ seealso: author: - Ansible Core Team - Michael DeHaan -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Store file into /tmp/fetched/host.example.com/tmp/somefile ansible.builtin.fetch: src: /tmp/somefile @@ -121,4 +120,4 @@ EXAMPLES = r''' src: /tmp/uniquefile dest: special/prefix-{{ inventory_hostname }} flat: yes -''' +""" diff --git a/lib/ansible/modules/file.py b/lib/ansible/modules/file.py index 0aa91838efc..b79eca58881 100644 --- a/lib/ansible/modules/file.py +++ b/lib/ansible/modules/file.py @@ -4,11 +4,10 @@ # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: file version_added: historical @@ -64,9 +63,9 @@ options: force: description: - > - Force the creation of the symlinks in two cases: the source file does + Force the creation of the links in two cases: if the link type is symbolic and the source file does not exist (but will appear later); the destination exists and is a file (so, we need to unlink the - O(path) file and create symlink to the O(src) file in place of it). + O(path) file and create a link to the O(src) file in place of it). type: bool default: no follow: @@ -74,6 +73,8 @@ options: - This flag indicates that filesystem links, if they exist, should be followed. - O(follow=yes) and O(state=link) can modify O(src) when combined with parameters such as O(mode). - Previous to Ansible 2.5, this was V(false) by default. + - While creating a symlink with a non-existent destination, set O(follow=false) to avoid a warning message related to permission issues. + The warning message is added to notify the user that we can not set permissions to the non-existent destination. type: bool default: yes version_added: '1.8' @@ -122,9 +123,9 @@ attributes: author: - Ansible Core Team - Michael DeHaan -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Change file ownership, group and permissions ansible.builtin.file: path: /etc/foo.conf @@ -213,8 +214,8 @@ EXAMPLES = r''' path: /etc/foo state: absent -''' -RETURN = r''' +""" +RETURN = r""" dest: description: Destination file/path, equal to the value passed to O(path). 
returned: O(state=touch), O(state=hard), O(state=link) @@ -225,12 +226,11 @@ path: returned: O(state=absent), O(state=directory), O(state=file) type: str sample: /path/to/file.txt -''' +""" import errno import os import shutil -import sys import time from pwd import getpwnam, getpwuid @@ -238,38 +238,13 @@ from grp import getgrnam, getgrgid from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_bytes, to_native - +from ansible.module_utils.common.sentinel import Sentinel # There will only be a single AnsibleModule object per module module = None -class AnsibleModuleError(Exception): - def __init__(self, results): - self.results = results - - def __repr__(self): - return 'AnsibleModuleError(results={0})'.format(self.results) - - -class ParameterError(AnsibleModuleError): - pass - - -class Sentinel(object): - def __new__(cls, *args, **kwargs): - return cls - - -def _ansible_excepthook(exc_type, exc_value, tb): - # Using an exception allows us to catch it if the calling code knows it can recover - if issubclass(exc_type, AnsibleModuleError): - module.fail_json(**exc_value.results) - else: - sys.__excepthook__(exc_type, exc_value, tb) - - -def additional_parameter_handling(params): +def additional_parameter_handling(module): """Additional parameter validation and reformatting""" # When path is a directory, rewrite the pathname to be the file inside of the directory # TODO: Why do we exclude link? Why don't we exclude directory? Should we exclude touch? @@ -281,6 +256,7 @@ def additional_parameter_handling(params): # if state == file: place inside of the directory (use _original_basename) # if state == link: place inside of the directory (use _original_basename. Fallback to src?) # if state == hard: place inside of the directory (use _original_basename. Fallback to src?) + params = module.params if (params['state'] not in ("link", "absent") and os.path.isdir(to_bytes(params['path'], errors='surrogate_or_strict'))): basename = None @@ -306,17 +282,21 @@ def additional_parameter_handling(params): # make sure the target path is a directory when we're doing a recursive operation if params['recurse'] and params['state'] != 'directory': - raise ParameterError(results={"msg": "recurse option requires state to be 'directory'", - "path": params["path"]}) + module.fail_json( + msg="recurse option requires state to be 'directory'", + path=params["path"] + ) # Fail if 'src' but no 'state' is specified if params['src'] and params['state'] not in ('link', 'hard'): - raise ParameterError(results={'msg': "src option requires state to be 'link' or 'hard'", - 'path': params['path']}) + module.fail_json( + msg="src option requires state to be 'link' or 'hard'", + path=params['path'] + ) def get_state(path): - ''' Find out current state ''' + """ Find out current state """ b_path = to_bytes(path, errors='surrogate_or_strict') try: @@ -376,8 +356,8 @@ def recursive_set_attributes(b_path, follow, file_args, mtime, atime): except RuntimeError as e: # on Python3 "RecursionError" is raised which is derived from "RuntimeError" # TODO once this function is moved into the common file utilities, this should probably raise more general exception - raise AnsibleModuleError( - results={'msg': "Could not recursively set attributes on %s. Original error was: '%s'" % (to_native(b_path), to_native(e))} + module.fail_json( + msg=f"Could not recursively set attributes on {to_native(b_path)}. 
Original error was: '{to_native(e)}'" ) return changed @@ -418,17 +398,17 @@ def initial_diff(path, state, prev_state): def get_timestamp_for_time(formatted_time, time_format): if formatted_time == 'preserve': return None - elif formatted_time == 'now': + if formatted_time == 'now': return Sentinel - else: - try: - struct = time.strptime(formatted_time, time_format) - struct_time = time.mktime(struct) - except (ValueError, OverflowError) as e: - raise AnsibleModuleError(results={'msg': 'Error while obtaining timestamp for time %s using format %s: %s' - % (formatted_time, time_format, to_native(e, nonstring='simplerepr'))}) + try: + struct = time.strptime(formatted_time, time_format) + struct_time = time.mktime(struct) + except (ValueError, OverflowError) as e: + module.fail_json( + msg=f"Error while obtaining timestamp for time {formatted_time} using format {time_format}: {to_native(e, nonstring='simplerepr')}", + ) - return struct_time + return struct_time def update_timestamp_for_file(path, mtime, atime, diff=None): @@ -485,18 +465,19 @@ def update_timestamp_for_file(path, mtime, atime, diff=None): diff['before']['atime'] = previous_atime diff['after']['atime'] = atime except OSError as e: - raise AnsibleModuleError(results={'msg': 'Error while updating modification or access time: %s' - % to_native(e, nonstring='simplerepr'), 'path': path}) + module.fail_json( + msg=f"Error while updating modification or access time: {to_native(e, nonstring='simplerepr')}", + path=path + ) return True def keep_backward_compatibility_on_timestamps(parameter, state): if state in ['file', 'hard', 'directory', 'link'] and parameter is None: return 'preserve' - elif state == 'touch' and parameter is None: + if state == 'touch' and parameter is None: return 'now' - else: - return parameter + return parameter def execute_diff_peek(path): @@ -529,14 +510,18 @@ def ensure_absent(path): try: shutil.rmtree(b_path, ignore_errors=False) except Exception as e: - raise AnsibleModuleError(results={'msg': "rmtree failed: %s" % to_native(e)}) + module.fail_json( + msg=f"rmtree failed: {to_native(e)}" + ) else: try: os.unlink(b_path) except OSError as e: if e.errno != errno.ENOENT: # It may already have been removed - raise AnsibleModuleError(results={'msg': "unlinking failed: %s " % to_native(e), - 'path': path}) + module.fail_json( + msg=f"unlinking failed: {to_native(e)}", + path=path + ) result.update({'path': path, 'changed': True, 'diff': diff, 'state': 'absent'}) else: @@ -565,9 +550,10 @@ def execute_touch(path, follow, timestamps): open(b_path, 'wb').close() changed = True except (OSError, IOError) as e: - raise AnsibleModuleError(results={'msg': 'Error, could not touch target: %s' - % to_native(e, nonstring='simplerepr'), - 'path': path}) + module.fail_json( + msg=f"Error, could not touch target: {to_native(e, nonstring='simplerepr')}", + path=path + ) # Update the attributes on the file diff = initial_diff(path, 'touch', prev_state) file_args = module.load_file_common_arguments(module.params) @@ -605,8 +591,11 @@ def ensure_file_attributes(path, follow, timestamps): if prev_state not in ('file', 'hard'): # file is not absent and any other state is a conflict - raise AnsibleModuleError(results={'msg': 'file (%s) is %s, cannot continue' % (path, prev_state), - 'path': path, 'state': prev_state}) + module.fail_json( + msg=f"file ({path}) is {prev_state}, cannot continue", + path=path, + state=prev_state + ) diff = initial_diff(path, 'file', prev_state) changed = module.set_fs_attributes_if_different(file_args, 
False, diff, expand=False) @@ -663,15 +652,18 @@ def ensure_directory(path, follow, recurse, timestamps): changed = module.set_fs_attributes_if_different(tmp_file_args, changed, diff, expand=False) changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff) except Exception as e: - raise AnsibleModuleError(results={'msg': 'There was an issue creating %s as requested:' - ' %s' % (curpath, to_native(e)), - 'path': path}) + module.fail_json( + msg=f"There was an issue creating {curpath} as requested: {to_native(e)}", + path=path + ) return {'path': path, 'changed': changed, 'diff': diff} elif prev_state != 'directory': # We already know prev_state is not 'absent', therefore it exists in some form. - raise AnsibleModuleError(results={'msg': '%s already exists as a %s' % (path, prev_state), - 'path': path}) + module.fail_json( + msg=f"{path} already exists as a {prev_state}", + path=path + ) # # previous state == directory @@ -713,31 +705,39 @@ def ensure_symlink(path, src, follow, force, timestamps): b_absrc = to_bytes(absrc, errors='surrogate_or_strict') if not force and src is not None and not os.path.exists(b_absrc): - raise AnsibleModuleError(results={'msg': 'src file does not exist, use "force=yes" if you' - ' really want to create the link: %s' % absrc, - 'path': path, 'src': src}) + module.fail_json( + msg="src file does not exist, use 'force=yes' if you" + f" really want to create the link: {absrc}", + path=path, + src=src + ) if prev_state == 'directory': if not force: - raise AnsibleModuleError(results={'msg': 'refusing to convert from %s to symlink for %s' - % (prev_state, path), - 'path': path}) + module.fail_json( + msg=f'refusing to convert from {prev_state} to symlink for {path}', + path=path + ) elif os.listdir(b_path): # refuse to replace a directory that has files in it - raise AnsibleModuleError(results={'msg': 'the directory %s is not empty, refusing to' - ' convert it' % path, - 'path': path}) + module.fail_json( + msg=f'the directory {path} is not empty, refusing to convert it', + path=path + ) elif prev_state in ('file', 'hard') and not force: - raise AnsibleModuleError(results={'msg': 'refusing to convert from %s to symlink for %s' - % (prev_state, path), - 'path': path}) + module.fail_json( + msg=f'refusing to convert from {prev_state} to symlink for {path}', + path=path + ) diff = initial_diff(path, 'link', prev_state) changed = False if prev_state in ('hard', 'file', 'directory', 'absent'): if src is None: - raise AnsibleModuleError(results={'msg': 'src is required for creating new symlinks'}) + module.fail_json( + msg='src is required for creating new symlinks', + ) changed = True elif prev_state == 'link': if src is not None: @@ -747,7 +747,11 @@ def ensure_symlink(path, src, follow, force, timestamps): diff['after']['src'] = src changed = True else: - raise AnsibleModuleError(results={'msg': 'unexpected position reached', 'dest': path, 'src': src}) + module.fail_json( + msg='unexpected position reached', + dest=path, + src=src + ) if changed and not module.check_mode: if prev_state != 'absent': @@ -763,16 +767,18 @@ def ensure_symlink(path, src, follow, force, timestamps): except OSError as e: if os.path.exists(b_tmppath): os.unlink(b_tmppath) - raise AnsibleModuleError(results={'msg': 'Error while replacing: %s' - % to_native(e, nonstring='simplerepr'), - 'path': path}) + module.fail_json( + msg=f"Error while replacing: {to_native(e, nonstring='simplerepr')}", + path=path + ) else: try: os.symlink(b_src, b_path) except OSError as e: - raise 
AnsibleModuleError(results={'msg': 'Error while linking: %s' - % to_native(e, nonstring='simplerepr'), - 'path': path}) + module.fail_json( + msg=f"Error while linking: {to_native(e, nonstring='simplerepr')}", + path=path + ) if module.check_mode and not os.path.exists(b_path): return {'dest': path, 'src': src, 'changed': changed, 'diff': diff} @@ -807,12 +813,18 @@ def ensure_hardlink(path, src, follow, force, timestamps): # src is the source of a hardlink. We require it if we are creating a new hardlink. # We require path in the argument_spec so we know it is present at this point. if prev_state != 'hard' and src is None: - raise AnsibleModuleError(results={'msg': 'src is required for creating new hardlinks'}) + module.fail_json( + msg='src is required for creating new hardlinks' + ) # Even if the link already exists, if src was specified it needs to exist. # The inode number will be compared to ensure the link has the correct target. if src is not None and not os.path.exists(b_src): - raise AnsibleModuleError(results={'msg': 'src does not exist', 'dest': path, 'src': src}) + module.fail_json( + msg='src does not exist', + dest=path, + src=src + ) diff = initial_diff(path, 'hard', prev_state) changed = False @@ -826,26 +838,39 @@ def ensure_hardlink(path, src, follow, force, timestamps): diff['after']['src'] = src changed = True elif prev_state == 'hard': - if src is not None and not os.stat(b_path).st_ino == os.stat(b_src).st_ino: + if src is not None and os.stat(b_path).st_ino != os.stat(b_src).st_ino: changed = True if not force: - raise AnsibleModuleError(results={'msg': 'Cannot link, different hard link exists at destination', - 'dest': path, 'src': src}) + module.fail_json( + msg='Cannot link, different hard link exists at destination', + dest=path, + src=src + ) elif prev_state == 'file': changed = True if not force: - raise AnsibleModuleError(results={'msg': 'Cannot link, %s exists at destination' % prev_state, - 'dest': path, 'src': src}) + module.fail_json( + msg=f'Cannot link, {prev_state} exists at destination', + dest=path, + src=src + ) elif prev_state == 'directory': changed = True if os.path.exists(b_path): if os.stat(b_path).st_ino == os.stat(b_src).st_ino: return {'path': path, 'changed': False} elif not force: - raise AnsibleModuleError(results={'msg': 'Cannot link: different hard link exists at destination', - 'dest': path, 'src': src}) + module.fail_json( + msg='Cannot link: different hard link exists at destination', + dest=path, + src=src + ) else: - raise AnsibleModuleError(results={'msg': 'unexpected position reached', 'dest': path, 'src': src}) + module.fail_json( + msg='unexpected position reached', + dest=path, + src=src + ) if changed and not module.check_mode: if prev_state != 'absent': @@ -866,16 +891,20 @@ def ensure_hardlink(path, src, follow, force, timestamps): except OSError as e: if os.path.exists(b_tmppath): os.unlink(b_tmppath) - raise AnsibleModuleError(results={'msg': 'Error while replacing: %s' - % to_native(e, nonstring='simplerepr'), - 'path': path}) + module.fail_json( + msg=f"Error while replacing: {to_native(e, nonstring='simplerepr')}", + path=path + ) else: try: + if follow and os.path.islink(b_src): + b_src = os.readlink(b_src) os.link(b_src, b_path) except OSError as e: - raise AnsibleModuleError(results={'msg': 'Error while linking: %s' - % to_native(e, nonstring='simplerepr'), - 'path': path}) + module.fail_json( + msg=f"Error while linking: {to_native(e, nonstring='simplerepr')}", + path=path + ) if module.check_mode and not 
os.path.exists(b_path): return {'dest': path, 'src': src, 'changed': changed, 'diff': diff} @@ -937,9 +966,7 @@ def main(): supports_check_mode=True, ) - # When we rewrite basic.py, we will do something similar to this on instantiating an AnsibleModule - sys.excepthook = _ansible_excepthook - additional_parameter_handling(module.params) + additional_parameter_handling(module) params = module.params state = params['state'] @@ -980,6 +1007,9 @@ def main(): elif state == 'absent': result = ensure_absent(path) + if not module._diff: + result.pop('diff', None) + module.exit_json(**result) diff --git a/lib/ansible/modules/find.py b/lib/ansible/modules/find.py index 9b3680546d4..8c2820c48e7 100644 --- a/lib/ansible/modules/find.py +++ b/lib/ansible/modules/find.py @@ -6,11 +6,10 @@ # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: find author: Brian Coca (@bcoca) @@ -30,6 +29,10 @@ options: - You can choose seconds, minutes, hours, days, or weeks by specifying the first letter of any of those words (e.g., "1w"). type: str + get_checksum: + default: false + checksum_algorithm: + version_added: "2.19" patterns: default: [] description: @@ -59,8 +62,8 @@ options: contains: description: - A regular expression or pattern which should be matched against the file content. - - If O(read_whole_file) is V(true) it matches against the beginning of the line (uses - V(re.match(\))). If O(read_whole_file) is V(false), it searches anywhere for that pattern + - If O(read_whole_file=false) it matches against the beginning of the line (uses + V(re.match(\))). If O(read_whole_file=true), it searches anywhere for that pattern (uses V(re.search(\))). - Works only when O(file_type) is V(file). type: str @@ -76,14 +79,15 @@ options: paths: description: - List of paths of directories to search. All paths must be fully qualified. + - From ansible-core 2.18 and onwards, the data type has changed from C(str) to C(path). type: list required: true aliases: [ name, path ] - elements: str + elements: path file_type: description: - Type of file to select. - - The 'link' and 'any' choices were added in Ansible 2.3. + - The V(link) and V(any) choices were added in Ansible 2.3. type: str choices: [ any, directory, file, link ] default: file @@ -111,14 +115,25 @@ options: - Set this to V(true) to include hidden files, otherwise they will be ignored. type: bool default: no - follow: + mode: description: - - Set this to V(true) to follow symlinks in path for systems with python 2.6+. + - Choose objects matching a specified permission. This value is + restricted to modes that can be applied using the python + C(os.chmod) function. + - The mode can be provided as an octal such as V("0644") or + as symbolic such as V(u=rw,g=r,o=r). + type: raw + version_added: '2.16' + exact_mode: + description: + - Restrict mode matching to exact matches only, and not as a + minimum set of permissions to match. type: bool - default: no - get_checksum: + default: true + version_added: '2.16' + follow: description: - - Set this to V(true) to retrieve a file's SHA1 checksum. + - Set this to V(true) to follow symlinks in path for systems with python 2.6+. type: bool default: no use_regex: @@ -130,11 +145,24 @@ options: depth: description: - Set the maximum number of levels to descend into. 
- - Setting recurse to V(false) will override this value, which is effectively depth 1. + - Setting O(recurse=false) will override this value, which is effectively depth 1. - Default is unlimited depth. type: int version_added: "2.6" -extends_documentation_fragment: action_common_attributes + encoding: + description: + - When doing a O(contains) search, determine the encoding of the files to be searched. + type: str + version_added: "2.17" + limit: + description: + - Limit the maximum number of matching paths returned. After finding this many, the find action will stop looking. + - Matches are made from the top, down (i.e. shallowest directory first). + - If not set, or set to v(null), it will do unlimited matches. + - Default is unlimited matches. + type: int + version_added: "2.18" +extends_documentation_fragment: [action_common_attributes, checksum_common] attributes: check_mode: details: since this action does not modify the target it just executes normally during check mode @@ -145,10 +173,10 @@ attributes: platforms: posix seealso: - module: ansible.windows.win_find -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Recursively find /tmp files older than 2 days ansible.builtin.find: paths: /tmp @@ -207,9 +235,19 @@ EXAMPLES = r''' - '^_[0-9]{2,4}_.*.log$' - '^[a-z]{1,5}_.*log$' -''' - -RETURN = r''' +- name: Find file containing "wally" without necessarily reading all files + ansible.builtin.find: + paths: /var/log + file_type: file + contains: wally + read_whole_file: true + patterns: "^.*\\.log$" + use_regex: true + recurse: true + limit: 1 +""" + +RETURN = r""" files: description: All matches found with the specified criteria (see stat module for full output of each dictionary) returned: success @@ -240,8 +278,9 @@ skipped_paths: type: dict sample: {"/laskdfj": "'/laskdfj' is not a directory"} version_added: '2.12' -''' +""" +import errno import fnmatch import grp import os @@ -252,10 +291,17 @@ import time from ansible.module_utils.common.text.converters import to_text, to_native from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import string_types + + +class _Object: + def __init__(self, **kwargs): + for k, v in kwargs.items(): + setattr(self, k, v) def pfilter(f, patterns=None, excludes=None, use_regex=False): - '''filter using glob patterns''' + """filter using glob patterns""" if not patterns and not excludes: return True @@ -294,7 +340,7 @@ def pfilter(f, patterns=None, excludes=None, use_regex=False): def agefilter(st, now, age, timestamp): - '''filter files older than age''' + """filter files older than age""" if age is None: return True elif age >= 0 and now - getattr(st, "st_%s" % timestamp) >= abs(age): @@ -305,7 +351,7 @@ def agefilter(st, now, age, timestamp): def sizefilter(st, size): - '''filter files greater than size''' + """filter files greater than size""" if size is None: return True elif size >= 0 and st.st_size >= abs(size): @@ -315,11 +361,12 @@ def sizefilter(st, size): return False -def contentfilter(fsname, pattern, read_whole_file=False): +def contentfilter(fsname, pattern, encoding, read_whole_file=False): """ Filter files which contain the given expression :arg fsname: Filename to scan for lines matching a pattern :arg pattern: Pattern to look for inside of line + :arg encoding: Encoding of the file to be scanned :arg read_whole_file: If true, the whole file is read into memory before the regex is applied against it. Otherwise, the regex is applied line-by-line. 
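The tail of that docstring is the crux of the corrected O(contains) documentation earlier in the find.py hunk: read_whole_file=false applies re.match() to each line, which only matches at the start of a line, while read_whole_file=true applies re.search() to the whole buffer and matches anywhere. A quick stdlib check of the difference (sample text is illustrative):

    import re

    text = "first line\nerror: disk full\n"

    # read_whole_file=false -> re.match() per line: the pattern has to match
    # at the *start* of some line, so "disk" is never found here.
    print(any(re.match("disk", line) for line in text.splitlines()))  # False

    # read_whole_file=true -> re.search() over the whole buffer: the pattern
    # may occur anywhere, so "disk" is found.
    print(bool(re.search("disk", text)))  # True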
:rtype: bool :returns: True if one of the lines in fsname matches the pattern. Otherwise False @@ -330,7 +377,7 @@ def contentfilter(fsname, pattern, read_whole_file=False): prog = re.compile(pattern) try: - with open(fsname) as f: + with open(fsname, encoding=encoding) as f: if read_whole_file: return bool(prog.search(f.read())) @@ -338,12 +385,38 @@ def contentfilter(fsname, pattern, read_whole_file=False): if prog.match(line): return True + except LookupError as e: + raise e + except UnicodeDecodeError as e: + if encoding is None: + encoding = 'None (default determined by the Python built-in function "open")' + msg = f'Failed to read the file {fsname} due to an encoding error. current encoding: {encoding}' + raise Exception(msg) from e except Exception: pass return False +def mode_filter(st, mode, exact, module): + if not mode: + return True + + st_mode = stat.S_IMODE(st.st_mode) + + try: + mode = int(mode, 8) + except ValueError: + mode = module._symbolic_mode_to_octal(_Object(st_mode=0), mode) + + mode = stat.S_IMODE(mode) + + if exact: + return st_mode == mode + + return bool(st_mode & mode) + + def statinfo(st): pw_name = "" gr_name = "" @@ -392,14 +465,10 @@ def statinfo(st): } -def handle_walk_errors(e): - raise e - - def main(): module = AnsibleModule( argument_spec=dict( - paths=dict(type='list', required=True, aliases=['name', 'path'], elements='str'), + paths=dict(type='list', required=True, aliases=['name', 'path'], elements='path'), patterns=dict(type='list', default=[], aliases=['pattern'], elements='str'), excludes=dict(type='list', aliases=['exclude'], elements='str'), contains=dict(type='str'), @@ -412,14 +481,26 @@ def main(): hidden=dict(type='bool', default=False), follow=dict(type='bool', default=False), get_checksum=dict(type='bool', default=False), + checksum_algorithm=dict(type='str', default='sha1', + choices=['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'], + aliases=['checksum', 'checksum_algo']), use_regex=dict(type='bool', default=False), depth=dict(type='int'), + mode=dict(type='raw'), + exact_mode=dict(type='bool', default=True), + encoding=dict(type='str'), + limit=dict(type='int') ), supports_check_mode=True, ) params = module.params + if params['mode'] and not isinstance(params['mode'], string_types): + module.fail_json( + msg="argument 'mode' is not a string and conversion is not allowed, value is of type %s" % params['mode'].__class__.__name__ + ) + # Set the default match pattern to either a match-all glob or # regex depending on use_regex being set. 
This makes sure if you # set excludes: without a pattern pfilter gets something it can @@ -433,6 +514,12 @@ def main(): filelist = [] skipped = {} + def handle_walk_errors(e): + if e.errno in (errno.EPERM, errno.EACCES): + skipped[e.filename] = to_text(e) + return + raise e + if params['age'] is None: age = None else: @@ -455,17 +542,20 @@ def main(): else: module.fail_json(size=params['size'], msg="failed to process size") + if params['limit'] is not None and params['limit'] <= 0: + module.fail_json(msg="limit cannot be %d (use None for unlimited)" % params['limit']) + now = time.time() msg = 'All paths examined' looked = 0 has_warnings = False for npath in params['paths']: - npath = os.path.expanduser(os.path.expandvars(npath)) try: if not os.path.isdir(npath): raise Exception("'%s' is not a directory" % to_native(npath)) - for root, dirs, files in os.walk(npath, onerror=handle_walk_errors, followlinks=params['follow']): + # Setting `topdown=True` to explicitly guarantee matches are made from the shallowest directory first + for root, dirs, files in os.walk(npath, onerror=handle_walk_errors, followlinks=params['follow'], topdown=True): looked = looked + len(files) + len(dirs) for fsobj in (files + dirs): fsname = os.path.normpath(os.path.join(root, fsobj)) @@ -489,11 +579,13 @@ def main(): r = {'path': fsname} if params['file_type'] == 'any': - if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']): + if (pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and + agefilter(st, now, age, params['age_stamp']) and + mode_filter(st, params['mode'], params['exact_mode'], module)): r.update(statinfo(st)) if stat.S_ISREG(st.st_mode) and params['get_checksum']: - r['checksum'] = module.sha1(fsname) + r['checksum'] = module.digest_from_file(fsname, params['checksum_algorithm']) if stat.S_ISREG(st.st_mode): if sizefilter(st, size): @@ -502,28 +594,39 @@ def main(): filelist.append(r) elif stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory': - if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']): + if (pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and + agefilter(st, now, age, params['age_stamp']) and + mode_filter(st, params['mode'], params['exact_mode'], module)): r.update(statinfo(st)) filelist.append(r) elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file': - if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and \ - agefilter(st, now, age, params['age_stamp']) and \ - sizefilter(st, size) and contentfilter(fsname, params['contains'], params['read_whole_file']): + if (pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and + agefilter(st, now, age, params['age_stamp']) and + sizefilter(st, size) and + contentfilter(fsname, params['contains'], params['encoding'], params['read_whole_file']) and + mode_filter(st, params['mode'], params['exact_mode'], module)): r.update(statinfo(st)) if params['get_checksum']: - r['checksum'] = module.sha1(fsname) + r['checksum'] = module.digest_from_file(fsname, params['checksum_algorithm']) filelist.append(r) elif stat.S_ISLNK(st.st_mode) and params['file_type'] == 'link': - if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']): + if (pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and + 
agefilter(st, now, age, params['age_stamp']) and + mode_filter(st, params['mode'], params['exact_mode'], module)): r.update(statinfo(st)) filelist.append(r) - if not params['recurse']: + if len(filelist) == params["limit"]: + # Breaks out of directory files loop only + msg = "Limit of matches reached" + break + + if not params['recurse'] or len(filelist) == params["limit"]: break except Exception as e: skipped[npath] = to_text(e) diff --git a/lib/ansible/modules/gather_facts.py b/lib/ansible/modules/gather_facts.py index 123001b05ef..3d0275a0f6e 100644 --- a/lib/ansible/modules/gather_facts.py +++ b/lib/ansible/modules/gather_facts.py @@ -1,12 +1,11 @@ # -*- coding: utf-8 -*- -# Copyright (c) 2017 Ansible Project +# Copyright: Contributors to the Ansible project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: gather_facts version_added: 2.8 @@ -28,6 +27,8 @@ options: - By default it will be true if more than one fact module is used. - For low cost/delay fact modules parallelism overhead might end up meaning the whole process takes longer. Test your specific case to see if it is a speed improvement or not. + - The C(ansible_facts_parallel) variable can be used to set this option, + overriding the default, but not the direct assignment of the option in the task. type: bool attributes: action: @@ -50,13 +51,13 @@ attributes: notes: - This is mostly a wrapper around other fact gathering modules. - Options passed into this action must be supported by all the underlying fact modules configured. - - If using C(gather_timeout) and parallel execution, it will limit the total execution time of - modules that do not accept C(gather_timeout) themselves. + - If using O(ignore:gather_timeout) and parallel execution, it will limit the total execution time of + modules that do not accept O(ignore:gather_timeout) themselves. - Facts returned by each module will be merged, conflicts will favor 'last merged'. Order is not guaranteed, when doing parallel gathering on multiple modules. author: - "Ansible Core Team" -''' +""" RETURN = """ # depends on the fact module called diff --git a/lib/ansible/modules/get_url.py b/lib/ansible/modules/get_url.py index 146e6c2f5a6..52c812c0c61 100644 --- a/lib/ansible/modules/get_url.py +++ b/lib/ansible/modules/get_url.py @@ -3,18 +3,17 @@ # Copyright: (c) 2012, Jan-Piet Mens # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: get_url short_description: Downloads files from HTTP, HTTPS, or FTP to node description: - Downloads files from HTTP, HTTPS, or FTP to the remote server. The remote server I(must) have direct access to the remote resource. - - By default, if an environment variable C(_proxy) is set on + - By default, if an environment variable E(_proxy) is set on the target host, requests will be sent through that proxy. 
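Stepping back to the find.py changes above: the new O(limit) option depends on os.walk() being pinned to topdown=True, so parent directories are yielded before their children and the first N matches are guaranteed to be the shallowest ones; the loop then breaks out of the per-directory file scan and out of the walk in two steps. A stdlib-only sketch of the same stop-early pattern (root and suffix are illustrative):

    import os

    def first_matches(root, suffix, limit):
        """Collect at most `limit` matching paths, shallowest directories first."""
        matches = []
        # topdown=True guarantees parents are visited before their children,
        # so matches accumulate from the top of the tree downward.
        for dirpath, dirnames, filenames in os.walk(root, topdown=True):
            for name in filenames:
                if name.endswith(suffix):
                    matches.append(os.path.join(dirpath, name))
                    if len(matches) == limit:
                        break   # leaves the file loop only...
            if len(matches) == limit:
                break           # ...then stops the walk, as the module does
        return matches

    print(first_matches("/var/log", ".log", 1))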
This behaviour can be overridden by setting a variable for this task (see R(setting the environment,playbooks_environment)), @@ -28,23 +27,23 @@ version_added: '0.6' options: ciphers: description: - - SSL/TLS Ciphers to use for the request - - 'When a list is provided, all ciphers are joined in order with V(:)' + - SSL/TLS Ciphers to use for the request. + - 'When a list is provided, all ciphers are joined in order with C(:).' - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT) for more details. - - The available ciphers is dependent on the Python and OpenSSL/LibreSSL versions + - The available ciphers is dependent on the Python and OpenSSL/LibreSSL versions. type: list elements: str version_added: '2.14' decompress: description: - - Whether to attempt to decompress gzip content-encoded responses + - Whether to attempt to decompress gzip content-encoded responses. type: bool default: true version_added: '2.14' url: description: - - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path + - HTTP, HTTPS, or FTP URL in the form C((http|https|ftp)://[user[:pass]]@host.domain[:port]/path). type: str required: true dest: @@ -61,9 +60,9 @@ options: tmp_dest: description: - Absolute path of where temporary file is downloaded to. - - When run on Ansible 2.5 or greater, path defaults to ansible's remote_tmp setting + - When run on Ansible 2.5 or greater, path defaults to ansible's C(remote_tmp) setting. - When run on Ansible prior to 2.5, it defaults to E(TMPDIR), E(TEMP) or E(TMP) env variables or a platform specific value. - - U(https://docs.python.org/3/library/tempfile.html#tempfile.tempdir) + - U(https://docs.python.org/3/library/tempfile.html#tempfile.tempdir). type: path version_added: '2.1' force: @@ -88,18 +87,20 @@ options: - 'If a checksum is passed to this parameter, the digest of the destination file will be calculated after it is downloaded to ensure its integrity and verify that the transfer completed successfully. - Format: :, e.g. checksum="sha256:D98291AC[...]B6DC7B97", - checksum="sha256:http://example.com/path/sha256sum.txt"' + Format: :, for example C(checksum="sha256:D98291AC[...]B6DC7B97", + C(checksum="sha256:http://example.com/path/sha256sum.txt").' - If you worry about portability, only the sha1 algorithm is available on all platforms and python versions. - - The Python ``hashlib`` module is responsible for providing the available algorithms. + - The Python C(hashlib) module is responsible for providing the available algorithms. The choices vary based on Python version and OpenSSL version. - - On systems running in FIPS compliant mode, the ``md5`` algorithm may be unavailable. + - On systems running in FIPS compliant mode, the C(md5) algorithm may be unavailable. - Additionally, if a checksum is passed to this parameter, and the file exist under the O(dest) location, the C(destination_checksum) would be calculated, and if checksum equals C(destination_checksum), the file download would be skipped - (unless O(force) is V(true)). If the checksum does not equal C(destination_checksum), + (unless O(force=true)). If the checksum does not equal C(destination_checksum), the destination file is deleted. + - If the checksum URL requires username and password, O(url_username) and O(url_password) are used + to download the checksum file. type: str default: '' version_added: "2.0" @@ -186,16 +187,16 @@ options: authentication. 
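The checksum documentation above pairs with a behavioural fix further down in this get_url diff: the expected digest is now verified against the temporary download, and a mismatch deletes the temp file before anything is moved over O(dest); previously the check ran only after dest had already been replaced. A stdlib sketch of that order of operations (the function name and paths are hypothetical, not the module's API, and the URL form of O(checksum) is ignored here):

    import hashlib
    import os
    import shutil

    def verify_then_move(tmpsrc, dest, checksum):
        # checksum is "<algorithm>:<hexdigest>", e.g. "sha256:d98291ac..."
        algorithm, _, expected = checksum.partition(":")
        digest = hashlib.new(algorithm)
        with open(tmpsrc, "rb") as f:
            for chunk in iter(lambda: f.read(65536), b""):
                digest.update(chunk)
        if digest.hexdigest() != expected.lower():
            os.remove(tmpsrc)      # a bad download never reaches dest
            raise ValueError(f"checksum mismatch: got {digest.hexdigest()}")
        shutil.move(tmpsrc, dest)  # only verified content is moved into place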
- Requires the Python library L(gssapi,https://github.com/pythongssapi/python-gssapi) to be installed. - Credentials for GSSAPI can be specified with O(url_username)/O(url_password) or with the GSSAPI env var - C(KRB5CCNAME) that specified a custom Kerberos credential cache. + E(KRB5CCNAME) that specified a custom Kerberos credential cache. - NTLM authentication is I(not) supported even if the GSSAPI mech for NTLM has been installed. type: bool default: no version_added: '2.11' use_netrc: description: - - Determining whether to use credentials from ``~/.netrc`` file - - By default .netrc is used with Basic authentication headers - - When set to False, .netrc credentials are ignored + - Determining whether to use credentials from C(~/.netrc) file. + - By default C(.netrc) is used with Basic authentication headers. + - When V(false), C(.netrc) credentials are ignored. type: bool default: true version_added: '2.14' @@ -218,9 +219,9 @@ seealso: - module: ansible.windows.win_get_url author: - Jan-Piet Mens (@jpmens) -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Download foo.conf ansible.builtin.get_url: url: http://example.com/path/file.conf @@ -261,7 +262,7 @@ EXAMPLES = r''' - name: Download file from a file path ansible.builtin.get_url: - url: file:///tmp/afile.txt + url: file:///tmp/a_file.txt dest: /tmp/afilecopy.txt - name: < Fetch file that requires authentication. @@ -271,9 +272,9 @@ EXAMPLES = r''' dest: /etc/foo.conf username: bar password: '{{ mysecret }}' -''' +""" -RETURN = r''' +RETURN = r""" backup_file: description: name of backup file created after download returned: changed and if backup=yes @@ -364,9 +365,9 @@ url: returned: always type: str sample: https://www.ansible.com/ -''' +""" -import datetime +import email.message import os import re import shutil @@ -375,6 +376,7 @@ import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib.parse import urlsplit +from ansible.module_utils.compat.datetime import utcnow, utcfromtimestamp from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import fetch_url, url_argument_spec @@ -397,10 +399,10 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, head Return (tempfile, info about the request) """ - start = datetime.datetime.utcnow() + start = utcnow() rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, headers=headers, method=method, unredirected_headers=unredirected_headers, decompress=decompress, ciphers=ciphers, use_netrc=use_netrc) - elapsed = (datetime.datetime.utcnow() - start).seconds + elapsed = (utcnow() - start).seconds if info['status'] == 304: module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', ''), status_code=info['status'], elapsed=elapsed) @@ -438,23 +440,16 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, head def extract_filename_from_headers(headers): - """ - Extracts a filename from the given dict of HTTP headers. - - Looks for the content-disposition header and applies a regex. - Returns the filename if successful, else None.""" - cont_disp_regex = 'attachment; ?filename="?([^"]+)' - res = None - - if 'content-disposition' in headers: - cont_disp = headers['content-disposition'] - match = re.match(cont_disp_regex, cont_disp) - if match: - res = match.group(1) - # Try preventing any funny business. 
- res = os.path.basename(res) + """Extracts a filename from the given dict of HTTP headers. - return res + Returns the filename if successful, else None. + """ + msg = email.message.Message() + msg['content-disposition'] = headers.get('content-disposition', '') + if filename := msg.get_param('filename', header='content-disposition'): + # Avoid directory traversal + filename = os.path.basename(filename) + return filename def is_url(checksum): @@ -600,7 +595,7 @@ def main(): # If the file already exists, prepare the last modified time for the # request. mtime = os.path.getmtime(dest) - last_mod_time = datetime.datetime.utcfromtimestamp(mtime) + last_mod_time = utcfromtimestamp(mtime) # If the checksum does not match we have to force the download # because last_mod_time may be newer than on remote @@ -608,11 +603,11 @@ def main(): force = True # download to tmpsrc - start = datetime.datetime.utcnow() + start = utcnow() method = 'HEAD' if module.check_mode else 'GET' tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest, method, unredirected_headers=unredirected_headers, decompress=decompress, ciphers=ciphers, use_netrc=use_netrc) - result['elapsed'] = (datetime.datetime.utcnow() - start).seconds + result['elapsed'] = (utcnow() - start).seconds result['src'] = tmpsrc # Now the request has completed, we can finally generate the final @@ -662,6 +657,16 @@ def main(): result['checksum_src'] != result['checksum_dest']) module.exit_json(msg=info.get('msg', ''), **result) + # If a checksum was provided, ensure that the temporary file matches this checksum + # before moving it to the destination. + if checksum != '': + tmpsrc_checksum = module.digest_from_file(tmpsrc, algorithm) + + if checksum != tmpsrc_checksum: + os.remove(tmpsrc) + module.fail_json(msg=f"The checksum for {tmpsrc} did not match {checksum}; it was {tmpsrc_checksum}.", **result) + + # Copy temporary file to destination if necessary backup_file = None if result['checksum_src'] != result['checksum_dest']: try: @@ -680,13 +685,6 @@ def main(): if os.path.exists(tmpsrc): os.remove(tmpsrc) - if checksum != '': - destination_checksum = module.digest_from_file(dest, algorithm) - - if checksum != destination_checksum: - os.remove(dest) - module.fail_json(msg="The checksum for %s did not match %s; it was %s." % (dest, checksum, destination_checksum), **result) - # allow file attribute changes file_args = module.load_file_common_arguments(module.params, path=dest) result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed']) diff --git a/lib/ansible/modules/getent.py b/lib/ansible/modules/getent.py index 5487354bda5..1938af1fcfa 100644 --- a/lib/ansible/modules/getent.py +++ b/lib/ansible/modules/getent.py @@ -3,11 +3,10 @@ # Copyright: (c) 2014, Brian Coca # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: getent short_description: A wrapper to the unix getent utility @@ -59,9 +58,9 @@ notes: - Not all databases support enumeration, check system documentation for details. 
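Back in get_url above, extract_filename_from_headers() now hands Content-Disposition parsing to the stdlib email.message API instead of a hand-rolled regex, which deals with parameter quoting correctly and keeps the basename() guard against path traversal. A standalone version of the same approach (the header value is illustrative):

    import email.message
    import os

    headers = {"content-disposition": 'attachment; filename="report.pdf"'}

    msg = email.message.Message()
    msg["content-disposition"] = headers.get("content-disposition", "")
    filename = msg.get_param("filename", header="content-disposition")
    if filename:
        filename = os.path.basename(filename)  # avoid directory traversal
    print(filename)  # -> report.pdf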
author: - Brian Coca (@bcoca) -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Get root user info ansible.builtin.getent: database: passwd @@ -98,9 +97,9 @@ EXAMPLES = ''' - ansible.builtin.debug: var: ansible_facts.getent_shadow -''' +""" -RETURN = ''' +RETURN = """ ansible_facts: description: Facts to add to ansible_facts. returned: always @@ -110,10 +109,10 @@ ansible_facts: description: - A list of results or a single result as a list of the fields the db provides - The list elements depend on the database queried, see getent man page for the structure - - Starting at 2.11 it now returns multiple duplicate entries, previouslly it only returned the last one + - Starting at 2.11 it now returns multiple duplicate entries, previously it only returned the last one returned: always type: list -''' +""" import traceback diff --git a/lib/ansible/modules/git.py b/lib/ansible/modules/git.py index 681708e6a62..14d26195461 100644 --- a/lib/ansible/modules/git.py +++ b/lib/ansible/modules/git.py @@ -3,11 +3,10 @@ # (c) 2012, Michael DeHaan # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: git author: @@ -43,19 +42,19 @@ options: default: "HEAD" accept_hostkey: description: - - Will ensure or not that "-o StrictHostKeyChecking=no" is present as an ssh option. + - Will ensure or not that C(-o StrictHostKeyChecking=no) is present as an ssh option. - Be aware that this disables a protection against MITM attacks. - - Those using OpenSSH >= 7.5 might want to set O(ssh_opts) to V(StrictHostKeyChecking=accept-new) + - Those using OpenSSH >= 7.5 might want to use O(accept_newhostkey) or set O(ssh_opts) to V(StrictHostKeyChecking=accept-new) instead, it does not remove the MITM issue but it does restrict it to the first attempt. type: bool default: 'no' version_added: "1.5" accept_newhostkey: description: - - As of OpenSSH 7.5, "-o StrictHostKeyChecking=accept-new" can be + - As of OpenSSH 7.5, C(-o StrictHostKeyChecking=accept-new) can be used which is safer and will only accepts host keys which are - not present or are the same. if V(true), ensure that - "-o StrictHostKeyChecking=accept-new" is present as an ssh option. + not present or are the same. If V(true), ensure that + C(-o StrictHostKeyChecking=accept-new) is present as an ssh option. type: bool default: 'no' version_added: "2.12" @@ -66,21 +65,21 @@ options: - For older versions it appends E(GIT_SSH_OPTS) (specific to this module) to the variables above or via a wrapper script. - Other options can add to this list, like O(key_file) and O(accept_hostkey). - - An example value could be "-o StrictHostKeyChecking=no" (although this particular + - An example value could be C(-o StrictHostKeyChecking=no) (although this particular option is better set by O(accept_hostkey)). - - The module ensures that 'BatchMode=yes' is always present to avoid prompts. + - The module ensures that C(BatchMode=yes) is always present to avoid prompts. type: str version_added: "1.5" key_file: description: - Specify an optional private key file path, on the target host, to use for the checkout. - - This ensures 'IdentitiesOnly=yes' is present in O(ssh_opts). + - This ensures C(IdentitiesOnly=yes) is present in O(ssh_opts). type: path version_added: "1.5" reference: description: - - Reference repository (see "git clone --reference ..."). 
+ - Reference repository (see C(git clone --reference ...)). type: str version_added: "1.4" remote: @@ -166,7 +165,7 @@ options: track_submodules: description: - If V(true), submodules will track the latest commit on their - master branch (or other branch specified in .gitmodules). If + master branch (or other branch specified in C(.gitmodules)). If V(false), submodules will be kept at the revision specified by the main project. This is equivalent to specifying the C(--remote) flag to git submodule update. @@ -208,15 +207,18 @@ options: type: path version_added: "2.7" - gpg_whitelist: + gpg_allowlist: description: - A list of trusted GPG fingerprints to compare to the fingerprint of the GPG-signed commit. - Only used when O(verify_commit=yes). - Use of this feature requires Git 2.6+ due to its reliance on git's C(--raw) flag to C(verify-commit) and C(verify-tag). + - Alias O(gpg_allowlist) is added in version 2.17. + - Alias O(gpg_whitelist) is deprecated and will be removed in version 2.21. type: list elements: str default: [] + aliases: [ gpg_whitelist ] version_added: "2.9" requirements: @@ -233,69 +235,69 @@ notes: SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt, one solution is to use the option accept_hostkey. Another solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling - the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts." -''' + the git module, with the following command: C(ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts)." +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Git checkout ansible.builtin.git: - repo: 'https://foosball.example.org/path/to/repo.git' - dest: /srv/checkout + repo: 'https://github.com/ansible/ansible.git' + dest: /tmp/checkout version: release-0.22 - name: Read-write git checkout from github ansible.builtin.git: - repo: git@github.com:mylogin/hello.git - dest: /home/mylogin/hello + repo: git@github.com:ansible/ansible.git + dest: /tmp/checkout - name: Just ensuring the repo checkout exists ansible.builtin.git: - repo: 'https://foosball.example.org/path/to/repo.git' - dest: /srv/checkout + repo: 'https://github.com/ansible/ansible.git' + dest: /tmp/checkout update: no - name: Just get information about the repository whether or not it has already been cloned locally ansible.builtin.git: - repo: 'https://foosball.example.org/path/to/repo.git' - dest: /srv/checkout + repo: git@github.com:ansible/ansible.git + dest: /tmp/checkout clone: no update: no - name: Checkout a github repo and use refspec to fetch all pull requests ansible.builtin.git: - repo: https://github.com/ansible/ansible-examples.git - dest: /src/ansible-examples + repo: 'https://github.com/ansible/ansible.git' + dest: /tmp/checkout refspec: '+refs/pull/*:refs/heads/*' - name: Create git archive from repo ansible.builtin.git: - repo: https://github.com/ansible/ansible-examples.git - dest: /src/ansible-examples - archive: /tmp/ansible-examples.zip + repo: git@github.com:ansible/ansible.git + dest: /tmp/checkout + archive: /tmp/ansible.zip - name: Clone a repo with separate git directory ansible.builtin.git: - repo: https://github.com/ansible/ansible-examples.git - dest: /src/ansible-examples - separate_git_dir: /src/ansible-examples.git + repo: 'https://github.com/ansible/ansible.git' + dest: /tmp/checkout + separate_git_dir: /tmp/repo - name: Example clone of a single branch ansible.builtin.git: - repo: https://github.com/ansible/ansible-examples.git - 
dest: /src/ansible-examples + repo: git@github.com:ansible/ansible.git + dest: /tmp/checkout single_branch: yes version: master - name: Avoid hanging when http(s) password is missing ansible.builtin.git: - repo: https://github.com/ansible/could-be-a-private-repo - dest: /src/from-private-repo + repo: 'https://github.com/ansible/ansible.git' + dest: /tmp/checkout environment: GIT_TERMINAL_PROMPT: 0 # reports "terminal prompts disabled" on missing password # or GIT_ASKPASS: /bin/true # for git before version 2.3.0, reports "Authentication failed" on missing password -''' +""" -RETURN = ''' +RETURN = """ after: description: Last commit revision of the repository retrieved during the update. returned: success @@ -326,7 +328,7 @@ git_dir_before: returned: success type: str sample: /path/to/old/git/dir -''' +""" import filecmp import os @@ -364,16 +366,15 @@ def relocate_repo(module, result, repo_dir, old_repo_dir, worktree_dir): def head_splitter(headfile, remote, module=None, fail_on_error=False): - '''Extract the head reference''' + """Extract the head reference""" # https://github.com/ansible/ansible-modules-core/pull/907 res = None if os.path.exists(headfile): rawdata = None try: - f = open(headfile, 'r') - rawdata = f.readline() - f.close() + with open(headfile, 'r') as f: + rawdata = f.readline() except Exception: if fail_on_error and module: module.fail_json(msg="Unable to read %s" % headfile) @@ -427,11 +428,11 @@ def get_submodule_update_params(module, git_path, cwd): def write_ssh_wrapper(module): - ''' + """ This writes an shell wrapper for ssh options to be used with git this is only relevant for older versions of gitthat cannot handle the options themselves. Returns path to the script - ''' + """ try: # make sure we have full permission to the module_dir, which # may not be the case if we're sudo'ing to a non-root user @@ -464,10 +465,10 @@ def write_ssh_wrapper(module): def set_git_ssh_env(key_file, ssh_opts, git_version, module): - ''' + """ use environment variables to configure git's ssh execution, - which varies by version but this functino should handle all. - ''' + which varies by version but this function should handle all. 
+ """ # initialise to existing ssh opts and/or append user provided if ssh_opts is None: @@ -498,7 +499,7 @@ def set_git_ssh_env(key_file, ssh_opts, git_version, module): # older than 2.3 does not know how to use git_ssh_command, # so we force it into get_ssh var # https://github.com/gitster/git/commit/09d60d785c68c8fa65094ecbe46fbc2a38d0fc1f - if git_version < LooseVersion('2.3.0'): + if git_version is not None and git_version < LooseVersion('2.3.0'): # for use in wrapper os.environ["GIT_SSH_OPTS"] = ssh_opts @@ -517,7 +518,7 @@ def set_git_ssh_env(key_file, ssh_opts, git_version, module): def get_version(module, git_path, dest, ref="HEAD"): - ''' samples the version of the git repo ''' + """ samples the version of the git repo """ cmd = "%s rev-parse %s" % (git_path, ref) rc, stdout, stderr = module.run_command(cmd, cwd=dest) @@ -568,8 +569,8 @@ def get_submodule_versions(git_path, module, dest, version='HEAD'): def clone(git_path, module, repo, dest, remote, depth, version, bare, - reference, refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_whitelist, single_branch): - ''' makes a new git repo if it does not already exist ''' + reference, refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_allowlist, single_branch): + """ makes a new git repo if it does not already exist """ dest_dirname = os.path.dirname(dest) try: os.makedirs(dest_dirname) @@ -635,7 +636,7 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare, module.run_command(cmd, check_rc=True, cwd=dest) if verify_commit: - verify_commit_sign(git_path, module, dest, version, gpg_whitelist) + verify_commit_sign(git_path, module, dest, version, gpg_allowlist) def has_local_mods(module, git_path, dest, bare): @@ -651,17 +652,17 @@ def has_local_mods(module, git_path, dest, bare): def reset(git_path, module, dest): - ''' + """ Resets the index and working tree to HEAD. Discards any changes to tracked files in working tree since that commit. - ''' + """ cmd = "%s reset --hard HEAD" % (git_path,) return module.run_command(cmd, check_rc=True, cwd=dest) def get_diff(module, git_path, dest, repo, remote, depth, bare, before, after): - ''' Return the difference between 2 versions ''' + """ Return the difference between 2 versions """ if before is None: return {'prepared': '>> Newly checked out %s' % after} elif before != after: @@ -815,13 +816,13 @@ def get_repo_path(dest, bare): def get_head_branch(git_path, module, dest, remote, bare=False): - ''' + """ Determine what branch HEAD is associated with. This is partly taken from lib/ansible/utils/__init__.py. It finds the correct path to .git/HEAD and reads from that file the branch that HEAD is associated with. In the case of a detached HEAD, this will look up the branch in .git/refs/remotes//HEAD. - ''' + """ try: repo_path = get_repo_path(dest, bare) except (IOError, ValueError) as err: @@ -843,7 +844,7 @@ def get_head_branch(git_path, module, dest, remote, bare=False): def get_remote_url(git_path, module, dest, remote): - '''Return URL of remote source for repo.''' + """Return URL of remote source for repo.""" command = [git_path, 'ls-remote', '--get-url', remote] (rc, out, err) = module.run_command(command, cwd=dest) if rc != 0: @@ -854,7 +855,7 @@ def get_remote_url(git_path, module, dest, remote): def set_remote_url(git_path, module, repo, dest, remote): - ''' updates repo from remote sources ''' + """ updates repo from remote sources """ # Return if remote URL isn't changing. 
remote_url = get_remote_url(git_path, module, dest, remote) if remote_url == repo or unfrackgitpath(remote_url) == unfrackgitpath(repo): @@ -872,7 +873,7 @@ def set_remote_url(git_path, module, repo, dest, remote): def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used, force=False): - ''' updates repo from remote sources ''' + """ updates repo from remote sources """ set_remote_url(git_path, module, repo, dest, remote) commands = [] @@ -911,7 +912,7 @@ def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, g refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*'] else: # ensure all tags are fetched - if git_version_used >= LooseVersion('1.9'): + if git_version_used is not None and git_version_used >= LooseVersion('1.9'): fetch_cmd.append('--tags') else: # old git versions have a bug in --tags that prevents updating existing tags @@ -979,7 +980,7 @@ def submodules_fetch(git_path, module, remote, track_submodules, dest): def submodule_update(git_path, module, dest, track_submodules, force=False): - ''' init and update any submodules ''' + """ init and update any submodules """ # get the valid submodule params params = get_submodule_update_params(module, git_path, dest) @@ -1016,7 +1017,7 @@ def set_remote_branch(git_path, module, dest, remote, version, depth): module.fail_json(msg="Failed to fetch branch from remote: %s" % version, stdout=out, stderr=err, rc=rc) -def switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_whitelist): +def switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_allowlist): cmd = '' if version == 'HEAD': branch = get_head_branch(git_path, module, dest, remote) @@ -1052,26 +1053,26 @@ def switch_version(git_path, module, dest, remote, version, verify_commit, depth stdout=out1, stderr=err1, rc=rc, cmd=cmd) if verify_commit: - verify_commit_sign(git_path, module, dest, version, gpg_whitelist) + verify_commit_sign(git_path, module, dest, version, gpg_allowlist) return (rc, out1, err1) -def verify_commit_sign(git_path, module, dest, version, gpg_whitelist): +def verify_commit_sign(git_path, module, dest, version, gpg_allowlist): if version in get_annotated_tags(git_path, module, dest): git_sub = "verify-tag" else: git_sub = "verify-commit" cmd = "%s %s %s" % (git_path, git_sub, version) - if gpg_whitelist: + if gpg_allowlist: cmd += " --raw" (rc, out, err) = module.run_command(cmd, cwd=dest) if rc != 0: module.fail_json(msg='Failed to verify GPG signature of commit/tag "%s"' % version, stdout=out, stderr=err, rc=rc) - if gpg_whitelist: + if gpg_allowlist: fingerprint = get_gpg_fingerprint(err) - if fingerprint not in gpg_whitelist: - module.fail_json(msg='The gpg_whitelist does not include the public key "%s" for this commit' % fingerprint, stdout=out, stderr=err, rc=rc) + if fingerprint not in gpg_allowlist: + module.fail_json(msg='The gpg_allowlist does not include the public key "%s" for this commit' % fingerprint, stdout=out, stderr=err, rc=rc) return (rc, out, err) @@ -1184,7 +1185,16 @@ def main(): clone=dict(default='yes', type='bool'), update=dict(default='yes', type='bool'), verify_commit=dict(default='no', type='bool'), - gpg_whitelist=dict(default=[], type='list', elements='str'), + gpg_allowlist=dict( + default=[], type='list', aliases=['gpg_whitelist'], elements='str', + deprecated_aliases=[ + dict( + name='gpg_whitelist', + version='2.21', + collection_name='ansible.builtin', + ) + ], + ), 
accept_hostkey=dict(default='no', type='bool'), accept_newhostkey=dict(default='no', type='bool'), key_file=dict(default=None, type='path', required=False), @@ -1215,7 +1225,7 @@ def main(): allow_clone = module.params['clone'] bare = module.params['bare'] verify_commit = module.params['verify_commit'] - gpg_whitelist = module.params['gpg_whitelist'] + gpg_allowlist = module.params['gpg_allowlist'] reference = module.params['reference'] single_branch = module.params['single_branch'] git_path = module.params['executable'] or module.get_bin_path('git', True) @@ -1264,7 +1274,7 @@ def main(): # We screenscrape a huge amount of git commands so use C locale anytime we # call run_command() locale = get_best_parsable_locale(module) - module.run_command_environ_update = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale) + module.run_command_environ_update = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale, LANGUAGE=locale) if separate_git_dir: separate_git_dir = os.path.realpath(separate_git_dir) @@ -1297,7 +1307,7 @@ def main(): # GIT_SSH= as an environment variable, might create sh wrapper script for older versions. set_git_ssh_env(key_file, ssh_opts, git_version_used, module) - if depth is not None and git_version_used < LooseVersion('1.9.1'): + if depth is not None and git_version_used is not None and git_version_used < LooseVersion('1.9.1'): module.warn("git version is too old to fully support the depth argument. Falling back to full checkouts.") depth = None @@ -1322,7 +1332,7 @@ def main(): module.exit_json(**result) # there's no git config, so clone clone(git_path, module, repo, dest, remote, depth, version, bare, reference, - refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_whitelist, single_branch) + refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_allowlist, single_branch) elif not update: # Just return having found a repo already in the dest path # this does no checking that the repo is the actual repo @@ -1377,7 +1387,7 @@ def main(): # switch to version specified regardless of whether # we got new revisions from the repository if not bare: - switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_whitelist) + switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_allowlist) # Deal with submodules submodules_updated = False diff --git a/lib/ansible/modules/group.py b/lib/ansible/modules/group.py index f433a481fd6..a31b9f8c73a 100644 --- a/lib/ansible/modules/group.py +++ b/lib/ansible/modules/group.py @@ -3,11 +3,10 @@ # Copyright: (c) 2012, Stephen Fromm # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: group version_added: "0.0.2" @@ -38,7 +37,7 @@ options: force: description: - Whether to delete a group even if it is the primary group of a user. - - Only applicable on platforms which implement a --force flag on the group deletion command. + - Only applicable on platforms which implement a C(--force) flag on the group deletion command. type: bool default: false version_added: "2.15" @@ -63,6 +62,22 @@ options: type: bool default: no version_added: "2.8" + gid_min: + description: + - Sets the GID_MIN value for group creation. + - Overwrites /etc/login.defs default value. + - Currently supported on Linux. 
Does nothing when used with other platforms. + - Requires O(local) is omitted or V(False). + type: int + version_added: "2.18" + gid_max: + description: + - Sets the GID_MAX value for group creation. + - Overwrites /etc/login.defs default value. + - Currently supported on Linux. Does nothing when used with other platforms. + - Requires O(local) is omitted or V(False). + type: int + version_added: "2.18" extends_documentation_fragment: action_common_attributes attributes: check_mode: @@ -76,9 +91,9 @@ seealso: - module: ansible.windows.win_group author: - Stephen Fromm (@sfromm) -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Ensure group "somegroup" exists ansible.builtin.group: name: somegroup @@ -89,9 +104,9 @@ EXAMPLES = ''' name: docker state: present gid: 1750 -''' +""" -RETURN = r''' +RETURN = r""" gid: description: Group ID of the group. returned: When O(state) is C(present) @@ -112,7 +127,7 @@ system: returned: When O(state) is C(present) type: bool sample: False -''' +""" import grp import os @@ -152,6 +167,14 @@ class Group(object): self.system = module.params['system'] self.local = module.params['local'] self.non_unique = module.params['non_unique'] + self.gid_min = module.params['gid_min'] + self.gid_max = module.params['gid_max'] + + if self.local: + if self.gid_min is not None: + module.fail_json(msg="'gid_min' can not be used with 'local'") + if self.gid_max is not None: + module.fail_json(msg="'gid_max' can not be used with 'local'") def execute_command(self, cmd): return self.module.run_command(cmd) @@ -185,6 +208,12 @@ class Group(object): cmd.append('-o') elif key == 'system' and kwargs[key] is True: cmd.append('-r') + if self.gid_min is not None: + cmd.append('-K') + cmd.append('GID_MIN=' + str(self.gid_min)) + if self.gid_max is not None: + cmd.append('-K') + cmd.append('GID_MAX=' + str(self.gid_max)) cmd.append(self.name) return self.execute_command(cmd) @@ -227,14 +256,7 @@ class Group(object): if line.startswith(to_bytes(name_test)): exists = True break - - if not exists: - self.module.warn( - "'local: true' specified and group was not found in {file}. 
" - "The local group may already exist if the local group database exists somewhere other than {file}.".format(file=self.GROUPFILE)) - return exists - else: try: if grp.getgrnam(self.name): @@ -300,6 +322,12 @@ class SunOS(Group): cmd.append(str(kwargs[key])) if self.non_unique: cmd.append('-o') + if self.gid_min is not None: + cmd.append('-K') + cmd.append('GID_MIN=' + str(self.gid_min)) + if self.gid_max is not None: + cmd.append('-K') + cmd.append('GID_MAX=' + str(self.gid_max)) cmd.append(self.name) return self.execute_command(cmd) @@ -331,6 +359,12 @@ class AIX(Group): cmd.append('id=' + str(kwargs[key])) elif key == 'system' and kwargs[key] is True: cmd.append('-a') + if self.gid_min is not None: + cmd.append('-K') + cmd.append('GID_MIN=' + str(self.gid_min)) + if self.gid_max is not None: + cmd.append('-K') + cmd.append('GID_MAX=' + str(self.gid_max)) cmd.append(self.name) return self.execute_command(cmd) @@ -376,6 +410,12 @@ class FreeBsdGroup(Group): cmd.append(str(self.gid)) if self.non_unique: cmd.append('-o') + if self.gid_min is not None: + cmd.append('-K') + cmd.append('GID_MIN=' + str(self.gid_min)) + if self.gid_max is not None: + cmd.append('-K') + cmd.append('GID_MAX=' + str(self.gid_max)) return self.execute_command(cmd) def group_mod(self, **kwargs): @@ -500,6 +540,12 @@ class OpenBsdGroup(Group): cmd.append(str(self.gid)) if self.non_unique: cmd.append('-o') + if self.gid_min is not None: + cmd.append('-K') + cmd.append('GID_MIN=' + str(self.gid_min)) + if self.gid_max is not None: + cmd.append('-K') + cmd.append('GID_MAX=' + str(self.gid_max)) cmd.append(self.name) return self.execute_command(cmd) @@ -546,6 +592,12 @@ class NetBsdGroup(Group): cmd.append(str(self.gid)) if self.non_unique: cmd.append('-o') + if self.gid_min is not None: + cmd.append('-K') + cmd.append('GID_MIN=' + str(self.gid_min)) + if self.gid_max is not None: + cmd.append('-K') + cmd.append('GID_MAX=' + str(self.gid_max)) cmd.append(self.name) return self.execute_command(cmd) @@ -586,6 +638,14 @@ class BusyBoxGroup(Group): if self.system: cmd.append('-S') + if self.gid_min is not None: + cmd.append('-K') + cmd.append('GID_MIN=' + str(self.gid_min)) + + if self.gid_max is not None: + cmd.append('-K') + cmd.append('GID_MAX=' + str(self.gid_max)) + cmd.append(self.name) return self.execute_command(cmd) @@ -634,6 +694,8 @@ def main(): system=dict(type='bool', default=False), local=dict(type='bool', default=False), non_unique=dict(type='bool', default=False), + gid_min=dict(type='int'), + gid_max=dict(type='int'), ), supports_check_mode=True, required_if=[ diff --git a/lib/ansible/modules/group_by.py b/lib/ansible/modules/group_by.py index 0d1e0c8e884..5fc7b690af4 100644 --- a/lib/ansible/modules/group_by.py +++ b/lib/ansible/modules/group_by.py @@ -4,11 +4,10 @@ # Copyright: Ansible Team # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: group_by short_description: Create Ansible groups based on facts @@ -66,9 +65,9 @@ seealso: - module: ansible.builtin.add_host author: - Jeroen Hoekx (@jhoekx) -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create groups based on the machine architecture ansible.builtin.group_by: key: machine_{{ ansible_machine }} @@ -86,4 +85,4 @@ EXAMPLES = r''' - name: Add all active hosts to a static group ansible.builtin.group_by: key: done -''' +""" diff 
--git a/lib/ansible/modules/hostname.py b/lib/ansible/modules/hostname.py index 4a1c7ead661..79f9bcb0709 100644 --- a/lib/ansible/modules/hostname.py +++ b/lib/ansible/modules/hostname.py @@ -4,11 +4,10 @@ # Copyright: (c) 2013, Hiroaki Nakamura # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: hostname author: @@ -37,7 +36,7 @@ options: description: - Which strategy to use to update the hostname. - If not set we try to autodetect, but this can be problematic, particularly with containers as they can present misleading information. - - Note that 'systemd' should be specified for RHEL/EL/CentOS 7+. Older distributions should use 'redhat'. + - Note that V(systemd) should be specified for RHEL/EL/CentOS 7+. Older distributions should use V(redhat). choices: ['alpine', 'debian', 'freebsd', 'generic', 'macos', 'macosx', 'darwin', 'openbsd', 'openrc', 'redhat', 'sles', 'solaris', 'systemd'] type: str version_added: '2.9' @@ -53,9 +52,9 @@ attributes: support: full platform: platforms: posix -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Set a hostname ansible.builtin.hostname: name: web01 @@ -64,7 +63,7 @@ EXAMPLES = ''' ansible.builtin.hostname: name: web01 use: systemd -''' +""" import os import platform @@ -82,7 +81,6 @@ from ansible.module_utils.common.sys_info import get_platform_subclass from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector from ansible.module_utils.facts.utils import get_file_lines, get_file_content from ansible.module_utils.common.text.converters import to_native, to_text -from ansible.module_utils.six import PY3, text_type STRATS = { 'alpine': 'Alpine', @@ -518,7 +516,7 @@ class DarwinStrategy(BaseStrategy): However, macOS also has LocalHostName and ComputerName settings. LocalHostName controls the Bonjour/ZeroConf name, used by services like AirDrop. This class implements a method, _scrub_hostname(), that mimics - the transformations macOS makes on hostnames when enterened in the Sharing + the transformations macOS makes on hostnames when entered in the Sharing preference pane. It replaces spaces with dashes and removes all special characters. 
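The next hostname.py hunk removes the Python 2 era _make_translation() shim: on Python 3, str.maketrans() builds the combined replace-and-delete table in a single call. What _scrub_hostname() does then reduces to (sample name illustrative):

    replace_chars = '\'"~`!@#$%^&*(){}[]/=?+\\|-_ '
    delete_chars = ".'"

    # One table: each char in replace_chars maps to '-', each char in
    # delete_chars is dropped (deletion wins where the two sets overlap).
    table = str.maketrans(replace_chars, "-" * len(replace_chars), delete_chars)

    print("My Mac's Laptop!".translate(table))  # -> My-Macs-Laptop-

The module afterwards collapses runs of dashes into a single dash, as the comment retained in the hunk below notes.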
@@ -533,21 +531,6 @@ class DarwinStrategy(BaseStrategy): self.name_types = ('HostName', 'ComputerName', 'LocalHostName') self.scrubbed_name = self._scrub_hostname(self.module.params['name']) - def _make_translation(self, replace_chars, replacement_chars, delete_chars): - if PY3: - return str.maketrans(replace_chars, replacement_chars, delete_chars) - - if not isinstance(replace_chars, text_type) or not isinstance(replacement_chars, text_type): - raise ValueError('replace_chars and replacement_chars must both be strings') - if len(replace_chars) != len(replacement_chars): - raise ValueError('replacement_chars must be the same length as replace_chars') - - table = dict(zip((ord(c) for c in replace_chars), replacement_chars)) - for char in delete_chars: - table[ord(char)] = None - - return table - def _scrub_hostname(self, name): """ LocalHostName only accepts valid DNS characters while HostName and ComputerName @@ -559,7 +542,7 @@ class DarwinStrategy(BaseStrategy): name = to_text(name) replace_chars = u'\'"~`!@#$%^&*(){}[]/=?+\\|-_ ' delete_chars = u".'" - table = self._make_translation(replace_chars, u'-' * len(replace_chars), delete_chars) + table = str.maketrans(replace_chars, '-' * len(replace_chars), delete_chars) name = name.translate(table) # Replace multiple dashes with a single dash @@ -903,8 +886,6 @@ def main(): if name != current_hostname: name_before = current_hostname - elif name != permanent_hostname: - name_before = permanent_hostname else: name_before = permanent_hostname diff --git a/lib/ansible/modules/import_playbook.py b/lib/ansible/modules/import_playbook.py index 9adaebf363f..71f1693241d 100644 --- a/lib/ansible/modules/import_playbook.py +++ b/lib/ansible/modules/import_playbook.py @@ -3,11 +3,10 @@ # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- author: Ansible Core Team (@ansible) module: import_playbook @@ -41,11 +40,11 @@ seealso: - module: ansible.builtin.import_tasks - module: ansible.builtin.include_role - module: ansible.builtin.include_tasks -- ref: playbooks_reuse_includes +- ref: playbooks_reuse description: More information related to including and importing playbooks, roles and tasks. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - hosts: localhost tasks: - ansible.builtin.debug: @@ -70,8 +69,8 @@ EXAMPLES = r''' - name: This fails because I'm inside a play already ansible.builtin.import_playbook: stuff.yaml -''' +""" -RETURN = r''' +RETURN = r""" # This module does not return anything except plays to execute. -''' +""" diff --git a/lib/ansible/modules/import_role.py b/lib/ansible/modules/import_role.py index 2f118f2f57b..0b9eff71244 100644 --- a/lib/ansible/modules/import_role.py +++ b/lib/ansible/modules/import_role.py @@ -2,11 +2,10 @@ # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- author: Ansible Core Team (@ansible) module: import_role @@ -56,6 +55,14 @@ options: type: bool default: yes version_added: '2.11' + public: + description: + - This option dictates whether the role's C(vars) and C(defaults) are exposed to the play. 
+ - Variables are exposed to the play at playbook parsing time, and available to earlier roles and tasks as well unlike C(include_role). + - The default depends on the configuration option :ref:`default_private_role_vars`. + type: bool + default: yes + version_added: '2.17' extends_documentation_fragment: - action_common_attributes - action_common_attributes.conn @@ -78,11 +85,11 @@ seealso: - module: ansible.builtin.import_tasks - module: ansible.builtin.include_role - module: ansible.builtin.include_tasks -- ref: playbooks_reuse_includes +- ref: playbooks_reuse description: More information related to including and importing playbooks, roles and tasks. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - hosts: all tasks: - ansible.builtin.import_role: @@ -103,8 +110,8 @@ EXAMPLES = r''' ansible.builtin.import_role: name: myrole when: not idontwanttorun -''' +""" -RETURN = r''' +RETURN = r""" # This module does not return anything except tasks to execute. -''' +""" diff --git a/lib/ansible/modules/import_tasks.py b/lib/ansible/modules/import_tasks.py index e57862066d0..26ef9d90198 100644 --- a/lib/ansible/modules/import_tasks.py +++ b/lib/ansible/modules/import_tasks.py @@ -3,11 +3,10 @@ # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- author: Ansible Core Team (@ansible) module: import_tasks @@ -45,11 +44,11 @@ seealso: - module: ansible.builtin.import_role - module: ansible.builtin.include_role - module: ansible.builtin.include_tasks -- ref: playbooks_reuse_includes +- ref: playbooks_reuse description: More information related to including and importing playbooks, roles and tasks. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - hosts: all tasks: - ansible.builtin.debug: @@ -70,8 +69,8 @@ EXAMPLES = r''' - name: Apply conditional to all imported tasks ansible.builtin.import_tasks: stuff.yaml when: hostvar is defined -''' +""" -RETURN = r''' +RETURN = r""" # This module does not return anything except tasks to execute. -''' +""" diff --git a/lib/ansible/modules/include_role.py b/lib/ansible/modules/include_role.py index c0e24ae190c..e800c5e61c9 100644 --- a/lib/ansible/modules/include_role.py +++ b/lib/ansible/modules/include_role.py @@ -3,11 +3,10 @@ # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- author: Ansible Core Team (@ansible) module: include_role @@ -91,11 +90,11 @@ seealso: - module: ansible.builtin.import_role - module: ansible.builtin.import_tasks - module: ansible.builtin.include_tasks -- ref: playbooks_reuse_includes +- ref: playbooks_reuse description: More information related to including and importing playbooks, roles and tasks. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - ansible.builtin.include_role: name: myrole @@ -132,8 +131,8 @@ EXAMPLES = r''' - install tags: - always -''' +""" -RETURN = r''' +RETURN = r""" # This module does not return anything except tasks to execute. 
-''' +""" diff --git a/lib/ansible/modules/include_tasks.py b/lib/ansible/modules/include_tasks.py index d89950a950a..d2657960d98 100644 --- a/lib/ansible/modules/include_tasks.py +++ b/lib/ansible/modules/include_tasks.py @@ -3,11 +3,10 @@ # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- author: Ansible Core Team (@ansible) module: include_tasks @@ -49,11 +48,11 @@ seealso: - module: ansible.builtin.import_role - module: ansible.builtin.import_tasks - module: ansible.builtin.include_role -- ref: playbooks_reuse_includes +- ref: playbooks_reuse description: More information related to including and importing playbooks, roles and tasks. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - hosts: all tasks: - ansible.builtin.debug: @@ -92,8 +91,8 @@ EXAMPLES = r''' - install tags: - always -''' +""" -RETURN = r''' +RETURN = r""" # This module does not return anything except tasks to execute. -''' +""" diff --git a/lib/ansible/modules/include_vars.py b/lib/ansible/modules/include_vars.py index 3752ca65701..b2e3c44e386 100644 --- a/lib/ansible/modules/include_vars.py +++ b/lib/ansible/modules/include_vars.py @@ -2,11 +2,10 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- author: Allen Sanabria (@linuxdynasty) module: include_vars @@ -22,20 +21,20 @@ options: file: description: - The file name from which variables should be loaded. - - If the path is relative, it will look for the file in vars/ subdirectory of a role or relative to playbook. + - If the path is relative, it will look for the file in C(vars/) subdirectory of a role or relative to playbook. type: path version_added: "2.2" dir: description: - The directory name from which the variables should be loaded. - - If the path is relative and the task is inside a role, it will look inside the role's vars/ subdirectory. + - If the path is relative and the task is inside a role, it will look inside the role's C(vars/) subdirectory. - If the path is relative and not inside a role, it will be parsed relative to the playbook. type: path version_added: "2.2" name: description: - The name of a variable into which assign the included vars. - - If omitted (null) they will be made top level vars. + - If omitted (V(null)) they will be made top level vars. type: str version_added: "2.2" depth: @@ -82,8 +81,8 @@ options: version_added: "2.12" free-form: description: - - This module allows you to specify the 'file' option directly without any other options. - - There is no 'free-form' option, this is just an indicator, see example below. + - This module allows you to specify the O(file) option directly without any other options. + - There is no O(ignore:free-form) option, this is just an indicator, see example below. extends_documentation_fragment: - action_common_attributes - action_common_attributes.conn @@ -113,9 +112,9 @@ seealso: - module: ansible.builtin.set_fact - ref: playbooks_delegation description: More information related to task delegation. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Include vars of stuff.yaml into the 'stuff' variable (2.2). 
ansible.builtin.include_vars: file: stuff.yaml @@ -180,9 +179,9 @@ EXAMPLES = r''' - 'yaml' - 'yml' - 'json' -''' +""" -RETURN = r''' +RETURN = r""" ansible_facts: description: Variables that were included and their values returned: success @@ -194,4 +193,4 @@ ansible_included_var_files: type: list sample: [ /path/to/file.json, /path/to/file.yaml ] version_added: '2.4' -''' +""" diff --git a/lib/ansible/modules/iptables.py b/lib/ansible/modules/iptables.py index fee80495a1b..164b53960b0 100644 --- a/lib/ansible/modules/iptables.py +++ b/lib/ansible/modules/iptables.py @@ -4,11 +4,10 @@ # Copyright: (c) 2017, Sébastien DA ROCHA # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: iptables short_description: Modify iptables rules @@ -38,9 +37,9 @@ notes: options: table: description: - - This option specifies the packet matching table which the command should operate on. + - This option specifies the packet matching table on which the command should operate. - If the kernel is configured with automatic module loading, an attempt will be made - to load the appropriate module for that table if it is not already there. + to load the appropriate module for that table if it is not already there. type: str choices: [ filter, nat, mangle, raw, security ] default: filter @@ -134,9 +133,9 @@ options: description: - Specifies a match to use, that is, an extension module that tests for a specific property. - - The set of matches make up the condition under which a target is invoked. + - The set of matches makes up the condition under which a target is invoked. - Matches are evaluated first to last if specified as an array and work in short-circuit - fashion, i.e. if one extension yields false, evaluation will stop. + fashion, in other words if one extension yields false, the evaluation will stop. type: list elements: str default: [] @@ -144,7 +143,7 @@ options: description: - This specifies the target of the rule; i.e., what to do if the packet matches it. - The target can be a user-defined chain (other than the one - this rule is in), one of the special builtin targets which decide the + this rule is in), one of the special builtin targets that decide the fate of the packet immediately, or an extension (see EXTENSIONS below). - If this option is omitted in a rule (and the goto parameter @@ -153,26 +152,26 @@ options: type: str gateway: description: - - This specifies the IP address of host to send the cloned packets. - - This option is only valid when O(jump) is set to V(TEE). + - This specifies the IP address of the host to send the cloned packets. + - This option is only valid when O(jump=TEE). type: str version_added: "2.8" log_prefix: description: - - Specifies a log text for the rule. Only make sense with a LOG jump. + - Specifies a log text for the rule. Only makes sense with a LOG jump. type: str version_added: "2.5" log_level: description: - Logging level according to the syslogd-defined priorities. - The value can be strings or numbers from 1-8. - - This parameter is only applicable if O(jump) is set to V(LOG). + - This parameter is only applicable if O(jump=LOG). 
     type: str
     version_added: "2.8"
     choices: [ '0', '1', '2', '3', '4', '5', '6', '7', 'emerg', 'alert', 'crit', 'error', 'warning', 'notice', 'info', 'debug' ]
   goto:
     description:
-      - This specifies that the processing should continue in a user specified chain.
+      - This specifies that the processing should continue in a user-specified chain.
       - Unlike the jump argument return will not continue processing in
         this chain but instead in the chain that called us via jump.
     type: str
@@ -200,7 +199,7 @@
         of fragmented packets.
       - Since there is no way to tell the source or destination ports of such
         a packet (or ICMP type), such a packet will not match any rules which specify them.
-      - When the "!" argument precedes fragment argument, the rule will only match head fragments,
+      - When the "!" argument precedes the fragment argument, the rule will only match head fragments,
         or unfragmented packets.
     type: str
   set_counters:
@@ -243,13 +242,13 @@
     type: str
   to_destination:
     description:
-      - This specifies a destination address to use with C(DNAT).
+      - This specifies a destination address to use with O(jump=DNAT).
       - Without this, the destination address is never altered.
     type: str
     version_added: "2.1"
   to_source:
     description:
-      - This specifies a source address to use with C(SNAT).
+      - This specifies a source address to use with O(jump=SNAT).
       - Without this, the source address is never altered.
     type: str
     version_added: "2.2"
@@ -266,6 +265,7 @@
     description:
       - This allows specifying a DSCP mark to be added to packets.
        It takes either an integer or hex value.
+      - If the parameter is set, O(jump) is set to V(DSCP).
       - Mutually exclusive with O(set_dscp_mark_class).
     type: str
     version_added: "2.1"
@@ -273,6 +273,7 @@
     description:
       - This allows specifying a predefined DiffServ class which will be
        translated to the corresponding DSCP mark.
+      - If the parameter is set, O(jump) is set to V(DSCP).
       - Mutually exclusive with O(set_dscp_mark).
     type: str
     version_added: "2.1"
@@ -289,7 +290,7 @@
     default: []
   src_range:
     description:
-      - Specifies the source IP range to match in the iprange module.
+      - Specifies the source IP range to match using the iprange module.
     type: str
     version_added: "2.8"
   dst_range:
@@ -299,8 +300,8 @@
     version_added: "2.8"
   match_set:
     description:
-      - Specifies a set name which can be defined by ipset.
-      - Must be used together with the match_set_flags parameter.
+      - Specifies a set name that can be defined by ipset.
+      - Must be used together with the O(match_set_flags) parameter.
       - When the V(!) argument is prepended then it inverts the rule.
       - Uses the iptables set extension.
     type: str
@@ -308,10 +309,11 @@
   match_set_flags:
     description:
       - Specifies the necessary flags for the match_set parameter.
-      - Must be used together with the match_set parameter.
+      - Must be used together with the O(match_set) parameter.
       - Uses the iptables set extension.
+      - Choices V(dst,dst) and V(src,src) added in version 2.17.
     type: str
-    choices: [ "src", "dst", "src,dst", "dst,src" ]
+    choices: [ "src", "dst", "src,dst", "dst,src", "dst,dst", "src,src" ]
     version_added: "2.11"
   limit:
     description:
@@ -327,27 +329,27 @@
     version_added: "2.1"
   uid_owner:
     description:
-      - Specifies the UID or username to use in match by owner rule.
+      - Specifies the UID or username to use in the match by owner rule.
       - From Ansible 2.6 when the C(!) argument is prepended then the it
        inverts the rule to apply instead to all users except that one specified.
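For readers unfamiliar with the set extension referenced above, a hedged sketch of how a match_set/match_set_flags pair lands on the iptables command line; the set name and rule are invented for illustration:

    def set_match_args(match_set, match_set_flags):
        # the flag list tells the kernel which packet fields to look up in the
        # ipset, one entry per set dimension; the new "src,src"/"dst,dst"
        # choices address two-dimensional sets
        return ['-m', 'set', '--match-set', match_set, match_set_flags]

    # e.g. drop traffic whose source matches both dimensions of a hash:net,net set
    print(' '.join(['iptables', '-A', 'INPUT', *set_match_args('blocklist', 'src,src'), '-j', 'DROP']))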
     type: str
     version_added: "2.1"
   gid_owner:
     description:
-      - Specifies the GID or group to use in match by owner rule.
+      - Specifies the GID or group to use in the match by owner rule.
     type: str
     version_added: "2.9"
   reject_with:
     description:
       - 'Specifies the error packet type to return while rejecting. It implies
-        "jump: REJECT".'
+        C(jump=REJECT).'
     type: str
     version_added: "2.1"
   icmp_type:
     description:
       - This allows specification of the ICMP type, which can be a numeric
        ICMP type, type/code pair, or one of the ICMP type names shown by the
-        command 'iptables -p icmp -h'
+        command C(iptables -p icmp -h).
     type: str
     version_added: "2.2"
   flush:
@@ -364,7 +366,7 @@
       - Only built-in chains can have policies.
       - This parameter requires the O(chain) parameter.
       - If you specify this parameter, all other parameters will be ignored.
-      - This parameter is used to set default policy for the given O(chain).
+      - This parameter is used to set the default policy for the given O(chain).
        Do not confuse this with O(jump) parameter.
     type: str
     choices: [ ACCEPT, DROP, QUEUE, RETURN ]
@@ -385,16 +387,16 @@
     version_added: "2.13"
   numeric:
     description:
-      - This parameter controls the running of the list -action of iptables, which is used internally by the module
-      - Does not affect the actual functionality. Use this if iptables hangs when creating chain or altering policy
-      - If V(true), then iptables skips the DNS-lookup of the IP addresses in a chain when it uses the list -action
-      - Listing is used internally for example when setting a policy or creting of a chain
+      - This parameter controls the running of the list -action of iptables, which is used internally by the module.
+      - Does not affect the actual functionality. Use this if iptables hangs when creating a chain or altering policy.
+      - If V(true), then iptables skips the DNS-lookup of the IP addresses in a chain when it uses the list -action.
+      - Listing is used internally for example when setting a policy or creating a chain.
type: bool default: false version_added: "2.15" -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Block specific IP ansible.builtin.iptables: chain: INPUT @@ -541,7 +543,7 @@ EXAMPLES = r''' - "443" - "8081:8083" jump: ACCEPT -''' +""" import re @@ -636,11 +638,16 @@ def construct_rule(params): append_param(rule, params['destination_port'], '--destination-port', False) append_param(rule, params['to_ports'], '--to-ports', False) append_param(rule, params['set_dscp_mark'], '--set-dscp', False) + if params.get('set_dscp_mark') and params.get('jump').lower() != 'dscp': + append_jump(rule, params['set_dscp_mark'], 'DSCP') + append_param( rule, params['set_dscp_mark_class'], '--set-dscp-class', False) + if params.get('set_dscp_mark_class') and params.get('jump').lower() != 'dscp': + append_jump(rule, params['set_dscp_mark_class'], 'DSCP') append_match_flag(rule, params['syn'], '--syn', True) if 'conntrack' in params['match']: append_csv(rule, params['ctstate'], '--ctstate') @@ -674,6 +681,9 @@ def construct_rule(params): append_param(rule, params['gid_owner'], '--gid-owner', False) if params['jump'] is None: append_jump(rule, params['reject_with'], 'REJECT') + append_jump(rule, params['set_dscp_mark_class'], 'DSCP') + append_jump(rule, params['set_dscp_mark'], 'DSCP') + append_param(rule, params['reject_with'], '--reject-with', False) append_param( rule, @@ -811,7 +821,10 @@ def main(): src_range=dict(type='str'), dst_range=dict(type='str'), match_set=dict(type='str'), - match_set_flags=dict(type='str', choices=['src', 'dst', 'src,dst', 'dst,src']), + match_set_flags=dict( + type='str', + choices=['src', 'dst', 'src,dst', 'dst,src', 'src,src', 'dst,dst'] + ), limit=dict(type='str'), limit_burst=dict(type='str'), uid_owner=dict(type='str'), @@ -828,9 +841,14 @@ def main(): ['set_dscp_mark', 'set_dscp_mark_class'], ['flush', 'policy'], ), + required_by=dict( + set_dscp_mark=('jump',), + set_dscp_mark_class=('jump',), + ), required_if=[ ['jump', 'TEE', ['gateway']], ['jump', 'tee', ['gateway']], + ['flush', False, ['chain']], ] ) args = dict( @@ -848,10 +866,6 @@ def main(): ip_version = module.params['ip_version'] iptables_path = module.get_bin_path(BINS[ip_version], True) - # Check if chain option is required - if args['flush'] is False and args['chain'] is None: - module.fail_json(msg="Either chain or flush parameter must be specified.") - if module.params.get('log_prefix', None) or module.params.get('log_level', None): if module.params['jump'] is None: module.params['jump'] = 'LOG' @@ -895,33 +909,38 @@ def main(): delete_chain(iptables_path, module, module.params) else: - insert = (module.params['action'] == 'insert') - rule_is_present = check_rule_present( - iptables_path, module, module.params - ) - chain_is_present = rule_is_present or check_chain_present( - iptables_path, module, module.params - ) - should_be_present = (args['state'] == 'present') - - # Check if target is up to date - args['changed'] = (rule_is_present != should_be_present) - if args['changed'] is False: - # Target is already up to date - module.exit_json(**args) - - # Check only; don't modify - if not module.check_mode: - if should_be_present: - if not chain_is_present and args['chain_management']: - create_chain(iptables_path, module, module.params) - - if insert: - insert_rule(iptables_path, module, module.params) + # Create the chain if there are no rule arguments + if (args['state'] == 'present') and not args['rule']: + chain_is_present = check_chain_present( + iptables_path, module, module.params + ) + 
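The argument-spec changes above move two validations out of hand-written code: required_by makes O(jump) mandatory whenever a DSCP mark is supplied (which is why the params['jump'].lower() comparisons in construct_rule() can never see None), and the new required_if entry replaces the explicit chain-or-flush fail_json() check removed from main(). A stripped-down sketch of the same mechanism, with a toy spec for illustration only:

    from ansible.module_utils.basic import AnsibleModule

    module = AnsibleModule(
        argument_spec=dict(
            chain=dict(type='str'),
            flush=dict(type='bool', default=False),
            jump=dict(type='str'),
            set_dscp_mark=dict(type='str'),
        ),
        # if set_dscp_mark is supplied, jump must be supplied too
        required_by=dict(set_dscp_mark=('jump',)),
        # when flush is False (a rule operation), chain becomes mandatory
        required_if=[['flush', False, ['chain']]],
    )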
args['changed'] = not chain_is_present + + if (not chain_is_present and args['chain_management'] and not module.check_mode): + create_chain(iptables_path, module, module.params) + + else: + insert = (module.params['action'] == 'insert') + rule_is_present = check_rule_present( + iptables_path, module, module.params + ) + + should_be_present = (args['state'] == 'present') + # Check if target is up to date + args['changed'] = (rule_is_present != should_be_present) + if args['changed'] is False: + # Target is already up to date + module.exit_json(**args) + + # Modify if not check_mode + if not module.check_mode: + if should_be_present: + if insert: + insert_rule(iptables_path, module, module.params) + else: + append_rule(iptables_path, module, module.params) else: - append_rule(iptables_path, module, module.params) - else: - remove_rule(iptables_path, module, module.params) + remove_rule(iptables_path, module, module.params) module.exit_json(**args) diff --git a/lib/ansible/modules/known_hosts.py b/lib/ansible/modules/known_hosts.py index 0c97ce2e0b1..c001915115d 100644 --- a/lib/ansible/modules/known_hosts.py +++ b/lib/ansible/modules/known_hosts.py @@ -2,16 +2,15 @@ # Copyright: (c) 2014, Matthew Vernon # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: known_hosts short_description: Add or remove a host from the C(known_hosts) file description: - - The M(ansible.builtin.known_hosts) module lets you add or remove a host keys from the C(known_hosts) file. + - The M(ansible.builtin.known_hosts) module lets you add or remove host keys from the C(known_hosts) file. - Starting at Ansible 2.2, multiple entries per host are allowed, but only one for each key type supported by ssh. This is useful if you're going to want to use the M(ansible.builtin.git) module over ssh, for example. - If you have a very large number of host keys to manage, you will find the M(ansible.builtin.template) module more useful. @@ -20,7 +19,7 @@ options: name: aliases: [ 'host' ] description: - - The host to add or remove (must match a host specified in key). It will be converted to lowercase so that ssh-keygen can find it. + - The host to add or remove (must match a host specified in key). It will be converted to lowercase so that C(ssh-keygen) can find it. - Must match with or present in key attribute. - For custom SSH port, O(name) needs to specify port as well. See example section. type: str @@ -50,8 +49,8 @@ options: version_added: "2.3" state: description: - - V(present) to add the host key. - - V(absent) to remove it. + - V(present) to add host keys. + - V(absent) to remove host keys. choices: [ "absent", "present" ] default: "present" type: str @@ -66,9 +65,9 @@ extends_documentation_fragment: - action_common_attributes author: - Matthew Vernon (@mcv21) -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Tell the host about our servers it might want to ssh to ansible.builtin.known_hosts: path: /etc/ssh/ssh_known_hosts @@ -88,7 +87,7 @@ EXAMPLES = r''' key: '[host1.example.com]:2222 ssh-rsa ASDeararAIUHI324324' # some key gibberish path: /etc/ssh/ssh_known_hosts state: present -''' +""" # Makes sure public host keys are present or absent in the given known_hosts # file. 
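The known_hosts module is, at its core, a thin wrapper around ssh-keygen. A hedged sketch of the two invocations the code below relies on (path and host are examples; -F reports rc 0 and prints the matching line(s) when the host is present, -R rewrites the file without it):

    import subprocess

    path = '/etc/ssh/ssh_known_hosts'
    # lookup: rc 0 plus the matching entries on stdout when the host is known
    found = subprocess.run(['ssh-keygen', '-F', 'host1.example.com', '-f', path],
                           capture_output=True, text=True)
    print(found.returncode == 0, found.stdout)

    # removal: what enforce_state() runs for state=absent with no key given
    subprocess.run(['ssh-keygen', '-R', 'host1.example.com', '-f', path], check=True)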
@@ -102,6 +101,7 @@ EXAMPLES = r''' # state = absent|present (default: present) import base64 +import copy import errno import hashlib import hmac @@ -119,6 +119,7 @@ def enforce_state(module, params): Add or remove key. """ + results = dict(changed=False) host = params["name"].lower() key = params.get("key", None) path = params.get("path") @@ -141,13 +142,12 @@ def enforce_state(module, params): found, replace_or_add, found_line = search_for_host_key(module, host, key, path, sshkeygen) - params['diff'] = compute_diff(path, found_line, replace_or_add, state, key) + results['diff'] = compute_diff(path, found_line, replace_or_add, state, key) # check if we are trying to remove a non matching key, # in that case return with no change to the host if state == 'absent' and not found_line and key: - params['changed'] = False - return params + return results # We will change state if found==True & state!="present" # or found==False & state=="present" @@ -155,15 +155,15 @@ def enforce_state(module, params): # Alternatively, if replace is true (i.e. key present, and we must change # it) if module.check_mode: - module.exit_json(changed=replace_or_add or (state == "present") != found, - diff=params['diff']) + results['changed'] = replace_or_add or (state == "present") != found + module.exit_json(**results) # Now do the work. # Only remove whole host if found and no key provided if found and not key and state == "absent": module.run_command([sshkeygen, '-R', host, '-f', path], check_rc=True) - params['changed'] = True + results['changed'] = True # Next, add a new (or replacing) entry if replace_or_add or found != (state == "present"): @@ -189,19 +189,19 @@ def enforce_state(module, params): else: module.atomic_move(outf.name, path) - params['changed'] = True + results['changed'] = True - return params + return results def sanity_check(module, host, key, sshkeygen): - '''Check supplied key is sensible + """Check supplied key is sensible host and key are parameters provided by the user; If the host provided is inconsistent with the key supplied, then this function quits, providing an error to the user. sshkeygen is the path to ssh-keygen, found earlier with get_bin_path - ''' + """ # If no key supplied, we're doing a removal, and have nothing to check here. if not key: return @@ -232,7 +232,7 @@ def sanity_check(module, host, key, sshkeygen): def search_for_host_key(module, host, key, path, sshkeygen): - '''search_for_host_key(module,host,key,path,sshkeygen) -> (found,replace_or_add,found_line) + """search_for_host_key(module,host,key,path,sshkeygen) -> (found,replace_or_add,found_line) Looks up host and keytype in the known_hosts file path; if it's there, looks to see if one of those entries matches key. Returns: @@ -241,7 +241,7 @@ def search_for_host_key(module, host, key, path, sshkeygen): found_line (int or None): the line where a key of the same type was found if found=False, then replace is always False. 
sshkeygen is the path to ssh-keygen, found earlier with get_bin_path - ''' + """ if os.path.exists(path) is False: return False, False, None @@ -274,12 +274,20 @@ def search_for_host_key(module, host, key, path, sshkeygen): module.fail_json(msg="failed to parse output of ssh-keygen for line number: '%s'" % l) else: found_key = normalize_known_hosts_key(l) - if new_key['host'][:3] == '|1|' and found_key['host'][:3] == '|1|': # do not change host hash if already hashed - new_key['host'] = found_key['host'] - if new_key == found_key: # found a match - return True, False, found_line # found exactly the same key, don't replace - elif new_key['type'] == found_key['type']: # found a different key for the same key type - return True, True, found_line + + if 'options' in found_key and found_key['options'][:15] == '@cert-authority': + if new_key == found_key: # found a match + return True, False, found_line # found exactly the same key, don't replace + elif 'options' in found_key and found_key['options'][:7] == '@revoke': + if new_key == found_key: # found a match + return True, False, found_line # found exactly the same key, don't replace + else: + if new_key['host'][:3] == '|1|' and found_key['host'][:3] == '|1|': # do not change host hash if already hashed + new_key['host'] = found_key['host'] + if new_key == found_key: # found a match + return True, False, found_line # found exactly the same key, don't replace + elif new_key['type'] == found_key['type']: # found a different key for the same key type + return True, True, found_line # No match found, return found and replace, but no line return True, True, None @@ -296,14 +304,14 @@ def hash_host_key(host, key): def normalize_known_hosts_key(key): - ''' + """ Transform a key, either taken from a known_host file or provided by the user, into a normalized form. The host part (which might include multiple hostnames or be hashed) gets replaced by the provided host. Also, any spurious information gets removed from the end (like the username@host tag usually present in hostkeys, but absent in known_hosts files) - ''' + """ key = key.strip() # trim trailing newline k = key.split() d = dict() @@ -357,7 +365,9 @@ def main(): supports_check_mode=True ) - results = enforce_state(module, module.params) + # TODO: deprecate returning everything that was passed in + results = copy.copy(module.params) + results.update(enforce_state(module, module.params)) module.exit_json(**results) diff --git a/lib/ansible/modules/lineinfile.py b/lib/ansible/modules/lineinfile.py index 3d8d85dc53a..0ef882f4840 100644 --- a/lib/ansible/modules/lineinfile.py +++ b/lib/ansible/modules/lineinfile.py @@ -5,11 +5,10 @@ # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: lineinfile short_description: Manage lines in text files @@ -88,13 +87,11 @@ options: - If specified, the line will be inserted after the last match of specified regular expression. - If the first match is required, use(firstmatch=yes). - A special value is available; V(EOF) for inserting the line at the end of the file. - - If specified regular expression has no matches, EOF will be used instead. + - If specified regular expression has no matches or no value is passed, V(EOF) will be used instead. - If O(insertbefore) is set, default value V(EOF) will be ignored. 
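A hedged sketch of the normalized-key shape the marker-aware known_hosts comparison above works with: an optional leading marker kept under "options", then host, type, and the base64 blob. The real normalize_known_hosts_key() also handles hashed hosts and trailing user@host comments; the parsing here is simplified:

    def normalize(line):
        fields = line.strip().split()
        key = {}
        if fields[0].startswith('@'):  # @cert-authority / @revoked markers
            key['options'] = fields.pop(0)
        # anything past the base64 blob (e.g. a user@host comment) is dropped
        key['host'], key['type'], key['key'] = fields[0], fields[1], fields[2]
        return key

    print(normalize('@cert-authority *.example.com ssh-rsa AAAAB3Nza... ca@corp'))
    # -> {'options': '@cert-authority', 'host': '*.example.com', 'type': 'ssh-rsa', 'key': 'AAAAB3Nza...'}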
- If regular expressions are passed to both O(regexp) and O(insertafter), O(insertafter) is only honored if no match for O(regexp) is found. - May not be used with O(backrefs) or O(insertbefore). type: str - choices: [ EOF, '*regex*' ] - default: EOF insertbefore: description: - Used with O(state=present). @@ -105,7 +102,6 @@ options: - If regular expressions are passed to both O(regexp) and O(insertbefore), O(insertbefore) is only honored if no match for O(regexp) is found. - May not be used with O(backrefs) or O(insertafter). type: str - choices: [ BOF, '*regex*' ] version_added: "1.1" create: description: @@ -127,10 +123,6 @@ options: type: bool default: no version_added: "2.5" - others: - description: - - All arguments accepted by the M(ansible.builtin.file) module also work here. - type: str extends_documentation_fragment: - action_common_attributes - action_common_attributes.files @@ -160,9 +152,9 @@ author: - Daniel Hokka Zakrissoni (@dhozac) - Ahti Kitsik (@ahtik) - Jose Angel Munoz (@imjoseangel) -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # NOTE: Before 2.3, option 'dest', 'destfile' or 'name' was used instead of 'path' - name: Ensure SELinux is set to enforcing mode ansible.builtin.lineinfile: @@ -245,9 +237,9 @@ EXAMPLES = r''' regexp: ^(host=).* line: \g<1>{{ hostname }} backrefs: yes -''' +""" -RETURN = r'''#''' +RETURN = r"""#""" import os import re diff --git a/lib/ansible/modules/meta.py b/lib/ansible/modules/meta.py index 78c3928ba19..b10a56e2444 100644 --- a/lib/ansible/modules/meta.py +++ b/lib/ansible/modules/meta.py @@ -3,11 +3,10 @@ # Copyright: (c) 2016, Ansible, a Red Hat company # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: meta short_description: Execute Ansible 'actions' version_added: '1.2' @@ -34,7 +33,12 @@ options: - V(end_host) (added in Ansible 2.8) is a per-host variation of V(end_play). Causes the play to end for the current host without failing it. - V(end_batch) (added in Ansible 2.12) causes the current batch (see C(serial)) to end without failing the host(s). Note that with C(serial=0) or undefined this behaves the same as V(end_play). - choices: [ clear_facts, clear_host_errors, end_host, end_play, flush_handlers, noop, refresh_inventory, reset_connection, end_batch ] + - V(end_role) (added in Ansible 2.18) causes the currently executing role to end without failing the host(s). + Effectively all tasks from within a role after V(end_role) is executed are ignored. Since handlers live in a global, + play scope, all handlers added via the role are unaffected and are still executed if notified. It is an error + to call V(end_role) from outside of a role or from a handler. Note that V(end_role) does not have an effect to + the parent roles or roles that depend (via dependencies in meta/main.yml) on a role executing V(end_role). 
+ choices: [ clear_facts, clear_host_errors, end_host, end_play, flush_handlers, noop, refresh_inventory, reset_connection, end_batch, end_role ] required: true extends_documentation_fragment: - action_common_attributes @@ -63,6 +67,8 @@ attributes: connection: details: Most options in this action do not use a connection, except V(reset_connection) which still does not connect to the remote support: partial + until: + support: none notes: - V(clear_facts) will remove the persistent facts from M(ansible.builtin.set_fact) using O(ansible.builtin.set_fact#module:cacheable=True), but not the current host variable it creates for the current run. @@ -72,9 +78,9 @@ seealso: - module: ansible.builtin.fail author: - Ansible Core Team -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Example showing flushing handlers on demand, not at end of play - ansible.builtin.template: src: new.j2 @@ -120,4 +126,4 @@ EXAMPLES = r''' when: - ansible_distribution == 'CentOS' - ansible_distribution_major_version == '6' -''' +""" diff --git a/lib/ansible/modules/mount_facts.py b/lib/ansible/modules/mount_facts.py new file mode 100644 index 00000000000..f5d2bf47f3a --- /dev/null +++ b/lib/ansible/modules/mount_facts.py @@ -0,0 +1,651 @@ +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import annotations + + +DOCUMENTATION = """ +--- +module: mount_facts +version_added: 2.18 +short_description: Retrieve mount information. +description: + - Retrieve information about mounts from preferred sources and filter the results based on the filesystem type and device. +options: + devices: + description: A list of fnmatch patterns to filter mounts by the special device or remote file system. + default: ~ + type: list + elements: str + fstypes: + description: A list of fnmatch patterns to filter mounts by the type of the file system. + default: ~ + type: list + elements: str + sources: + description: + - A list of sources used to determine the mounts. Missing file sources (or empty files) are skipped. Repeat sources, including symlinks, are skipped. + - The C(mount_points) return value contains the first definition found for a mount point. + - Additional mounts to the same mount point are available from C(aggregate_mounts) (if enabled). + - By default, mounts are retrieved from all of the standard locations, which have the predefined aliases V(all)/V(static)/V(dynamic). + - V(all) contains V(dynamic) and V(static). + - V(dynamic) contains V(/etc/mtab), V(/proc/mounts), V(/etc/mnttab), and the value of O(mount_binary) if it is not None. + This allows platforms like BSD or AIX, which don't have an equivalent to V(/proc/mounts), to collect the current mounts by default. + See the O(mount_binary) option to disable the fall back or configure a different executable. + - V(static) contains V(/etc/fstab), V(/etc/vfstab), and V(/etc/filesystems). + Note that V(/etc/filesystems) is specific to AIX. The Linux file by this name has a different format/purpose and is ignored. + - The value of O(mount_binary) can be configured as a source, which will cause it to always execute. + Depending on the other sources configured, this could be inefficient/redundant. + For example, if V(/proc/mounts) and V(mount) are listed as O(sources), Linux hosts will retrieve the same mounts twice. 
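A hedged sketch of how the V(all)/V(static)/V(dynamic) aliases described above can expand; the file lists mirror the module's STATIC_SOURCES/DYNAMIC_SOURCES constants, while the de-duplication of repeated files and symlinks done by the real module is omitted:

    STATIC_SOURCES = ["/etc/fstab", "/etc/vfstab", "/etc/filesystems"]
    DYNAMIC_SOURCES = ["/etc/mtab", "/proc/mounts", "/etc/mnttab"]

    def expand_sources(sources, mount_binary="mount"):
        expanded = []
        for source in sources or ["all"]:
            if source == "all":
                expanded += DYNAMIC_SOURCES + ([mount_binary] if mount_binary else []) + STATIC_SOURCES
            elif source == "dynamic":
                expanded += DYNAMIC_SOURCES + ([mount_binary] if mount_binary else [])
            elif source == "static":
                expanded += STATIC_SOURCES
            else:
                expanded.append(source)
        return expanded

    print(expand_sources(["static", "/usr/etc/fstab"]))
    # -> ['/etc/fstab', '/etc/vfstab', '/etc/filesystems', '/usr/etc/fstab']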
+    default: ~
+    type: list
+    elements: str
+  mount_binary:
+    description:
+      - The O(mount_binary) is used if O(sources) contains the value "mount", or if O(sources) contains a dynamic
+        source, and none were found (as can be expected on BSD or AIX hosts).
+      - Set to V(null) to stop instead when no dynamic file source is found.
+    type: raw
+    default: mount
+  timeout:
+    description:
+      - This is the maximum number of seconds to wait for each mount to complete. When this is V(null), wait indefinitely.
+      - Configure in conjunction with O(on_timeout) to skip unresponsive mounts.
+      - This timeout also applies to the O(mount_binary) command to list mounts.
+      - If the module is configured to run during the play's fact gathering stage, set a timeout using module_defaults to prevent a hang (see example).
+    type: float
+  on_timeout:
+    description:
+      - The action to take when gathering mount information exceeds O(timeout).
+    type: str
+    default: error
+    choices:
+      - error
+      - warn
+      - ignore
+  include_aggregate_mounts:
+    description:
+      - Whether or not the module should return the C(aggregate_mounts) list in C(ansible_facts).
+      - When this is V(null), a warning will be emitted if multiple mounts for the same mount point are found.
+    default: ~
+    type: bool
+extends_documentation_fragment:
+  - action_common_attributes
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+  platform:
+    platforms: posix
+author:
+  - Ansible Core Team
+  - Sloane Hertel (@s-hertel)
+"""
+
+EXAMPLES = """
+- name: Get non-local devices
+  mount_facts:
+    devices: "[!/]*"
+
+- name: Get FUSE subtype mounts
+  mount_facts:
+    fstypes:
+      - "fuse.*"
+
+- name: Get NFS mounts during gather_facts with timeout
+  hosts: all
+  gather_facts: true
+  vars:
+    ansible_facts_modules:
+      - ansible.builtin.mount_facts
+  module_defaults:
+    ansible.builtin.mount_facts:
+      timeout: 10
+      fstypes:
+        - nfs
+        - nfs4
+
+- name: Get mounts from a non-default location
+  mount_facts:
+    sources:
+      - /usr/etc/fstab
+
+- name: Get mounts from the mount binary
+  mount_facts:
+    sources:
+      - mount
+    mount_binary: /sbin/mount
+"""
+
+RETURN = """
+ansible_facts:
+  description:
+    - An ansible_facts dictionary containing a dictionary of C(mount_points) and list of C(aggregate_mounts) when enabled.
+    - Each key in C(mount_points) is a mount point, and the value contains mount information (similar to C(ansible_facts["mounts"])).
+      Each value also contains the key C(ansible_context), with details about the source and line(s) corresponding to the parsed mount point.
+    - When C(aggregate_mounts) are included, the containing dictionaries are the same format as the C(mount_point) values.
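A hedged sketch of the O(timeout)/O(on_timeout) policy described above, reusing the facts timeout helper that the module itself imports; the function and message wording here are illustrative, not the module's own:

    from ansible.module_utils.facts import timeout as _timeout
    from ansible.module_utils.facts.utils import get_mount_size

    def mount_size_with_policy(module, mount, seconds, on_timeout='error'):
        try:
            if seconds is None:
                return get_mount_size(mount)  # null timeout: wait indefinitely
            return _timeout.timeout(seconds)(get_mount_size)(mount)
        except _timeout.TimeoutError:
            if on_timeout == 'error':
                module.fail_json(msg='timed out getting size of %s' % mount)
            if on_timeout == 'warn':
                module.warn('timed out getting size of %s' % mount)
            return None  # warn/ignore: skip this mount's size information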
+  returned: on success
+  type: dict
+  sample:
+    mount_points:
+      /proc/sys/fs/binfmt_misc:
+        ansible_context:
+          source: /proc/mounts
+          source_data: "systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=33850 0 0"
+        block_available: 0
+        block_size: 4096
+        block_total: 0
+        block_used: 0
+        device: "systemd-1"
+        dump: 0
+        fstype: "autofs"
+        inode_available: 0
+        inode_total: 0
+        inode_used: 0
+        mount: "/proc/sys/fs/binfmt_misc"
+        options: "rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=33850"
+        passno: 0
+        size_available: 0
+        size_total: 0
+        uuid: null
+    aggregate_mounts:
+      - ansible_context:
+          source: /proc/mounts
+          source_data: "systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=33850 0 0"
+        block_available: 0
+        block_size: 4096
+        block_total: 0
+        block_used: 0
+        device: "systemd-1"
+        dump: 0
+        fstype: "autofs"
+        inode_available: 0
+        inode_total: 0
+        inode_used: 0
+        mount: "/proc/sys/fs/binfmt_misc"
+        options: "rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=33850"
+        passno: 0
+        size_available: 0
+        size_total: 0
+        uuid: null
+      - ansible_context:
+          source: /proc/mounts
+          source_data: "binfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,nosuid,nodev,noexec,relatime 0 0"
+        block_available: 0
+        block_size: 4096
+        block_total: 0
+        block_used: 0
+        device: binfmt_misc
+        dump: 0
+        fstype: binfmt_misc
+        inode_available: 0
+        inode_total: 0
+        inode_used: 0
+        mount: "/proc/sys/fs/binfmt_misc"
+        options: "rw,nosuid,nodev,noexec,relatime"
+        passno: 0
+        size_available: 0
+        size_total: 0
+        uuid: null
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.facts import timeout as _timeout
+from ansible.module_utils.facts.utils import get_mount_size, get_file_content
+
+from contextlib import suppress
+from dataclasses import astuple, dataclass
+from fnmatch import fnmatch
+
+import codecs
+import datetime
+import functools
+import os
+import re
+import subprocess
+import typing as t
+
+STATIC_SOURCES = ["/etc/fstab", "/etc/vfstab", "/etc/filesystems"]
+DYNAMIC_SOURCES = ["/etc/mtab", "/proc/mounts", "/etc/mnttab"]
+
+# AIX and BSD don't have a file-based dynamic source, so the module also supports running a mount binary to collect these.
+# Pattern for Linux, including OpenBSD and NetBSD
+LINUX_MOUNT_RE = re.compile(r"^(?P<device>\S+) on (?P<mount>\S+) type (?P<fstype>\S+) \((?P<options>.+)\)$")
+# Pattern for other BSD including FreeBSD, DragonFlyBSD, and MacOS
+BSD_MOUNT_RE = re.compile(r"^(?P<device>\S+) on (?P<mount>\S+) \((?P<fstype>.+)\)$")
+# Pattern for AIX, example in https://www.ibm.com/docs/en/aix/7.2?topic=m-mount-command
+AIX_MOUNT_RE = re.compile(r"^(?P<node>\S*)\s+(?P<device>\S+)\s+(?P<mount>\S+)\s+(?P<fstype>\S+)\s+(?P