From e259317c3e24a741dd9d9389ac3ea6d995a45401 Mon Sep 17 00:00:00 2001 From: Matt Clay Date: Thu, 26 May 2022 17:55:19 -0700 Subject: [PATCH] Remove incidental_inventory_docker_swarm tests. (#77924) ci_complete ci_coverage --- .azure-pipelines/azure-pipelines.yml | 13 - .../incidental_inventory_docker_swarm/aliases | 13 - .../inventory_1.docker_swarm.yml | 3 - .../inventory_2.docker_swarm.yml | 5 - .../meta/main.yml | 3 - .../playbooks/swarm_cleanup.yml | 19 - .../playbooks/swarm_setup.yml | 15 - .../playbooks/test_inventory_1.yml | 58 - .../playbooks/test_inventory_2.yml | 35 - .../runme.sh | 22 - .../targets/incidental_setup_docker/aliases | 2 - .../incidental_setup_docker/defaults/main.yml | 18 - .../incidental_setup_docker/handlers/main.yml | 14 - .../incidental_setup_docker/meta/main.yml | 2 - .../incidental_setup_docker/tasks/Debian.yml | 43 - .../incidental_setup_docker/tasks/Fedora.yml | 19 - .../tasks/RedHat-7.yml | 40 - .../tasks/RedHat-8.yml | 33 - .../incidental_setup_docker/tasks/Suse.yml | 7 - .../incidental_setup_docker/tasks/main.yml | 113 -- .../incidental_setup_docker/vars/Debian.yml | 9 - .../incidental_setup_docker/vars/Fedora.yml | 5 - .../incidental_setup_docker/vars/RedHat-7.yml | 18 - .../incidental_setup_docker/vars/RedHat-8.yml | 10 - .../incidental_setup_docker/vars/Suse.yml | 2 - .../vars/Ubuntu-14.yml | 5 - .../incidental_setup_docker/vars/default.yml | 0 .../plugins/inventory/docker_swarm.py | 351 ------ .../plugins/module_utils/docker/__init__.py | 0 .../plugins/module_utils/docker/common.py | 1022 ----------------- .../plugins/module_utils/docker/swarm.py | 280 ----- .../plugins/modules/docker_swarm.py | 676 ----------- test/utils/shippable/incidental/freebsd.sh | 1 - test/utils/shippable/incidental/linux.sh | 14 - test/utils/shippable/incidental/macos.sh | 1 - test/utils/shippable/incidental/remote.sh | 27 - test/utils/shippable/incidental/rhel.sh | 1 - 37 files changed, 2899 deletions(-) delete mode 100644 test/integration/targets/incidental_inventory_docker_swarm/aliases delete mode 100644 test/integration/targets/incidental_inventory_docker_swarm/inventory_1.docker_swarm.yml delete mode 100644 test/integration/targets/incidental_inventory_docker_swarm/inventory_2.docker_swarm.yml delete mode 100644 test/integration/targets/incidental_inventory_docker_swarm/meta/main.yml delete mode 100644 test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_cleanup.yml delete mode 100644 test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_setup.yml delete mode 100644 test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_1.yml delete mode 100644 test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_2.yml delete mode 100755 test/integration/targets/incidental_inventory_docker_swarm/runme.sh delete mode 100644 test/integration/targets/incidental_setup_docker/aliases delete mode 100644 test/integration/targets/incidental_setup_docker/defaults/main.yml delete mode 100644 test/integration/targets/incidental_setup_docker/handlers/main.yml delete mode 100644 test/integration/targets/incidental_setup_docker/meta/main.yml delete mode 100644 test/integration/targets/incidental_setup_docker/tasks/Debian.yml delete mode 100644 test/integration/targets/incidental_setup_docker/tasks/Fedora.yml delete mode 100644 test/integration/targets/incidental_setup_docker/tasks/RedHat-7.yml delete mode 100644 test/integration/targets/incidental_setup_docker/tasks/RedHat-8.yml delete mode 100644 
test/integration/targets/incidental_setup_docker/tasks/Suse.yml delete mode 100644 test/integration/targets/incidental_setup_docker/tasks/main.yml delete mode 100644 test/integration/targets/incidental_setup_docker/vars/Debian.yml delete mode 100644 test/integration/targets/incidental_setup_docker/vars/Fedora.yml delete mode 100644 test/integration/targets/incidental_setup_docker/vars/RedHat-7.yml delete mode 100644 test/integration/targets/incidental_setup_docker/vars/RedHat-8.yml delete mode 100644 test/integration/targets/incidental_setup_docker/vars/Suse.yml delete mode 100644 test/integration/targets/incidental_setup_docker/vars/Ubuntu-14.yml delete mode 100644 test/integration/targets/incidental_setup_docker/vars/default.yml delete mode 100644 test/support/integration/plugins/inventory/docker_swarm.py delete mode 100644 test/support/integration/plugins/module_utils/docker/__init__.py delete mode 100644 test/support/integration/plugins/module_utils/docker/common.py delete mode 100644 test/support/integration/plugins/module_utils/docker/swarm.py delete mode 100644 test/support/integration/plugins/modules/docker_swarm.py delete mode 120000 test/utils/shippable/incidental/freebsd.sh delete mode 100755 test/utils/shippable/incidental/linux.sh delete mode 120000 test/utils/shippable/incidental/macos.sh delete mode 100755 test/utils/shippable/incidental/remote.sh delete mode 120000 test/utils/shippable/incidental/rhel.sh diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 8c7ecb18b01..b798a0ba669 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -158,18 +158,6 @@ stages: - test: 3.9 - test: '3.10' - test: 3.11 - - stage: Incidental_Remote - displayName: Incidental Remote - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: i/{0} - targets: - - name: RHEL 8.6 py36 - test: rhel/8.6@3.6 - - name: RHEL 8.6 py38 - test: rhel/8.6@3.8 - stage: Incidental_Windows displayName: Incidental Windows dependsOn: [] @@ -211,7 +199,6 @@ stages: - Docker - Galaxy - Generic - - Incidental_Remote - Incidental_Windows - Incidental jobs: diff --git a/test/integration/targets/incidental_inventory_docker_swarm/aliases b/test/integration/targets/incidental_inventory_docker_swarm/aliases deleted file mode 100644 index 66362758d5f..00000000000 --- a/test/integration/targets/incidental_inventory_docker_swarm/aliases +++ /dev/null @@ -1,13 +0,0 @@ -shippable/posix/incidental -context/controller -skip/osx -skip/macos -skip/freebsd -skip/rhel/9.0b # there are no docker-ce packages for CentOS/RHEL 9 -destructive -skip/docker # The tests sometimes make docker daemon unstable; hence, - # we skip all docker-based CI runs to avoid disrupting - # the whole CI system. On VMs, we restart docker daemon - # after finishing the tests to minimize potential effects - # on other tests. 
-needs/root diff --git a/test/integration/targets/incidental_inventory_docker_swarm/inventory_1.docker_swarm.yml b/test/integration/targets/incidental_inventory_docker_swarm/inventory_1.docker_swarm.yml deleted file mode 100644 index e8e6d55e5e3..00000000000 --- a/test/integration/targets/incidental_inventory_docker_swarm/inventory_1.docker_swarm.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -plugin: docker_swarm -docker_host: unix://var/run/docker.sock diff --git a/test/integration/targets/incidental_inventory_docker_swarm/inventory_2.docker_swarm.yml b/test/integration/targets/incidental_inventory_docker_swarm/inventory_2.docker_swarm.yml deleted file mode 100644 index e36bd00f93a..00000000000 --- a/test/integration/targets/incidental_inventory_docker_swarm/inventory_2.docker_swarm.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -plugin: docker_swarm -docker_host: unix://var/run/docker.sock -verbose_output: no -include_host_uri: yes diff --git a/test/integration/targets/incidental_inventory_docker_swarm/meta/main.yml b/test/integration/targets/incidental_inventory_docker_swarm/meta/main.yml deleted file mode 100644 index 569a453cfa5..00000000000 --- a/test/integration/targets/incidental_inventory_docker_swarm/meta/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -dependencies: - - incidental_setup_docker diff --git a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_cleanup.yml b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_cleanup.yml deleted file mode 100644 index fc4455ec7c5..00000000000 --- a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_cleanup.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- hosts: 127.0.0.1 - connection: local - gather_facts: yes - tasks: - - name: Make sure swarm is removed - docker_swarm: - state: absent - force: yes - - - name: remove docker packages - action: "{{ ansible_facts.pkg_mgr }}" - args: - name: - - docker - - docker-ce - - docker-ce-cli - - containerd.io - state: absent diff --git a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_setup.yml b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_setup.yml deleted file mode 100644 index d9f777327c1..00000000000 --- a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_setup.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- hosts: 127.0.0.1 - connection: local - vars: - docker_skip_cleanup: yes - - tasks: - - name: Setup docker - import_role: - name: incidental_setup_docker - - - name: Create a Swarm cluster - docker_swarm: - state: present - advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}" diff --git a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_1.yml b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_1.yml deleted file mode 100644 index 600a89b1dad..00000000000 --- a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_1.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -- hosts: 127.0.0.1 - connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22 - gather_facts: no - tasks: - - name: Show all groups - debug: - var: groups - - name: Make sure docker_swarm groups are there - assert: - that: - - groups.all | length > 0 - - groups.leader | length == 1 - - groups.manager | length > 0 - - groups.worker | length >= 0 - - groups.nonleaders | length >= 0 - -- hosts: all - connection: local # otherwise Ansible will complain that it
cannot connect via ssh to 127.0.0.1:22 - vars: - # for some reason, Ansible can't find the Python interpreter when connecting to the nodes, - # which is in fact just localhost in disguise. That's why we use ansible_playbook_python. - ansible_python_interpreter: "{{ ansible_playbook_python }}" - tasks: - - name: Check for groups - assert: - that: - - "groups.manager | length > 0" - - "groups.worker | length >= 0" - - "groups.leader | length == 1" - run_once: yes - - - name: List manager group - debug: - var: groups.manager - run_once: yes - - - name: List worker group - debug: - var: groups.worker - run_once: yes - - - name: List leader group - debug: - var: groups.leader - run_once: yes - - - name: Print ansible_host per host - debug: - var: ansible_host - - - name: Make sure docker_swarm_node_attributes is available - assert: - that: - - docker_swarm_node_attributes is not undefined - - name: Print docker_swarm_node_attributes per host - debug: - var: docker_swarm_node_attributes diff --git a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_2.yml b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_2.yml deleted file mode 100644 index b2a794d35bf..00000000000 --- a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_2.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -- hosts: 127.0.0.1 - connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22 - gather_facts: no - tasks: - - name: Show all groups - debug: - var: groups - - name: Make sure docker_swarm groups are there - assert: - that: - - groups.all | length > 0 - - groups.leader | length == 1 - - groups.manager | length > 0 - - groups.worker | length >= 0 - - groups.nonleaders | length >= 0 - -- hosts: all - connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22 - vars: - # for some reason, Ansible can't find the Python interpreter when connecting to the nodes, - # which is in fact just localhost in disguise. That's why we use ansible_playbook_python. - ansible_python_interpreter: "{{ ansible_playbook_python }}" - tasks: - - name: Make sure docker_swarm_node_attributes is not available - assert: - that: - - docker_swarm_node_attributes is undefined - - name: Make sure ansible_host_uri is available - assert: - that: - - ansible_host_uri is defined - - name: Print ansible_host_uri - debug: - var: ansible_host_uri diff --git a/test/integration/targets/incidental_inventory_docker_swarm/runme.sh b/test/integration/targets/incidental_inventory_docker_swarm/runme.sh deleted file mode 100755 index b93d386ae1c..00000000000 --- a/test/integration/targets/incidental_inventory_docker_swarm/runme.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash - -[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x - -set -euo pipefail - -cleanup() { - echo "Cleanup" - ansible-playbook playbooks/swarm_cleanup.yml - echo "Done" -} - -trap cleanup INT TERM EXIT - -echo "Setup" -ANSIBLE_ROLES_PATH=.. 
ansible-playbook playbooks/swarm_setup.yml - -echo "Test docker_swarm inventory 1" -ansible-playbook -i inventory_1.docker_swarm.yml playbooks/test_inventory_1.yml - -echo "Test docker_swarm inventory 2" -ansible-playbook -i inventory_2.docker_swarm.yml playbooks/test_inventory_2.yml diff --git a/test/integration/targets/incidental_setup_docker/aliases b/test/integration/targets/incidental_setup_docker/aliases deleted file mode 100644 index d466c39c200..00000000000 --- a/test/integration/targets/incidental_setup_docker/aliases +++ /dev/null @@ -1,2 +0,0 @@ -needs/target/setup_epel -hidden diff --git a/test/integration/targets/incidental_setup_docker/defaults/main.yml b/test/integration/targets/incidental_setup_docker/defaults/main.yml deleted file mode 100644 index 48959cc3ec7..00000000000 --- a/test/integration/targets/incidental_setup_docker/defaults/main.yml +++ /dev/null @@ -1,18 +0,0 @@ -docker_cli_version: '0.0' -docker_api_version: '0.0' -docker_py_version: '0.0' -docker_skip_cleanup: no -docker_prereq_packages: [] -docker_packages: - - docker-ce - -docker_pip_extra_packages: [] -docker_pip_never_remove: [] -docker_pip_packages: - - docker - -docker_cleanup_packages: - - docker - - docker-ce - - docker-ce-cli - - containerd.io diff --git a/test/integration/targets/incidental_setup_docker/handlers/main.yml b/test/integration/targets/incidental_setup_docker/handlers/main.yml deleted file mode 100644 index 9e3f928f481..00000000000 --- a/test/integration/targets/incidental_setup_docker/handlers/main.yml +++ /dev/null @@ -1,14 +0,0 @@ -- name: remove pip packages - pip: - state: absent - name: "{{ docker_pip_packages | union(docker_pip_extra_packages) | difference(docker_pip_never_remove) }}" - listen: cleanup docker - when: not docker_skip_cleanup | bool - -- name: remove docker packages - action: "{{ ansible_facts.pkg_mgr }}" - args: - name: "{{ docker_cleanup_packages }}" - state: absent - listen: cleanup docker - when: not docker_skip_cleanup | bool diff --git a/test/integration/targets/incidental_setup_docker/meta/main.yml b/test/integration/targets/incidental_setup_docker/meta/main.yml deleted file mode 100644 index 91a63627f6a..00000000000 --- a/test/integration/targets/incidental_setup_docker/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - setup_remote_constraints diff --git a/test/integration/targets/incidental_setup_docker/tasks/Debian.yml b/test/integration/targets/incidental_setup_docker/tasks/Debian.yml deleted file mode 100644 index 0ea2cb4be2b..00000000000 --- a/test/integration/targets/incidental_setup_docker/tasks/Debian.yml +++ /dev/null @@ -1,43 +0,0 @@ -- name: Get OS version - shell: uname -r - register: os_version - -- name: Install pre-reqs - apt: - name: "{{ docker_prereq_packages }}" - state: present - update_cache: yes - notify: cleanup docker - -- name: Add gpg key - shell: curl -fsSL https://download.docker.com/linux/ubuntu/gpg >key && apt-key add key - -- name: Add Docker repo - shell: add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - -- block: - - name: Prevent service restart - copy: - content: exit 101 - dest: /usr/sbin/policy-rc.d - backup: yes - mode: 0755 - register: policy_rc_d - - - name: Install Docker CE - apt: - name: "{{ docker_packages }}" - state: present - update_cache: yes - always: - - name: Restore /usr/sbin/policy-rc.d (if needed) - command: mv {{ policy_rc_d.backup_file }} /usr/sbin/policy-rc.d - when: - - "'backup_file' in policy_rc_d" - - - name: Remove 
/usr/sbin/policy-rc.d (if needed) - file: - path: /usr/sbin/policy-rc.d - state: absent - when: - - "'backup_file' not in policy_rc_d" diff --git a/test/integration/targets/incidental_setup_docker/tasks/Fedora.yml b/test/integration/targets/incidental_setup_docker/tasks/Fedora.yml deleted file mode 100644 index ff5e665d81b..00000000000 --- a/test/integration/targets/incidental_setup_docker/tasks/Fedora.yml +++ /dev/null @@ -1,19 +0,0 @@ -- name: Add repository - yum_repository: - file: docker-ce - name: docker-ce-stable - description: Docker CE Stable - $basearch - baseurl: https://download.docker.com/linux/fedora/$releasever/$basearch/stable - enabled: yes - gpgcheck: yes - gpgkey: https://download.docker.com/linux/fedora/gpg - -- name: Update cache - command: dnf makecache - -- name: Install docker - dnf: - name: "{{ docker_packages }}" - state: present - enablerepo: docker-ce-test - notify: cleanup docker diff --git a/test/integration/targets/incidental_setup_docker/tasks/RedHat-7.yml b/test/integration/targets/incidental_setup_docker/tasks/RedHat-7.yml deleted file mode 100644 index 57b00da4095..00000000000 --- a/test/integration/targets/incidental_setup_docker/tasks/RedHat-7.yml +++ /dev/null @@ -1,40 +0,0 @@ -# The RHEL extras repository must be enabled to provide the container-selinux package. -# See: https://docs.docker.com/engine/installation/linux/docker-ee/rhel/#install-using-the-repository - -- name: Install Docker pre-reqs - yum: - name: "{{ docker_prereq_packages }}" - state: present - notify: cleanup docker - -- name: Install epel repo which is missing on rhel-7 and is needed for pigz (needed for docker-ce 18) - include_role: - name: setup_epel - -- name: Enable extras repository for RHEL on AWS - # RHEL 7.6 uses REGION-rhel-server-extras and RHEL 7.7+ use rhel-7-server-rhui-extras-rpms - command: yum-config-manager --enable REGION-rhel-server-extras rhel-7-server-rhui-extras-rpms - -# They broke their .repo file, so we set it up ourselves -- name: Set-up repository - yum_repository: - name: docker-ce - description: docker-ce - baseurl: https://download.docker.com/linux/centos/{{ ansible_facts.distribution_major_version }}/$basearch/stable - gpgcheck: true - gpgkey: https://download.docker.com/linux/centos/gpg - -- name: Update cache - command: yum -y makecache fast - -- name: Install docker - yum: - name: "{{ docker_packages }}" - state: present - notify: cleanup docker - -- name: Make sure the docker daemon is running (failure expected inside docker container) - service: - name: docker - state: started - ignore_errors: "{{ ansible_virtualization_type == 'docker' }}" diff --git a/test/integration/targets/incidental_setup_docker/tasks/RedHat-8.yml b/test/integration/targets/incidental_setup_docker/tasks/RedHat-8.yml deleted file mode 100644 index 6d2fb3e7586..00000000000 --- a/test/integration/targets/incidental_setup_docker/tasks/RedHat-8.yml +++ /dev/null @@ -1,33 +0,0 @@ -# The RHEL extras repository must be enabled to provide the container-selinux package. 
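For context on the Debian tasks above: copying a script containing `exit 101` to /usr/sbin/policy-rc.d relies on Debian's invoke-rc.d policy interface, under which exit code 101 denies maintainer scripts permission to start services, keeping the freshly installed docker daemon from starting mid-install. A minimal sketch of that protocol, assuming the documented policy-rc.d calling convention (illustration only, not part of the role):

```python
import subprocess

def service_action_allowed(service, action):
    """Ask policy-rc.d whether invoke-rc.d may run `action` on `service`."""
    try:
        rc = subprocess.call(["/usr/sbin/policy-rc.d", service, action])
    except OSError:
        return True  # no policy script installed: every action is allowed
    return rc != 101  # exit code 101 means "action forbidden"

# With the role's `exit 101` script in place:
# service_action_allowed("docker", "start") -> False
```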
-# See: https://docs.docker.com/engine/installation/linux/docker-ee/rhel/#install-using-the-repository - -- name: Install Docker pre-reqs - dnf: - name: "{{ docker_prereq_packages }}" - state: present - notify: cleanup docker - register: result - until: result is success - retries: 10 - delay: 2 - -# They broke their .repo file, so we set it up ourselves -- name: Set-up repository - yum_repository: - name: docker-ce - description: docker-ce - baseurl: https://download.docker.com/linux/centos/{{ ansible_facts.distribution_major_version }}/$basearch/stable - gpgcheck: true - gpgkey: https://download.docker.com/linux/centos/gpg - -- name: Install docker - dnf: - name: "{{ docker_packages }}" - state: present - notify: cleanup docker - -- name: Make sure the docker daemon is running (failure expected inside docker container) - service: - name: docker - state: started - ignore_errors: "{{ ansible_virtualization_type == 'docker' }}" diff --git a/test/integration/targets/incidental_setup_docker/tasks/Suse.yml b/test/integration/targets/incidental_setup_docker/tasks/Suse.yml deleted file mode 100644 index 93f4d34e371..00000000000 --- a/test/integration/targets/incidental_setup_docker/tasks/Suse.yml +++ /dev/null @@ -1,7 +0,0 @@ -- name: Install docker - zypper: - name: "{{ docker_packages }}" - force: yes - disable_gpg_check: yes - update_cache: yes - notify: cleanup docker diff --git a/test/integration/targets/incidental_setup_docker/tasks/main.yml b/test/integration/targets/incidental_setup_docker/tasks/main.yml deleted file mode 100644 index 359a6d44935..00000000000 --- a/test/integration/targets/incidental_setup_docker/tasks/main.yml +++ /dev/null @@ -1,113 +0,0 @@ -- name: Setup Docker - when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6'] - block: - - name: Include distribution specific variables - include_vars: "{{ lookup('first_found', params) }}" - vars: - params: - files: - - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml" - - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml" - - "{{ ansible_facts.distribution }}.yml" - - "{{ ansible_facts.os_family }}.yml" - - default.yml - paths: - - "{{ role_path }}/vars" - - - name: Include distribution specific tasks - include_tasks: "{{ lookup('first_found', params) }}" - vars: - params: - files: - - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml" - - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml" - - "{{ ansible_facts.distribution }}.yml" - - "{{ ansible_facts.os_family }}.yml" - paths: - - "{{ role_path }}/tasks" - - - name: Install Python requirements - pip: - state: present - name: "{{ docker_pip_packages | union(docker_pip_extra_packages) }}" - extra_args: "-c {{ remote_constraints }}" - notify: cleanup docker - - # Detect docker CLI, API and docker-py versions - - name: Check Docker CLI version - command: "docker version -f {% raw %}'{{.Client.Version}}'{% endraw %}" - register: docker_cli_version_stdout - ignore_errors: yes - - - name: Check Docker API version - command: "{{ ansible_python.executable }} -c 'import docker; print(docker.from_env().version()[\"ApiVersion\"])'" - register: docker_api_version_stdout - ignore_errors: yes - - - name: Check docker-py API version - command: "{{ ansible_python.executable }} -c 'import docker; print(docker.__version__)'" - register: docker_py_version_stdout - ignore_errors: yes - - - set_fact: - 
docker_cli_version: "{{ docker_cli_version_stdout.stdout | default('0.0') }}" - docker_api_version: "{{ docker_api_version_stdout.stdout | default('0.0') }}" - docker_py_version: "{{ docker_py_version_stdout.stdout | default('0.0') }}" - - - debug: - msg: "Docker CLI version: {{ docker_cli_version }}; Docker API version: {{ docker_api_version }}; docker-py library version: {{ docker_py_version }}" - - - block: - # Cleanup docker daemon - - name: "Remove all ansible-test-* docker containers" - shell: 'docker ps --no-trunc --format {% raw %}"{{.Names}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker rm -f' - register: docker_containers - retries: 3 - delay: 3 - until: docker_containers is success - - - name: "Remove all ansible-test-* docker volumes" - shell: 'docker volume ls --format {% raw %}"{{.Name}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker volume rm -f' - register: docker_volumes - - - name: "Remove all ansible-test-* docker networks" - shell: 'docker network ls --no-trunc --format {% raw %}"{{.Name}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker network rm' - register: docker_networks - - - name: Cleaned docker resources - debug: - var: docker_resources - vars: - docker_resources: - containers: "{{ docker_containers.stdout_lines }}" - volumes: "{{ docker_volumes.stdout_lines }}" - networks: "{{ docker_networks.stdout_lines }}" - - # List all existing docker resources - - name: List all docker containers - command: docker ps --no-trunc -a - register: docker_containers - - - name: List all docker volumes - command: docker volume ls - register: docker_volumes - - - name: List all docker networks - command: docker network ls --no-trunc - register: docker_networks - - - name: List all docker images - command: docker images --no-trunc -a - register: docker_images - - - name: Still existing docker resources - debug: - var: docker_resources - vars: - docker_resources: - containers: "{{ docker_containers.stdout_lines }}" - volumes: "{{ docker_volumes.stdout_lines }}" - networks: "{{ docker_networks.stdout_lines }}" - images: "{{ docker_images.stdout_lines }}" - - when: docker_cli_version is version('0.0', '>') diff --git a/test/integration/targets/incidental_setup_docker/vars/Debian.yml b/test/integration/targets/incidental_setup_docker/vars/Debian.yml deleted file mode 100644 index 477bd124321..00000000000 --- a/test/integration/targets/incidental_setup_docker/vars/Debian.yml +++ /dev/null @@ -1,9 +0,0 @@ -docker_packages: - - docker-ce=5:19.03.0* - - docker-ce-cli=5:19.03.0* - -docker_prereq_packages: - - apt-transport-https - - ca-certificates - - curl - - software-properties-common diff --git a/test/integration/targets/incidental_setup_docker/vars/Fedora.yml b/test/integration/targets/incidental_setup_docker/vars/Fedora.yml deleted file mode 100644 index 9dd84344c4c..00000000000 --- a/test/integration/targets/incidental_setup_docker/vars/Fedora.yml +++ /dev/null @@ -1,5 +0,0 @@ -docker_prereq_packages: [] - -docker_packages: - - docker-ce-19.03.1 - - docker-ce-cli-19.03.1 diff --git a/test/integration/targets/incidental_setup_docker/vars/RedHat-7.yml b/test/integration/targets/incidental_setup_docker/vars/RedHat-7.yml deleted file mode 100644 index 84ba0920880..00000000000 --- a/test/integration/targets/incidental_setup_docker/vars/RedHat-7.yml +++ /dev/null @@ -1,18 +0,0 @@ -docker_prereq_packages: - - yum-utils - - device-mapper-persistent-data - - lvm2 - - libseccomp - -docker_packages: - - docker-ce-19.03.1 - - docker-ce-cli-19.03.1 - 
-docker_pip_extra_packages: - - requests==2.6.0 - -# We need to pin the above so pip finds the right system-installed package -# but we never want to try to remove it, so we subtract this from the set of -# packages we remove on cleanup -docker_pip_never_remove: - - requests==2.6.0 diff --git a/test/integration/targets/incidental_setup_docker/vars/RedHat-8.yml b/test/integration/targets/incidental_setup_docker/vars/RedHat-8.yml deleted file mode 100644 index 823438983ef..00000000000 --- a/test/integration/targets/incidental_setup_docker/vars/RedHat-8.yml +++ /dev/null @@ -1,10 +0,0 @@ -docker_prereq_packages: - - yum-utils - - device-mapper-persistent-data - - lvm2 - - libseccomp - - iptables - -docker_packages: - - docker-ce-19.03.13 - - docker-ce-cli-19.03.13 diff --git a/test/integration/targets/incidental_setup_docker/vars/Suse.yml b/test/integration/targets/incidental_setup_docker/vars/Suse.yml deleted file mode 100644 index b740861f34b..00000000000 --- a/test/integration/targets/incidental_setup_docker/vars/Suse.yml +++ /dev/null @@ -1,2 +0,0 @@ -docker_packages: - - docker=19.03.1_ce diff --git a/test/integration/targets/incidental_setup_docker/vars/Ubuntu-14.yml b/test/integration/targets/incidental_setup_docker/vars/Ubuntu-14.yml deleted file mode 100644 index 36ab54b9d9a..00000000000 --- a/test/integration/targets/incidental_setup_docker/vars/Ubuntu-14.yml +++ /dev/null @@ -1,5 +0,0 @@ -docker_pip_extra_packages: - # Installing requests >=2.12.0 on Ubuntu 14.04 breaks certificate validation. We restrict to an older version - # to ensure our get_url tests work out fine. This is only an issue if pyOpenSSL is also installed. - # Not sure why RHEL7 needs this specific version - - requests==2.6.0 diff --git a/test/integration/targets/incidental_setup_docker/vars/default.yml b/test/integration/targets/incidental_setup_docker/vars/default.yml deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/test/support/integration/plugins/inventory/docker_swarm.py b/test/support/integration/plugins/inventory/docker_swarm.py deleted file mode 100644 index d0a95ca0793..00000000000 --- a/test/support/integration/plugins/inventory/docker_swarm.py +++ /dev/null @@ -1,351 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2018, Stefan Heitmueller -# Copyright (c) 2018 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - -DOCUMENTATION = ''' - name: docker_swarm - plugin_type: inventory - version_added: '2.8' - author: - - Stefan Heitmüller (@morph027) - short_description: Ansible dynamic inventory plugin for Docker swarm nodes. - requirements: - - python >= 2.7 - - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 - extends_documentation_fragment: - - constructed - description: - - Reads inventories from the Docker swarm API. - - Uses a YAML configuration file docker_swarm.[yml|yaml]. - - "The plugin returns the following groups of swarm nodes: I(all) - all hosts; I(worker) - all worker nodes; - I(manager) - all manager nodes; I(leader) - the swarm leader node; - I(nonleaders) - all nodes except the swarm leader." - options: - plugin: - description: The name of this plugin, it should always be set to C(docker_swarm) for this plugin to - recognize it as its own. - type: str - required: true - choices: docker_swarm - docker_host: - description: - - Socket of a Docker swarm manager node (C(tcp), C(unix)).
- - "Use C(unix://var/run/docker.sock) to connect via local socket." - type: str - required: true - aliases: [ docker_url ] - verbose_output: - description: Toggle to (not) include all available nodes metadata (e.g. C(Platform), C(Architecture), C(OS), - C(EngineVersion)) - type: bool - default: yes - tls: - description: Connect using TLS without verifying the authenticity of the Docker host server. - type: bool - default: no - validate_certs: - description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker - host server. - type: bool - default: no - aliases: [ tls_verify ] - client_key: - description: Path to the client's TLS key file. - type: path - aliases: [ tls_client_key, key_path ] - ca_cert: - description: Use a CA certificate when performing server verification by providing the path to a CA - certificate file. - type: path - aliases: [ tls_ca_cert, cacert_path ] - client_cert: - description: Path to the client's TLS certificate file. - type: path - aliases: [ tls_client_cert, cert_path ] - tls_hostname: - description: When verifying the authenticity of the Docker host server, provide the expected name of - the server. - type: str - ssl_version: - description: Provide a valid SSL version number. Default value determined by ssl.py module. - type: str - api_version: - description: - - The version of the Docker API running on the Docker Host. - - Defaults to the latest version of the API supported by docker-py. - type: str - aliases: [ docker_api_version ] - timeout: - description: - - The maximum amount of time in seconds to wait on a response from the API. - - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) - will be used instead. If the environment variable is not set, the default value will be used. - type: int - default: 60 - aliases: [ time_out ] - include_host_uri: - description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the - swarm leader in format of C(tcp://172.16.0.1:2376). This value may be used without additional - modification as value of option I(docker_host) in Docker Swarm modules when connecting via API. - The port always defaults to C(2376). - type: bool - default: no - include_host_uri_port: - description: Override the detected port number included in I(ansible_host_uri) - type: int -''' - -EXAMPLES = ''' -# Minimal example using local docker -plugin: docker_swarm -docker_host: unix://var/run/docker.sock - -# Minimal example using remote docker -plugin: docker_swarm -docker_host: tcp://my-docker-host:2375 - -# Example using remote docker with unverified TLS -plugin: docker_swarm -docker_host: tcp://my-docker-host:2376 -tls: yes - -# Example using remote docker with verified TLS and client certificate verification -plugin: docker_swarm -docker_host: tcp://my-docker-host:2376 -validate_certs: yes -ca_cert: /somewhere/ca.pem -client_key: /somewhere/key.pem -client_cert: /somewhere/cert.pem - -# Example using constructed features to create groups and set ansible_host -plugin: docker_swarm -docker_host: tcp://my-docker-host:2375 -strict: False -keyed_groups: - # add e.g. x86_64 hosts to an arch_x86_64 group - - prefix: arch - key: 'Description.Platform.Architecture' - # add e.g. linux hosts to an os_linux group - - prefix: os - key: 'Description.Platform.OS' - # create a group per node label - # e.g. 
a node labeled w/ "production" ends up in group "label_production" - # hint: labels containing special characters will be converted to safe names - - key: 'Spec.Labels' - prefix: label -''' - -from ansible.errors import AnsibleError -from ansible.module_utils._text import to_native -from ansible.module_utils.six.moves.urllib.parse import urlparse -from ansible.plugins.inventory import BaseInventoryPlugin, Constructable -from ansible.parsing.utils.addresses import parse_address - -try: - import docker - from docker.errors import TLSParameterError - from docker.tls import TLSConfig - HAS_DOCKER = True -except ImportError: - HAS_DOCKER = False - - -def update_tls_hostname(result): - if result['tls_hostname'] is None: - # get default machine name from the url - parsed_url = urlparse(result['docker_host']) - if ':' in parsed_url.netloc: - result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')] - else: - result['tls_hostname'] = parsed_url.netloc - - -def _get_tls_config(fail_function, **kwargs): - try: - tls_config = TLSConfig(**kwargs) - return tls_config - except TLSParameterError as exc: - fail_function("TLS config error: %s" % exc) - - -def get_connect_params(auth, fail_function): - if auth['tls'] or auth['tls_verify']: - auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://') - - if auth['tls_verify'] and auth['cert_path'] and auth['key_path']: - # TLS with certs and host verification - if auth['cacert_path']: - tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), - ca_cert=auth['cacert_path'], - verify=True, - assert_hostname=auth['tls_hostname'], - ssl_version=auth['ssl_version'], - fail_function=fail_function) - else: - tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), - verify=True, - assert_hostname=auth['tls_hostname'], - ssl_version=auth['ssl_version'], - fail_function=fail_function) - - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - - if auth['tls_verify'] and auth['cacert_path']: - # TLS with cacert only - tls_config = _get_tls_config(ca_cert=auth['cacert_path'], - assert_hostname=auth['tls_hostname'], - verify=True, - ssl_version=auth['ssl_version'], - fail_function=fail_function) - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - - if auth['tls_verify']: - # TLS with verify and no certs - tls_config = _get_tls_config(verify=True, - assert_hostname=auth['tls_hostname'], - ssl_version=auth['ssl_version'], - fail_function=fail_function) - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - - if auth['tls'] and auth['cert_path'] and auth['key_path']: - # TLS with certs and no host verification - tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), - verify=False, - ssl_version=auth['ssl_version'], - fail_function=fail_function) - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - - if auth['tls']: - # TLS with no certs and no host verification - tls_config = _get_tls_config(verify=False, - ssl_version=auth['ssl_version'], - fail_function=fail_function) - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - - # No TLS - return dict(base_url=auth['docker_host'], - version=auth['api_version'], - timeout=auth['timeout']) - - -class
InventoryModule(BaseInventoryPlugin, Constructable): - ''' Host inventory parser for ansible using Docker swarm as source. ''' - - NAME = 'docker_swarm' - - def _fail(self, msg): - raise AnsibleError(msg) - - def _populate(self): - raw_params = dict( - docker_host=self.get_option('docker_host'), - tls=self.get_option('tls'), - tls_verify=self.get_option('validate_certs'), - key_path=self.get_option('client_key'), - cacert_path=self.get_option('ca_cert'), - cert_path=self.get_option('client_cert'), - tls_hostname=self.get_option('tls_hostname'), - api_version=self.get_option('api_version'), - timeout=self.get_option('timeout'), - ssl_version=self.get_option('ssl_version'), - debug=None, - ) - update_tls_hostname(raw_params) - connect_params = get_connect_params(raw_params, fail_function=self._fail) - self.client = docker.DockerClient(**connect_params) - self.inventory.add_group('all') - self.inventory.add_group('manager') - self.inventory.add_group('worker') - self.inventory.add_group('leader') - self.inventory.add_group('nonleaders') - - if self.get_option('include_host_uri'): - if self.get_option('include_host_uri_port'): - host_uri_port = str(self.get_option('include_host_uri_port')) - elif self.get_option('tls') or self.get_option('validate_certs'): - host_uri_port = '2376' - else: - host_uri_port = '2375' - - try: - self.nodes = self.client.nodes.list() - for self.node in self.nodes: - self.node_attrs = self.client.nodes.get(self.node.id).attrs - self.inventory.add_host(self.node_attrs['ID']) - self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role']) - self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', - self.node_attrs['Status']['Addr']) - if self.get_option('include_host_uri'): - self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri', - 'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port) - if self.get_option('verbose_output'): - self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs) - if 'ManagerStatus' in self.node_attrs: - if self.node_attrs['ManagerStatus'].get('Leader'): - # This is a workaround for a bug in Docker where in some cases the Leader IP is 0.0.0.0 - # Check moby/moby#35437 for details - swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \ - self.node_attrs['Status']['Addr'] - if self.get_option('include_host_uri'): - self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri', - 'tcp://' + swarm_leader_ip + ':' + host_uri_port) - self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip) - self.inventory.add_host(self.node_attrs['ID'], group='leader') - else: - self.inventory.add_host(self.node_attrs['ID'], group='nonleaders') - else: - self.inventory.add_host(self.node_attrs['ID'], group='nonleaders') - # Use constructed if applicable - strict = self.get_option('strict') - # Composed variables - self._set_composite_vars(self.get_option('compose'), - self.node_attrs, - self.node_attrs['ID'], - strict=strict) - # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group - self._add_host_to_composed_groups(self.get_option('groups'), - self.node_attrs, - self.node_attrs['ID'], - strict=strict) - # Create groups based on variable values and add the corresponding hosts to it - self._add_host_to_keyed_groups(self.get_option('keyed_groups'), - self.node_attrs, - self.node_attrs['ID'], - strict=strict) - except Exception as e: - raise AnsibleError('Unable to
fetch hosts from Docker swarm API, this was the original exception: %s' % - to_native(e)) - - def verify_file(self, path): - """Return the possibility of a file being consumable by this plugin.""" - return ( - super(InventoryModule, self).verify_file(path) and - path.endswith((self.NAME + '.yaml', self.NAME + '.yml'))) - - def parse(self, inventory, loader, path, cache=True): - if not HAS_DOCKER: - raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: ' - 'https://github.com/docker/docker-py.') - super(InventoryModule, self).parse(inventory, loader, path, cache) - self._read_config_data(path) - self._populate() diff --git a/test/support/integration/plugins/module_utils/docker/__init__.py b/test/support/integration/plugins/module_utils/docker/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/test/support/integration/plugins/module_utils/docker/common.py b/test/support/integration/plugins/module_utils/docker/common.py deleted file mode 100644 index 08a87702cd8..00000000000 --- a/test/support/integration/plugins/module_utils/docker/common.py +++ /dev/null @@ -1,1022 +0,0 @@ -# -# Copyright 2016 Red Hat | Ansible -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
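For context: the verify_file() method above is why the test inventories are named inventory_1.docker_swarm.yml and inventory_2.docker_swarm.yml; the plugin only accepts configuration files whose names end in docker_swarm.yml or docker_swarm.yaml. The check reduces to:

```python
def accepts_inventory_file(path, plugin_name="docker_swarm"):
    # Mirrors the suffix test in InventoryModule.verify_file() above
    # (illustrative helper, not part of the plugin).
    return path.endswith((plugin_name + ".yaml", plugin_name + ".yml"))

assert accepts_inventory_file("inventory_1.docker_swarm.yml")
assert not accepts_inventory_file("hosts.yml")
```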
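The module_utils code that follows has to cope with docker-py (< 2.0.0) and docker (>= 2.0.0) sharing the same `docker` import namespace; co-installing both corrupts the installation, so the code probes submodules unique to each package (docker.models exists only in docker, docker.ssladapter only in docker-py). A condensed sketch of that detection idea (illustration only):

```python
def detect_docker_sdks():
    """Report which Docker SDK variants appear importable."""
    found = {}
    for submodule, package in (("docker.models", "docker"),
                               ("docker.ssladapter", "docker-py")):
        try:
            __import__(submodule)
            found[package] = True
        except ImportError:
            found[package] = False
    return found

# Both flags True at once indicates the broken co-installation that the
# client code below refuses to work with.
```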
- -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -import os -import platform -import re -import sys -from datetime import timedelta -from ansible.module_utils.compat.version import LooseVersion - - -from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib -from ansible.module_utils.common._collections_compat import Mapping, Sequence -from ansible.module_utils.six import string_types -from ansible.module_utils.six.moves.urllib.parse import urlparse -from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE - -HAS_DOCKER_PY = True -HAS_DOCKER_PY_2 = False -HAS_DOCKER_PY_3 = False -HAS_DOCKER_ERROR = None - -try: - from requests.exceptions import SSLError - from docker import __version__ as docker_version - from docker.errors import APIError, NotFound, TLSParameterError - from docker.tls import TLSConfig - from docker import auth - - if LooseVersion(docker_version) >= LooseVersion('3.0.0'): - HAS_DOCKER_PY_3 = True - from docker import APIClient as Client - elif LooseVersion(docker_version) >= LooseVersion('2.0.0'): - HAS_DOCKER_PY_2 = True - from docker import APIClient as Client - else: - from docker import Client - -except ImportError as exc: - HAS_DOCKER_ERROR = str(exc) - HAS_DOCKER_PY = False - - -# The next 2 imports ``docker.models`` and ``docker.ssladapter`` are used -# to ensure the user does not have both ``docker`` and ``docker-py`` modules -# installed, as they utilize the same namespace and are incompatible -try: - # docker (Docker SDK for Python >= 2.0.0) - import docker.models # noqa: F401 - HAS_DOCKER_MODELS = True -except ImportError: - HAS_DOCKER_MODELS = False - -try: - # docker-py (Docker SDK for Python < 2.0.0) - import docker.ssladapter # noqa: F401 - HAS_DOCKER_SSLADAPTER = True -except ImportError: - HAS_DOCKER_SSLADAPTER = False - - -try: - from requests.exceptions import RequestException -except ImportError: - # Either docker-py is no longer using requests, or docker-py isn't around either, - # or docker-py's dependency requests is missing. In any case, define an exception - # class RequestException so that our code doesn't break.
- class RequestException(Exception): - pass - - -DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock' -DEFAULT_TLS = False -DEFAULT_TLS_VERIFY = False -DEFAULT_TLS_HOSTNAME = 'localhost' -MIN_DOCKER_VERSION = "1.8.0" -DEFAULT_TIMEOUT_SECONDS = 60 - -DOCKER_COMMON_ARGS = dict( - docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']), - tls_hostname=dict(type='str', default=DEFAULT_TLS_HOSTNAME, fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])), - api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']), - timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])), - ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']), - client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']), - client_key=dict(type='path', aliases=['tls_client_key', 'key_path']), - ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])), - tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])), - validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']), - debug=dict(type='bool', default=False) -) - -DOCKER_MUTUALLY_EXCLUSIVE = [] - -DOCKER_REQUIRED_TOGETHER = [ - ['client_cert', 'client_key'] -] - -DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/' -EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+' -BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] - - -if not HAS_DOCKER_PY: - docker_version = None - - # No Docker SDK for Python. Create a placeholder client to allow - # instantiation of AnsibleModule and proper error handling - class Client(object): # noqa: F811 - def __init__(self, **kwargs): - pass - - class APIError(Exception): # noqa: F811 - pass - - class NotFound(Exception): # noqa: F811 - pass - - -def is_image_name_id(name): - """Check whether the given image name is in fact an image ID (hash).""" - if re.match('^sha256:[0-9a-fA-F]{64}$', name): - return True - return False - - -def is_valid_tag(tag, allow_empty=False): - """Check whether the given string is a valid docker tag name.""" - if not tag: - return allow_empty - # See here ("Extended description") for a definition of what tags can be: - # https://docs.docker.com/engine/reference/commandline/tag/ - return bool(re.match('^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$', tag)) - - -def sanitize_result(data): - """Sanitize data object for return to Ansible. - - When the data object contains types such as docker.types.containers.HostConfig, - Ansible will fail when these are returned via exit_json or fail_json. - HostConfig is derived from dict, but its constructor requires additional - arguments. This function sanitizes data structures by recursively converting - everything derived from dict to dict and everything derived from list (and tuple) - to a list.
- """ - if isinstance(data, dict): - return dict((k, sanitize_result(v)) for k, v in data.items()) - elif isinstance(data, (list, tuple)): - return [sanitize_result(v) for v in data] - else: - return data - - -class DockerBaseClass(object): - - def __init__(self): - self.debug = False - - def log(self, msg, pretty_print=False): - pass - # if self.debug: - # log_file = open('docker.log', 'a') - # if pretty_print: - # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': '))) - # log_file.write(u'\n') - # else: - # log_file.write(msg + u'\n') - - -def update_tls_hostname(result): - if result['tls_hostname'] is None: - # get default machine name from the url - parsed_url = urlparse(result['docker_host']) - if ':' in parsed_url.netloc: - result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')] - else: - result['tls_hostname'] = parsed_url - - -def _get_tls_config(fail_function, **kwargs): - try: - tls_config = TLSConfig(**kwargs) - return tls_config - except TLSParameterError as exc: - fail_function("TLS config error: %s" % exc) - - -def get_connect_params(auth, fail_function): - if auth['tls'] or auth['tls_verify']: - auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://') - - if auth['tls_verify'] and auth['cert_path'] and auth['key_path']: - # TLS with certs and host verification - if auth['cacert_path']: - tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), - ca_cert=auth['cacert_path'], - verify=True, - assert_hostname=auth['tls_hostname'], - ssl_version=auth['ssl_version'], - fail_function=fail_function) - else: - tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), - verify=True, - assert_hostname=auth['tls_hostname'], - ssl_version=auth['ssl_version'], - fail_function=fail_function) - - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - - if auth['tls_verify'] and auth['cacert_path']: - # TLS with cacert only - tls_config = _get_tls_config(ca_cert=auth['cacert_path'], - assert_hostname=auth['tls_hostname'], - verify=True, - ssl_version=auth['ssl_version'], - fail_function=fail_function) - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - - if auth['tls_verify']: - # TLS with verify and no certs - tls_config = _get_tls_config(verify=True, - assert_hostname=auth['tls_hostname'], - ssl_version=auth['ssl_version'], - fail_function=fail_function) - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - - if auth['tls'] and auth['cert_path'] and auth['key_path']: - # TLS with certs and no host verification - tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), - verify=False, - ssl_version=auth['ssl_version'], - fail_function=fail_function) - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - - if auth['tls']: - # TLS with no certs and not host verification - tls_config = _get_tls_config(verify=False, - ssl_version=auth['ssl_version'], - fail_function=fail_function) - return dict(base_url=auth['docker_host'], - tls=tls_config, - version=auth['api_version'], - timeout=auth['timeout']) - - # No TLS - return dict(base_url=auth['docker_host'], - version=auth['api_version'], - timeout=auth['timeout']) - - -DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip 
install docker`." -DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade." -DOCKERPYUPGRADE_RECOMMEND_DOCKER = ("Use `pip install --upgrade docker-py` to upgrade. " - "Hint: if you do not need Python 2.6 support, try " - "`pip uninstall docker-py` instead, followed by `pip install docker`.") - - -class AnsibleDockerClient(Client): - - def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None, - required_together=None, required_if=None, min_docker_version=MIN_DOCKER_VERSION, - min_docker_api_version=None, option_minimal_versions=None, - option_minimal_versions_ignore_params=None, fail_results=None): - - # Modules can put information in here which will always be returned - # in case client.fail() is called. - self.fail_results = fail_results or {} - - merged_arg_spec = dict() - merged_arg_spec.update(DOCKER_COMMON_ARGS) - if argument_spec: - merged_arg_spec.update(argument_spec) - self.arg_spec = merged_arg_spec - - mutually_exclusive_params = [] - mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE - if mutually_exclusive: - mutually_exclusive_params += mutually_exclusive - - required_together_params = [] - required_together_params += DOCKER_REQUIRED_TOGETHER - if required_together: - required_together_params += required_together - - self.module = AnsibleModule( - argument_spec=merged_arg_spec, - supports_check_mode=supports_check_mode, - mutually_exclusive=mutually_exclusive_params, - required_together=required_together_params, - required_if=required_if) - - NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0')) - - self.docker_py_version = LooseVersion(docker_version) - - if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER: - self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker " - "SDK for Python) installed together as they use the same namespace and cause a corrupt " - "installation. Please uninstall both packages, and re-install only the docker-py or docker " - "python module (for %s's Python %s). It is recommended to install the docker module if no " - "support for Python 2.6 is required. Please note that simply uninstalling one of the modules " - "can leave the other module in a broken state." % (platform.node(), sys.executable)) - - if not HAS_DOCKER_PY: - if NEEDS_DOCKER_PY2: - msg = missing_required_lib("Docker SDK for Python: docker") - msg = msg + ", for example via `pip install docker`. The error was: %s" - else: - msg = missing_required_lib("Docker SDK for Python: docker (Python >= 2.7) or docker-py (Python 2.6)") - msg = msg + ", for example via `pip install docker` or `pip install docker-py` (Python 2.6). The error was: %s" - self.fail(msg % HAS_DOCKER_ERROR) - - if self.docker_py_version < LooseVersion(min_docker_version): - msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s." - if not NEEDS_DOCKER_PY2: - # The minimal required version is < 2.0 (and the current version as well). - # Advertise docker (instead of docker-py) for non-Python-2.6 users. 
- msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER - elif docker_version < LooseVersion('2.0'): - msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER - else: - msg += DOCKERPYUPGRADE_UPGRADE_DOCKER - self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version)) - - self.debug = self.module.params.get('debug') - self.check_mode = self.module.check_mode - self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail) - - try: - super(AnsibleDockerClient, self).__init__(**self._connect_params) - self.docker_api_version_str = self.version()['ApiVersion'] - except APIError as exc: - self.fail("Docker API error: %s" % exc) - except Exception as exc: - self.fail("Error connecting: %s" % exc) - - self.docker_api_version = LooseVersion(self.docker_api_version_str) - if min_docker_api_version is not None: - if self.docker_api_version < LooseVersion(min_docker_api_version): - self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version)) - - if option_minimal_versions is not None: - self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params) - - def log(self, msg, pretty_print=False): - pass - # if self.debug: - # log_file = open('docker.log', 'a') - # if pretty_print: - # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': '))) - # log_file.write(u'\n') - # else: - # log_file.write(msg + u'\n') - - def fail(self, msg, **kwargs): - self.fail_results.update(kwargs) - self.module.fail_json(msg=msg, **sanitize_result(self.fail_results)) - - @staticmethod - def _get_value(param_name, param_value, env_variable, default_value): - if param_value is not None: - # take module parameter value - if param_value in BOOLEANS_TRUE: - return True - if param_value in BOOLEANS_FALSE: - return False - return param_value - - if env_variable is not None: - env_value = os.environ.get(env_variable) - if env_value is not None: - # take the env variable value - if param_name == 'cert_path': - return os.path.join(env_value, 'cert.pem') - if param_name == 'cacert_path': - return os.path.join(env_value, 'ca.pem') - if param_name == 'key_path': - return os.path.join(env_value, 'key.pem') - if env_value in BOOLEANS_TRUE: - return True - if env_value in BOOLEANS_FALSE: - return False - return env_value - - # take the default - return default_value - - @property - def auth_params(self): - # Get authentication credentials. - # Precedence: module parameters-> environment variables-> defaults. - - self.log('Getting credentials') - - params = dict() - for key in DOCKER_COMMON_ARGS: - params[key] = self.module.params.get(key) - - if self.module.params.get('use_tls'): - # support use_tls option in docker_image.py. This will be deprecated. 
- use_tls = self.module.params.get('use_tls')
- if use_tls == 'encrypt':
- params['tls'] = True
- if use_tls == 'verify':
- params['validate_certs'] = True
-
- result = dict(
- docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
- DEFAULT_DOCKER_HOST),
- tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
- 'DOCKER_TLS_HOSTNAME', DEFAULT_TLS_HOSTNAME),
- api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
- 'auto'),
- cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None),
- cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None),
- key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None),
- ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None),
- tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS),
- tls_verify=self._get_value('tls_verify', params['validate_certs'], 'DOCKER_TLS_VERIFY',
- DEFAULT_TLS_VERIFY),
- timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
- DEFAULT_TIMEOUT_SECONDS),
- )
-
- update_tls_hostname(result)
-
- return result
-
- def _handle_ssl_error(self, error):
- match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
- if match:
- self.fail("You asked for verification that the Docker daemon's certificate hostname matches %s. "
- "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
- "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
- "setting the `tls` parameter to true."
- % (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
- self.fail("SSL Exception: %s" % (error))
-
- def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
- self.option_minimal_versions = dict()
- for option in self.module.argument_spec:
- if ignore_params is not None:
- if option in ignore_params:
- continue
- self.option_minimal_versions[option] = dict()
- self.option_minimal_versions.update(option_minimal_versions)
-
- for option, data in self.option_minimal_versions.items():
- # Test whether option is supported, and store result
- support_docker_py = True
- support_docker_api = True
- if 'docker_py_version' in data:
- support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
- if 'docker_api_version' in data:
- support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
- data['supported'] = support_docker_py and support_docker_api
- # Fail if option is not supported but used
- if not data['supported']:
- # Test whether option is specified
- if 'detect_usage' in data:
- used = data['detect_usage'](self)
- else:
- used = self.module.params.get(option) is not None
- if used and 'default' in self.module.argument_spec[option]:
- used = self.module.params[option] != self.module.argument_spec[option]['default']
- if used:
- # If the option is used, compose error message.
- if 'usage_msg' in data:
- usg = data['usage_msg']
- else:
- usg = 'set %s option' % (option, )
- if not support_docker_api:
- msg = 'Docker API version is %s. Minimum version required is %s to %s.'
- msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
- elif not support_docker_py:
- msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. 
" - if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'): - msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER - elif self.docker_py_version < LooseVersion('2.0.0'): - msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER - else: - msg += DOCKERPYUPGRADE_UPGRADE_DOCKER - msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg) - else: - # should not happen - msg = 'Cannot %s with your configuration.' % (usg, ) - self.fail(msg) - - def get_container_by_id(self, container_id): - try: - self.log("Inspecting container Id %s" % container_id) - result = self.inspect_container(container=container_id) - self.log("Completed container inspection") - return result - except NotFound as dummy: - return None - except Exception as exc: - self.fail("Error inspecting container: %s" % exc) - - def get_container(self, name=None): - ''' - Lookup a container and return the inspection results. - ''' - if name is None: - return None - - search_name = name - if not name.startswith('/'): - search_name = '/' + name - - result = None - try: - for container in self.containers(all=True): - self.log("testing container: %s" % (container['Names'])) - if isinstance(container['Names'], list) and search_name in container['Names']: - result = container - break - if container['Id'].startswith(name): - result = container - break - if container['Id'] == name: - result = container - break - except SSLError as exc: - self._handle_ssl_error(exc) - except Exception as exc: - self.fail("Error retrieving container list: %s" % exc) - - if result is None: - return None - - return self.get_container_by_id(result['Id']) - - def get_network(self, name=None, network_id=None): - ''' - Lookup a network and return the inspection results. - ''' - if name is None and network_id is None: - return None - - result = None - - if network_id is None: - try: - for network in self.networks(): - self.log("testing network: %s" % (network['Name'])) - if name == network['Name']: - result = network - break - if network['Id'].startswith(name): - result = network - break - except SSLError as exc: - self._handle_ssl_error(exc) - except Exception as exc: - self.fail("Error retrieving network list: %s" % exc) - - if result is not None: - network_id = result['Id'] - - if network_id is not None: - try: - self.log("Inspecting network Id %s" % network_id) - result = self.inspect_network(network_id) - self.log("Completed network inspection") - except NotFound as dummy: - return None - except Exception as exc: - self.fail("Error inspecting network: %s" % exc) - - return result - - def find_image(self, name, tag): - ''' - Lookup an image (by name and tag) and return the inspection results. 
- ''' - if not name: - return None - - self.log("Find image %s:%s" % (name, tag)) - images = self._image_lookup(name, tag) - if not images: - # In API <= 1.20 seeing 'docker.io/' as the name of images pulled from docker hub - registry, repo_name = auth.resolve_repository_name(name) - if registry == 'docker.io': - # If docker.io is explicitly there in name, the image - # isn't found in some cases (#41509) - self.log("Check for docker.io image: %s" % repo_name) - images = self._image_lookup(repo_name, tag) - if not images and repo_name.startswith('library/'): - # Sometimes library/xxx images are not found - lookup = repo_name[len('library/'):] - self.log("Check for docker.io image: %s" % lookup) - images = self._image_lookup(lookup, tag) - if not images: - # Last case: if docker.io wasn't there, it can be that - # the image wasn't found either (#15586) - lookup = "%s/%s" % (registry, repo_name) - self.log("Check for docker.io image: %s" % lookup) - images = self._image_lookup(lookup, tag) - - if len(images) > 1: - self.fail("Registry returned more than one result for %s:%s" % (name, tag)) - - if len(images) == 1: - try: - inspection = self.inspect_image(images[0]['Id']) - except Exception as exc: - self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc))) - return inspection - - self.log("Image %s:%s not found." % (name, tag)) - return None - - def find_image_by_id(self, image_id): - ''' - Lookup an image (by ID) and return the inspection results. - ''' - if not image_id: - return None - - self.log("Find image %s (by ID)" % image_id) - try: - inspection = self.inspect_image(image_id) - except Exception as exc: - self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc))) - return inspection - - def _image_lookup(self, name, tag): - ''' - Including a tag in the name parameter sent to the Docker SDK for Python images method - does not work consistently. Instead, get the result set for name and manually check - if the tag exists. - ''' - try: - response = self.images(name=name) - except Exception as exc: - self.fail("Error searching for image %s - %s" % (name, str(exc))) - images = response - if tag: - lookup = "%s:%s" % (name, tag) - lookup_digest = "%s@%s" % (name, tag) - images = [] - for image in response: - tags = image.get('RepoTags') - digests = image.get('RepoDigests') - if (tags and lookup in tags) or (digests and lookup_digest in digests): - images = [image] - break - return images - - def pull_image(self, name, tag="latest"): - ''' - Pull an image - ''' - self.log("Pulling image %s:%s" % (name, tag)) - old_tag = self.find_image(name, tag) - try: - for line in self.pull(name, tag=tag, stream=True, decode=True): - self.log(line, pretty_print=True) - if line.get('error'): - if line.get('errorDetail'): - error_detail = line.get('errorDetail') - self.fail("Error pulling %s - code: %s message: %s" % (name, - error_detail.get('code'), - error_detail.get('message'))) - else: - self.fail("Error pulling %s - %s" % (name, line.get('error'))) - except Exception as exc: - self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc))) - - new_tag = self.find_image(name, tag) - - return new_tag, old_tag == new_tag - - def report_warnings(self, result, warnings_key=None): - ''' - Checks result of client operation for warnings, and if present, outputs them. - - warnings_key should be a list of keys used to crawl the result dictionary. - For example, if warnings_key == ['a', 'b'], the function will consider - result['a']['b'] if these keys exist. 
If the result is a non-empty string, it - will be reported as a warning. If the result is a list, every entry will be - reported as a warning. - - In most cases (if warnings are returned at all), warnings_key should be - ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings']. - ''' - if warnings_key is None: - warnings_key = ['Warnings'] - for key in warnings_key: - if not isinstance(result, Mapping): - return - result = result.get(key) - if isinstance(result, Sequence): - for warning in result: - self.module.warn('Docker warning: {0}'.format(warning)) - elif isinstance(result, string_types) and result: - self.module.warn('Docker warning: {0}'.format(result)) - - def inspect_distribution(self, image, **kwargs): - ''' - Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0 - since prior versions did not support accessing private repositories. - ''' - if self.docker_py_version < LooseVersion('4.0.0'): - registry = auth.resolve_repository_name(image)[0] - header = auth.get_config_header(self, registry) - if header: - return self._result(self._get( - self._url('/distribution/{0}/json', image), - headers={'X-Registry-Auth': header} - ), json=True) - return super(AnsibleDockerClient, self).inspect_distribution(image, **kwargs) - - -def compare_dict_allow_more_present(av, bv): - ''' - Compare two dictionaries for whether every entry of the first is in the second. - ''' - for key, value in av.items(): - if key not in bv: - return False - if bv[key] != value: - return False - return True - - -def compare_generic(a, b, method, datatype): - ''' - Compare values a and b as described by method and datatype. - - Returns ``True`` if the values compare equal, and ``False`` if not. - - ``a`` is usually the module's parameter, while ``b`` is a property - of the current object. ``a`` must not be ``None`` (except for - ``datatype == 'value'``). - - Valid values for ``method`` are: - - ``ignore`` (always compare as equal); - - ``strict`` (only compare if really equal) - - ``allow_more_present`` (allow b to have elements which a does not have). - - Valid values for ``datatype`` are: - - ``value``: for simple values (strings, numbers, ...); - - ``list``: for ``list``s or ``tuple``s where order matters; - - ``set``: for ``list``s, ``tuple``s or ``set``s where order does not - matter; - - ``set(dict)``: for ``list``s, ``tuple``s or ``sets`` where order does - not matter and which contain ``dict``s; ``allow_more_present`` is used - for the ``dict``s, and these are assumed to be dictionaries of values; - - ``dict``: for dictionaries of values. 
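-
- Illustrative doctest-style checks (editorial sketch, not in the original
- file, assuming the implementation below):
-
- >>> compare_generic('abc', 'abc', 'strict', 'value')
- True
- >>> compare_generic([1, 2], [1, 3, 2], 'allow_more_present', 'list')
- True
- >>> compare_generic([2, 1], [1, 3, 2], 'allow_more_present', 'list')
- False
- >>> compare_generic([2, 1], [1, 3, 2], 'allow_more_present', 'set')
- True
- >>> compare_generic([{'x': 1}], [{'x': 1, 'y': 2}], 'allow_more_present', 'set(dict)')
- True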
- ''' - if method == 'ignore': - return True - # If a or b is None: - if a is None or b is None: - # If both are None: equality - if a == b: - return True - # Otherwise, not equal for values, and equal - # if the other is empty for set/list/dict - if datatype == 'value': - return False - # For allow_more_present, allow a to be None - if method == 'allow_more_present' and a is None: - return True - # Otherwise, the iterable object which is not None must have length 0 - return len(b if a is None else a) == 0 - # Do proper comparison (both objects not None) - if datatype == 'value': - return a == b - elif datatype == 'list': - if method == 'strict': - return a == b - else: - i = 0 - for v in a: - while i < len(b) and b[i] != v: - i += 1 - if i == len(b): - return False - i += 1 - return True - elif datatype == 'dict': - if method == 'strict': - return a == b - else: - return compare_dict_allow_more_present(a, b) - elif datatype == 'set': - set_a = set(a) - set_b = set(b) - if method == 'strict': - return set_a == set_b - else: - return set_b >= set_a - elif datatype == 'set(dict)': - for av in a: - found = False - for bv in b: - if compare_dict_allow_more_present(av, bv): - found = True - break - if not found: - return False - if method == 'strict': - # If we would know that both a and b do not contain duplicates, - # we could simply compare len(a) to len(b) to finish this test. - # We can assume that b has no duplicates (as it is returned by - # docker), but we don't know for a. - for bv in b: - found = False - for av in a: - if compare_dict_allow_more_present(av, bv): - found = True - break - if not found: - return False - return True - - -class DifferenceTracker(object): - def __init__(self): - self._diff = [] - - def add(self, name, parameter=None, active=None): - self._diff.append(dict( - name=name, - parameter=parameter, - active=active, - )) - - def merge(self, other_tracker): - self._diff.extend(other_tracker._diff) - - @property - def empty(self): - return len(self._diff) == 0 - - def get_before_after(self): - ''' - Return texts ``before`` and ``after``. - ''' - before = dict() - after = dict() - for item in self._diff: - before[item['name']] = item['active'] - after[item['name']] = item['parameter'] - return before, after - - def has_difference_for(self, name): - ''' - Returns a boolean if a difference exists for name - ''' - return any(diff for diff in self._diff if diff['name'] == name) - - def get_legacy_docker_container_diffs(self): - ''' - Return differences in the docker_container legacy format. - ''' - result = [] - for entry in self._diff: - item = dict() - item[entry['name']] = dict( - parameter=entry['parameter'], - container=entry['active'], - ) - result.append(item) - return result - - def get_legacy_docker_diffs(self): - ''' - Return differences in the docker_container legacy format. - ''' - result = [entry['name'] for entry in self._diff] - return result - - -def clean_dict_booleans_for_docker_api(data): - ''' - Go doesn't like Python booleans 'True' or 'False', while Ansible is just - fine with them in YAML. As such, they need to be converted in cases where - we pass dictionaries to the Docker API (e.g. docker_network's - driver_options and docker_prune's filters). - ''' - result = dict() - if data is not None: - for k, v in data.items(): - if v is True: - v = 'true' - elif v is False: - v = 'false' - else: - v = str(v) - result[str(k)] = v - return result - - -def convert_duration_to_nanosecond(time_str): - """ - Return time duration in nanosecond. 
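-
- Illustrative doctest-style checks (editorial sketch, not in the original
- file):
-
- >>> convert_duration_to_nanosecond('1m30s')
- 90000000000
- >>> convert_duration_to_nanosecond('500ms')
- 500000000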
- """ - if not isinstance(time_str, str): - raise ValueError('Missing unit in duration - %s' % time_str) - - regex = re.compile( - r'^(((?P\d+)h)?' - r'((?P\d+)m(?!s))?' - r'((?P\d+)s)?' - r'((?P\d+)ms)?' - r'((?P\d+)us)?)$' - ) - parts = regex.match(time_str) - - if not parts: - raise ValueError('Invalid time duration - %s' % time_str) - - parts = parts.groupdict() - time_params = {} - for (name, value) in parts.items(): - if value: - time_params[name] = int(value) - - delta = timedelta(**time_params) - time_in_nanoseconds = ( - delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6 - ) * 10 ** 3 - - return time_in_nanoseconds - - -def parse_healthcheck(healthcheck): - """ - Return dictionary of healthcheck parameters and boolean if - healthcheck defined in image was requested to be disabled. - """ - if (not healthcheck) or (not healthcheck.get('test')): - return None, None - - result = dict() - - # All supported healthcheck parameters - options = dict( - test='test', - interval='interval', - timeout='timeout', - start_period='start_period', - retries='retries' - ) - - duration_options = ['interval', 'timeout', 'start_period'] - - for (key, value) in options.items(): - if value in healthcheck: - if healthcheck.get(value) is None: - # due to recursive argument_spec, all keys are always present - # (but have default value None if not specified) - continue - if value in duration_options: - time = convert_duration_to_nanosecond(healthcheck.get(value)) - if time: - result[key] = time - elif healthcheck.get(value): - result[key] = healthcheck.get(value) - if key == 'test': - if isinstance(result[key], (tuple, list)): - result[key] = [str(e) for e in result[key]] - else: - result[key] = ['CMD-SHELL', str(result[key])] - elif key == 'retries': - try: - result[key] = int(result[key]) - except ValueError: - raise ValueError( - 'Cannot parse number of retries for healthcheck. ' - 'Expected an integer, got "{0}".'.format(result[key]) - ) - - if result['test'] == ['NONE']: - # If the user explicitly disables the healthcheck, return None - # as the healthcheck object, and set disable_healthcheck to True - return None, True - - return result, False - - -def omit_none_from_dict(d): - """ - Return a copy of the dictionary with all keys with value None omitted. - """ - return dict((k, v) for (k, v) in d.items() if v is not None) diff --git a/test/support/integration/plugins/module_utils/docker/swarm.py b/test/support/integration/plugins/module_utils/docker/swarm.py deleted file mode 100644 index 55d94db06bb..00000000000 --- a/test/support/integration/plugins/module_utils/docker/swarm.py +++ /dev/null @@ -1,280 +0,0 @@ -# (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) -# (c) Thierry Bouvet (@tbouvet) -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -import json -from time import sleep - -try: - from docker.errors import APIError, NotFound -except ImportError: - # missing Docker SDK for Python handled in ansible.module_utils.docker.common - pass - -from ansible.module_utils._text import to_native -from ansible.module_utils.docker.common import ( - AnsibleDockerClient, - LooseVersion, -) - - -class AnsibleDockerSwarmClient(AnsibleDockerClient): - - def __init__(self, **kwargs): - super(AnsibleDockerSwarmClient, self).__init__(**kwargs) - - def get_swarm_node_id(self): - """ - Get the 'NodeID' of the Swarm node or 'None' if host is not in Swarm. 
It returns the NodeID
- of the Docker host the module is executed on
- :return:
- NodeID of host or 'None' if not part of Swarm
- """
-
- try:
- info = self.info()
- except APIError as exc:
- self.fail("Failed to get node information for %s" % to_native(exc))
-
- if info:
- json_str = json.dumps(info, ensure_ascii=False)
- swarm_info = json.loads(json_str)
- if swarm_info['Swarm']['NodeID']:
- return swarm_info['Swarm']['NodeID']
- return None
-
- def check_if_swarm_node(self, node_id=None):
- """
- Check if the host is part of a Docker Swarm. If 'node_id' is not provided it reads the Docker host
- system information and checks whether the relevant key exists in the output. If 'node_id' is provided then it tries to
- read node information assuming it is run on a Swarm manager. The get_node_inspect() method handles the exception if
- it is not executed on a Swarm manager.
-
- :param node_id: Node identifier
- :return:
- bool: True if node is part of Swarm, False otherwise
- """
-
- if node_id is None:
- try:
- info = self.info()
- except APIError:
- self.fail("Failed to get host information.")
-
- if info:
- json_str = json.dumps(info, ensure_ascii=False)
- swarm_info = json.loads(json_str)
- if swarm_info['Swarm']['NodeID']:
- return True
- if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
- return True
- return False
- else:
- try:
- node_info = self.get_node_inspect(node_id=node_id)
- except APIError:
- return
-
- if node_info['ID'] is not None:
- return True
- return False
-
- def check_if_swarm_manager(self):
- """
- Checks if the node role is set as Manager in Swarm. The node is the docker host on which the module action
- is performed. The inspect_swarm() call will fail if the node is not a manager.
-
- :return: True if node is Swarm Manager, False otherwise
- """
-
- try:
- self.inspect_swarm()
- return True
- except APIError:
- return False
-
- def fail_task_if_not_swarm_manager(self):
- """
- If the host is not a swarm manager then the Ansible task on this host should end in the 'failed' state.
- """
- if not self.check_if_swarm_manager():
- self.fail("Error running docker swarm module: must run on swarm manager node")
-
- def check_if_swarm_worker(self):
- """
- Checks if the node role is set as Worker in Swarm. The node is the docker host on which the module action
- is performed. Will fail if run on a host that is not part of a Swarm, via check_if_swarm_node().
-
- :return: True if node is Swarm Worker, False otherwise
- """
-
- if self.check_if_swarm_node() and not self.check_if_swarm_manager():
- return True
- return False
-
- def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
- """
- Checks if the node status on the Swarm manager is 'down'. If node_id is provided it queries the manager about
- the node specified in the parameter, otherwise it queries the manager itself. 
If run on Swarm Worker node or - host that is not part of Swarm it will fail the playbook - - :param repeat_check: number of check attempts with 5 seconds delay between them, by default check only once - :param node_id: node ID or name, if None then method will try to get node_id of host module run on - :return: - True if node is part of swarm but its state is down, False otherwise - """ - - if repeat_check < 1: - repeat_check = 1 - - if node_id is None: - node_id = self.get_swarm_node_id() - - for retry in range(0, repeat_check): - if retry > 0: - sleep(5) - node_info = self.get_node_inspect(node_id=node_id) - if node_info['Status']['State'] == 'down': - return True - return False - - def get_node_inspect(self, node_id=None, skip_missing=False): - """ - Returns Swarm node info as in 'docker node inspect' command about single node - - :param skip_missing: if True then function will return None instead of failing the task - :param node_id: node ID or name, if None then method will try to get node_id of host module run on - :return: - Single node information structure - """ - - if node_id is None: - node_id = self.get_swarm_node_id() - - if node_id is None: - self.fail("Failed to get node information.") - - try: - node_info = self.inspect_node(node_id=node_id) - except APIError as exc: - if exc.status_code == 503: - self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager") - if exc.status_code == 404: - if skip_missing: - return None - self.fail("Error while reading from Swarm manager: %s" % to_native(exc)) - except Exception as exc: - self.fail("Error inspecting swarm node: %s" % exc) - - json_str = json.dumps(node_info, ensure_ascii=False) - node_info = json.loads(json_str) - - if 'ManagerStatus' in node_info: - if node_info['ManagerStatus'].get('Leader'): - # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0 - # Check moby/moby#35437 for details - count_colons = node_info['ManagerStatus']['Addr'].count(":") - if count_colons == 1: - swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr'] - else: - swarm_leader_ip = node_info['Status']['Addr'] - node_info['Status']['Addr'] = swarm_leader_ip - return node_info - - def get_all_nodes_inspect(self): - """ - Returns Swarm node info as in 'docker node inspect' command about all registered nodes - - :return: - Structure with information about all nodes - """ - try: - node_info = self.nodes() - except APIError as exc: - if exc.status_code == 503: - self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager") - self.fail("Error while reading from Swarm manager: %s" % to_native(exc)) - except Exception as exc: - self.fail("Error inspecting swarm node: %s" % exc) - - json_str = json.dumps(node_info, ensure_ascii=False) - node_info = json.loads(json_str) - return node_info - - def get_all_nodes_list(self, output='short'): - """ - Returns list of nodes registered in Swarm - - :param output: Defines format of returned data - :return: - If 'output' is 'short' then return data is list of nodes hostnames registered in Swarm, - if 'output' is 'long' then returns data is list of dict containing the attributes as in - output of command 'docker node ls' - """ - nodes_list = [] - - nodes_inspect = self.get_all_nodes_inspect() - if nodes_inspect is None: - return None - - if output == 'short': - for node in nodes_inspect: - nodes_list.append(node['Description']['Hostname']) - elif output == 'long': - for node in nodes_inspect: - node_property = 
{} - - node_property.update({'ID': node['ID']}) - node_property.update({'Hostname': node['Description']['Hostname']}) - node_property.update({'Status': node['Status']['State']}) - node_property.update({'Availability': node['Spec']['Availability']}) - if 'ManagerStatus' in node: - if node['ManagerStatus']['Leader'] is True: - node_property.update({'Leader': True}) - node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']}) - node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']}) - - nodes_list.append(node_property) - else: - return None - - return nodes_list - - def get_node_name_by_id(self, nodeid): - return self.get_node_inspect(nodeid)['Description']['Hostname'] - - def get_unlock_key(self): - if self.docker_py_version < LooseVersion('2.7.0'): - return None - return super(AnsibleDockerSwarmClient, self).get_unlock_key() - - def get_service_inspect(self, service_id, skip_missing=False): - """ - Returns Swarm service info as in 'docker service inspect' command about single service - - :param service_id: service ID or name - :param skip_missing: if True then function will return None instead of failing the task - :return: - Single service information structure - """ - try: - service_info = self.inspect_service(service_id) - except NotFound as exc: - if skip_missing is False: - self.fail("Error while reading from Swarm manager: %s" % to_native(exc)) - else: - return None - except APIError as exc: - if exc.status_code == 503: - self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager") - self.fail("Error inspecting swarm service: %s" % exc) - except Exception as exc: - self.fail("Error inspecting swarm service: %s" % exc) - - json_str = json.dumps(service_info, ensure_ascii=False) - service_info = json.loads(json_str) - return service_info diff --git a/test/support/integration/plugins/modules/docker_swarm.py b/test/support/integration/plugins/modules/docker_swarm.py deleted file mode 100644 index c025b8dc20c..00000000000 --- a/test/support/integration/plugins/modules/docker_swarm.py +++ /dev/null @@ -1,676 +0,0 @@ -#!/usr/bin/python - -# Copyright 2016 Red Hat | Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} - -DOCUMENTATION = ''' ---- -module: docker_swarm -short_description: Manage Swarm cluster -version_added: "2.7" -description: - - Create a new Swarm cluster. - - Add/Remove nodes or managers to an existing cluster. -options: - advertise_addr: - description: - - Externally reachable address advertised to other nodes. - - This can either be an address/port combination - in the form C(192.168.1.1:4567), or an interface followed by a - port number, like C(eth0:4567). - - If the port number is omitted, - the port number from the listen address is used. - - If I(advertise_addr) is not specified, it will be automatically - detected when possible. - - Only used when swarm is initialised or joined. Because of this it's not - considered for idempotency checking. - type: str - default_addr_pool: - description: - - Default address pool in CIDR format. - - Only used when swarm is initialised. Because of this it's not considered - for idempotency checking. - - Requires API version >= 1.39. 
- type: list
- elements: str
- version_added: "2.8"
- subnet_size:
- description:
- - Default address pool subnet mask length.
- - Only used when swarm is initialised. Because of this it's not considered
- for idempotency checking.
- - Requires API version >= 1.39.
- type: int
- version_added: "2.8"
- listen_addr:
- description:
- - Listen address used for inter-manager communication.
- - This can either be an address/port combination in the form
- C(192.168.1.1:4567), or an interface followed by a port number,
- like C(eth0:4567).
- - If the port number is omitted, the default swarm listening port
- is used.
- - Only used when swarm is initialised or joined. Because of this it's not
- considered for idempotency checking.
- type: str
- default: 0.0.0.0:2377
- force:
- description:
- - Use with state C(present) to force creating a new Swarm, even if already part of one.
- - Use with state C(absent) to leave the swarm even if this node is a manager.
- type: bool
- default: no
- state:
- description:
- - Set to C(present), to create/update a new cluster.
- - Set to C(join), to join an existing cluster.
- - Set to C(absent), to leave an existing cluster.
- - Set to C(remove), to remove an absent node from the cluster.
- Note that removing requires Docker SDK for Python >= 2.4.0.
- - Set to C(inspect) to display swarm information.
- type: str
- default: present
- choices:
- - present
- - join
- - absent
- - remove
- - inspect
- node_id:
- description:
- - Swarm ID of the node to remove.
- - Used with I(state=remove).
- type: str
- join_token:
- description:
- - Swarm token used to join a swarm cluster.
- - Used with I(state=join).
- type: str
- remote_addrs:
- description:
- - Remote address of one or more manager nodes of an existing Swarm to connect to.
- - Used with I(state=join).
- type: list
- elements: str
- task_history_retention_limit:
- description:
- - Maximum number of task history entries stored.
- - Docker default value is C(5).
- type: int
- snapshot_interval:
- description:
- - Number of log entries between snapshots.
- - Docker default value is C(10000).
- type: int
- keep_old_snapshots:
- description:
- - Number of snapshots to keep beyond the current snapshot.
- - Docker default value is C(0).
- type: int
- log_entries_for_slow_followers:
- description:
- - Number of log entries to keep around to sync up slow followers after a snapshot is created.
- type: int
- heartbeat_tick:
- description:
- - Amount of ticks (in seconds) between each heartbeat.
- - Docker default value is C(1s).
- type: int
- election_tick:
- description:
- - Amount of ticks (in seconds) needed without a leader to trigger a new election.
- - Docker default value is C(10s).
- type: int
- dispatcher_heartbeat_period:
- description:
- - The delay for an agent to send a heartbeat to the dispatcher.
- - Docker default value is C(5s).
- type: int
- node_cert_expiry:
- description:
- - Automatic expiry for node certificates.
- - Docker default value is C(3months).
- type: int
- name:
- description:
- - The name of the swarm.
- type: str
- labels:
- description:
- - User-defined key/value metadata.
- - Label operations in this module apply to the docker swarm cluster.
- Use the M(docker_node) module to add/modify/remove swarm node labels.
- - Requires API version >= 1.32.
- type: dict
- signing_ca_cert:
- description:
- - The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.
- - This must not be a path to a certificate, but the contents of the certificate. 
- - Requires API version >= 1.30.
- type: str
- signing_ca_key:
- description:
- - The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.
- - This must not be a path to a key, but the contents of the key.
- - Requires API version >= 1.30.
- type: str
- ca_force_rotate:
- description:
- - An integer whose purpose is to force swarm to generate a new signing CA certificate and key,
- if none have been specified.
- - Docker default value is C(0).
- - Requires API version >= 1.30.
- type: int
- autolock_managers:
- description:
- - If set, generate a key and use it to lock data stored on the managers.
- - Docker default value is C(no).
- - M(docker_swarm_info) can be used to retrieve the unlock key.
- type: bool
- rotate_worker_token:
- description: Rotate the worker join token.
- type: bool
- default: no
- rotate_manager_token:
- description: Rotate the manager join token.
- type: bool
- default: no
-extends_documentation_fragment:
- - docker
- - docker.docker_py_1_documentation
-requirements:
- - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
- - Docker API >= 1.25
-author:
- - Thierry Bouvet (@tbouvet)
- - Piotr Wojciechowski (@WojciechowskiPiotr)
-'''
-
-EXAMPLES = '''
-
-- name: Init a new swarm with default parameters
- docker_swarm:
- state: present
-
-- name: Update swarm configuration
- docker_swarm:
- state: present
- election_tick: 5
-
-- name: Add nodes
- docker_swarm:
- state: join
- advertise_addr: 192.168.1.2
- join_token: SWMTKN-1--xxxxx
- remote_addrs: [ '192.168.1.1:2377' ]
-
-- name: Leave swarm for a node
- docker_swarm:
- state: absent
-
-- name: Remove a swarm manager
- docker_swarm:
- state: absent
- force: true
-
-- name: Remove node from swarm
- docker_swarm:
- state: remove
- node_id: mynode
-
-- name: Inspect swarm
- docker_swarm:
- state: inspect
- register: swarm_info
-'''
-
-RETURN = '''
-swarm_facts:
- description: Information about the swarm.
- returned: success
- type: dict
- contains:
- JoinTokens:
- description: Tokens to connect to the Swarm.
- returned: success
- type: dict
- contains:
- Worker:
- description: Token to create a new *worker* node.
- returned: success
- type: str
- example: SWMTKN-1--xxxxx
- Manager:
- description: Token to create a new *manager* node.
- returned: success
- type: str
- example: SWMTKN-1--xxxxx
- UnlockKey:
- description: The swarm unlock-key if I(autolock_managers) is C(true).
- returned: on success if I(autolock_managers) is C(true)
- and swarm is initialised, or if I(autolock_managers) has changed.
- type: str
- example: SWMKEY-1-xxx
-
-actions:
- description: Provides the actions done on the swarm.
- returned: when action failed. 
- type: list - elements: str - example: "['This cluster is already a swarm cluster']" - -''' - -import json -import traceback - -try: - from docker.errors import DockerException, APIError -except ImportError: - # missing Docker SDK for Python handled in ansible.module_utils.docker.common - pass - -from ansible.module_utils.docker.common import ( - DockerBaseClass, - DifferenceTracker, - RequestException, -) - -from ansible.module_utils.docker.swarm import AnsibleDockerSwarmClient - -from ansible.module_utils._text import to_native - - -class TaskParameters(DockerBaseClass): - def __init__(self): - super(TaskParameters, self).__init__() - - self.advertise_addr = None - self.listen_addr = None - self.remote_addrs = None - self.join_token = None - - # Spec - self.snapshot_interval = None - self.task_history_retention_limit = None - self.keep_old_snapshots = None - self.log_entries_for_slow_followers = None - self.heartbeat_tick = None - self.election_tick = None - self.dispatcher_heartbeat_period = None - self.node_cert_expiry = None - self.name = None - self.labels = None - self.log_driver = None - self.signing_ca_cert = None - self.signing_ca_key = None - self.ca_force_rotate = None - self.autolock_managers = None - self.rotate_worker_token = None - self.rotate_manager_token = None - self.default_addr_pool = None - self.subnet_size = None - - @staticmethod - def from_ansible_params(client): - result = TaskParameters() - for key, value in client.module.params.items(): - if key in result.__dict__: - setattr(result, key, value) - - result.update_parameters(client) - return result - - def update_from_swarm_info(self, swarm_info): - spec = swarm_info['Spec'] - - ca_config = spec.get('CAConfig') or dict() - if self.node_cert_expiry is None: - self.node_cert_expiry = ca_config.get('NodeCertExpiry') - if self.ca_force_rotate is None: - self.ca_force_rotate = ca_config.get('ForceRotate') - - dispatcher = spec.get('Dispatcher') or dict() - if self.dispatcher_heartbeat_period is None: - self.dispatcher_heartbeat_period = dispatcher.get('HeartbeatPeriod') - - raft = spec.get('Raft') or dict() - if self.snapshot_interval is None: - self.snapshot_interval = raft.get('SnapshotInterval') - if self.keep_old_snapshots is None: - self.keep_old_snapshots = raft.get('KeepOldSnapshots') - if self.heartbeat_tick is None: - self.heartbeat_tick = raft.get('HeartbeatTick') - if self.log_entries_for_slow_followers is None: - self.log_entries_for_slow_followers = raft.get('LogEntriesForSlowFollowers') - if self.election_tick is None: - self.election_tick = raft.get('ElectionTick') - - orchestration = spec.get('Orchestration') or dict() - if self.task_history_retention_limit is None: - self.task_history_retention_limit = orchestration.get('TaskHistoryRetentionLimit') - - encryption_config = spec.get('EncryptionConfig') or dict() - if self.autolock_managers is None: - self.autolock_managers = encryption_config.get('AutoLockManagers') - - if self.name is None: - self.name = spec['Name'] - - if self.labels is None: - self.labels = spec.get('Labels') or {} - - if 'LogDriver' in spec['TaskDefaults']: - self.log_driver = spec['TaskDefaults']['LogDriver'] - - def update_parameters(self, client): - assign = dict( - snapshot_interval='snapshot_interval', - task_history_retention_limit='task_history_retention_limit', - keep_old_snapshots='keep_old_snapshots', - log_entries_for_slow_followers='log_entries_for_slow_followers', - heartbeat_tick='heartbeat_tick', - election_tick='election_tick', - 
dispatcher_heartbeat_period='dispatcher_heartbeat_period', - node_cert_expiry='node_cert_expiry', - name='name', - labels='labels', - signing_ca_cert='signing_ca_cert', - signing_ca_key='signing_ca_key', - ca_force_rotate='ca_force_rotate', - autolock_managers='autolock_managers', - log_driver='log_driver', - ) - params = dict() - for dest, source in assign.items(): - if not client.option_minimal_versions[source]['supported']: - continue - value = getattr(self, source) - if value is not None: - params[dest] = value - self.spec = client.create_swarm_spec(**params) - - def compare_to_active(self, other, client, differences): - for k in self.__dict__: - if k in ('advertise_addr', 'listen_addr', 'remote_addrs', 'join_token', - 'rotate_worker_token', 'rotate_manager_token', 'spec', - 'default_addr_pool', 'subnet_size'): - continue - if not client.option_minimal_versions[k]['supported']: - continue - value = getattr(self, k) - if value is None: - continue - other_value = getattr(other, k) - if value != other_value: - differences.add(k, parameter=value, active=other_value) - if self.rotate_worker_token: - differences.add('rotate_worker_token', parameter=True, active=False) - if self.rotate_manager_token: - differences.add('rotate_manager_token', parameter=True, active=False) - return differences - - -class SwarmManager(DockerBaseClass): - - def __init__(self, client, results): - - super(SwarmManager, self).__init__() - - self.client = client - self.results = results - self.check_mode = self.client.check_mode - self.swarm_info = {} - - self.state = client.module.params['state'] - self.force = client.module.params['force'] - self.node_id = client.module.params['node_id'] - - self.differences = DifferenceTracker() - self.parameters = TaskParameters.from_ansible_params(client) - - self.created = False - - def __call__(self): - choice_map = { - "present": self.init_swarm, - "join": self.join, - "absent": self.leave, - "remove": self.remove, - "inspect": self.inspect_swarm - } - - choice_map.get(self.state)() - - if self.client.module._diff or self.parameters.debug: - diff = dict() - diff['before'], diff['after'] = self.differences.get_before_after() - self.results['diff'] = diff - - def inspect_swarm(self): - try: - data = self.client.inspect_swarm() - json_str = json.dumps(data, ensure_ascii=False) - self.swarm_info = json.loads(json_str) - - self.results['changed'] = False - self.results['swarm_facts'] = self.swarm_info - - unlock_key = self.get_unlock_key() - self.swarm_info.update(unlock_key) - except APIError: - return - - def get_unlock_key(self): - default = {'UnlockKey': None} - if not self.has_swarm_lock_changed(): - return default - try: - return self.client.get_unlock_key() or default - except APIError: - return default - - def has_swarm_lock_changed(self): - return self.parameters.autolock_managers and ( - self.created or self.differences.has_difference_for('autolock_managers') - ) - - def init_swarm(self): - if not self.force and self.client.check_if_swarm_manager(): - self.__update_swarm() - return - - if not self.check_mode: - init_arguments = { - 'advertise_addr': self.parameters.advertise_addr, - 'listen_addr': self.parameters.listen_addr, - 'force_new_cluster': self.force, - 'swarm_spec': self.parameters.spec, - } - if self.parameters.default_addr_pool is not None: - init_arguments['default_addr_pool'] = self.parameters.default_addr_pool - if self.parameters.subnet_size is not None: - init_arguments['subnet_size'] = self.parameters.subnet_size - try: - 
self.client.init_swarm(**init_arguments) - except APIError as exc: - self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc)) - - if not self.client.check_if_swarm_manager(): - if not self.check_mode: - self.client.fail("Swarm not created or other error!") - - self.created = True - self.inspect_swarm() - self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID'))) - self.differences.add('state', parameter='present', active='absent') - self.results['changed'] = True - self.results['swarm_facts'] = { - 'JoinTokens': self.swarm_info.get('JoinTokens'), - 'UnlockKey': self.swarm_info.get('UnlockKey') - } - - def __update_swarm(self): - try: - self.inspect_swarm() - version = self.swarm_info['Version']['Index'] - self.parameters.update_from_swarm_info(self.swarm_info) - old_parameters = TaskParameters() - old_parameters.update_from_swarm_info(self.swarm_info) - self.parameters.compare_to_active(old_parameters, self.client, self.differences) - if self.differences.empty: - self.results['actions'].append("No modification") - self.results['changed'] = False - return - update_parameters = TaskParameters.from_ansible_params(self.client) - update_parameters.update_parameters(self.client) - if not self.check_mode: - self.client.update_swarm( - version=version, swarm_spec=update_parameters.spec, - rotate_worker_token=self.parameters.rotate_worker_token, - rotate_manager_token=self.parameters.rotate_manager_token) - except APIError as exc: - self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc)) - return - - self.inspect_swarm() - self.results['actions'].append("Swarm cluster updated") - self.results['changed'] = True - - def join(self): - if self.client.check_if_swarm_node(): - self.results['actions'].append("This node is already part of a swarm.") - return - if not self.check_mode: - try: - self.client.join_swarm( - remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token, - listen_addr=self.parameters.listen_addr, advertise_addr=self.parameters.advertise_addr) - except APIError as exc: - self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc)) - self.results['actions'].append("New node is added to swarm cluster") - self.differences.add('joined', parameter=True, active=False) - self.results['changed'] = True - - def leave(self): - if not self.client.check_if_swarm_node(): - self.results['actions'].append("This node is not part of a swarm.") - return - if not self.check_mode: - try: - self.client.leave_swarm(force=self.force) - except APIError as exc: - self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc)) - self.results['actions'].append("Node has left the swarm cluster") - self.differences.add('joined', parameter='absent', active='present') - self.results['changed'] = True - - def remove(self): - if not self.client.check_if_swarm_manager(): - self.client.fail("This node is not a manager.") - - try: - status_down = self.client.check_if_swarm_node_is_down(node_id=self.node_id, repeat_check=5) - except APIError: - return - - if not status_down: - self.client.fail("Can not remove the node. 
The status node is ready and not down.") - - if not self.check_mode: - try: - self.client.remove_node(node_id=self.node_id, force=self.force) - except APIError as exc: - self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc)) - self.results['actions'].append("Node is removed from swarm cluster.") - self.differences.add('joined', parameter=False, active=True) - self.results['changed'] = True - - -def _detect_remove_operation(client): - return client.module.params['state'] == 'remove' - - -def main(): - argument_spec = dict( - advertise_addr=dict(type='str'), - state=dict(type='str', default='present', choices=['present', 'join', 'absent', 'remove', 'inspect']), - force=dict(type='bool', default=False), - listen_addr=dict(type='str', default='0.0.0.0:2377'), - remote_addrs=dict(type='list', elements='str'), - join_token=dict(type='str'), - snapshot_interval=dict(type='int'), - task_history_retention_limit=dict(type='int'), - keep_old_snapshots=dict(type='int'), - log_entries_for_slow_followers=dict(type='int'), - heartbeat_tick=dict(type='int'), - election_tick=dict(type='int'), - dispatcher_heartbeat_period=dict(type='int'), - node_cert_expiry=dict(type='int'), - name=dict(type='str'), - labels=dict(type='dict'), - signing_ca_cert=dict(type='str'), - signing_ca_key=dict(type='str'), - ca_force_rotate=dict(type='int'), - autolock_managers=dict(type='bool'), - node_id=dict(type='str'), - rotate_worker_token=dict(type='bool', default=False), - rotate_manager_token=dict(type='bool', default=False), - default_addr_pool=dict(type='list', elements='str'), - subnet_size=dict(type='int'), - ) - - required_if = [ - ('state', 'join', ['advertise_addr', 'remote_addrs', 'join_token']), - ('state', 'remove', ['node_id']) - ] - - option_minimal_versions = dict( - labels=dict(docker_py_version='2.6.0', docker_api_version='1.32'), - signing_ca_cert=dict(docker_py_version='2.6.0', docker_api_version='1.30'), - signing_ca_key=dict(docker_py_version='2.6.0', docker_api_version='1.30'), - ca_force_rotate=dict(docker_py_version='2.6.0', docker_api_version='1.30'), - autolock_managers=dict(docker_py_version='2.6.0'), - log_driver=dict(docker_py_version='2.6.0'), - remove_operation=dict( - docker_py_version='2.4.0', - detect_usage=_detect_remove_operation, - usage_msg='remove swarm nodes' - ), - default_addr_pool=dict(docker_py_version='4.0.0', docker_api_version='1.39'), - subnet_size=dict(docker_py_version='4.0.0', docker_api_version='1.39'), - ) - - client = AnsibleDockerSwarmClient( - argument_spec=argument_spec, - supports_check_mode=True, - required_if=required_if, - min_docker_version='1.10.0', - min_docker_api_version='1.25', - option_minimal_versions=option_minimal_versions, - ) - - try: - results = dict( - changed=False, - result='', - actions=[] - ) - - SwarmManager(client, results)() - client.module.exit_json(**results) - except DockerException as e: - client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc()) - except RequestException as e: - client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc()) - - -if __name__ == '__main__': - main() diff --git a/test/utils/shippable/incidental/freebsd.sh b/test/utils/shippable/incidental/freebsd.sh deleted file mode 120000 index 6ddb7768545..00000000000 --- a/test/utils/shippable/incidental/freebsd.sh +++ /dev/null @@ -1 +0,0 @@ -remote.sh \ No newline at end of file diff --git 
a/test/utils/shippable/incidental/linux.sh b/test/utils/shippable/incidental/linux.sh deleted file mode 100755 index 2f513dd2523..00000000000 --- a/test/utils/shippable/incidental/linux.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -set -o pipefail -eux - -declare -a args -IFS='/:' read -ra args <<< "$1" - -image="${args[1]}" - -target="shippable/posix/incidental/" - -# shellcheck disable=SC2086 -ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \ - --docker "${image}" \ diff --git a/test/utils/shippable/incidental/macos.sh b/test/utils/shippable/incidental/macos.sh deleted file mode 120000 index 6ddb7768545..00000000000 --- a/test/utils/shippable/incidental/macos.sh +++ /dev/null @@ -1 +0,0 @@ -remote.sh \ No newline at end of file diff --git a/test/utils/shippable/incidental/remote.sh b/test/utils/shippable/incidental/remote.sh deleted file mode 100755 index 95f304d54a3..00000000000 --- a/test/utils/shippable/incidental/remote.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash - -set -o pipefail -eux - -declare -a args -IFS='/:' read -ra args <<< "$1" - -platform="${args[0]}" -version="${args[1]}" -pyver=default -target="shippable/posix/incidental/" - -# check for explicit python version like 8.3@3.8 -declare -a splitversion -IFS='@' read -ra splitversion <<< "$version" - -if [ "${#splitversion[@]}" -gt 1 ]; then - version="${splitversion[0]}" - pyver="${splitversion[1]}" -fi - -stage="${S:-prod}" -provider="${P:-default}" - -# shellcheck disable=SC2086 -ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \ - --python "${pyver}" --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}" \ diff --git a/test/utils/shippable/incidental/rhel.sh b/test/utils/shippable/incidental/rhel.sh deleted file mode 120000 index 6ddb7768545..00000000000 --- a/test/utils/shippable/incidental/rhel.sh +++ /dev/null @@ -1 +0,0 @@ -remote.sh \ No newline at end of file
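
Editorial addendum: the deleted remote.sh above splits an optional explicit Python version out of the platform version string (for example 8.3@3.8). Below is a minimal Python sketch of that same convention, for readers who want the parsing rule without the shell semantics; the helper name is hypothetical and not part of the removed scripts.

def split_remote_version(version, default_python='default'):
    # '8.3@3.8' -> ('8.3', '3.8'); a plain '8.3' keeps the default Python,
    # mirroring the IFS='@' read in the deleted remote.sh.
    platform_version, sep, python_version = version.partition('@')
    return platform_version, python_version if sep else default_python

assert split_remote_version('8.3@3.8') == ('8.3', '3.8')
assert split_remote_version('12.1') == ('12.1', 'default')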