mirror of https://github.com/ansible/ansible.git
parent
1214b63f4f
commit
e259317c3e
@ -1,13 +0,0 @@
|
||||
shippable/posix/incidental
|
||||
context/controller
|
||||
skip/osx
|
||||
skip/macos
|
||||
skip/freebsd
|
||||
skip/rhel/9.0b # there are no docker-ce packages for CentOS/RHEL 9
|
||||
destructive
|
||||
skip/docker # The tests sometimes make docker daemon unstable; hence,
|
||||
# we skip all docker-based CI runs to avoid disrupting
|
||||
# the whole CI system. On VMs, we restart docker daemon
|
||||
# after finishing the tests to minimize potential effects
|
||||
# on other tests.
|
||||
needs/root
|
@ -1,3 +0,0 @@
|
||||
---
|
||||
plugin: docker_swarm
|
||||
docker_host: unix://var/run/docker.sock
|
@ -1,5 +0,0 @@
|
||||
---
|
||||
plugin: docker_swarm
|
||||
docker_host: unix://var/run/docker.sock
|
||||
verbose_output: no
|
||||
include_host_uri: yes
|
@ -1,3 +0,0 @@
|
||||
---
|
||||
dependencies:
|
||||
- incidental_setup_docker
|
@ -1,19 +0,0 @@
|
||||
---
|
||||
- hosts: 127.0.0.1
|
||||
connection: local
|
||||
gather_facts: yes
|
||||
tasks:
|
||||
- name: Make sure swarm is removed
|
||||
docker_swarm:
|
||||
state: absent
|
||||
force: yes
|
||||
|
||||
- name: remove docker pagkages
|
||||
action: "{{ ansible_facts.pkg_mgr }}"
|
||||
args:
|
||||
name:
|
||||
- docker
|
||||
- docker-ce
|
||||
- docker-ce-cli
|
||||
- containerd.io
|
||||
state: absent
|
@ -1,15 +0,0 @@
|
||||
---
|
||||
- hosts: 127.0.0.1
|
||||
connection: local
|
||||
vars:
|
||||
docker_skip_cleanup: yes
|
||||
|
||||
tasks:
|
||||
- name: Setup docker
|
||||
import_role:
|
||||
name: incidental_setup_docker
|
||||
|
||||
- name: Create a Swarm cluster
|
||||
docker_swarm:
|
||||
state: present
|
||||
advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
|
@ -1,58 +0,0 @@
|
||||
---
|
||||
- hosts: 127.0.0.1
|
||||
connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
|
||||
gather_facts: no
|
||||
tasks:
|
||||
- name: Show all groups
|
||||
debug:
|
||||
var: groups
|
||||
- name: Make sure docker_swarm groups are there
|
||||
assert:
|
||||
that:
|
||||
- groups.all | length > 0
|
||||
- groups.leader | length == 1
|
||||
- groups.manager | length > 0
|
||||
- groups.worker | length >= 0
|
||||
- groups.nonleaders | length >= 0
|
||||
|
||||
- hosts: all
|
||||
connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
|
||||
vars:
|
||||
# for some reason, Ansible can't find the Python interpreter when connecting to the nodes,
|
||||
# which is in fact just localhost in disguise. That's why we use ansible_playbook_python.
|
||||
ansible_python_interpreter: "{{ ansible_playbook_python }}"
|
||||
tasks:
|
||||
- name: Check for groups
|
||||
assert:
|
||||
that:
|
||||
- "groups.manager | length > 0"
|
||||
- "groups.worker | length >= 0"
|
||||
- "groups.leader | length == 1"
|
||||
run_once: yes
|
||||
|
||||
- name: List manager group
|
||||
debug:
|
||||
var: groups.manager
|
||||
run_once: yes
|
||||
|
||||
- name: List worker group
|
||||
debug:
|
||||
var: groups.worker
|
||||
run_once: yes
|
||||
|
||||
- name: List leader group
|
||||
debug:
|
||||
var: groups.leader
|
||||
run_once: yes
|
||||
|
||||
- name: Print ansible_host per host
|
||||
debug:
|
||||
var: ansible_host
|
||||
|
||||
- name: Make sure docker_swarm_node_attributes is available
|
||||
assert:
|
||||
that:
|
||||
- docker_swarm_node_attributes is not undefined
|
||||
- name: Print docker_swarm_node_attributes per host
|
||||
debug:
|
||||
var: docker_swarm_node_attributes
|
@ -1,35 +0,0 @@
|
||||
---
|
||||
- hosts: 127.0.0.1
|
||||
connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
|
||||
gather_facts: no
|
||||
tasks:
|
||||
- name: Show all groups
|
||||
debug:
|
||||
var: groups
|
||||
- name: Make sure docker_swarm groups are there
|
||||
assert:
|
||||
that:
|
||||
- groups.all | length > 0
|
||||
- groups.leader | length == 1
|
||||
- groups.manager | length > 0
|
||||
- groups.worker | length >= 0
|
||||
- groups.nonleaders | length >= 0
|
||||
|
||||
- hosts: all
|
||||
connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
|
||||
vars:
|
||||
# for some reason, Ansible can't find the Python interpreter when connecting to the nodes,
|
||||
# which is in fact just localhost in disguise. That's why we use ansible_playbook_python.
|
||||
ansible_python_interpreter: "{{ ansible_playbook_python }}"
|
||||
tasks:
|
||||
- name: Make sure docker_swarm_node_attributes is not available
|
||||
assert:
|
||||
that:
|
||||
- docker_swarm_node_attributes is undefined
|
||||
- name: Make sure ansible_host_uri is available
|
||||
assert:
|
||||
that:
|
||||
- ansible_host_uri is defined
|
||||
- name: Print ansible_host_uri
|
||||
debug:
|
||||
var: ansible_host_uri
|
@ -1,22 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
cleanup() {
|
||||
echo "Cleanup"
|
||||
ansible-playbook playbooks/swarm_cleanup.yml
|
||||
echo "Done"
|
||||
}
|
||||
|
||||
trap cleanup INT TERM EXIT
|
||||
|
||||
echo "Setup"
|
||||
ANSIBLE_ROLES_PATH=.. ansible-playbook playbooks/swarm_setup.yml
|
||||
|
||||
echo "Test docker_swarm inventory 1"
|
||||
ansible-playbook -i inventory_1.docker_swarm.yml playbooks/test_inventory_1.yml
|
||||
|
||||
echo "Test docker_swarm inventory 2"
|
||||
ansible-playbook -i inventory_2.docker_swarm.yml playbooks/test_inventory_2.yml
|
@ -1,2 +0,0 @@
|
||||
needs/target/setup_epel
|
||||
hidden
|
@ -1,18 +0,0 @@
|
||||
docker_cli_version: '0.0'
|
||||
docker_api_version: '0.0'
|
||||
docker_py_version: '0.0'
|
||||
docker_skip_cleanup: no
|
||||
docker_prereq_packages: []
|
||||
docker_packages:
|
||||
- docker-ce
|
||||
|
||||
docker_pip_extra_packages: []
|
||||
docker_pip_never_remove: []
|
||||
docker_pip_packages:
|
||||
- docker
|
||||
|
||||
docker_cleanup_packages:
|
||||
- docker
|
||||
- docker-ce
|
||||
- docker-ce-cli
|
||||
- containerd.io
|
@ -1,14 +0,0 @@
|
||||
- name: remove pip packages
|
||||
pip:
|
||||
state: absent
|
||||
name: "{{ docker_pip_packages | union(docker_pip_extra_packages) | difference(docker_pip_never_remove) }}"
|
||||
listen: cleanup docker
|
||||
when: not docker_skip_cleanup | bool
|
||||
|
||||
- name: remove docker packages
|
||||
action: "{{ ansible_facts.pkg_mgr }}"
|
||||
args:
|
||||
name: "{{ docker_cleanup_packages }}"
|
||||
state: absent
|
||||
listen: cleanup docker
|
||||
when: not docker_skip_cleanup | bool
|
@ -1,2 +0,0 @@
|
||||
dependencies:
|
||||
- setup_remote_constraints
|
@ -1,43 +0,0 @@
|
||||
- name: Get OS version
|
||||
shell: uname -r
|
||||
register: os_version
|
||||
|
||||
- name: Install pre-reqs
|
||||
apt:
|
||||
name: "{{ docker_prereq_packages }}"
|
||||
state: present
|
||||
update_cache: yes
|
||||
notify: cleanup docker
|
||||
|
||||
- name: Add gpg key
|
||||
shell: curl -fsSL https://download.docker.com/linux/ubuntu/gpg >key && apt-key add key
|
||||
|
||||
- name: Add Docker repo
|
||||
shell: add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
|
||||
|
||||
- block:
|
||||
- name: Prevent service restart
|
||||
copy:
|
||||
content: exit 101
|
||||
dest: /usr/sbin/policy-rc.d
|
||||
backup: yes
|
||||
mode: 0755
|
||||
register: policy_rc_d
|
||||
|
||||
- name: Install Docker CE
|
||||
apt:
|
||||
name: "{{ docker_packages }}"
|
||||
state: present
|
||||
update_cache: yes
|
||||
always:
|
||||
- name: Restore /usr/sbin/policy-rc.d (if needed)
|
||||
command: mv {{ policy_rc_d.backup_file }} /usr/sbin/policy-rc.d
|
||||
when:
|
||||
- "'backup_file' in policy_rc_d"
|
||||
|
||||
- name: Remove /usr/sbin/policy-rc.d (if needed)
|
||||
file:
|
||||
path: /usr/sbin/policy-rc.d
|
||||
state: absent
|
||||
when:
|
||||
- "'backup_file' not in policy_rc_d"
|
@ -1,19 +0,0 @@
|
||||
- name: Add repository
|
||||
yum_repository:
|
||||
file: docker-ce
|
||||
name: docker-ce-stable
|
||||
description: Docker CE Stable - $basearch
|
||||
baseurl: https://download.docker.com/linux/fedora/$releasever/$basearch/stable
|
||||
enabled: yes
|
||||
gpgcheck: yes
|
||||
gpgkey: https://download.docker.com/linux/fedora/gpg
|
||||
|
||||
- name: Update cache
|
||||
command: dnf makecache
|
||||
|
||||
- name: Install docker
|
||||
dnf:
|
||||
name: "{{ docker_packages }}"
|
||||
state: present
|
||||
enablerepo: docker-ce-test
|
||||
notify: cleanup docker
|
@ -1,40 +0,0 @@
|
||||
# The RHEL extras repository must be enabled to provide the container-selinux package.
|
||||
# See: https://docs.docker.com/engine/installation/linux/docker-ee/rhel/#install-using-the-repository
|
||||
|
||||
- name: Install Docker pre-reqs
|
||||
yum:
|
||||
name: "{{ docker_prereq_packages }}"
|
||||
state: present
|
||||
notify: cleanup docker
|
||||
|
||||
- name: Install epel repo which is missing on rhel-7 and is needed for pigz (needed for docker-ce 18)
|
||||
include_role:
|
||||
name: setup_epel
|
||||
|
||||
- name: Enable extras repository for RHEL on AWS
|
||||
# RHEL 7.6 uses REGION-rhel-server-extras and RHEL 7.7+ use rhel-7-server-rhui-extras-rpms
|
||||
command: yum-config-manager --enable REGION-rhel-server-extras rhel-7-server-rhui-extras-rpms
|
||||
|
||||
# They broke their .repo file, so we set it up ourselves
|
||||
- name: Set-up repository
|
||||
yum_repository:
|
||||
name: docker-ce
|
||||
description: docker-ce
|
||||
baseurl: https://download.docker.com/linux/centos/{{ ansible_facts.distribution_major_version }}/$basearch/stable
|
||||
gpgcheck: true
|
||||
gpgkey: https://download.docker.com/linux/centos/gpg
|
||||
|
||||
- name: Update cache
|
||||
command: yum -y makecache fast
|
||||
|
||||
- name: Install docker
|
||||
yum:
|
||||
name: "{{ docker_packages }}"
|
||||
state: present
|
||||
notify: cleanup docker
|
||||
|
||||
- name: Make sure the docker daemon is running (failure expected inside docker container)
|
||||
service:
|
||||
name: docker
|
||||
state: started
|
||||
ignore_errors: "{{ ansible_virtualization_type == 'docker' }}"
|
@ -1,33 +0,0 @@
|
||||
# The RHEL extras repository must be enabled to provide the container-selinux package.
|
||||
# See: https://docs.docker.com/engine/installation/linux/docker-ee/rhel/#install-using-the-repository
|
||||
|
||||
- name: Install Docker pre-reqs
|
||||
dnf:
|
||||
name: "{{ docker_prereq_packages }}"
|
||||
state: present
|
||||
notify: cleanup docker
|
||||
register: result
|
||||
until: result is success
|
||||
retries: 10
|
||||
delay: 2
|
||||
|
||||
# They broke their .repo file, so we set it up ourselves
|
||||
- name: Set-up repository
|
||||
yum_repository:
|
||||
name: docker-ce
|
||||
description: docker-ce
|
||||
baseurl: https://download.docker.com/linux/centos/{{ ansible_facts.distribution_major_version }}/$basearch/stable
|
||||
gpgcheck: true
|
||||
gpgkey: https://download.docker.com/linux/centos/gpg
|
||||
|
||||
- name: Install docker
|
||||
dnf:
|
||||
name: "{{ docker_packages }}"
|
||||
state: present
|
||||
notify: cleanup docker
|
||||
|
||||
- name: Make sure the docker daemon is running (failure expected inside docker container)
|
||||
service:
|
||||
name: docker
|
||||
state: started
|
||||
ignore_errors: "{{ ansible_virtualization_type == 'docker' }}"
|
@ -1,7 +0,0 @@
|
||||
- name: Install docker
|
||||
zypper:
|
||||
name: "{{ docker_packages }}"
|
||||
force: yes
|
||||
disable_gpg_check: yes
|
||||
update_cache: yes
|
||||
notify: cleanup docker
|
@ -1,113 +0,0 @@
|
||||
- name: Setup Docker
|
||||
when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6']
|
||||
block:
|
||||
- name: Include distribution specific variables
|
||||
include_vars: "{{ lookup('first_found', params) }}"
|
||||
vars:
|
||||
params:
|
||||
files:
|
||||
- "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
|
||||
- "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml"
|
||||
- "{{ ansible_facts.distribution }}.yml"
|
||||
- "{{ ansible_facts.os_family }}.yml"
|
||||
- default.yml
|
||||
paths:
|
||||
- "{{ role_path }}/vars"
|
||||
|
||||
- name: Include distribution specific tasks
|
||||
include_tasks: "{{ lookup('first_found', params) }}"
|
||||
vars:
|
||||
params:
|
||||
files:
|
||||
- "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
|
||||
- "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml"
|
||||
- "{{ ansible_facts.distribution }}.yml"
|
||||
- "{{ ansible_facts.os_family }}.yml"
|
||||
paths:
|
||||
- "{{ role_path }}/tasks"
|
||||
|
||||
- name: Install Python requirements
|
||||
pip:
|
||||
state: present
|
||||
name: "{{ docker_pip_packages | union(docker_pip_extra_packages) }}"
|
||||
extra_args: "-c {{ remote_constraints }}"
|
||||
notify: cleanup docker
|
||||
|
||||
# Detect docker CLI, API and docker-py versions
|
||||
- name: Check Docker CLI version
|
||||
command: "docker version -f {% raw %}'{{.Client.Version}}'{% endraw %}"
|
||||
register: docker_cli_version_stdout
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Check Docker API version
|
||||
command: "{{ ansible_python.executable }} -c 'import docker; print(docker.from_env().version()[\"ApiVersion\"])'"
|
||||
register: docker_api_version_stdout
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Check docker-py API version
|
||||
command: "{{ ansible_python.executable }} -c 'import docker; print(docker.__version__)'"
|
||||
register: docker_py_version_stdout
|
||||
ignore_errors: yes
|
||||
|
||||
- set_fact:
|
||||
docker_cli_version: "{{ docker_cli_version_stdout.stdout | default('0.0') }}"
|
||||
docker_api_version: "{{ docker_api_version_stdout.stdout | default('0.0') }}"
|
||||
docker_py_version: "{{ docker_py_version_stdout.stdout | default('0.0') }}"
|
||||
|
||||
- debug:
|
||||
msg: "Docker CLI version: {{ docker_cli_version }}; Docker API version: {{ docker_api_version }}; docker-py library version: {{ docker_py_version }}"
|
||||
|
||||
- block:
|
||||
# Cleanup docker daemon
|
||||
- name: "Remove all ansible-test-* docker containers"
|
||||
shell: 'docker ps --no-trunc --format {% raw %}"{{.Names}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker rm -f'
|
||||
register: docker_containers
|
||||
retries: 3
|
||||
delay: 3
|
||||
until: docker_containers is success
|
||||
|
||||
- name: "Remove all ansible-test-* docker volumes"
|
||||
shell: 'docker volume ls --format {% raw %}"{{.Name}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker volume rm -f'
|
||||
register: docker_volumes
|
||||
|
||||
- name: "Remove all ansible-test-* docker networks"
|
||||
shell: 'docker network ls --no-trunc --format {% raw %}"{{.Name}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker network rm'
|
||||
register: docker_networks
|
||||
|
||||
- name: Cleaned docker resources
|
||||
debug:
|
||||
var: docker_resources
|
||||
vars:
|
||||
docker_resources:
|
||||
containers: "{{ docker_containers.stdout_lines }}"
|
||||
volumes: "{{ docker_volumes.stdout_lines }}"
|
||||
networks: "{{ docker_networks.stdout_lines }}"
|
||||
|
||||
# List all existing docker resources
|
||||
- name: List all docker containers
|
||||
command: docker ps --no-trunc -a
|
||||
register: docker_containers
|
||||
|
||||
- name: List all docker volumes
|
||||
command: docker volume ls
|
||||
register: docker_volumes
|
||||
|
||||
- name: List all docker networks
|
||||
command: docker network ls --no-trunc
|
||||
register: docker_networks
|
||||
|
||||
- name: List all docker images
|
||||
command: docker images --no-trunc -a
|
||||
register: docker_images
|
||||
|
||||
- name: Still existing docker resources
|
||||
debug:
|
||||
var: docker_resources
|
||||
vars:
|
||||
docker_resources:
|
||||
containers: "{{ docker_containers.stdout_lines }}"
|
||||
volumes: "{{ docker_volumes.stdout_lines }}"
|
||||
networks: "{{ docker_networks.stdout_lines }}"
|
||||
images: "{{ docker_images.stdout_lines }}"
|
||||
|
||||
when: docker_cli_version is version('0.0', '>')
|
@ -1,9 +0,0 @@
|
||||
docker_packages:
|
||||
- docker-ce=5:19.03.0*
|
||||
- docker-ce-cli=5:19.03.0*
|
||||
|
||||
docker_prereq_packages:
|
||||
- apt-transport-https
|
||||
- ca-certificates
|
||||
- curl
|
||||
- software-properties-common
|
@ -1,5 +0,0 @@
|
||||
docker_prereq_packages: []
|
||||
|
||||
docker_packages:
|
||||
- docker-ce-19.03.1
|
||||
- docker-ce-cli-19.03.1
|
@ -1,18 +0,0 @@
|
||||
docker_prereq_packages:
|
||||
- yum-utils
|
||||
- device-mapper-persistent-data
|
||||
- lvm2
|
||||
- libseccomp
|
||||
|
||||
docker_packages:
|
||||
- docker-ce-19.03.1
|
||||
- docker-ce-cli-19.03.1
|
||||
|
||||
docker_pip_extra_packages:
|
||||
- requests==2.6.0
|
||||
|
||||
# We need to pin the above so pip finds the right system-installed package
|
||||
# but we never want to try to remove it, so we substract this from the set of
|
||||
# packages we remove on cleanup
|
||||
docker_pip_never_remove:
|
||||
- requests==2.6.0
|
@ -1,10 +0,0 @@
|
||||
docker_prereq_packages:
|
||||
- yum-utils
|
||||
- device-mapper-persistent-data
|
||||
- lvm2
|
||||
- libseccomp
|
||||
- iptables
|
||||
|
||||
docker_packages:
|
||||
- docker-ce-19.03.13
|
||||
- docker-ce-cli-19.03.13
|
@ -1,2 +0,0 @@
|
||||
docker_packages:
|
||||
- docker=19.03.1_ce
|
@ -1,5 +0,0 @@
|
||||
docker_pip_extra_packages:
|
||||
# Installing requests >=2.12.0 on Ubuntu 14.04 breaks certificate validation. We restrict to an older version
|
||||
# to ensure out get_url tests work out fine. This is only an issue if pyOpenSSL is also installed.
|
||||
# Not sure why RHEL7 needs this specific version
|
||||
- requests==2.6.0
|
@ -1,351 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
|
||||
# Copyright (c) 2018 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: docker_swarm
|
||||
plugin_type: inventory
|
||||
version_added: '2.8'
|
||||
author:
|
||||
- Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
|
||||
short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
|
||||
requirements:
|
||||
- python >= 2.7
|
||||
- L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
|
||||
extends_documentation_fragment:
|
||||
- constructed
|
||||
description:
|
||||
- Reads inventories from the Docker swarm API.
|
||||
- Uses a YAML configuration file docker_swarm.[yml|yaml].
|
||||
- "The plugin returns following groups of swarm nodes: I(all) - all hosts; I(workers) - all worker nodes;
|
||||
I(managers) - all manager nodes; I(leader) - the swarm leader node;
|
||||
I(nonleaders) - all nodes except the swarm leader."
|
||||
options:
|
||||
plugin:
|
||||
description: The name of this plugin, it should always be set to C(docker_swarm) for this plugin to
|
||||
recognize it as it's own.
|
||||
type: str
|
||||
required: true
|
||||
choices: docker_swarm
|
||||
docker_host:
|
||||
description:
|
||||
- Socket of a Docker swarm manager node (C(tcp), C(unix)).
|
||||
- "Use C(unix://var/run/docker.sock) to connect via local socket."
|
||||
type: str
|
||||
required: true
|
||||
aliases: [ docker_url ]
|
||||
verbose_output:
|
||||
description: Toggle to (not) include all available nodes metadata (e.g. C(Platform), C(Architecture), C(OS),
|
||||
C(EngineVersion))
|
||||
type: bool
|
||||
default: yes
|
||||
tls:
|
||||
description: Connect using TLS without verifying the authenticity of the Docker host server.
|
||||
type: bool
|
||||
default: no
|
||||
validate_certs:
|
||||
description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker
|
||||
host server.
|
||||
type: bool
|
||||
default: no
|
||||
aliases: [ tls_verify ]
|
||||
client_key:
|
||||
description: Path to the client's TLS key file.
|
||||
type: path
|
||||
aliases: [ tls_client_key, key_path ]
|
||||
ca_cert:
|
||||
description: Use a CA certificate when performing server verification by providing the path to a CA
|
||||
certificate file.
|
||||
type: path
|
||||
aliases: [ tls_ca_cert, cacert_path ]
|
||||
client_cert:
|
||||
description: Path to the client's TLS certificate file.
|
||||
type: path
|
||||
aliases: [ tls_client_cert, cert_path ]
|
||||
tls_hostname:
|
||||
description: When verifying the authenticity of the Docker host server, provide the expected name of
|
||||
the server.
|
||||
type: str
|
||||
ssl_version:
|
||||
description: Provide a valid SSL version number. Default value determined by ssl.py module.
|
||||
type: str
|
||||
api_version:
|
||||
description:
|
||||
- The version of the Docker API running on the Docker Host.
|
||||
- Defaults to the latest version of the API supported by docker-py.
|
||||
type: str
|
||||
aliases: [ docker_api_version ]
|
||||
timeout:
|
||||
description:
|
||||
- The maximum amount of time in seconds to wait on a response from the API.
|
||||
- If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT)
|
||||
will be used instead. If the environment variable is not set, the default value will be used.
|
||||
type: int
|
||||
default: 60
|
||||
aliases: [ time_out ]
|
||||
include_host_uri:
|
||||
description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
|
||||
swarm leader in format of C(tcp://172.16.0.1:2376). This value may be used without additional
|
||||
modification as value of option I(docker_host) in Docker Swarm modules when connecting via API.
|
||||
The port always defaults to C(2376).
|
||||
type: bool
|
||||
default: no
|
||||
include_host_uri_port:
|
||||
description: Override the detected port number included in I(ansible_host_uri)
|
||||
type: int
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Minimal example using local docker
|
||||
plugin: docker_swarm
|
||||
docker_host: unix://var/run/docker.sock
|
||||
|
||||
# Minimal example using remote docker
|
||||
plugin: docker_swarm
|
||||
docker_host: tcp://my-docker-host:2375
|
||||
|
||||
# Example using remote docker with unverified TLS
|
||||
plugin: docker_swarm
|
||||
docker_host: tcp://my-docker-host:2376
|
||||
tls: yes
|
||||
|
||||
# Example using remote docker with verified TLS and client certificate verification
|
||||
plugin: docker_swarm
|
||||
docker_host: tcp://my-docker-host:2376
|
||||
validate_certs: yes
|
||||
ca_cert: /somewhere/ca.pem
|
||||
client_key: /somewhere/key.pem
|
||||
client_cert: /somewhere/cert.pem
|
||||
|
||||
# Example using constructed features to create groups and set ansible_host
|
||||
plugin: docker_swarm
|
||||
docker_host: tcp://my-docker-host:2375
|
||||
strict: False
|
||||
keyed_groups:
|
||||
# add e.g. x86_64 hosts to an arch_x86_64 group
|
||||
- prefix: arch
|
||||
key: 'Description.Platform.Architecture'
|
||||
# add e.g. linux hosts to an os_linux group
|
||||
- prefix: os
|
||||
key: 'Description.Platform.OS'
|
||||
# create a group per node label
|
||||
# e.g. a node labeled w/ "production" ends up in group "label_production"
|
||||
# hint: labels containing special characters will be converted to safe names
|
||||
- key: 'Spec.Labels'
|
||||
prefix: label
|
||||
'''
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlparse
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||
from ansible.parsing.utils.addresses import parse_address
|
||||
|
||||
try:
|
||||
import docker
|
||||
from docker.errors import TLSParameterError
|
||||
from docker.tls import TLSConfig
|
||||
HAS_DOCKER = True
|
||||
except ImportError:
|
||||
HAS_DOCKER = False
|
||||
|
||||
|
||||
def update_tls_hostname(result):
|
||||
if result['tls_hostname'] is None:
|
||||
# get default machine name from the url
|
||||
parsed_url = urlparse(result['docker_host'])
|
||||
if ':' in parsed_url.netloc:
|
||||
result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
|
||||
else:
|
||||
result['tls_hostname'] = parsed_url
|
||||
|
||||
|
||||
def _get_tls_config(fail_function, **kwargs):
|
||||
try:
|
||||
tls_config = TLSConfig(**kwargs)
|
||||
return tls_config
|
||||
except TLSParameterError as exc:
|
||||
fail_function("TLS config error: %s" % exc)
|
||||
|
||||
|
||||
def get_connect_params(auth, fail_function):
|
||||
if auth['tls'] or auth['tls_verify']:
|
||||
auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
|
||||
|
||||
if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
|
||||
# TLS with certs and host verification
|
||||
if auth['cacert_path']:
|
||||
tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
|
||||
ca_cert=auth['cacert_path'],
|
||||
verify=True,
|
||||
assert_hostname=auth['tls_hostname'],
|
||||
ssl_version=auth['ssl_version'],
|
||||
fail_function=fail_function)
|
||||
else:
|
||||
tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
|
||||
verify=True,
|
||||
assert_hostname=auth['tls_hostname'],
|
||||
ssl_version=auth['ssl_version'],
|
||||
fail_function=fail_function)
|
||||
|
||||
return dict(base_url=auth['docker_host'],
|
||||
tls=tls_config,
|
||||
version=auth['api_version'],
|
||||
timeout=auth['timeout'])
|
||||
|
||||
if auth['tls_verify'] and auth['cacert_path']:
|
||||
# TLS with cacert only
|
||||
tls_config = _get_tls_config(ca_cert=auth['cacert_path'],
|
||||
assert_hostname=auth['tls_hostname'],
|
||||
verify=True,
|
||||
ssl_version=auth['ssl_version'],
|
||||
fail_function=fail_function)
|
||||
return dict(base_url=auth['docker_host'],
|
||||
tls=tls_config,
|
||||
version=auth['api_version'],
|
||||
timeout=auth['timeout'])
|
||||
|
||||
if auth['tls_verify']:
|
||||
# TLS with verify and no certs
|
||||
tls_config = _get_tls_config(verify=True,
|
||||
assert_hostname=auth['tls_hostname'],
|
||||
ssl_version=auth['ssl_version'],
|
||||
fail_function=fail_function)
|
||||
return dict(base_url=auth['docker_host'],
|
||||
tls=tls_config,
|
||||
version=auth['api_version'],
|
||||
timeout=auth['timeout'])
|
||||
|
||||
if auth['tls'] and auth['cert_path'] and auth['key_path']:
|
||||
# TLS with certs and no host verification
|
||||
tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
|
||||
verify=False,
|
||||
ssl_version=auth['ssl_version'],
|
||||
fail_function=fail_function)
|
||||
return dict(base_url=auth['docker_host'],
|
||||
tls=tls_config,
|
||||
version=auth['api_version'],
|
||||
timeout=auth['timeout'])
|
||||
|
||||
if auth['tls']:
|
||||
# TLS with no certs and not host verification
|
||||
tls_config = _get_tls_config(verify=False,
|
||||
ssl_version=auth['ssl_version'],
|
||||
fail_function=fail_function)
|
||||
return dict(base_url=auth['docker_host'],
|
||||
tls=tls_config,
|
||||
version=auth['api_version'],
|
||||
timeout=auth['timeout'])
|
||||
|
||||
# No TLS
|
||||
return dict(base_url=auth['docker_host'],
|
||||
version=auth['api_version'],
|
||||
timeout=auth['timeout'])
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
''' Host inventory parser for ansible using Docker swarm as source. '''
|
||||
|
||||
NAME = 'docker_swarm'
|
||||
|
||||
def _fail(self, msg):
|
||||
raise AnsibleError(msg)
|
||||
|
||||
def _populate(self):
|
||||
raw_params = dict(
|
||||
docker_host=self.get_option('docker_host'),
|
||||
tls=self.get_option('tls'),
|
||||
tls_verify=self.get_option('validate_certs'),
|
||||
key_path=self.get_option('client_key'),
|
||||
cacert_path=self.get_option('ca_cert'),
|
||||
cert_path=self.get_option('client_cert'),
|
||||
tls_hostname=self.get_option('tls_hostname'),
|
||||
api_version=self.get_option('api_version'),
|
||||
timeout=self.get_option('timeout'),
|
||||
ssl_version=self.get_option('ssl_version'),
|
||||
debug=None,
|
||||
)
|
||||
update_tls_hostname(raw_params)
|
||||
connect_params = get_connect_params(raw_params, fail_function=self._fail)
|
||||
self.client = docker.DockerClient(**connect_params)
|
||||
self.inventory.add_group('all')
|
||||
self.inventory.add_group('manager')
|
||||
self.inventory.add_group('worker')
|
||||
self.inventory.add_group('leader')
|
||||
self.inventory.add_group('nonleaders')
|
||||
|
||||
if self.get_option('include_host_uri'):
|
||||
if self.get_option('include_host_uri_port'):
|
||||
host_uri_port = str(self.get_option('include_host_uri_port'))
|
||||
elif self.get_option('tls') or self.get_option('validate_certs'):
|
||||
host_uri_port = '2376'
|
||||
else:
|
||||
host_uri_port = '2375'
|
||||
|
||||
try:
|
||||
self.nodes = self.client.nodes.list()
|
||||
for self.node in self.nodes:
|
||||
self.node_attrs = self.client.nodes.get(self.node.id).attrs
|
||||
self.inventory.add_host(self.node_attrs['ID'])
|
||||
self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
|
||||
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host',
|
||||
self.node_attrs['Status']['Addr'])
|
||||
if self.get_option('include_host_uri'):
|
||||
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
|
||||
'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
|
||||
if self.get_option('verbose_output'):
|
||||
self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
|
||||
if 'ManagerStatus' in self.node_attrs:
|
||||
if self.node_attrs['ManagerStatus'].get('Leader'):
|
||||
# This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
|
||||
# Check moby/moby#35437 for details
|
||||
swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
|
||||
self.node_attrs['Status']['Addr']
|
||||
if self.get_option('include_host_uri'):
|
||||
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
|
||||
'tcp://' + swarm_leader_ip + ':' + host_uri_port)
|
||||
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip)
|
||||
self.inventory.add_host(self.node_attrs['ID'], group='leader')
|
||||
else:
|
||||
self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
|
||||
else:
|
||||
self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
|
||||
# Use constructed if applicable
|
||||
strict = self.get_option('strict')
|
||||
# Composed variables
|
||||
self._set_composite_vars(self.get_option('compose'),
|
||||
self.node_attrs,
|
||||
self.node_attrs['ID'],
|
||||
strict=strict)
|
||||
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
|
||||
self._add_host_to_composed_groups(self.get_option('groups'),
|
||||
self.node_attrs,
|
||||
self.node_attrs['ID'],
|
||||
strict=strict)
|
||||
# Create groups based on variable values and add the corresponding hosts to it
|
||||
self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
|
||||
self.node_attrs,
|
||||
self.node_attrs['ID'],
|
||||
strict=strict)
|
||||
except Exception as e:
|
||||
raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
|
||||
to_native(e))
|
||||
|
||||
def verify_file(self, path):
|
||||
"""Return the possibly of a file being consumable by this plugin."""
|
||||
return (
|
||||
super(InventoryModule, self).verify_file(path) and
|
||||
path.endswith((self.NAME + '.yaml', self.NAME + '.yml')))
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
if not HAS_DOCKER:
|
||||
raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
|
||||
'https://github.com/docker/docker-py.')
|
||||
super(InventoryModule, self).parse(inventory, loader, path, cache)
|
||||
self._read_config_data(path)
|
||||
self._populate()
|
File diff suppressed because it is too large
Load Diff
@ -1,280 +0,0 @@
|
||||
# (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl>
|
||||
# (c) Thierry Bouvet (@tbouvet)
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
import json
|
||||
from time import sleep
|
||||
|
||||
try:
|
||||
from docker.errors import APIError, NotFound
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.docker.common import (
|
||||
AnsibleDockerClient,
|
||||
LooseVersion,
|
||||
)
|
||||
|
||||
|
||||
class AnsibleDockerSwarmClient(AnsibleDockerClient):
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(AnsibleDockerSwarmClient, self).__init__(**kwargs)
|
||||
|
||||
def get_swarm_node_id(self):
|
||||
"""
|
||||
Get the 'NodeID' of the Swarm node or 'None' if host is not in Swarm. It returns the NodeID
|
||||
of Docker host the module is executed on
|
||||
:return:
|
||||
NodeID of host or 'None' if not part of Swarm
|
||||
"""
|
||||
|
||||
try:
|
||||
info = self.info()
|
||||
except APIError as exc:
|
||||
self.fail("Failed to get node information for %s" % to_native(exc))
|
||||
|
||||
if info:
|
||||
json_str = json.dumps(info, ensure_ascii=False)
|
||||
swarm_info = json.loads(json_str)
|
||||
if swarm_info['Swarm']['NodeID']:
|
||||
return swarm_info['Swarm']['NodeID']
|
||||
return None
|
||||
|
||||
def check_if_swarm_node(self, node_id=None):
|
||||
"""
|
||||
Checking if host is part of Docker Swarm. If 'node_id' is not provided it reads the Docker host
|
||||
system information looking if specific key in output exists. If 'node_id' is provided then it tries to
|
||||
read node information assuming it is run on Swarm manager. The get_node_inspect() method handles exception if
|
||||
it is not executed on Swarm manager
|
||||
|
||||
:param node_id: Node identifier
|
||||
:return:
|
||||
bool: True if node is part of Swarm, False otherwise
|
||||
"""
|
||||
|
||||
if node_id is None:
|
||||
try:
|
||||
info = self.info()
|
||||
except APIError:
|
||||
self.fail("Failed to get host information.")
|
||||
|
||||
if info:
|
||||
json_str = json.dumps(info, ensure_ascii=False)
|
||||
swarm_info = json.loads(json_str)
|
||||
if swarm_info['Swarm']['NodeID']:
|
||||
return True
|
||||
if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
|
||||
return True
|
||||
return False
|
||||
else:
|
||||
try:
|
||||
node_info = self.get_node_inspect(node_id=node_id)
|
||||
except APIError:
|
||||
return
|
||||
|
||||
if node_info['ID'] is not None:
|
||||
return True
|
||||
return False
|
||||
|
||||
def check_if_swarm_manager(self):
|
||||
"""
|
||||
Checks if node role is set as Manager in Swarm. The node is the docker host on which module action
|
||||
is performed. The inspect_swarm() will fail if node is not a manager
|
||||
|
||||
:return: True if node is Swarm Manager, False otherwise
|
||||
"""
|
||||
|
||||
try:
|
||||
self.inspect_swarm()
|
||||
return True
|
||||
except APIError:
|
||||
return False
|
||||
|
||||
def fail_task_if_not_swarm_manager(self):
|
||||
"""
|
||||
If host is not a swarm manager then Ansible task on this host should end with 'failed' state
|
||||
"""
|
||||
if not self.check_if_swarm_manager():
|
||||
self.fail("Error running docker swarm module: must run on swarm manager node")
|
||||
|
||||
def check_if_swarm_worker(self):
|
||||
"""
|
||||
Checks if node role is set as Worker in Swarm. The node is the docker host on which module action
|
||||
is performed. Will fail if run on host that is not part of Swarm via check_if_swarm_node()
|
||||
|
||||
:return: True if node is Swarm Worker, False otherwise
|
||||
"""
|
||||
|
||||
if self.check_if_swarm_node() and not self.check_if_swarm_manager():
|
||||
return True
|
||||
return False
|
||||
|
||||
def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
|
||||
"""
|
||||
Checks if node status on Swarm manager is 'down'. If node_id is provided it query manager about
|
||||
node specified in parameter, otherwise it query manager itself. If run on Swarm Worker node or
|
||||
host that is not part of Swarm it will fail the playbook
|
||||
|
||||
:param repeat_check: number of check attempts with 5 seconds delay between them, by default check only once
|
||||
:param node_id: node ID or name, if None then method will try to get node_id of host module run on
|
||||
:return:
|
||||
True if node is part of swarm but its state is down, False otherwise
|
||||
"""
|
||||
|
||||
if repeat_check < 1:
|
||||
repeat_check = 1
|
||||
|
||||
if node_id is None:
|
||||
node_id = self.get_swarm_node_id()
|
||||
|
||||
for retry in range(0, repeat_check):
|
||||
if retry > 0:
|
||||
sleep(5)
|
||||
node_info = self.get_node_inspect(node_id=node_id)
|
||||
if node_info['Status']['State'] == 'down':
|
||||
return True
|
||||
return False
|
||||
|
||||
def get_node_inspect(self, node_id=None, skip_missing=False):
|
||||
"""
|
||||
Returns Swarm node info as in 'docker node inspect' command about single node
|
||||
|
||||
:param skip_missing: if True then function will return None instead of failing the task
|
||||
:param node_id: node ID or name, if None then method will try to get node_id of host module run on
|
||||
:return:
|
||||
Single node information structure
|
||||
"""
|
||||
|
||||
if node_id is None:
|
||||
node_id = self.get_swarm_node_id()
|
||||
|
||||
if node_id is None:
|
||||
self.fail("Failed to get node information.")
|
||||
|
||||
try:
|
||||
node_info = self.inspect_node(node_id=node_id)
|
||||
except APIError as exc:
|
||||
if exc.status_code == 503:
|
||||
self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
|
||||
if exc.status_code == 404:
|
||||
if skip_missing:
|
||||
return None
|
||||
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
|
||||
except Exception as exc:
|
||||
self.fail("Error inspecting swarm node: %s" % exc)
|
||||
|
||||
json_str = json.dumps(node_info, ensure_ascii=False)
|
||||
node_info = json.loads(json_str)
|
||||
|
||||
if 'ManagerStatus' in node_info:
|
||||
if node_info['ManagerStatus'].get('Leader'):
|
||||
# This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
|
||||
# Check moby/moby#35437 for details
|
||||
count_colons = node_info['ManagerStatus']['Addr'].count(":")
|
||||
if count_colons == 1:
|
||||
swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
|
||||
else:
|
||||
swarm_leader_ip = node_info['Status']['Addr']
|
||||
node_info['Status']['Addr'] = swarm_leader_ip
|
||||
return node_info
|
||||
|
||||
def get_all_nodes_inspect(self):
|
||||
"""
|
||||
Returns Swarm node info as in 'docker node inspect' command about all registered nodes
|
||||
|
||||
:return:
|
||||
Structure with information about all nodes
|
||||
"""
|
||||
try:
|
||||
node_info = self.nodes()
|
||||
except APIError as exc:
|
||||
if exc.status_code == 503:
|
||||
self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
|
||||
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
|
||||
except Exception as exc:
|
||||
self.fail("Error inspecting swarm node: %s" % exc)
|
||||
|
||||
json_str = json.dumps(node_info, ensure_ascii=False)
|
||||
node_info = json.loads(json_str)
|
||||
return node_info
|
||||
|
||||
def get_all_nodes_list(self, output='short'):
|
||||
"""
|
||||
Returns list of nodes registered in Swarm
|
||||
|
||||
:param output: Defines format of returned data
|
||||
:return:
|
||||
If 'output' is 'short' then return data is list of nodes hostnames registered in Swarm,
|
||||
if 'output' is 'long' then returns data is list of dict containing the attributes as in
|
||||
output of command 'docker node ls'
|
||||
"""
|
||||
nodes_list = []
|
||||
|
||||
nodes_inspect = self.get_all_nodes_inspect()
|
||||
if nodes_inspect is None:
|
||||
return None
|
||||
|
||||
if output == 'short':
|
||||
for node in nodes_inspect:
|
||||
nodes_list.append(node['Description']['Hostname'])
|
||||
elif output == 'long':
|
||||
for node in nodes_inspect:
|
||||
node_property = {}
|
||||
|
||||
node_property.update({'ID': node['ID']})
|
||||
node_property.update({'Hostname': node['Description']['Hostname']})
|
||||
node_property.update({'Status': node['Status']['State']})
|
||||
node_property.update({'Availability': node['Spec']['Availability']})
|
||||
if 'ManagerStatus' in node:
|
||||
if node['ManagerStatus']['Leader'] is True:
|
||||
node_property.update({'Leader': True})
|
||||
node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
|
||||
node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})
|
||||
|
||||
nodes_list.append(node_property)
|
||||
else:
|
||||
return None
|
||||
|
||||
return nodes_list
|
||||
|
||||
def get_node_name_by_id(self, nodeid):
|
||||
return self.get_node_inspect(nodeid)['Description']['Hostname']
|
||||
|
||||
def get_unlock_key(self):
|
||||
if self.docker_py_version < LooseVersion('2.7.0'):
|
||||
return None
|
||||
return super(AnsibleDockerSwarmClient, self).get_unlock_key()
|
||||
|
||||
def get_service_inspect(self, service_id, skip_missing=False):
|
||||
"""
|
||||
Returns Swarm service info as in 'docker service inspect' command about single service
|
||||
|
||||
:param service_id: service ID or name
|
||||
:param skip_missing: if True then function will return None instead of failing the task
|
||||
:return:
|
||||
Single service information structure
|
||||
"""
|
||||
try:
|
||||
service_info = self.inspect_service(service_id)
|
||||
except NotFound as exc:
|
||||
if skip_missing is False:
|
||||
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
|
||||
else:
|
||||
return None
|
||||
except APIError as exc:
|
||||
if exc.status_code == 503:
|
||||
self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager")
|
||||
self.fail("Error inspecting swarm service: %s" % exc)
|
||||
except Exception as exc:
|
||||
self.fail("Error inspecting swarm service: %s" % exc)
|
||||
|
||||
json_str = json.dumps(service_info, ensure_ascii=False)
|
||||
service_info = json.loads(json_str)
|
||||
return service_info
|
@ -1,676 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: docker_swarm
|
||||
short_description: Manage Swarm cluster
|
||||
version_added: "2.7"
|
||||
description:
|
||||
- Create a new Swarm cluster.
|
||||
- Add/Remove nodes or managers to an existing cluster.
|
||||
options:
|
||||
advertise_addr:
|
||||
description:
|
||||
- Externally reachable address advertised to other nodes.
|
||||
- This can either be an address/port combination
|
||||
in the form C(192.168.1.1:4567), or an interface followed by a
|
||||
port number, like C(eth0:4567).
|
||||
- If the port number is omitted,
|
||||
the port number from the listen address is used.
|
||||
- If I(advertise_addr) is not specified, it will be automatically
|
||||
detected when possible.
|
||||
- Only used when swarm is initialised or joined. Because of this it's not
|
||||
considered for idempotency checking.
|
||||
type: str
|
||||
default_addr_pool:
|
||||
description:
|
||||
- Default address pool in CIDR format.
|
||||
- Only used when swarm is initialised. Because of this it's not considered
|
||||
for idempotency checking.
|
||||
- Requires API version >= 1.39.
|
||||
type: list
|
||||
elements: str
|
||||
version_added: "2.8"
|
||||
subnet_size:
|
||||
description:
|
||||
- Default address pool subnet mask length.
|
||||
- Only used when swarm is initialised. Because of this it's not considered
|
||||
for idempotency checking.
|
||||
- Requires API version >= 1.39.
|
||||
type: int
|
||||
version_added: "2.8"
|
||||
listen_addr:
|
||||
description:
|
||||
- Listen address used for inter-manager communication.
|
||||
- This can either be an address/port combination in the form
|
||||
C(192.168.1.1:4567), or an interface followed by a port number,
|
||||
like C(eth0:4567).
|
||||
- If the port number is omitted, the default swarm listening port
|
||||
is used.
|
||||
- Only used when swarm is initialised or joined. Because of this it's not
|
||||
considered for idempotency checking.
|
||||
type: str
|
||||
default: 0.0.0.0:2377
|
||||
force:
|
||||
description:
|
||||
- Use with state C(present) to force creating a new Swarm, even if already part of one.
|
||||
- Use with state C(absent) to Leave the swarm even if this node is a manager.
|
||||
type: bool
|
||||
default: no
|
||||
state:
|
||||
description:
|
||||
- Set to C(present), to create/update a new cluster.
|
||||
- Set to C(join), to join an existing cluster.
|
||||
- Set to C(absent), to leave an existing cluster.
|
||||
- Set to C(remove), to remove an absent node from the cluster.
|
||||
Note that removing requires Docker SDK for Python >= 2.4.0.
|
||||
- Set to C(inspect) to display swarm informations.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- present
|
||||
- join
|
||||
- absent
|
||||
- remove
|
||||
- inspect
|
||||
node_id:
|
||||
description:
|
||||
- Swarm id of the node to remove.
|
||||
- Used with I(state=remove).
|
||||
type: str
|
||||
join_token:
|
||||
description:
|
||||
- Swarm token used to join a swarm cluster.
|
||||
- Used with I(state=join).
|
||||
type: str
|
||||
remote_addrs:
|
||||
description:
|
||||
- Remote address of one or more manager nodes of an existing Swarm to connect to.
|
||||
- Used with I(state=join).
|
||||
type: list
|
||||
elements: str
|
||||
task_history_retention_limit:
|
||||
description:
|
||||
- Maximum number of tasks history stored.
|
||||
- Docker default value is C(5).
|
||||
type: int
|
||||
snapshot_interval:
|
||||
description:
|
||||
- Number of logs entries between snapshot.
|
||||
- Docker default value is C(10000).
|
||||
type: int
|
||||
keep_old_snapshots:
|
||||
description:
|
||||
- Number of snapshots to keep beyond the current snapshot.
|
||||
- Docker default value is C(0).
|
||||
type: int
|
||||
log_entries_for_slow_followers:
|
||||
description:
|
||||
- Number of log entries to keep around to sync up slow followers after a snapshot is created.
|
||||
type: int
|
||||
heartbeat_tick:
|
||||
description:
|
||||
- Amount of ticks (in seconds) between each heartbeat.
|
||||
- Docker default value is C(1s).
|
||||
type: int
|
||||
election_tick:
|
||||
description:
|
||||
- Amount of ticks (in seconds) needed without a leader to trigger a new election.
|
||||
- Docker default value is C(10s).
|
||||
type: int
|
||||
dispatcher_heartbeat_period:
|
||||
description:
|
||||
- The delay for an agent to send a heartbeat to the dispatcher.
|
||||
- Docker default value is C(5s).
|
||||
type: int
|
||||
node_cert_expiry:
|
||||
description:
|
||||
- Automatic expiry for nodes certificates.
|
||||
- Docker default value is C(3months).
|
||||
type: int
|
||||
name:
|
||||
description:
|
||||
- The name of the swarm.
|
||||
type: str
|
||||
labels:
|
||||
description:
|
||||
- User-defined key/value metadata.
|
||||
- Label operations in this module apply to the docker swarm cluster.
|
||||
Use M(docker_node) module to add/modify/remove swarm node labels.
|
||||
- Requires API version >= 1.32.
|
||||
type: dict
|
||||
signing_ca_cert:
|
||||
description:
|
||||
- The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.
|
||||
- This must not be a path to a certificate, but the contents of the certificate.
|
||||
- Requires API version >= 1.30.
|
||||
type: str
|
||||
signing_ca_key:
|
||||
description:
|
||||
- The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.
|
||||
- This must not be a path to a key, but the contents of the key.
|
||||
- Requires API version >= 1.30.
|
||||
type: str
|
||||
ca_force_rotate:
|
||||
description:
|
||||
- An integer whose purpose is to force swarm to generate a new signing CA certificate and key,
|
||||
if none have been specified.
|
||||
- Docker default value is C(0).
|
||||
- Requires API version >= 1.30.
|
||||
type: int
|
||||
autolock_managers:
|
||||
description:
|
||||
- If set, generate a key and use it to lock data stored on the managers.
|
||||
- Docker default value is C(no).
|
||||
- M(docker_swarm_info) can be used to retrieve the unlock key.
|
||||
type: bool
|
||||
rotate_worker_token:
|
||||
description: Rotate the worker join token.
|
||||
type: bool
|
||||
default: no
|
||||
rotate_manager_token:
|
||||
description: Rotate the manager join token.
|
||||
type: bool
|
||||
default: no
|
||||
extends_documentation_fragment:
|
||||
- docker
|
||||
- docker.docker_py_1_documentation
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
|
||||
- Docker API >= 1.25
|
||||
author:
|
||||
- Thierry Bouvet (@tbouvet)
|
||||
- Piotr Wojciechowski (@WojciechowskiPiotr)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
||||
- name: Init a new swarm with default parameters
|
||||
docker_swarm:
|
||||
state: present
|
||||
|
||||
- name: Update swarm configuration
|
||||
docker_swarm:
|
||||
state: present
|
||||
election_tick: 5
|
||||
|
||||
- name: Add nodes
|
||||
docker_swarm:
|
||||
state: join
|
||||
advertise_addr: 192.168.1.2
|
||||
join_token: SWMTKN-1--xxxxx
|
||||
remote_addrs: [ '192.168.1.1:2377' ]
|
||||
|
||||
- name: Leave swarm for a node
|
||||
docker_swarm:
|
||||
state: absent
|
||||
|
||||
- name: Remove a swarm manager
|
||||
docker_swarm:
|
||||
state: absent
|
||||
force: true
|
||||
|
||||
- name: Remove node from swarm
|
||||
docker_swarm:
|
||||
state: remove
|
||||
node_id: mynode
|
||||
|
||||
- name: Inspect swarm
|
||||
docker_swarm:
|
||||
state: inspect
|
||||
register: swarm_info
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
swarm_facts:
|
||||
description: Informations about swarm.
|
||||
returned: success
|
||||
type: dict
|
||||
contains:
|
||||
JoinTokens:
|
||||
description: Tokens to connect to the Swarm.
|
||||
returned: success
|
||||
type: dict
|
||||
contains:
|
||||
Worker:
|
||||
description: Token to create a new *worker* node
|
||||
returned: success
|
||||
type: str
|
||||
example: SWMTKN-1--xxxxx
|
||||
Manager:
|
||||
description: Token to create a new *manager* node
|
||||
returned: success
|
||||
type: str
|
||||
example: SWMTKN-1--xxxxx
|
||||
UnlockKey:
|
||||
description: The swarm unlock-key if I(autolock_managers) is C(true).
|
||||
returned: on success if I(autolock_managers) is C(true)
|
||||
and swarm is initialised, or if I(autolock_managers) has changed.
|
||||
type: str
|
||||
example: SWMKEY-1-xxx
|
||||
|
||||
actions:
|
||||
description: Provides the actions done on the swarm.
|
||||
returned: when action failed.
|
||||
type: list
|
||||
elements: str
|
||||
example: "['This cluster is already a swarm cluster']"
|
||||
|
||||
'''
|
||||
|
||||
import json
|
||||
import traceback
|
||||
|
||||
try:
|
||||
from docker.errors import DockerException, APIError
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible.module_utils.docker.common import (
|
||||
DockerBaseClass,
|
||||
DifferenceTracker,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
from ansible.module_utils.docker.swarm import AnsibleDockerSwarmClient
|
||||
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class TaskParameters(DockerBaseClass):
|
||||
def __init__(self):
|
||||
super(TaskParameters, self).__init__()
|
||||
|
||||
self.advertise_addr = None
|
||||
self.listen_addr = None
|
||||
self.remote_addrs = None
|
||||
self.join_token = None
|
||||
|
||||
# Spec
|
||||
self.snapshot_interval = None
|
||||
self.task_history_retention_limit = None
|
||||
self.keep_old_snapshots = None
|
||||
self.log_entries_for_slow_followers = None
|
||||
self.heartbeat_tick = None
|
||||
self.election_tick = None
|
||||
self.dispatcher_heartbeat_period = None
|
||||
self.node_cert_expiry = None
|
||||
self.name = None
|
||||
self.labels = None
|
||||
self.log_driver = None
|
||||
self.signing_ca_cert = None
|
||||
self.signing_ca_key = None
|
||||
self.ca_force_rotate = None
|
||||
self.autolock_managers = None
|
||||
self.rotate_worker_token = None
|
||||
self.rotate_manager_token = None
|
||||
self.default_addr_pool = None
|
||||
self.subnet_size = None
|
||||
|
||||
@staticmethod
|
||||
def from_ansible_params(client):
|
||||
result = TaskParameters()
|
||||
for key, value in client.module.params.items():
|
||||
if key in result.__dict__:
|
||||
setattr(result, key, value)
|
||||
|
||||
result.update_parameters(client)
|
||||
return result
|
||||
|
||||
def update_from_swarm_info(self, swarm_info):
|
||||
spec = swarm_info['Spec']
|
||||
|
||||
ca_config = spec.get('CAConfig') or dict()
|
||||
if self.node_cert_expiry is None:
|
||||
self.node_cert_expiry = ca_config.get('NodeCertExpiry')
|
||||
if self.ca_force_rotate is None:
|
||||
self.ca_force_rotate = ca_config.get('ForceRotate')
|
||||
|
||||
dispatcher = spec.get('Dispatcher') or dict()
|
||||
if self.dispatcher_heartbeat_period is None:
|
||||
self.dispatcher_heartbeat_period = dispatcher.get('HeartbeatPeriod')
|
||||
|
||||
raft = spec.get('Raft') or dict()
|
||||
if self.snapshot_interval is None:
|
||||
self.snapshot_interval = raft.get('SnapshotInterval')
|
||||
if self.keep_old_snapshots is None:
|
||||
self.keep_old_snapshots = raft.get('KeepOldSnapshots')
|
||||
if self.heartbeat_tick is None:
|
||||
self.heartbeat_tick = raft.get('HeartbeatTick')
|
||||
if self.log_entries_for_slow_followers is None:
|
||||
self.log_entries_for_slow_followers = raft.get('LogEntriesForSlowFollowers')
|
||||
if self.election_tick is None:
|
||||
self.election_tick = raft.get('ElectionTick')
|
||||
|
||||
orchestration = spec.get('Orchestration') or dict()
|
||||
if self.task_history_retention_limit is None:
|
||||
self.task_history_retention_limit = orchestration.get('TaskHistoryRetentionLimit')
|
||||
|
||||
encryption_config = spec.get('EncryptionConfig') or dict()
|
||||
if self.autolock_managers is None:
|
||||
self.autolock_managers = encryption_config.get('AutoLockManagers')
|
||||
|
||||
if self.name is None:
|
||||
self.name = spec['Name']
|
||||
|
||||
if self.labels is None:
|
||||
self.labels = spec.get('Labels') or {}
|
||||
|
||||
if 'LogDriver' in spec['TaskDefaults']:
|
||||
self.log_driver = spec['TaskDefaults']['LogDriver']
|
||||
|
||||
def update_parameters(self, client):
|
||||
assign = dict(
|
||||
snapshot_interval='snapshot_interval',
|
||||
task_history_retention_limit='task_history_retention_limit',
|
||||
keep_old_snapshots='keep_old_snapshots',
|
||||
log_entries_for_slow_followers='log_entries_for_slow_followers',
|
||||
heartbeat_tick='heartbeat_tick',
|
||||
election_tick='election_tick',
|
||||
dispatcher_heartbeat_period='dispatcher_heartbeat_period',
|
||||
node_cert_expiry='node_cert_expiry',
|
||||
name='name',
|
||||
labels='labels',
|
||||
signing_ca_cert='signing_ca_cert',
|
||||
signing_ca_key='signing_ca_key',
|
||||
ca_force_rotate='ca_force_rotate',
|
||||
autolock_managers='autolock_managers',
|
||||
log_driver='log_driver',
|
||||
)
|
||||
params = dict()
|
||||
for dest, source in assign.items():
|
||||
if not client.option_minimal_versions[source]['supported']:
|
||||
continue
|
||||
value = getattr(self, source)
|
||||
if value is not None:
|
||||
params[dest] = value
|
||||
self.spec = client.create_swarm_spec(**params)
|
||||
|
||||
def compare_to_active(self, other, client, differences):
|
||||
for k in self.__dict__:
|
||||
if k in ('advertise_addr', 'listen_addr', 'remote_addrs', 'join_token',
|
||||
'rotate_worker_token', 'rotate_manager_token', 'spec',
|
||||
'default_addr_pool', 'subnet_size'):
|
||||
continue
|
||||
if not client.option_minimal_versions[k]['supported']:
|
||||
continue
|
||||
value = getattr(self, k)
|
||||
if value is None:
|
||||
continue
|
||||
other_value = getattr(other, k)
|
||||
if value != other_value:
|
||||
differences.add(k, parameter=value, active=other_value)
|
||||
if self.rotate_worker_token:
|
||||
differences.add('rotate_worker_token', parameter=True, active=False)
|
||||
if self.rotate_manager_token:
|
||||
differences.add('rotate_manager_token', parameter=True, active=False)
|
||||
return differences
|
||||
|
||||
|
||||
class SwarmManager(DockerBaseClass):
|
||||
|
||||
def __init__(self, client, results):
|
||||
|
||||
super(SwarmManager, self).__init__()
|
||||
|
||||
self.client = client
|
||||
self.results = results
|
||||
self.check_mode = self.client.check_mode
|
||||
self.swarm_info = {}
|
||||
|
||||
self.state = client.module.params['state']
|
||||
self.force = client.module.params['force']
|
||||
self.node_id = client.module.params['node_id']
|
||||
|
||||
self.differences = DifferenceTracker()
|
||||
self.parameters = TaskParameters.from_ansible_params(client)
|
||||
|
||||
self.created = False
|
||||
|
||||
def __call__(self):
|
||||
choice_map = {
|
||||
"present": self.init_swarm,
|
||||
"join": self.join,
|
||||
"absent": self.leave,
|
||||
"remove": self.remove,
|
||||
"inspect": self.inspect_swarm
|
||||
}
|
||||
|
||||
choice_map.get(self.state)()
|
||||
|
||||
if self.client.module._diff or self.parameters.debug:
|
||||
diff = dict()
|
||||
diff['before'], diff['after'] = self.differences.get_before_after()
|
||||
self.results['diff'] = diff
|
||||
|
||||
def inspect_swarm(self):
|
||||
try:
|
||||
data = self.client.inspect_swarm()
|
||||
json_str = json.dumps(data, ensure_ascii=False)
|
||||
self.swarm_info = json.loads(json_str)
|
||||
|
||||
self.results['changed'] = False
|
||||
self.results['swarm_facts'] = self.swarm_info
|
||||
|
||||
unlock_key = self.get_unlock_key()
|
||||
self.swarm_info.update(unlock_key)
|
||||
except APIError:
|
||||
return
|
||||
|
||||
def get_unlock_key(self):
|
||||
default = {'UnlockKey': None}
|
||||
if not self.has_swarm_lock_changed():
|
||||
return default
|
||||
try:
|
||||
return self.client.get_unlock_key() or default
|
||||
except APIError:
|
||||
return default
|
||||
|
||||
def has_swarm_lock_changed(self):
|
||||
return self.parameters.autolock_managers and (
|
||||
self.created or self.differences.has_difference_for('autolock_managers')
|
||||
)
|
||||
|
||||
def init_swarm(self):
|
||||
if not self.force and self.client.check_if_swarm_manager():
|
||||
self.__update_swarm()
|
||||
return
|
||||
|
||||
if not self.check_mode:
|
||||
init_arguments = {
|
||||
'advertise_addr': self.parameters.advertise_addr,
|
||||
'listen_addr': self.parameters.listen_addr,
|
||||
'force_new_cluster': self.force,
|
||||
'swarm_spec': self.parameters.spec,
|
||||
}
|
||||
if self.parameters.default_addr_pool is not None:
|
||||
init_arguments['default_addr_pool'] = self.parameters.default_addr_pool
|
||||
if self.parameters.subnet_size is not None:
|
||||
init_arguments['subnet_size'] = self.parameters.subnet_size
|
||||
try:
|
||||
self.client.init_swarm(**init_arguments)
|
||||
except APIError as exc:
|
||||
self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc))
|
||||
|
||||
if not self.client.check_if_swarm_manager():
|
||||
if not self.check_mode:
|
||||
self.client.fail("Swarm not created or other error!")
|
||||
|
||||
self.created = True
|
||||
self.inspect_swarm()
|
||||
self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID')))
|
||||
self.differences.add('state', parameter='present', active='absent')
|
||||
self.results['changed'] = True
|
||||
self.results['swarm_facts'] = {
|
||||
'JoinTokens': self.swarm_info.get('JoinTokens'),
|
||||
'UnlockKey': self.swarm_info.get('UnlockKey')
|
||||
}
|
||||
|
||||
def __update_swarm(self):
|
||||
try:
|
||||
self.inspect_swarm()
|
||||
version = self.swarm_info['Version']['Index']
|
||||
self.parameters.update_from_swarm_info(self.swarm_info)
|
||||
old_parameters = TaskParameters()
|
||||
old_parameters.update_from_swarm_info(self.swarm_info)
|
||||
self.parameters.compare_to_active(old_parameters, self.client, self.differences)
|
||||
if self.differences.empty:
|
||||
self.results['actions'].append("No modification")
|
||||
self.results['changed'] = False
|
||||
return
|
||||
update_parameters = TaskParameters.from_ansible_params(self.client)
|
||||
update_parameters.update_parameters(self.client)
|
||||
if not self.check_mode:
|
||||
self.client.update_swarm(
|
||||
version=version, swarm_spec=update_parameters.spec,
|
||||
rotate_worker_token=self.parameters.rotate_worker_token,
|
||||
rotate_manager_token=self.parameters.rotate_manager_token)
|
||||
except APIError as exc:
|
||||
self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc))
|
||||
return
|
||||
|
||||
self.inspect_swarm()
|
||||
self.results['actions'].append("Swarm cluster updated")
|
||||
self.results['changed'] = True
|
||||
|
||||
def join(self):
|
||||
if self.client.check_if_swarm_node():
|
||||
self.results['actions'].append("This node is already part of a swarm.")
|
||||
return
|
||||
if not self.check_mode:
|
||||
try:
|
||||
self.client.join_swarm(
|
||||
remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token,
|
||||
listen_addr=self.parameters.listen_addr, advertise_addr=self.parameters.advertise_addr)
|
||||
except APIError as exc:
|
||||
self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc))
|
||||
self.results['actions'].append("New node is added to swarm cluster")
|
||||
self.differences.add('joined', parameter=True, active=False)
|
||||
self.results['changed'] = True
|
||||
|
||||
def leave(self):
|
||||
if not self.client.check_if_swarm_node():
|
||||
self.results['actions'].append("This node is not part of a swarm.")
|
||||
return
|
||||
if not self.check_mode:
|
||||
try:
|
||||
self.client.leave_swarm(force=self.force)
|
||||
except APIError as exc:
|
||||
self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc))
|
||||
self.results['actions'].append("Node has left the swarm cluster")
|
||||
self.differences.add('joined', parameter='absent', active='present')
|
||||
self.results['changed'] = True
|
||||
|
||||
def remove(self):
|
||||
if not self.client.check_if_swarm_manager():
|
||||
self.client.fail("This node is not a manager.")
|
||||
|
||||
try:
|
||||
status_down = self.client.check_if_swarm_node_is_down(node_id=self.node_id, repeat_check=5)
|
||||
except APIError:
|
||||
return
|
||||
|
||||
if not status_down:
|
||||
self.client.fail("Can not remove the node. The status node is ready and not down.")
|
||||
|
||||
if not self.check_mode:
|
||||
try:
|
||||
self.client.remove_node(node_id=self.node_id, force=self.force)
|
||||
except APIError as exc:
|
||||
self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc))
|
||||
self.results['actions'].append("Node is removed from swarm cluster.")
|
||||
self.differences.add('joined', parameter=False, active=True)
|
||||
self.results['changed'] = True
|
||||
|
||||
|
||||
def _detect_remove_operation(client):
|
||||
return client.module.params['state'] == 'remove'
|
||||
|
||||
|
||||
def main():
    """Module entry point.

    Declares the module interface (argument spec, conditional requirements,
    per-option minimal docker-py/Docker API versions), builds the swarm
    client and delegates all work to SwarmManager. Exits via exit_json on
    success; unexpected docker / requests errors fail the module with a
    traceback attached.
    """
    # Accepted parameters; states: present, join, absent, remove, inspect.
    argument_spec = dict(
        advertise_addr=dict(type='str'),
        state=dict(type='str', default='present', choices=['present', 'join', 'absent', 'remove', 'inspect']),
        force=dict(type='bool', default=False),
        listen_addr=dict(type='str', default='0.0.0.0:2377'),
        remote_addrs=dict(type='list', elements='str'),
        join_token=dict(type='str'),
        snapshot_interval=dict(type='int'),
        task_history_retention_limit=dict(type='int'),
        keep_old_snapshots=dict(type='int'),
        log_entries_for_slow_followers=dict(type='int'),
        heartbeat_tick=dict(type='int'),
        election_tick=dict(type='int'),
        dispatcher_heartbeat_period=dict(type='int'),
        node_cert_expiry=dict(type='int'),
        name=dict(type='str'),
        labels=dict(type='dict'),
        signing_ca_cert=dict(type='str'),
        signing_ca_key=dict(type='str'),
        ca_force_rotate=dict(type='int'),
        autolock_managers=dict(type='bool'),
        node_id=dict(type='str'),
        rotate_worker_token=dict(type='bool', default=False),
        rotate_manager_token=dict(type='bool', default=False),
        default_addr_pool=dict(type='list', elements='str'),
        subnet_size=dict(type='int'),
    )

    # Extra parameters that become mandatory for specific states.
    required_if = [
        ('state', 'join', ['advertise_addr', 'remote_addrs', 'join_token']),
        ('state', 'remove', ['node_id'])
    ]

    # Minimal docker-py / Docker API versions, enforced per option
    # (remove_operation is keyed off actual usage via detect_usage).
    option_minimal_versions = dict(
        labels=dict(docker_py_version='2.6.0', docker_api_version='1.32'),
        signing_ca_cert=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
        signing_ca_key=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
        ca_force_rotate=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
        autolock_managers=dict(docker_py_version='2.6.0'),
        log_driver=dict(docker_py_version='2.6.0'),
        remove_operation=dict(
            docker_py_version='2.4.0',
            detect_usage=_detect_remove_operation,
            usage_msg='remove swarm nodes'
        ),
        default_addr_pool=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
        subnet_size=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
    )

    client = AnsibleDockerSwarmClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=required_if,
        min_docker_version='1.10.0',
        min_docker_api_version='1.25',
        option_minimal_versions=option_minimal_versions,
    )

    try:
        results = dict(
            changed=False,
            result='',
            actions=[]
        )

        # SwarmManager mutates `results` in place while performing the work.
        SwarmManager(client, results)()
        client.module.exit_json(**results)
    except DockerException as exc:
        client.fail('An unexpected docker error occurred: {0}'.format(exc), exception=traceback.format_exc())
    except RequestException as exc:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(exc), exception=traceback.format_exc())
|
||||
|
||||
|
||||
# Standard module entry point: run main() only when executed directly.
if __name__ == '__main__':
    main()
|
@ -1 +0,0 @@
|
||||
remote.sh
|
@ -1,14 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -o pipefail -eux
|
||||
|
||||
declare -a args
|
||||
IFS='/:' read -ra args <<< "$1"
|
||||
|
||||
image="${args[1]}"
|
||||
|
||||
target="shippable/posix/incidental/"
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
|
||||
--docker "${image}" \
|
@ -1 +0,0 @@
|
||||
remote.sh
|
@ -1,27 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -o pipefail -eux
|
||||
|
||||
declare -a args
|
||||
IFS='/:' read -ra args <<< "$1"
|
||||
|
||||
platform="${args[0]}"
|
||||
version="${args[1]}"
|
||||
pyver=default
|
||||
target="shippable/posix/incidental/"
|
||||
|
||||
# check for explicit python version like 8.3@3.8
|
||||
declare -a splitversion
|
||||
IFS='@' read -ra splitversion <<< "$version"
|
||||
|
||||
if [ "${#splitversion[@]}" -gt 1 ]; then
|
||||
version="${splitversion[0]}"
|
||||
pyver="${splitversion[1]}"
|
||||
fi
|
||||
|
||||
stage="${S:-prod}"
|
||||
provider="${P:-default}"
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
|
||||
--python "${pyver}" --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}" \
|
@ -1 +0,0 @@
|
||||
remote.sh
|
Loading…
Reference in New Issue