Migrated to community.kubernetes

pull/68117/head
Ansible Core Team 5 years ago
parent 54b2c8f113
commit 9d2d137038

@@ -1,290 +0,0 @@
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import copy
import json
import os
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.dict_transformations import recursive_diff
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils._text import to_native
K8S_IMP_ERR = None
try:
import kubernetes
import openshift
from openshift.dynamic import DynamicClient
from openshift.dynamic.exceptions import ResourceNotFoundError, ResourceNotUniqueError
HAS_K8S_MODULE_HELPER = True
k8s_import_exception = None
except ImportError as e:
HAS_K8S_MODULE_HELPER = False
k8s_import_exception = e
K8S_IMP_ERR = traceback.format_exc()
YAML_IMP_ERR = None
try:
import yaml
HAS_YAML = True
except ImportError:
YAML_IMP_ERR = traceback.format_exc()
HAS_YAML = False
try:
import urllib3
urllib3.disable_warnings()
except ImportError:
pass
def list_dict_str(value):
if isinstance(value, list):
return value
elif isinstance(value, dict):
return value
elif isinstance(value, string_types):
return value
raise TypeError
ARG_ATTRIBUTES_BLACKLIST = ('property_path',)
COMMON_ARG_SPEC = {
'state': {
'default': 'present',
'choices': ['present', 'absent'],
},
'force': {
'type': 'bool',
'default': False,
},
'resource_definition': {
'type': list_dict_str,
'aliases': ['definition', 'inline']
},
'src': {
'type': 'path',
},
'kind': {},
'name': {},
'namespace': {},
'api_version': {
'default': 'v1',
'aliases': ['api', 'version'],
},
}
AUTH_ARG_SPEC = {
'kubeconfig': {
'type': 'path',
},
'context': {},
'host': {},
'api_key': {
'no_log': True,
},
'username': {},
'password': {
'no_log': True,
},
'validate_certs': {
'type': 'bool',
'aliases': ['verify_ssl'],
},
'ca_cert': {
'type': 'path',
'aliases': ['ssl_ca_cert'],
},
'client_cert': {
'type': 'path',
'aliases': ['cert_file'],
},
'client_key': {
'type': 'path',
'aliases': ['key_file'],
},
'proxy': {},
'persist_config': {
'type': 'bool',
},
}
# Map kubernetes-client parameters to ansible parameters
AUTH_ARG_MAP = {
'kubeconfig': 'kubeconfig',
'context': 'context',
'host': 'host',
'api_key': 'api_key',
'username': 'username',
'password': 'password',
'verify_ssl': 'validate_certs',
'ssl_ca_cert': 'ca_cert',
'cert_file': 'client_cert',
'key_file': 'client_key',
'proxy': 'proxy',
'persist_config': 'persist_config',
}
class K8sAnsibleMixin(object):
_argspec_cache = None
@property
def argspec(self):
"""
Introspect the model properties, and return an Ansible module arg_spec dict.
:return: dict
"""
if self._argspec_cache:
return self._argspec_cache
argument_spec = copy.deepcopy(COMMON_ARG_SPEC)
argument_spec.update(copy.deepcopy(AUTH_ARG_SPEC))
self._argspec_cache = argument_spec
return self._argspec_cache
def get_api_client(self, **auth_params):
auth_params = auth_params or getattr(self, 'params', {})
auth = {}
# If authorization variables aren't defined, look for them in environment variables
for true_name, arg_name in AUTH_ARG_MAP.items():
if auth_params.get(arg_name) is None:
env_value = os.getenv('K8S_AUTH_{0}'.format(arg_name.upper()), None) or os.getenv('K8S_AUTH_{0}'.format(true_name.upper()), None)
if env_value is not None:
if AUTH_ARG_SPEC[arg_name].get('type') == 'bool':
env_value = env_value.lower() not in ['0', 'false', 'no']
auth[true_name] = env_value
else:
auth[true_name] = auth_params[arg_name]
def auth_set(*names):
return all([auth.get(name) for name in names])
if auth_set('username', 'password', 'host') or auth_set('api_key', 'host'):
# We have enough in the parameters to authenticate, no need to load incluster or kubeconfig
pass
elif auth_set('kubeconfig') or auth_set('context'):
kubernetes.config.load_kube_config(auth.get('kubeconfig'), auth.get('context'), persist_config=auth.get('persist_config'))
else:
# First try to do incluster config, then kubeconfig
try:
kubernetes.config.load_incluster_config()
except kubernetes.config.ConfigException:
kubernetes.config.load_kube_config(auth.get('kubeconfig'), auth.get('context'), persist_config=auth.get('persist_config'))
# Override any values in the default configuration with Ansible parameters
configuration = kubernetes.client.Configuration()
for key, value in iteritems(auth):
if key in AUTH_ARG_MAP.keys() and value is not None:
if key == 'api_key':
setattr(configuration, key, {'authorization': "Bearer {0}".format(value)})
else:
setattr(configuration, key, value)
kubernetes.client.Configuration.set_default(configuration)
return DynamicClient(kubernetes.client.ApiClient(configuration))
def find_resource(self, kind, api_version, fail=False):
for attribute in ['kind', 'name', 'singular_name']:
try:
return self.client.resources.get(**{'api_version': api_version, attribute: kind})
except (ResourceNotFoundError, ResourceNotUniqueError):
pass
try:
return self.client.resources.get(api_version=api_version, short_names=[kind])
except (ResourceNotFoundError, ResourceNotUniqueError):
if fail:
self.fail(msg='Failed to find exact match for {0}.{1} by [kind, name, singularName, shortNames]'.format(api_version, kind))
def kubernetes_facts(self, kind, api_version, name=None, namespace=None, label_selectors=None, field_selectors=None):
resource = self.find_resource(kind, api_version)
if not resource:
return dict(resources=[])
try:
result = resource.get(name=name,
namespace=namespace,
label_selector=','.join(label_selectors),
field_selector=','.join(field_selectors)).to_dict()
except openshift.dynamic.exceptions.NotFoundError:
return dict(resources=[])
if 'items' in result:
return dict(resources=result['items'])
else:
return dict(resources=[result])
def remove_aliases(self):
"""
The helper doesn't know what to do with aliased keys
"""
for k, v in iteritems(self.argspec):
if 'aliases' in v:
for alias in v['aliases']:
if alias in self.params:
self.params.pop(alias)
def load_resource_definitions(self, src):
""" Load the requested src path """
result = None
path = os.path.normpath(src)
if not os.path.exists(path):
self.fail(msg="Error accessing {0}. Does the file exist?".format(path))
try:
with open(path, 'r') as f:
result = list(yaml.safe_load_all(f))
except (IOError, yaml.YAMLError) as exc:
self.fail(msg="Error loading resource_definition: {0}".format(exc))
return result
@staticmethod
def diff_objects(existing, new):
result = dict()
diff = recursive_diff(existing, new)
if diff:
result['before'] = diff[0]
result['after'] = diff[1]
return not diff, result
class KubernetesAnsibleModule(AnsibleModule, K8sAnsibleMixin):
resource_definition = None
api_version = None
kind = None
def __init__(self, *args, **kwargs):
kwargs['argument_spec'] = self.argspec
AnsibleModule.__init__(self, *args, **kwargs)
if not HAS_K8S_MODULE_HELPER:
self.fail_json(msg=missing_required_lib('openshift'), exception=K8S_IMP_ERR,
error=to_native(k8s_import_exception))
self.openshift_version = openshift.__version__
if not HAS_YAML:
self.fail_json(msg=missing_required_lib("PyYAML"), exception=YAML_IMP_ERR)
def execute_module(self):
raise NotImplementedError()
def fail(self, msg=None):
self.fail_json(msg=msg)
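
The get_api_client() helper above falls back to a K8S_AUTH_<PARAMETER> environment variable for any authentication option that is not passed explicitly (boolean values are parsed from strings such as "false" or "no"). A minimal sketch of relying on that fallback from a task, assuming a token-based login; the host URL and the token variable are placeholders:

- name: List Pods, authenticating via K8S_AUTH_* environment variables
  k8s_info:
    kind: Pod
  environment:
    K8S_AUTH_HOST: https://k8s.example.com:6443
    K8S_AUTH_API_KEY: "{{ my_service_account_token }}"
    K8S_AUTH_VALIDATE_CERTS: "no"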

@@ -1,519 +0,0 @@
#
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import copy
from datetime import datetime
from distutils.version import LooseVersion
import time
import sys
import traceback
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.k8s.common import AUTH_ARG_SPEC, COMMON_ARG_SPEC
from ansible.module_utils.six import string_types
from ansible.module_utils.k8s.common import KubernetesAnsibleModule
from ansible.module_utils.common.dict_transformations import dict_merge
try:
import yaml
from openshift.dynamic.exceptions import DynamicApiError, NotFoundError, ConflictError, ForbiddenError, KubernetesValidateMissing
import urllib3
except ImportError:
# Exceptions handled in common
pass
try:
import kubernetes_validate
HAS_KUBERNETES_VALIDATE = True
except ImportError:
HAS_KUBERNETES_VALIDATE = False
K8S_CONFIG_HASH_IMP_ERR = None
try:
from openshift.helper.hashes import generate_hash
HAS_K8S_CONFIG_HASH = True
except ImportError:
K8S_CONFIG_HASH_IMP_ERR = traceback.format_exc()
HAS_K8S_CONFIG_HASH = False
HAS_K8S_APPLY = None
try:
from openshift.dynamic.apply import apply_object
HAS_K8S_APPLY = True
except ImportError:
HAS_K8S_APPLY = False
class KubernetesRawModule(KubernetesAnsibleModule):
@property
def validate_spec(self):
return dict(
fail_on_error=dict(type='bool'),
version=dict(),
strict=dict(type='bool', default=True)
)
@property
def condition_spec(self):
return dict(
type=dict(),
status=dict(default=True, choices=[True, False, "Unknown"]),
reason=dict()
)
@property
def argspec(self):
argument_spec = copy.deepcopy(COMMON_ARG_SPEC)
argument_spec.update(copy.deepcopy(AUTH_ARG_SPEC))
argument_spec['merge_type'] = dict(type='list', choices=['json', 'merge', 'strategic-merge'])
argument_spec['wait'] = dict(type='bool', default=False)
argument_spec['wait_sleep'] = dict(type='int', default=5)
argument_spec['wait_timeout'] = dict(type='int', default=120)
argument_spec['wait_condition'] = dict(type='dict', default=None, options=self.condition_spec)
argument_spec['validate'] = dict(type='dict', default=None, options=self.validate_spec)
argument_spec['append_hash'] = dict(type='bool', default=False)
argument_spec['apply'] = dict(type='bool', default=False)
return argument_spec
def __init__(self, k8s_kind=None, *args, **kwargs):
self.client = None
self.warnings = []
mutually_exclusive = [
('resource_definition', 'src'),
('merge_type', 'apply'),
]
KubernetesAnsibleModule.__init__(self, *args,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True,
**kwargs)
self.kind = k8s_kind or self.params.get('kind')
self.api_version = self.params.get('api_version')
self.name = self.params.get('name')
self.namespace = self.params.get('namespace')
resource_definition = self.params.get('resource_definition')
validate = self.params.get('validate')
if validate:
if LooseVersion(self.openshift_version) < LooseVersion("0.8.0"):
self.fail_json(msg="openshift >= 0.8.0 is required for validate")
self.append_hash = self.params.get('append_hash')
if self.append_hash:
if not HAS_K8S_CONFIG_HASH:
self.fail_json(msg=missing_required_lib("openshift >= 0.7.2", reason="for append_hash"),
exception=K8S_CONFIG_HASH_IMP_ERR)
if self.params['merge_type']:
if LooseVersion(self.openshift_version) < LooseVersion("0.6.2"):
self.fail_json(msg=missing_required_lib("openshift >= 0.6.2", reason="for merge_type"))
self.apply = self.params.get('apply', False)
if self.apply:
if not HAS_K8S_APPLY:
self.fail_json(msg=missing_required_lib("openshift >= 0.9.2", reason="for apply"))
if resource_definition:
if isinstance(resource_definition, string_types):
try:
self.resource_definitions = yaml.safe_load_all(resource_definition)
except (IOError, yaml.YAMLError) as exc:
self.fail(msg="Error loading resource_definition: {0}".format(exc))
elif isinstance(resource_definition, list):
self.resource_definitions = resource_definition
else:
self.resource_definitions = [resource_definition]
src = self.params.get('src')
if src:
self.resource_definitions = self.load_resource_definitions(src)
try:
self.resource_definitions = [item for item in self.resource_definitions if item]
except AttributeError:
pass
if not resource_definition and not src:
implicit_definition = dict(
kind=self.kind,
apiVersion=self.api_version,
metadata=dict(name=self.name)
)
if self.namespace:
implicit_definition['metadata']['namespace'] = self.namespace
self.resource_definitions = [implicit_definition]
def flatten_list_kind(self, list_resource, definitions):
flattened = []
parent_api_version = list_resource.group_version if list_resource else None
parent_kind = list_resource.kind[:-4] if list_resource else None
for definition in definitions.get('items', []):
resource = self.find_resource(definition.get('kind', parent_kind), definition.get('apiVersion', parent_api_version), fail=True)
flattened.append((resource, self.set_defaults(resource, definition)))
return flattened
def execute_module(self):
changed = False
results = []
try:
self.client = self.get_api_client()
# Hopefully the kubernetes client will provide its own exception class one day
except (urllib3.exceptions.RequestError) as e:
self.fail_json(msg="Couldn't connect to Kubernetes: %s" % str(e))
flattened_definitions = []
for definition in self.resource_definitions:
kind = definition.get('kind', self.kind)
api_version = definition.get('apiVersion', self.api_version)
if kind.endswith('List'):
resource = self.find_resource(kind, api_version, fail=False)
flattened_definitions.extend(self.flatten_list_kind(resource, definition))
else:
resource = self.find_resource(kind, api_version, fail=True)
flattened_definitions.append((resource, definition))
for (resource, definition) in flattened_definitions:
kind = definition.get('kind', self.kind)
api_version = definition.get('apiVersion', self.api_version)
definition = self.set_defaults(resource, definition)
self.warnings = []
if self.params['validate'] is not None:
self.warnings = self.validate(definition)
result = self.perform_action(resource, definition)
result['warnings'] = self.warnings
changed = changed or result['changed']
results.append(result)
if len(results) == 1:
self.exit_json(**results[0])
self.exit_json(**{
'changed': changed,
'result': {
'results': results
}
})
def validate(self, resource):
def _prepend_resource_info(resource, msg):
return "%s %s: %s" % (resource['kind'], resource['metadata']['name'], msg)
try:
warnings, errors = self.client.validate(resource, self.params['validate'].get('version'), self.params['validate'].get('strict'))
except KubernetesValidateMissing:
self.fail_json(msg="kubernetes-validate python library is required to validate resources")
if errors and self.params['validate']['fail_on_error']:
self.fail_json(msg="\n".join([_prepend_resource_info(resource, error) for error in errors]))
else:
return [_prepend_resource_info(resource, msg) for msg in warnings + errors]
def set_defaults(self, resource, definition):
definition['kind'] = resource.kind
definition['apiVersion'] = resource.group_version
metadata = definition.get('metadata', {})
if self.name and not metadata.get('name'):
metadata['name'] = self.name
if resource.namespaced and self.namespace and not metadata.get('namespace'):
metadata['namespace'] = self.namespace
definition['metadata'] = metadata
return definition
def perform_action(self, resource, definition):
result = {'changed': False, 'result': {}}
state = self.params.get('state', None)
force = self.params.get('force', False)
name = definition['metadata'].get('name')
namespace = definition['metadata'].get('namespace')
existing = None
wait = self.params.get('wait')
wait_sleep = self.params.get('wait_sleep')
wait_timeout = self.params.get('wait_timeout')
wait_condition = None
if self.params.get('wait_condition') and self.params['wait_condition'].get('type'):
wait_condition = self.params['wait_condition']
self.remove_aliases()
try:
# ignore append_hash for resources other than ConfigMap and Secret
if self.append_hash and definition['kind'] in ['ConfigMap', 'Secret']:
name = '%s-%s' % (name, generate_hash(definition))
definition['metadata']['name'] = name
params = dict(name=name)
if namespace:
params['namespace'] = namespace
existing = resource.get(**params)
except NotFoundError:
# Remove traceback so that it doesn't show up in later failures
try:
sys.exc_clear()
except AttributeError:
# no sys.exc_clear on python3
pass
except ForbiddenError as exc:
if definition['kind'] in ['Project', 'ProjectRequest'] and state != 'absent':
return self.create_project_request(definition)
self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body),
error=exc.status, status=exc.status, reason=exc.reason)
except DynamicApiError as exc:
self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body),
error=exc.status, status=exc.status, reason=exc.reason)
if state == 'absent':
result['method'] = "delete"
if not existing:
# The object already does not exist
return result
else:
# Delete the object
result['changed'] = True
if not self.check_mode:
try:
k8s_obj = resource.delete(**params)
result['result'] = k8s_obj.to_dict()
except DynamicApiError as exc:
self.fail_json(msg="Failed to delete object: {0}".format(exc.body),
error=exc.status, status=exc.status, reason=exc.reason)
if wait:
success, resource, duration = self.wait(resource, definition, wait_sleep, wait_timeout, 'absent')
result['duration'] = duration
if not success:
self.fail_json(msg="Resource deletion timed out", **result)
return result
else:
if self.apply:
if self.check_mode:
ignored, k8s_obj = apply_object(resource, definition)
else:
try:
k8s_obj = resource.apply(definition, namespace=namespace).to_dict()
except DynamicApiError as exc:
msg = "Failed to apply object: {0}".format(exc.body)
if self.warnings:
msg += "\n" + "\n ".join(self.warnings)
self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason)
success = True
result['result'] = k8s_obj
if wait:
success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
if existing:
existing = existing.to_dict()
else:
existing = {}
match, diffs = self.diff_objects(existing, result['result'])
result['changed'] = not match
result['diff'] = diffs
result['method'] = 'apply'
if not success:
self.fail_json(msg="Resource apply timed out", **result)
return result
if not existing:
if self.check_mode:
k8s_obj = definition
else:
try:
k8s_obj = resource.create(definition, namespace=namespace).to_dict()
except ConflictError:
# Some resources, like ProjectRequests, can't be created multiple times,
# because the resources that they create don't match their kind
# In this case we'll mark it as unchanged and warn the user
self.warn("{0} was not found, but creating it returned a 409 Conflict error. This can happen \
if the resource you are creating does not directly create a resource of the same kind.".format(name))
return result
except DynamicApiError as exc:
msg = "Failed to create object: {0}".format(exc.body)
if self.warnings:
msg += "\n" + "\n ".join(self.warnings)
self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason)
success = True
result['result'] = k8s_obj
if wait and not self.check_mode:
success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
result['changed'] = True
result['method'] = 'create'
if not success:
self.fail_json(msg="Resource creation timed out", **result)
return result
match = False
diffs = []
if existing and force:
if self.check_mode:
k8s_obj = definition
else:
try:
k8s_obj = resource.replace(definition, name=name, namespace=namespace, append_hash=self.append_hash).to_dict()
except DynamicApiError as exc:
msg = "Failed to replace object: {0}".format(exc.body)
if self.warnings:
msg += "\n" + "\n ".join(self.warnings)
self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason)
match, diffs = self.diff_objects(existing.to_dict(), k8s_obj)
success = True
result['result'] = k8s_obj
if wait:
success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
match, diffs = self.diff_objects(existing.to_dict(), result['result'])
result['changed'] = not match
result['method'] = 'replace'
result['diff'] = diffs
if not success:
self.fail_json(msg="Resource replacement timed out", **result)
return result
# Differences exist between the existing obj and requested params
if self.check_mode:
k8s_obj = dict_merge(existing.to_dict(), definition)
else:
if LooseVersion(self.openshift_version) < LooseVersion("0.6.2"):
k8s_obj, error = self.patch_resource(resource, definition, existing, name,
namespace)
else:
for merge_type in self.params['merge_type'] or ['strategic-merge', 'merge']:
k8s_obj, error = self.patch_resource(resource, definition, existing, name,
namespace, merge_type=merge_type)
if not error:
break
if error:
self.fail_json(**error)
success = True
result['result'] = k8s_obj
if wait:
success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
match, diffs = self.diff_objects(existing.to_dict(), result['result'])
result['changed'] = not match
result['method'] = 'patch'
result['diff'] = diffs
if not success:
self.fail_json(msg="Resource update timed out", **result)
return result
def patch_resource(self, resource, definition, existing, name, namespace, merge_type=None):
try:
params = dict(name=name, namespace=namespace)
if merge_type:
params['content_type'] = 'application/{0}-patch+json'.format(merge_type)
k8s_obj = resource.patch(definition, **params).to_dict()
match, diffs = self.diff_objects(existing.to_dict(), k8s_obj)
error = {}
return k8s_obj, {}
except DynamicApiError as exc:
msg = "Failed to patch object: {0}".format(exc.body)
if self.warnings:
msg += "\n" + "\n ".join(self.warnings)
error = dict(msg=msg, error=exc.status, status=exc.status, reason=exc.reason, warnings=self.warnings)
return None, error
def create_project_request(self, definition):
definition['kind'] = 'ProjectRequest'
result = {'changed': False, 'result': {}}
resource = self.find_resource('ProjectRequest', definition['apiVersion'], fail=True)
if not self.check_mode:
try:
k8s_obj = resource.create(definition)
result['result'] = k8s_obj.to_dict()
except DynamicApiError as exc:
self.fail_json(msg="Failed to create object: {0}".format(exc.body),
error=exc.status, status=exc.status, reason=exc.reason)
result['changed'] = True
result['method'] = 'create'
return result
def _wait_for(self, resource, name, namespace, predicate, sleep, timeout, state):
start = datetime.now()
def _wait_for_elapsed():
return (datetime.now() - start).seconds
response = None
while _wait_for_elapsed() < timeout:
try:
response = resource.get(name=name, namespace=namespace)
if predicate(response):
if response:
return True, response.to_dict(), _wait_for_elapsed()
else:
return True, {}, _wait_for_elapsed()
time.sleep(sleep)
except NotFoundError:
if state == 'absent':
return True, {}, _wait_for_elapsed()
if response:
response = response.to_dict()
return False, response, _wait_for_elapsed()
def wait(self, resource, definition, sleep, timeout, state='present', condition=None):
def _deployment_ready(deployment):
# FIXME: frustratingly bool(deployment.status) is True even if status is empty
# Furthermore deployment.status.availableReplicas == deployment.status.replicas == None if status is empty
return (deployment.status and deployment.status.replicas is not None and
deployment.status.availableReplicas == deployment.status.replicas and
deployment.status.observedGeneration == deployment.metadata.generation)
def _pod_ready(pod):
return (pod.status and pod.status.containerStatuses is not None and
all([container.ready for container in pod.status.containerStatuses]))
def _daemonset_ready(daemonset):
return (daemonset.status and daemonset.status.desiredNumberScheduled is not None and
daemonset.status.numberReady == daemonset.status.desiredNumberScheduled and
daemonset.status.observedGeneration == daemonset.metadata.generation)
def _custom_condition(resource):
if not resource.status or not resource.status.conditions:
return False
match = [x for x in resource.status.conditions if x.type == condition['type']]
if not match:
return False
# There should never be more than one condition of a specific type
match = match[0]
if match.status == 'Unknown':
if match.status == condition['status']:
if 'reason' not in condition:
return True
if condition['reason']:
return match.reason == condition['reason']
return False
status = True if match.status == 'True' else False
if status == condition['status']:
if condition.get('reason'):
return match.reason == condition['reason']
return True
return False
def _resource_absent(resource):
return not resource
waiter = dict(
Deployment=_deployment_ready,
DaemonSet=_daemonset_ready,
Pod=_pod_ready
)
kind = definition['kind']
if state == 'present' and not condition:
predicate = waiter.get(kind, lambda x: x)
elif state == 'present' and condition:
predicate = _custom_condition
else:
predicate = _resource_absent
return self._wait_for(resource, definition['metadata']['name'], definition['metadata'].get('namespace'), predicate, sleep, timeout, state)
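
The wait() helper above only ships readiness predicates for Deployment, DaemonSet and Pod; for any other kind a wait_condition is needed for the wait to be meaningful. A rough sketch of both paths as exposed by the k8s module, with illustrative file paths and timeout values:

- name: Create a Deployment and wait for availableReplicas to match replicas
  k8s:
    state: present
    src: /testing/deployment.yml
    wait: yes
    wait_timeout: 300

- name: Wait on an explicit status condition instead of the built-in predicate
  k8s:
    state: present
    src: /testing/deployment.yml
    wait: yes
    wait_condition:
      type: Progressing
      reason: NewReplicaSetAvailable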

@@ -1,243 +0,0 @@
#
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import copy
import math
import time
from ansible.module_utils.k8s.common import AUTH_ARG_SPEC, COMMON_ARG_SPEC
from ansible.module_utils.k8s.common import KubernetesAnsibleModule
from ansible.module_utils.six import string_types
try:
import yaml
from openshift import watch
from openshift.dynamic.client import ResourceInstance
from openshift.helper.exceptions import KubernetesException
except ImportError as exc:
class KubernetesException(Exception):
pass
SCALE_ARG_SPEC = {
'replicas': {'type': 'int', 'required': True},
'current_replicas': {'type': 'int'},
'resource_version': {},
'wait': {'type': 'bool', 'default': True},
'wait_timeout': {'type': 'int', 'default': 20},
}
class KubernetesAnsibleScaleModule(KubernetesAnsibleModule):
def __init__(self, k8s_kind=None, *args, **kwargs):
self.client = None
self.warnings = []
mutually_exclusive = [
('resource_definition', 'src'),
]
KubernetesAnsibleModule.__init__(self, *args,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True,
**kwargs)
self.kind = k8s_kind or self.params.get('kind')
self.api_version = self.params.get('api_version')
self.name = self.params.get('name')
self.namespace = self.params.get('namespace')
resource_definition = self.params.get('resource_definition')
if resource_definition:
if isinstance(resource_definition, string_types):
try:
self.resource_definitions = yaml.safe_load_all(resource_definition)
except (IOError, yaml.YAMLError) as exc:
self.fail(msg="Error loading resource_definition: {0}".format(exc))
elif isinstance(resource_definition, list):
self.resource_definitions = resource_definition
else:
self.resource_definitions = [resource_definition]
src = self.params.get('src')
if src:
self.resource_definitions = self.load_resource_definitions(src)
if not resource_definition and not src:
implicit_definition = dict(
kind=self.kind,
apiVersion=self.api_version,
metadata=dict(name=self.name)
)
if self.namespace:
implicit_definition['metadata']['namespace'] = self.namespace
self.resource_definitions = [implicit_definition]
def execute_module(self):
definition = self.resource_definitions[0]
self.client = self.get_api_client()
name = definition['metadata']['name']
namespace = definition['metadata'].get('namespace')
api_version = definition['apiVersion']
kind = definition['kind']
current_replicas = self.params.get('current_replicas')
replicas = self.params.get('replicas')
resource_version = self.params.get('resource_version')
wait = self.params.get('wait')
wait_time = self.params.get('wait_timeout')
existing = None
existing_count = None
return_attributes = dict(changed=False, result=dict())
resource = self.find_resource(kind, api_version, fail=True)
try:
existing = resource.get(name=name, namespace=namespace)
return_attributes['result'] = existing.to_dict()
except KubernetesException as exc:
self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc),
error=exc.value.get('status'))
if self.kind == 'job':
existing_count = existing.spec.parallelism
elif hasattr(existing.spec, 'replicas'):
existing_count = existing.spec.replicas
if existing_count is None:
self.fail_json(msg='Failed to retrieve the available count for the requested object.')
if resource_version and resource_version != existing.metadata.resourceVersion:
self.exit_json(**return_attributes)
if current_replicas is not None and existing_count != current_replicas:
self.exit_json(**return_attributes)
if existing_count != replicas:
return_attributes['changed'] = True
if not self.check_mode:
if self.kind == 'job':
existing.spec.parallelism = replicas
k8s_obj = resource.patch(existing.to_dict())
else:
k8s_obj = self.scale(resource, existing, replicas, wait, wait_time)
return_attributes['result'] = k8s_obj.to_dict()
self.exit_json(**return_attributes)
@property
def argspec(self):
args = copy.deepcopy(COMMON_ARG_SPEC)
args.pop('state')
args.pop('force')
args.update(AUTH_ARG_SPEC)
args.update(SCALE_ARG_SPEC)
return args
def scale(self, resource, existing_object, replicas, wait, wait_time):
name = existing_object.metadata.name
namespace = existing_object.metadata.namespace
if not hasattr(resource, 'scale'):
self.fail_json(
msg="Cannot perform scale on resource of kind {0}".format(resource.kind)
)
scale_obj = {'metadata': {'name': name, 'namespace': namespace}, 'spec': {'replicas': replicas}}
return_obj = None
stream = None
if wait:
w, stream = self._create_stream(resource, namespace, wait_time)
try:
resource.scale.patch(body=scale_obj)
except Exception as exc:
self.fail_json(
msg="Scale request failed: {0}".format(exc)
)
if wait and stream is not None:
return_obj = self._read_stream(resource, w, stream, name, replicas)
if not return_obj:
return_obj = self._wait_for_response(resource, name, namespace)
return return_obj
def _create_stream(self, resource, namespace, wait_time):
""" Create a stream of events for the object """
w = None
stream = None
try:
w = watch.Watch()
w._api_client = self.client.client
if namespace:
stream = w.stream(resource.get, serialize=False, namespace=namespace, timeout_seconds=wait_time)
else:
stream = w.stream(resource.get, serialize=False, namespace=namespace, timeout_seconds=wait_time)
except KubernetesException:
pass
return w, stream
def _read_stream(self, resource, watcher, stream, name, replicas):
""" Wait for ready_replicas to equal the requested number of replicas. """
return_obj = None
try:
for event in stream:
if event.get('object'):
obj = ResourceInstance(resource, event['object'])
if obj.metadata.name == name and hasattr(obj, 'status'):
if replicas == 0:
if not hasattr(obj.status, 'readyReplicas') or not obj.status.readyReplicas:
return_obj = obj
watcher.stop()
break
if hasattr(obj.status, 'readyReplicas') and obj.status.readyReplicas == replicas:
return_obj = obj
watcher.stop()
break
except Exception as exc:
self.fail_json(msg="Exception reading event stream: {0}".format(exc))
if not return_obj:
self.fail_json(msg="Error fetching the patched object. Try a higher wait_timeout value.")
if replicas and return_obj.status.readyReplicas is None:
self.fail_json(msg="Failed to fetch the number of ready replicas. Try a higher wait_timeout value.")
if replicas and return_obj.status.readyReplicas != replicas:
self.fail_json(msg="Number of ready replicas is {0}. Failed to reach {1} ready replicas within "
"the wait_timeout period.".format(return_obj.status.ready_replicas, replicas))
return return_obj
def _wait_for_response(self, resource, name, namespace):
""" Wait for an API response """
tries = 0
half = math.ceil(20 / 2)
obj = None
while tries <= half:
obj = resource.get(name=name, namespace=namespace)
if obj:
break
tries += 2
time.sleep(2)
return obj

@@ -1,274 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Chris Houseknecht <@chouseknecht>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: k8s
short_description: Manage Kubernetes (K8s) objects
version_added: "2.6"
author:
- "Chris Houseknecht (@chouseknecht)"
- "Fabian von Feilitzsch (@fabianvf)"
description:
- Use the OpenShift Python client to perform CRUD operations on K8s objects.
- Pass the object definition from a source file or inline. See examples for reading
files and using Jinja templates or vault-encrypted files.
- Access to the full range of K8s APIs.
- Use the M(k8s_info) module to obtain a list of items about an object of type C(kind).
- Authenticate using either a config file, certificates, password or token.
- Supports check mode.
extends_documentation_fragment:
- k8s_state_options
- k8s_name_options
- k8s_resource_options
- k8s_auth_options
notes:
- If your OpenShift Python library is not 0.9.0 or newer and you are trying to
remove an item from an associative array/dictionary, for example a label or
an annotation, you will need to explicitly set the value of the item to be
removed to `null`. Simply deleting the entry in the dictionary will not
remove it from openshift or kubernetes.
options:
merge_type:
description:
- Whether to override the default patch merge approach with a specific type. By default, the strategic
merge will typically be used.
- For example, Custom Resource Definitions typically aren't updatable by the usual strategic merge. You may
want to use C(merge) if you see "strategic merge patch format is not supported"
- See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)
- Requires openshift >= 0.6.2
- If more than one merge_type is given, the merge_types will be tried in order
- If openshift >= 0.6.2, this defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
on resource kinds that combine Custom Resources and built-in resources. For openshift < 0.6.2, the default
is simply C(strategic-merge).
- mutually exclusive with C(apply)
choices:
- json
- merge
- strategic-merge
type: list
version_added: "2.7"
wait:
description:
- Whether to wait for certain resource kinds to end up in the desired state. By default the module exits once Kubernetes has
received the request
- Implemented for C(state=present) for C(Deployment), C(DaemonSet) and C(Pod), and for C(state=absent) for all resource kinds.
- For resource kinds without an implementation, C(wait) returns immediately unless C(wait_condition) is set.
default: no
type: bool
version_added: "2.8"
wait_sleep:
description:
- Number of seconds to sleep between checks.
default: 5
version_added: "2.9"
wait_timeout:
description:
- How long in seconds to wait for the resource to end up in the desired state. Ignored if C(wait) is not set.
default: 120
version_added: "2.8"
wait_condition:
description:
- Specifies a custom condition on the status to wait for. Ignored if C(wait) is not set or is set to False.
suboptions:
type:
description:
- The type of condition to wait for. For example, the C(Pod) resource will set the C(Ready) condition (among others)
- Required if you are specifying a C(wait_condition). If left empty, the C(wait_condition) field will be ignored.
- The possible types for a condition are specific to each resource type in Kubernetes. See the API documentation of the status field
for a given resource to see possible choices.
status:
description:
- The value of the status field in your desired condition.
- For example, if a C(Deployment) is paused, the C(Progressing) C(type) will have the C(Unknown) status.
choices:
- True
- False
- Unknown
reason:
description:
- The value of the reason field in your desired condition.
- For example, if a C(Deployment) is paused, the C(Progressing) C(type) will have the C(DeploymentPaused) reason.
- The possible reasons in a condition are specific to each resource type in Kubernetes. See the API documentation of the status field
for a given resource to see possible choices.
version_added: "2.8"
validate:
description:
- how (if at all) to validate the resource definition against the kubernetes schema.
Requires the kubernetes-validate python module
suboptions:
fail_on_error:
description: whether to fail on validation errors.
required: yes
type: bool
version:
description: version of Kubernetes to validate against. Defaults to the Kubernetes server version
strict:
description: whether to fail when passing unexpected properties
default: no
type: bool
version_added: "2.8"
append_hash:
description:
- Whether to append a hash to a resource name for immutability purposes
- Applies only to ConfigMap and Secret resources
- The parameter will be silently ignored for other resource kinds
- The full definition of an object is needed to generate the hash - this means that deleting an object created with append_hash
will only work if the same object is passed with state=absent (alternatively, just use state=absent with the name including
the generated hash and append_hash=no)
type: bool
version_added: "2.8"
apply:
description:
- C(apply) compares the desired resource definition with the previously supplied resource definition,
ignoring properties that are automatically generated
- C(apply) works better with Services than 'force=yes'
- mutually exclusive with C(merge_type)
type: bool
version_added: "2.9"
requirements:
- "python >= 2.7"
- "openshift >= 0.6"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
- name: Create a k8s namespace
k8s:
name: testing
api_version: v1
kind: Namespace
state: present
- name: Create a Service object from an inline definition
k8s:
state: present
definition:
apiVersion: v1
kind: Service
metadata:
name: web
namespace: testing
labels:
app: galaxy
service: web
spec:
selector:
app: galaxy
service: web
ports:
- protocol: TCP
targetPort: 8000
name: port-8000-tcp
port: 8000
- name: Remove an existing Service object
k8s:
state: absent
api_version: v1
kind: Service
namespace: testing
name: web
# Passing the object definition from a file
- name: Create a Deployment by reading the definition from a local file
k8s:
state: present
src: /testing/deployment.yml
- name: >-
Read definition file from the Ansible controller file system.
If the definition file has been encrypted with Ansible Vault it will automatically be decrypted.
k8s:
state: present
definition: "{{ lookup('file', '/testing/deployment.yml') | from_yaml }}"
- name: Read definition file from the Ansible controller file system after Jinja templating
k8s:
state: present
definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
- name: fail on validation errors
k8s:
state: present
definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
validate:
fail_on_error: yes
- name: warn on validation errors, check for unexpected properties
k8s:
state: present
definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
validate:
fail_on_error: no
strict: yes
'''
RETURN = '''
result:
description:
- The created, patched, or otherwise present object. Will be empty in the case of a deletion.
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
items:
description: Returned only when multiple yaml documents are passed to src or resource_definition
returned: when resource_definition or src contains list of objects
type: list
duration:
description: elapsed time of task in seconds
returned: when C(wait) is true
type: int
sample: 48
'''
from ansible.module_utils.k8s.raw import KubernetesRawModule
def main():
KubernetesRawModule().execute_module()
if __name__ == '__main__':
main()
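
Several options documented above (merge_type, apply, append_hash, and removing a key by setting it to null) are not exercised in the EXAMPLES block. The sketches below illustrate them; resource names and file paths are placeholders:

- name: Update a Custom Resource with a JSON merge patch (strategic merge is not supported for CRs)
  k8s:
    state: present
    merge_type: merge
    definition: "{{ lookup('template', '/testing/custom_resource.yml') | from_yaml }}"

- name: Apply a Service definition, comparing it against the previously applied definition
  k8s:
    state: present
    apply: yes
    definition: "{{ lookup('file', '/testing/service.yml') | from_yaml }}"

- name: Create a ConfigMap whose name gets a content hash appended
  k8s:
    state: present
    append_hash: yes
    definition:
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: app-config
        namespace: testing
      data:
        key: value

- name: Remove a label by explicitly setting it to null (needed with openshift < 0.9.0)
  k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: testing
        labels:
          obsolete-label: null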

@@ -1,336 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, KubeVirt Team <@kubevirt>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: k8s_auth
short_description: Authenticate to Kubernetes clusters which require an explicit login step
version_added: "2.8"
author: KubeVirt Team (@kubevirt)
description:
- "This module handles authenticating to Kubernetes clusters requiring I(explicit) authentication procedures,
meaning ones where a client logs in (obtains an authentication token), performs API operations using said
token and then logs out (revokes the token). An example of a Kubernetes distribution requiring this module
is OpenShift."
- "On the other hand a popular configuration for username+password authentication is one utilizing HTTP Basic
Auth, which does not involve any additional login/logout steps (instead login credentials can be attached
to each and every API call performed) and as such is handled directly by the C(k8s) module (and other
resource-specific modules) by utilizing the C(host), C(username) and C(password) parameters. Please
consult your preferred module's documentation for more details."
options:
state:
description:
- If set to I(present) connect to the API server using the URL specified in C(host) and attempt to log in.
- If set to I(absent) attempt to log out by revoking the authentication token specified in C(api_key).
default: present
choices:
- present
- absent
host:
description:
- Provide a URL for accessing the API server.
required: true
username:
description:
- Provide a username for authenticating with the API server.
password:
description:
- Provide a password for authenticating with the API server.
ca_cert:
description:
- "Path to a CA certificate file used to verify connection to the API server. The full certificate chain
must be provided to avoid certificate validation errors."
aliases: [ ssl_ca_cert ]
validate_certs:
description:
- "Whether or not to verify the API server's SSL certificates."
type: bool
default: true
aliases: [ verify_ssl ]
api_key:
description:
- When C(state) is set to I(absent), this specifies the token to revoke.
requirements:
- python >= 2.7
- urllib3
- requests
- requests-oauthlib
'''
EXAMPLES = '''
- hosts: localhost
module_defaults:
group/k8s:
host: https://k8s.example.com/
ca_cert: ca.pem
tasks:
- block:
# It's good practice to store login credentials in a secure vault and not
# directly in playbooks.
- include_vars: k8s_passwords.yml
- name: Log in (obtain access token)
k8s_auth:
username: admin
password: "{{ k8s_admin_password }}"
register: k8s_auth_results
# Previous task provides the token/api_key, while all other parameters
# are taken from module_defaults
- name: Get a list of all pods from any namespace
k8s_info:
api_key: "{{ k8s_auth_results.k8s_auth.api_key }}"
kind: Pod
register: pod_list
always:
- name: If login succeeded, try to log out (revoke access token)
when: k8s_auth_results.k8s_auth.api_key is defined
k8s_auth:
state: absent
api_key: "{{ k8s_auth_results.k8s_auth.api_key }}"
'''
# Returned value names need to match k8s modules parameter names, to make it
# easy to pass returned values of k8s_auth to other k8s modules.
# Discussion: https://github.com/ansible/ansible/pull/50807#discussion_r248827899
RETURN = '''
k8s_auth:
description: Kubernetes authentication facts.
returned: success
type: complex
contains:
api_key:
description: Authentication token.
returned: success
type: str
host:
description: URL for accessing the API server.
returned: success
type: str
ca_cert:
description: Path to a CA certificate file used to verify connection to the API server.
returned: success
type: str
validate_certs:
description: "Whether or not to verify the API server's SSL certificates."
returned: success
type: bool
username:
description: Username for authenticating with the API server.
returned: success
type: str
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib_parse import urlparse, parse_qs, urlencode
# 3rd party imports
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
try:
from requests_oauthlib import OAuth2Session
HAS_REQUESTS_OAUTH = True
except ImportError:
HAS_REQUESTS_OAUTH = False
try:
from urllib3.util import make_headers
HAS_URLLIB3 = True
except ImportError:
HAS_URLLIB3 = False
K8S_AUTH_ARG_SPEC = {
'state': {
'default': 'present',
'choices': ['present', 'absent'],
},
'host': {'required': True},
'username': {},
'password': {'no_log': True},
'ca_cert': {'type': 'path', 'aliases': ['ssl_ca_cert']},
'validate_certs': {
'type': 'bool',
'default': True,
'aliases': ['verify_ssl']
},
'api_key': {'no_log': True},
}
class KubernetesAuthModule(AnsibleModule):
def __init__(self):
AnsibleModule.__init__(
self,
argument_spec=K8S_AUTH_ARG_SPEC,
required_if=[
('state', 'present', ['username', 'password']),
('state', 'absent', ['api_key']),
]
)
if not HAS_REQUESTS:
self.fail("This module requires the python 'requests' package. Try `pip install requests`.")
if not HAS_REQUESTS_OAUTH:
self.fail("This module requires the python 'requests-oauthlib' package. Try `pip install requests-oauthlib`.")
if not HAS_URLLIB3:
self.fail("This module requires the python 'urllib3' package. Try `pip install urllib3`.")
def execute_module(self):
state = self.params.get('state')
verify_ssl = self.params.get('validate_certs')
ssl_ca_cert = self.params.get('ca_cert')
self.auth_username = self.params.get('username')
self.auth_password = self.params.get('password')
self.auth_api_key = self.params.get('api_key')
self.con_host = self.params.get('host')
# python-requests takes either a bool or a path to a ca file as the 'verify' param
if verify_ssl and ssl_ca_cert:
self.con_verify_ca = ssl_ca_cert # path
else:
self.con_verify_ca = verify_ssl # bool
# Get needed info to access authorization APIs
self.openshift_discover()
if state == 'present':
new_api_key = self.openshift_login()
result = dict(
host=self.con_host,
validate_certs=verify_ssl,
ca_cert=ssl_ca_cert,
api_key=new_api_key,
username=self.auth_username,
)
else:
self.openshift_logout()
result = dict()
self.exit_json(changed=False, k8s_auth=result)
def openshift_discover(self):
url = '{0}/.well-known/oauth-authorization-server'.format(self.con_host)
ret = requests.get(url, verify=self.con_verify_ca)
if ret.status_code != 200:
self.fail_request("Couldn't find OpenShift's OAuth API", method='GET', url=url,
reason=ret.reason, status_code=ret.status_code)
try:
oauth_info = ret.json()
self.openshift_auth_endpoint = oauth_info['authorization_endpoint']
self.openshift_token_endpoint = oauth_info['token_endpoint']
except Exception as e:
self.fail_json(msg="Something went wrong discovering OpenShift OAuth details.",
exception=traceback.format_exc())
def openshift_login(self):
os_oauth = OAuth2Session(client_id='openshift-challenging-client')
authorization_url, state = os_oauth.authorization_url(self.openshift_auth_endpoint,
state="1", code_challenge_method='S256')
auth_headers = make_headers(basic_auth='{0}:{1}'.format(self.auth_username, self.auth_password))
# Request authorization code using basic auth credentials
ret = os_oauth.get(
authorization_url,
headers={'X-Csrf-Token': state, 'authorization': auth_headers.get('authorization')},
verify=self.con_verify_ca,
allow_redirects=False
)
if ret.status_code != 302:
self.fail_request("Authorization failed.", method='GET', url=authorization_url,
reason=ret.reason, status_code=ret.status_code)
# In here we have `code` and `state`, I think `code` is the important one
qwargs = {}
for k, v in parse_qs(urlparse(ret.headers['Location']).query).items():
qwargs[k] = v[0]
qwargs['grant_type'] = 'authorization_code'
# Using authorization code given to us in the Location header of the previous request, request a token
ret = os_oauth.post(
self.openshift_token_endpoint,
headers={
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
# This is just base64 encoded 'openshift-challenging-client:'
'Authorization': 'Basic b3BlbnNoaWZ0LWNoYWxsZW5naW5nLWNsaWVudDo='
},
data=urlencode(qwargs),
verify=self.con_verify_ca
)
if ret.status_code != 200:
self.fail_request("Failed to obtain an authorization token.", method='POST',
url=self.openshift_token_endpoint,
reason=ret.reason, status_code=ret.status_code)
return ret.json()['access_token']
def openshift_logout(self):
url = '{0}/apis/oauth.openshift.io/v1/oauthaccesstokens/{1}'.format(self.con_host, self.auth_api_key)
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': 'Bearer {0}'.format(self.auth_api_key)
}
json = {
"apiVersion": "oauth.openshift.io/v1",
"kind": "DeleteOptions"
}
ret = requests.delete(url, headers=headers, json=json, verify=self.con_verify_ca)
# Ignore errors, the token will time out eventually anyway
def fail(self, msg=None):
self.fail_json(msg=msg)
def fail_request(self, msg, **kwargs):
req_info = {}
for k, v in kwargs.items():
req_info['req_' + k] = v
self.fail_json(msg=msg, **req_info)
def main():
module = KubernetesAuthModule()
try:
module.execute_module()
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()

@@ -1,179 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Will Thames <@willthames>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: k8s_info
short_description: Describe Kubernetes (K8s) objects
version_added: "2.7"
author:
- "Will Thames (@willthames)"
description:
- Use the OpenShift Python client to perform read operations on K8s objects.
- Access to the full range of K8s APIs.
- Authenticate using either a config file, certificates, password or token.
- Supports check mode.
- This module was called C(k8s_facts) before Ansible 2.9. The usage did not change.
options:
api_version:
description:
- Use to specify the API version. Use in conjunction with I(kind), I(name), and I(namespace) to identify a
specific object.
default: v1
aliases:
- api
- version
kind:
description:
- Use to specify an object model. Use in conjunction with I(api_version), I(name), and I(namespace) to identify a
specific object.
required: yes
name:
description:
- Use to specify an object name. Use in conjunction with I(api_version), I(kind) and I(namespace) to identify a
specific object.
namespace:
description:
- Use to specify an object namespace. Use in conjunction with I(api_version), I(kind), and I(name)
to identify a specific object.
label_selectors:
description: List of label selectors to use to filter results
field_selectors:
description: List of field selectors to use to filter results
extends_documentation_fragment:
- k8s_auth_options
requirements:
- "python >= 2.7"
- "openshift >= 0.6"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
- name: Get an existing Service object
k8s_info:
api_version: v1
kind: Service
name: web
namespace: testing
register: web_service
- name: Get a list of all service objects
k8s_info:
api_version: v1
kind: Service
namespace: testing
register: service_list
- name: Get a list of all pods from any namespace
k8s_info:
kind: Pod
register: pod_list
- name: Search for all Pods labelled app=web
k8s_info:
kind: Pod
label_selectors:
- app = web
- tier in (dev, test)
- name: Search for all running pods
k8s_info:
kind: Pod
field_selectors:
- status.phase=Running
'''
RETURN = '''
resources:
description:
- The object(s) that exist
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: dict
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: dict
status:
description: Current status details for the object.
returned: success
type: dict
'''
from ansible.module_utils.k8s.common import KubernetesAnsibleModule, AUTH_ARG_SPEC
import copy
class KubernetesInfoModule(KubernetesAnsibleModule):
def __init__(self, *args, **kwargs):
KubernetesAnsibleModule.__init__(self, *args,
supports_check_mode=True,
**kwargs)
if self._name == 'k8s_facts':
self.deprecate("The 'k8s_facts' module has been renamed to 'k8s_info'", version='2.13')
def execute_module(self):
self.client = self.get_api_client()
self.exit_json(changed=False,
**self.kubernetes_facts(self.params['kind'],
self.params['api_version'],
self.params['name'],
self.params['namespace'],
self.params['label_selectors'],
self.params['field_selectors']))
@property
def argspec(self):
args = copy.deepcopy(AUTH_ARG_SPEC)
args.update(
dict(
kind=dict(required=True),
api_version=dict(default='v1', aliases=['api', 'version']),
name=dict(),
namespace=dict(),
label_selectors=dict(type='list', default=[]),
field_selectors=dict(type='list', default=[]),
)
)
return args
def main():
KubernetesInfoModule().execute_module()
if __name__ == '__main__':
main()

@@ -1,129 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Chris Houseknecht <@chouseknecht>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: k8s_scale
short_description: Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job.
version_added: "2.5"
author:
- "Chris Houseknecht (@chouseknecht)"
- "Fabian von Feilitzsch (@fabianvf)"
description:
- Similar to the kubectl scale command. Use to set the number of replicas for a Deployment, ReplicaSet,
or Replication Controller, or the parallelism attribute of a Job. Supports check mode.
extends_documentation_fragment:
- k8s_name_options
- k8s_auth_options
- k8s_resource_options
- k8s_scale_options
requirements:
- "python >= 2.7"
- "openshift >= 0.6"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
- name: Scale deployment up, and extend timeout
k8s_scale:
api_version: v1
kind: Deployment
name: elastic
namespace: myproject
replicas: 3
wait_timeout: 60
- name: Scale deployment down when current replicas match
k8s_scale:
api_version: v1
kind: Deployment
name: elastic
namespace: myproject
current_replicas: 3
replicas: 2
- name: Increase job parallelism
k8s_scale:
api_version: batch/v1
kind: job
name: pi-with-timeout
namespace: testing
replicas: 2
# Match object using local file or inline definition
- name: Scale deployment based on a file from the local filesystem
k8s_scale:
src: /myproject/elastic_deployment.yml
replicas: 3
wait: no
- name: Scale deployment based on a template output
k8s_scale:
resource_definition: "{{ lookup('template', '/myproject/elastic_deployment.yml') | from_yaml }}"
replicas: 3
wait: no
- name: Scale deployment based on a file from the Ansible controller filesystem
k8s_scale:
resource_definition: "{{ lookup('file', '/myproject/elastic_deployment.yml') | from_yaml }}"
replicas: 3
wait: no
'''
RETURN = '''
result:
description:
- If a change was made, returns the patched object; otherwise returns the existing object.
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: The REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
'''
from ansible.module_utils.k8s.scale import KubernetesAnsibleScaleModule
def main():
KubernetesAnsibleScaleModule().execute_module()
if __name__ == '__main__':
main()

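A hedged follow-up sketch (the deployment name and namespace are placeholders) showing how the C(result) documented above can be checked after scaling:
- name: Scale the deployment and keep the patched object
  k8s_scale:
    api_version: v1
    kind: Deployment
    name: elastic                   # placeholder name
    namespace: myproject            # placeholder namespace
    replicas: 3
    wait_timeout: 120
  register: scale_out
- name: Verify the patched spec carries the requested size
  assert:
    that:
      - scale_out.result.spec.replicas == 3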
@ -1,267 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, KubeVirt Team <@kubevirt>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: k8s_service
short_description: Manage Services on Kubernetes
version_added: "2.8"
author: KubeVirt Team (@kubevirt)
description:
- Use Openshift Python SDK to manage Services on Kubernetes
extends_documentation_fragment:
- k8s_auth_options
options:
resource_definition:
description:
- A partial YAML definition of the Service object being created/updated. Here you can define Kubernetes
Service Resource parameters not covered by this module's parameters.
- "NOTE: I(resource_definition) has lower priority than module parameters. If you try to define e.g.
I(metadata.namespace) here, that value will be ignored and I(metadata) used instead."
aliases:
- definition
- inline
type: dict
state:
description:
- Determines if an object should be created, patched, or deleted. When set to C(present), an object will be
created, if it does not already exist. If set to C(absent), an existing object will be deleted. If set to
C(present), an existing object will be patched, if its attributes differ from those specified using
module options and I(resource_definition).
default: present
choices:
- present
- absent
force:
description:
- If set to C(True), and I(state) is C(present), an existing object will be replaced.
default: false
type: bool
merge_type:
description:
- Whether to override the default patch merge approach with a specific type. By default, the strategic
merge will typically be used.
- For example, Custom Resource Definitions typically aren't updatable by the usual strategic merge. You may
want to use C(merge) if you see "strategic merge patch format is not supported"
- See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)
- Requires openshift >= 0.6.2
- If more than one merge_type is given, the merge_types will be tried in order
- If openshift >= 0.6.2, this defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
on resource kinds that combine Custom Resources and built-in resources. For openshift < 0.6.2, the default
is simply C(strategic-merge).
choices:
- json
- merge
- strategic-merge
type: list
name:
description:
- Use to specify a Service object name.
required: true
type: str
namespace:
description:
- Use to specify a Service object namespace.
required: true
type: str
type:
description:
- Specifies the type of Service to create.
- See U(https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types)
choices:
- NodePort
- ClusterIP
- LoadBalancer
- ExternalName
ports:
description:
- A list of ports to expose.
- U(https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services)
type: list
selector:
description:
- Label selectors identify objects this Service should apply to.
- U(https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
type: dict
requirements:
- python >= 2.7
- openshift >= 0.6.2
'''
EXAMPLES = '''
- name: Expose https port with ClusterIP
k8s_service:
state: present
name: test-https
namespace: default
ports:
- port: 443
protocol: TCP
selector:
key: special
- name: Expose https port with ClusterIP using spec
k8s_service:
state: present
name: test-https
namespace: default
inline:
spec:
ports:
- port: 443
protocol: TCP
selector:
key: special
'''
RETURN = '''
result:
description:
- The created, patched, or otherwise present Service object. Will be empty in the case of a deletion.
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Always 'Service'.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
'''
import copy
import traceback
from collections import defaultdict
from ansible.module_utils.k8s.common import AUTH_ARG_SPEC, COMMON_ARG_SPEC
from ansible.module_utils.k8s.raw import KubernetesRawModule
SERVICE_ARG_SPEC = {
'state': {
'default': 'present',
'choices': ['present', 'absent'],
},
'force': {
'type': 'bool',
'default': False,
},
'resource_definition': {
'type': 'dict',
'aliases': ['definition', 'inline']
},
'name': {'required': True},
'namespace': {'required': True},
'merge_type': {'type': 'list', 'choices': ['json', 'merge', 'strategic-merge']},
'selector': {'type': 'dict'},
'type': {
'type': 'str',
'choices': [
'NodePort', 'ClusterIP', 'LoadBalancer', 'ExternalName'
],
},
'ports': {'type': 'list'},
}
class KubernetesService(KubernetesRawModule):
def __init__(self, *args, **kwargs):
super(KubernetesService, self).__init__(*args, k8s_kind='Service', **kwargs)
@staticmethod
def merge_dicts(x, y):
for k in set(x.keys()).union(y.keys()):
if k in x and k in y:
if isinstance(x[k], dict) and isinstance(y[k], dict):
yield (k, dict(KubernetesService.merge_dicts(x[k], y[k])))
else:
yield (k, y[k])
elif k in x:
yield (k, x[k])
else:
yield (k, y[k])
@property
def argspec(self):
""" argspec property builder """
argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
argument_spec.update(SERVICE_ARG_SPEC)
return argument_spec
def execute_module(self):
""" Module execution """
self.client = self.get_api_client()
api_version = 'v1'
selector = self.params.get('selector')
service_type = self.params.get('type')
ports = self.params.get('ports')
definition = defaultdict(defaultdict)
definition['kind'] = 'Service'
definition['apiVersion'] = api_version
def_spec = definition['spec']
def_spec['type'] = service_type
def_spec['ports'] = ports
def_spec['selector'] = selector
def_meta = definition['metadata']
def_meta['name'] = self.params.get('name')
def_meta['namespace'] = self.params.get('namespace')
# 'resource_definition:' has lower priority than module parameters
definition = dict(self.merge_dicts(self.resource_definitions[0], definition))
resource = self.find_resource('Service', api_version, fail=True)
definition = self.set_defaults(resource, definition)
result = self.perform_action(resource, definition)
self.exit_json(**result)
def main():
module = KubernetesService()
try:
module.execute_module()
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()

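Because the merge in C(execute_module) gives module parameters priority over I(resource_definition), a partial definition can carry Service fields the module does not expose. A rough sketch; C(sessionAffinity) is only an illustrative field:
- name: Expose a port and add sessionAffinity through a partial definition
  k8s_service:
    state: present
    name: test-https
    namespace: default
    type: ClusterIP
    ports:
      - port: 443
        protocol: TCP
    selector:
      key: special
    inline:
      spec:
        sessionAffinity: ClientIP   # merged from the definition; module parameters still win on conflict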
@ -1,95 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Options for authenticating with the API.
class ModuleDocFragment(object):
DOCUMENTATION = r'''
options:
host:
description:
- Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
type: str
api_key:
description:
- Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment variable.
type: str
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the openshift client will attempt to load the default
configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG environment
variable.
type: path
context:
description:
- The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment variable.
type: str
username:
description:
- Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME environment
variable.
- Please note that this only works with clusters configured to use HTTP Basic Auth. If your cluster has a
different form of authentication (e.g. OAuth2 in OpenShift), this option will not work as expected and you
should look into the C(k8s_auth) module, as that might do what you need.
type: str
password:
description:
- Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD environment
variable.
- Please read the description of the C(username) option for a discussion of when this option is applicable.
type: str
client_cert:
description:
- Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE environment
variable.
type: path
aliases: [ cert_file ]
client_key:
description:
- Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE environment
variable.
type: path
aliases: [ key_file ]
ca_cert:
description:
- Path to a CA certificate used to authenticate with the API. The full certificate chain must be provided to
avoid certificate validation errors. Can also be specified via K8S_AUTH_SSL_CA_CERT environment variable.
type: path
aliases: [ ssl_ca_cert ]
validate_certs:
description:
- Whether or not to verify the API server's SSL certificates. Can also be specified via K8S_AUTH_VERIFY_SSL
environment variable.
type: bool
aliases: [ verify_ssl ]
proxy:
description:
- The URL of an HTTP proxy to use for the connection. Can also be specified via K8S_AUTH_PROXY environment variable.
- Please note that this module does not pick up typical proxy settings from the environment (e.g. HTTP_PROXY).
version_added: "2.9"
persist_config:
description:
- Whether or not to save the kube config refresh tokens.
Can also be specified via K8S_AUTH_PERSIST_CONFIG environment variable.
- When the k8s context is using user credentials with refresh tokens (like oidc or gke/gcloud auth),
  the token is refreshed by the k8s python client library but not saved by default. So the old refresh token can
  expire and the next auth might fail. Setting this flag to true will tell the k8s python client to save the
  new refresh token to the kube config file.
- Defaults to false.
- Please note that the current version of the k8s python client library does not support setting this flag to True yet.
- "The fix for this k8s python library is here: https://github.com/kubernetes-client/python-base/pull/169"
type: bool
version_added: "2.10"
notes:
- "The OpenShift Python client wraps the K8s Python client, providing full access to
all of the APIs and models available on both platforms. For API version details and
additional information visit https://github.com/openshift/openshift-restclient-python"
- "To avoid SSL certificate validation errors when C(validate_certs) is I(True), the full
certificate chain for the API server must be provided via C(ca_cert) or in the
kubeconfig file."
'''

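A hedged illustration of the two equivalent ways these options are usually supplied; the paths and context name are placeholders:
- name: Authenticate with explicit module options
  k8s_info:
    kind: Namespace
    kubeconfig: /path/to/kubeconfig   # placeholder path
    context: my-cluster               # placeholder context name
    validate_certs: yes
    ca_cert: /path/to/ca-chain.crt    # full chain, as noted above
- name: Equivalent call relying on exported K8S_AUTH_KUBECONFIG and K8S_AUTH_CONTEXT variables
  k8s_info:
    kind: Namespace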
@ -1,45 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Options for selecting or identifying a specific K8s object
class ModuleDocFragment(object):
DOCUMENTATION = r'''
options:
api_version:
description:
- Use to specify the API version. Use to create, delete, or discover an object without providing a full
resource definition. Use in conjunction with I(kind), I(name), and I(namespace) to identify a
specific object. If I(resource_definition) is provided, the I(apiVersion) from the I(resource_definition)
will override this option.
type: str
default: v1
aliases:
- api
- version
kind:
description:
- Use to specify an object model. Use to create, delete, or discover an object without providing a full
resource definition. Use in conjunction with I(api_version), I(name), and I(namespace) to identify a
specific object. If I(resource_definition) is provided, the I(kind) from the I(resource_definition)
will override this option.
type: str
name:
description:
- Use to specify an object name. Use to create, delete, or discover an object without providing a full
resource definition. Use in conjunction with I(api_version), I(kind) and I(namespace) to identify a
specific object. If I(resource_definition) is provided, the I(metadata.name) value from the
I(resource_definition) will override this option.
type: str
namespace:
description:
- Use to specify an object namespace. Useful when creating, deleting, or discovering an object without
providing a full resource definition. Use in conjunction with I(api_version), I(kind), and I(name)
to identify a specific object. If I(resource_definition) is provided, the I(metadata.namespace) value
from the I(resource_definition) will override this option.
type: str
'''

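For example (object name and namespace are placeholders), these options alone identify an object well enough to delete it without a full resource definition:
- name: Delete a ConfigMap identified only by api_version, kind, name and namespace
  k8s:
    state: absent
    api_version: v1
    kind: ConfigMap
    name: example-config            # placeholder
    namespace: example-ns           # placeholder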
@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Options for providing an object configuration
class ModuleDocFragment(object):
DOCUMENTATION = r'''
options:
resource_definition:
description:
- "Provide a valid YAML definition (either as a string, list, or dict) for an object when creating or updating. NOTE: I(kind), I(api_version), I(name),
and I(namespace) will be overwritten by corresponding values found in the provided I(resource_definition)."
aliases:
- definition
- inline
src:
description:
- "Provide a path to a file containing a valid YAML definition of an object or objects to be created or updated. Mutually
exclusive with I(resource_definition). NOTE: I(kind), I(api_version), I(name), and I(namespace) will be
overwritten by corresponding values found in the configuration read in from the I(src) file."
- Reads from the local file system. To read from the Ansible controller's file system, including vaulted files, use the file lookup
plugin or template lookup plugin, combined with the from_yaml filter, and pass the result to
I(resource_definition). See Examples below.
type: path
'''

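A brief sketch of the distinction drawn above (file paths are placeholders): I(src) is read where the module runs, while controller-side files go through a lookup into I(resource_definition):
- name: Apply a definition from a file readable where the module executes
  k8s:
    src: /tmp/deployment.yml        # placeholder path
- name: Apply a definition read from the Ansible controller instead
  k8s:
    resource_definition: "{{ lookup('file', 'files/deployment.yml') | from_yaml }}"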
@ -1,39 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Options used by scale modules.
class ModuleDocFragment(object):
DOCUMENTATION = r'''
options:
replicas:
description:
- The desired number of replicas.
type: int
current_replicas:
description:
- For Deployment, ReplicaSet, or Replication Controller, only scale if the number of existing replicas
  matches. In the case of a Job, update parallelism only if the current parallelism value matches.
type: int
resource_version:
description:
- Only attempt to scale if the current object version matches.
type: str
wait:
description:
- For Deployment, ReplicaSet, or Replication Controller, wait for the status value of I(ready_replicas) to change
to the number of I(replicas). In the case of a Job, this option is ignored.
type: bool
default: yes
wait_timeout:
description:
- When C(wait) is I(True), the number of seconds to wait for the I(ready_replicas) status to equal I(replicas).
If the status is not reached within the allotted time, an error will result. In the case of a Job, this option
is ignored.
type: int
default: 20
'''

@ -1,27 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Options for specifying object state
class ModuleDocFragment(object):
DOCUMENTATION = r'''
options:
state:
description:
- Determines if an object should be created, patched, or deleted. When set to C(present), an object will be
created, if it does not already exist. If set to C(absent), an existing object will be deleted. If set to
C(present), an existing object will be patched, if its attributes differ from those specified using
I(resource_definition) or I(src).
type: str
default: present
choices: [ absent, present ]
force:
description:
- If set to C(yes), and I(state) is C(present), an existing object will be replaced.
type: bool
default: no
'''

@ -1,40 +0,0 @@
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
try:
from openshift.helper.hashes import generate_hash
HAS_GENERATE_HASH = True
except ImportError:
HAS_GENERATE_HASH = False
from ansible.errors import AnsibleFilterError
def k8s_config_resource_name(resource):
if not HAS_GENERATE_HASH:
raise AnsibleFilterError("k8s_config_resource_name requires openshift>=0.7.2")
try:
return resource['metadata']['name'] + '-' + generate_hash(resource)
except KeyError:
raise AnsibleFilterError("resource must have a metadata.name key to generate a resource name")
# ---- Ansible filters ----
class FilterModule(object):
def filters(self):
return {
'k8s_config_resource_name': k8s_config_resource_name
}

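A hedged usage sketch (the ConfigMap contents are illustrative): the filter reproduces the hash-suffixed name that C(k8s) generates when C(append_hash) is enabled, so other tasks can reference it:
- name: Define a ConfigMap body
  set_fact:
    my_configmap:
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: example-config        # placeholder
        namespace: default
      data:
        hello: world
- name: Create it with an appended content hash
  k8s:
    definition: "{{ my_configmap }}"
    append_hash: yes
- name: Compute the same hash-suffixed name for later references
  debug:
    msg: "{{ my_configmap | k8s_config_resource_name }}"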
@ -1,360 +0,0 @@
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: k8s
plugin_type: inventory
author:
- Chris Houseknecht <@chouseknecht>
- Fabian von Feilitzsch <@fabianvf>
short_description: Kubernetes (K8s) inventory source
description:
- Fetch containers and services for one or more clusters
- Groups by cluster name, namespace, namespace_services, namespace_pods, and labels
- Uses k8s.(yml|yaml) YAML configuration file to set parameter values.
options:
plugin:
description: token that ensures this is a source file for the 'k8s' plugin.
required: True
choices: ['k8s']
connections:
description:
- Optional list of cluster connection settings. If no connections are provided, the default
I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces
the active user is authorized to access.
name:
description:
- Optional name to assign to the cluster. If not provided, a name is constructed from the server
and port.
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the OpenShift client will attempt to load the default
configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG
environment variable.
context:
description:
- The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
variable.
host:
description:
- Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
api_key:
description:
- Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment
variable.
username:
description:
- Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME
environment variable.
password:
description:
- Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD
environment variable.
client_cert:
description:
- Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
environment variable.
aliases: [ cert_file ]
client_key:
description:
- Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE
environment variable.
aliases: [ key_file ]
ca_cert:
description:
- Path to a CA certificate used to authenticate with the API. Can also be specified via
K8S_AUTH_SSL_CA_CERT environment variable.
aliases: [ ssl_ca_cert ]
validate_certs:
description:
- "Whether or not to verify the API server's SSL certificates. Can also be specified via
K8S_AUTH_VERIFY_SSL environment variable."
type: bool
aliases: [ verify_ssl ]
namespaces:
description:
- List of namespaces. If not specified, will fetch all containers for all namespaces the user is authorized
to access.
requirements:
- "python >= 2.7"
- "openshift >= 0.6"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
# File must be named k8s.yaml or k8s.yml
# Authenticate with token, and return all pods and services for all namespaces
plugin: k8s
connections:
- host: https://192.168.64.4:8443
token: xxxxxxxxxxxxxxxx
validate_certs: false
# Use default config (~/.kube/config) file and active context, and return objects for a specific namespace
plugin: k8s
connections:
- namespaces:
- testing
# Use a custom config file, and a specific context.
plugin: k8s
connections:
- kubeconfig: /path/to/config
context: 'awx/192-168-64-4:8443/developer'
'''
import json
from ansible.errors import AnsibleError
from ansible.module_utils.k8s.common import K8sAnsibleMixin, HAS_K8S_MODULE_HELPER, k8s_import_exception
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
try:
from openshift.dynamic.exceptions import DynamicApiError
except ImportError:
pass
def format_dynamic_api_exc(exc):
if exc.body:
if exc.headers and exc.headers.get('Content-Type') == 'application/json':
message = json.loads(exc.body).get('message')
if message:
return message
return exc.body
else:
return '%s Reason: %s' % (exc.status, exc.reason)
class K8sInventoryException(Exception):
pass
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable, K8sAnsibleMixin):
NAME = 'k8s'
transport = 'kubectl'
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
cache_key = self._get_cache_prefix(path)
config_data = self._read_config_data(path)
self.setup(config_data, cache, cache_key)
def setup(self, config_data, cache, cache_key):
connections = config_data.get('connections')
if not HAS_K8S_MODULE_HELPER:
raise K8sInventoryException(
"This module requires the OpenShift Python client. Try `pip install openshift`. Detail: {0}".format(k8s_import_exception)
)
source_data = None
if cache and cache_key in self._cache:
try:
source_data = self._cache[cache_key]
except KeyError:
pass
if not source_data:
self.fetch_objects(connections)
def fetch_objects(self, connections):
if connections:
if not isinstance(connections, list):
raise K8sInventoryException("Expecting connections to be a list.")
for connection in connections:
if not isinstance(connection, dict):
raise K8sInventoryException("Expecting connection to be a dictionary.")
client = self.get_api_client(**connection)
name = connection.get('name', self.get_default_host_name(client.configuration.host))
if connection.get('namespaces'):
namespaces = connection['namespaces']
else:
namespaces = self.get_available_namespaces(client)
for namespace in namespaces:
self.get_pods_for_namespace(client, name, namespace)
self.get_services_for_namespace(client, name, namespace)
else:
client = self.get_api_client()
name = self.get_default_host_name(client.configuration.host)
namespaces = self.get_available_namespaces(client)
for namespace in namespaces:
self.get_pods_for_namespace(client, name, namespace)
self.get_services_for_namespace(client, name, namespace)
@staticmethod
def get_default_host_name(host):
return host.replace('https://', '').replace('http://', '').replace('.', '-').replace(':', '_')
def get_available_namespaces(self, client):
v1_namespace = client.resources.get(api_version='v1', kind='Namespace')
try:
obj = v1_namespace.get()
except DynamicApiError as exc:
self.display.debug(exc)
raise K8sInventoryException('Error fetching Namespace list: %s' % format_dynamic_api_exc(exc))
return [namespace.metadata.name for namespace in obj.items]
def get_pods_for_namespace(self, client, name, namespace):
v1_pod = client.resources.get(api_version='v1', kind='Pod')
try:
obj = v1_pod.get(namespace=namespace)
except DynamicApiError as exc:
self.display.debug(exc)
raise K8sInventoryException('Error fetching Pod list: %s' % format_dynamic_api_exc(exc))
namespace_group = 'namespace_{0}'.format(namespace)
namespace_pods_group = '{0}_pods'.format(namespace_group)
self.inventory.add_group(name)
self.inventory.add_group(namespace_group)
self.inventory.add_child(name, namespace_group)
self.inventory.add_group(namespace_pods_group)
self.inventory.add_child(namespace_group, namespace_pods_group)
for pod in obj.items:
pod_name = pod.metadata.name
pod_groups = []
pod_annotations = {} if not pod.metadata.annotations else dict(pod.metadata.annotations)
if pod.metadata.labels:
# create a group for each label_value
for key, value in pod.metadata.labels:
group_name = 'label_{0}_{1}'.format(key, value)
if group_name not in pod_groups:
pod_groups.append(group_name)
self.inventory.add_group(group_name)
pod_labels = dict(pod.metadata.labels)
else:
pod_labels = {}
if not pod.status.containerStatuses:
continue
for container in pod.status.containerStatuses:
# add each pod_container to the namespace group, and to each label_value group
container_name = '{0}_{1}'.format(pod.metadata.name, container.name)
self.inventory.add_host(container_name)
self.inventory.add_child(namespace_pods_group, container_name)
if pod_groups:
for group in pod_groups:
self.inventory.add_child(group, container_name)
# Add hostvars
self.inventory.set_variable(container_name, 'object_type', 'pod')
self.inventory.set_variable(container_name, 'labels', pod_labels)
self.inventory.set_variable(container_name, 'annotations', pod_annotations)
self.inventory.set_variable(container_name, 'cluster_name', pod.metadata.clusterName)
self.inventory.set_variable(container_name, 'pod_node_name', pod.spec.nodeName)
self.inventory.set_variable(container_name, 'pod_name', pod_name)
self.inventory.set_variable(container_name, 'pod_host_ip', pod.status.hostIP)
self.inventory.set_variable(container_name, 'pod_phase', pod.status.phase)
self.inventory.set_variable(container_name, 'pod_ip', pod.status.podIP)
self.inventory.set_variable(container_name, 'pod_self_link', pod.metadata.selfLink)
self.inventory.set_variable(container_name, 'pod_resource_version', pod.metadata.resourceVersion)
self.inventory.set_variable(container_name, 'pod_uid', pod.metadata.uid)
self.inventory.set_variable(container_name, 'container_name', container.name)
self.inventory.set_variable(container_name, 'container_image', container.image)
if container.state.running:
self.inventory.set_variable(container_name, 'container_state', 'Running')
if container.state.terminated:
self.inventory.set_variable(container_name, 'container_state', 'Terminated')
if container.state.waiting:
self.inventory.set_variable(container_name, 'container_state', 'Waiting')
self.inventory.set_variable(container_name, 'container_ready', container.ready)
self.inventory.set_variable(container_name, 'ansible_remote_tmp', '/tmp/')
self.inventory.set_variable(container_name, 'ansible_connection', self.transport)
self.inventory.set_variable(container_name, 'ansible_{0}_pod'.format(self.transport),
pod_name)
self.inventory.set_variable(container_name, 'ansible_{0}_container'.format(self.transport),
container.name)
self.inventory.set_variable(container_name, 'ansible_{0}_namespace'.format(self.transport),
namespace)
def get_services_for_namespace(self, client, name, namespace):
v1_service = client.resources.get(api_version='v1', kind='Service')
try:
obj = v1_service.get(namespace=namespace)
except DynamicApiError as exc:
self.display.debug(exc)
raise K8sInventoryException('Error fetching Service list: %s' % format_dynamic_api_exc(exc))
namespace_group = 'namespace_{0}'.format(namespace)
namespace_services_group = '{0}_services'.format(namespace_group)
self.inventory.add_group(name)
self.inventory.add_group(namespace_group)
self.inventory.add_child(name, namespace_group)
self.inventory.add_group(namespace_services_group)
self.inventory.add_child(namespace_group, namespace_services_group)
for service in obj.items:
service_name = service.metadata.name
service_labels = {} if not service.metadata.labels else dict(service.metadata.labels)
service_annotations = {} if not service.metadata.annotations else dict(service.metadata.annotations)
self.inventory.add_host(service_name)
if service.metadata.labels:
# create a group for each label_value
for key, value in service.metadata.labels:
group_name = 'label_{0}_{1}'.format(key, value)
self.inventory.add_group(group_name)
self.inventory.add_child(group_name, service_name)
self.inventory.add_child(namespace_services_group, service_name)
ports = [{'name': port.name,
'port': port.port,
'protocol': port.protocol,
'targetPort': port.targetPort,
'nodePort': port.nodePort} for port in service.spec.ports or []]
# add hostvars
self.inventory.set_variable(service_name, 'object_type', 'service')
self.inventory.set_variable(service_name, 'labels', service_labels)
self.inventory.set_variable(service_name, 'annotations', service_annotations)
self.inventory.set_variable(service_name, 'cluster_name', service.metadata.clusterName)
self.inventory.set_variable(service_name, 'ports', ports)
self.inventory.set_variable(service_name, 'type', service.spec.type)
self.inventory.set_variable(service_name, 'self_link', service.metadata.selfLink)
self.inventory.set_variable(service_name, 'resource_version', service.metadata.resourceVersion)
self.inventory.set_variable(service_name, 'uid', service.metadata.uid)
if service.spec.externalTrafficPolicy:
self.inventory.set_variable(service_name, 'external_traffic_policy',
service.spec.externalTrafficPolicy)
if service.spec.externalIPs:
self.inventory.set_variable(service_name, 'external_ips', service.spec.externalIPs)
if service.spec.externalName:
self.inventory.set_variable(service_name, 'external_name', service.spec.externalName)
if service.spec.healthCheckNodePort:
self.inventory.set_variable(service_name, 'health_check_node_port',
service.spec.healthCheckNodePort)
if service.spec.loadBalancerIP:
self.inventory.set_variable(service_name, 'load_balancer_ip',
service.spec.loadBalancerIP)
if service.spec.selector:
self.inventory.set_variable(service_name, 'selector', dict(service.spec.selector))
if hasattr(service.status.loadBalancer, 'ingress') and service.status.loadBalancer.ingress:
load_balancer = [{'hostname': ingress.hostname,
'ip': ingress.ip} for ingress in service.status.loadBalancer.ingress]
self.inventory.set_variable(service_name, 'load_balancer', load_balancer)

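A rough sketch of consuming the groups and hostvars built above; the group follows the C(namespace_<name>_pods) pattern from C(get_pods_for_namespace), and the C(default) namespace is an assumption:
- name: Report where each container in the default namespace is scheduled
  hosts: namespace_default_pods     # assumed namespace 'default'
  gather_facts: no
  tasks:
    - debug:
        msg: "{{ pod_name }} on {{ pod_node_name }} ({{ container_state | default('unknown') }})"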
@ -1,201 +0,0 @@
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: openshift
plugin_type: inventory
author:
- Chris Houseknecht <@chouseknecht>
short_description: OpenShift inventory source
description:
- Fetch containers, services and routes for one or more clusters
- Groups by cluster name, namespace, namespace_services, namespace_pods, namespace_routes, and labels
- Uses openshift.(yml|yaml) YAML configuration file to set parameter values.
options:
plugin:
description: token that ensures this is a source file for the 'openshift' plugin.
required: True
choices: ['openshift']
connections:
description:
- Optional list of cluster connection settings. If no connections are provided, the default
I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces
the active user is authorized to access.
name:
description:
- Optional name to assign to the cluster. If not provided, a name is constructed from the server
and port.
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the OpenShift client will attempt to load the default
configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG
environment variable.
context:
description:
- The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
variable.
host:
description:
- Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
api_key:
description:
- Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment
variable.
username:
description:
- Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME
environment variable.
password:
description:
- Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD
environment variable.
client_cert:
description:
- Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
environment variable.
aliases: [ cert_file ]
client_key:
description:
- Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE
environment variable.
aliases: [ key_file ]
ca_cert:
description:
- Path to a CA certificate used to authenticate with the API. Can also be specified via
K8S_AUTH_SSL_CA_CERT environment variable.
aliases: [ ssl_ca_cert ]
validate_certs:
description:
- "Whether or not to verify the API server's SSL certificates. Can also be specified via
K8S_AUTH_VERIFY_SSL environment variable."
type: bool
aliases: [ verify_ssl ]
namespaces:
description:
- List of namespaces. If not specified, will fetch all containers for all namespaces the user is authorized
to access.
requirements:
- "python >= 2.7"
- "openshift >= 0.6"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
# File must be named openshift.yaml or openshift.yml
# Authenticate with token, and return all pods and services for all namespaces
plugin: openshift
connections:
- host: https://192.168.64.4:8443
api_key: xxxxxxxxxxxxxxxx
verify_ssl: false
# Use default config (~/.kube/config) file and active context, and return objects for a specific namespace
plugin: openshift
connections:
- namespaces:
- testing
# Use a custom config file, and a specific context.
plugin: openshift
connections:
- kubeconfig: /path/to/config
context: 'awx/192-168-64-4:8443/developer'
'''
from ansible.plugins.inventory.k8s import K8sInventoryException, InventoryModule as K8sInventoryModule, format_dynamic_api_exc
try:
from openshift.dynamic.exceptions import DynamicApiError
except ImportError:
pass
class InventoryModule(K8sInventoryModule):
NAME = 'openshift'
transport = 'oc'
def fetch_objects(self, connections):
super(InventoryModule, self).fetch_objects(connections)
if connections:
if not isinstance(connections, list):
raise K8sInventoryException("Expecting connections to be a list.")
for connection in connections:
client = self.get_api_client(**connection)
name = connection.get('name', self.get_default_host_name(client.configuration.host))
if connection.get('namespaces'):
namespaces = connection['namespaces']
else:
namespaces = self.get_available_namespaces(client)
for namespace in namespaces:
self.get_routes_for_namespace(client, name, namespace)
else:
client = self.get_api_client()
name = self.get_default_host_name(client.configuration.host)
namespaces = self.get_available_namespaces(client)
for namespace in namespaces:
self.get_routes_for_namespace(client, name, namespace)
def get_routes_for_namespace(self, client, name, namespace):
v1_route = client.resources.get(api_version='v1', kind='Route')
try:
obj = v1_route.get(namespace=namespace)
except DynamicApiError as exc:
self.display.debug(exc)
raise K8sInventoryException('Error fetching Routes list: %s' % format_dynamic_api_exc(exc))
namespace_group = 'namespace_{0}'.format(namespace)
namespace_routes_group = '{0}_routes'.format(namespace_group)
self.inventory.add_group(name)
self.inventory.add_group(namespace_group)
self.inventory.add_child(name, namespace_group)
self.inventory.add_group(namespace_routes_group)
self.inventory.add_child(namespace_group, namespace_routes_group)
for route in obj.items:
route_name = route.metadata.name
route_annotations = {} if not route.metadata.annotations else dict(route.metadata.annotations)
self.inventory.add_host(route_name)
if route.metadata.labels:
# create a group for each label_value
for key, value in route.metadata.labels:
group_name = 'label_{0}_{1}'.format(key, value)
self.inventory.add_group(group_name)
self.inventory.add_child(group_name, route_name)
route_labels = dict(route.metadata.labels)
else:
route_labels = {}
self.inventory.add_child(namespace_routes_group, route_name)
# add hostvars
self.inventory.set_variable(route_name, 'labels', route_labels)
self.inventory.set_variable(route_name, 'annotations', route_annotations)
self.inventory.set_variable(route_name, 'cluster_name', route.metadata.clusterName)
self.inventory.set_variable(route_name, 'object_type', 'route')
self.inventory.set_variable(route_name, 'self_link', route.metadata.selfLink)
self.inventory.set_variable(route_name, 'resource_version', route.metadata.resourceVersion)
self.inventory.set_variable(route_name, 'uid', route.metadata.uid)
if route.spec.host:
self.inventory.set_variable(route_name, 'host', route.spec.host)
if route.spec.path:
self.inventory.set_variable(route_name, 'path', route.spec.path)
if hasattr(route.spec.port, 'targetPort') and route.spec.port.targetPort:
self.inventory.set_variable(route_name, 'port', dict(route.spec.port))

@ -1,299 +0,0 @@
#
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: k8s
version_added: "2.5"
short_description: Query the K8s API
description:
- Uses the OpenShift Python client to fetch a specific object by name, all matching objects within a
namespace, or all matching objects for all namespaces, as well as information about the cluster.
- Provides access to the full range of K8s APIs.
- Enables authentication via config file, certificates, password or token.
options:
cluster_info:
description:
- Use to specify the type of cluster information you are attempting to retrieve. Will take priority
over all the other options.
api_version:
description:
- Use to specify the API version. If I(resource_definition) is provided, the I(apiVersion) from the
I(resource_definition) will override this option.
default: v1
kind:
description:
- Use to specify an object model. If I(resource_definition) is provided, the I(kind) from the
I(resource_definition) will override this option.
required: true
resource_name:
description:
- Fetch a specific object by name. If I(resource_definition) is provided, the I(metadata.name) value
from the I(resource_definition) will override this option.
namespace:
description:
- Limit the objects returned to a specific namespace. If I(resource_definition) is provided, the
I(metadata.namespace) value from the I(resource_definition) will override this option.
label_selector:
description:
- Additional labels to include in the query. Ignored when I(resource_name) is provided.
field_selector:
description:
- Specific fields on which to query. Ignored when I(resource_name) is provided.
resource_definition:
description:
- "Provide a YAML configuration for an object. NOTE: I(kind), I(api_version), I(resource_name),
and I(namespace) will be overwritten by corresponding values found in the provided I(resource_definition)."
src:
description:
- "Provide a path to a file containing a valid YAML definition of an object dated. Mutually
exclusive with I(resource_definition). NOTE: I(kind), I(api_version), I(resource_name), and I(namespace)
will be overwritten by corresponding values found in the configuration read in from the I(src) file."
- Reads from the local file system. To read from the Ansible controller's file system, use the file lookup
plugin or template lookup plugin, combined with the from_yaml filter, and pass the result to
I(resource_definition). See Examples below.
host:
description:
- Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
api_key:
description:
- Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment variable.
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the openshift client will attempt to load the default
configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG environment
variable.
context:
description:
- The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
variable.
username:
description:
- Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME environment
variable.
password:
description:
- Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD environment
variable.
client_cert:
description:
- Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
environment
variable.
aliases: [ cert_file ]
client_key:
description:
- Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE environment
variable.
aliases: [ key_file ]
ca_cert:
description:
- Path to a CA certificate used to authenticate with the API. Can also be specified via K8S_AUTH_SSL_CA_CERT
environment variable.
aliases: [ ssl_ca_cert ]
validate_certs:
description:
- Whether or not to verify the API server's SSL certificates. Can also be specified via K8S_AUTH_VERIFY_SSL
environment variable.
type: bool
aliases: [ verify_ssl ]
requirements:
- "python >= 2.7"
- "openshift >= 0.6"
- "PyYAML >= 3.11"
notes:
- "The OpenShift Python client wraps the K8s Python client, providing full access to
all of the APIs and models available on both platforms. For API version details and
additional information visit https://github.com/openshift/openshift-restclient-python"
"""
EXAMPLES = """
- name: Fetch a list of namespaces
set_fact:
projects: "{{ lookup('k8s', api_version='v1', kind='Namespace') }}"
- name: Fetch all deployments
set_fact:
deployments: "{{ lookup('k8s', kind='Deployment') }}"
- name: Fetch all deployments in a namespace
set_fact:
deployments: "{{ lookup('k8s', kind='Deployment', namespace='testing') }}"
- name: Fetch a specific deployment by name
set_fact:
deployments: "{{ lookup('k8s', kind='Deployment', namespace='testing', resource_name='elastic') }}"
- name: Fetch with label selector
set_fact:
service: "{{ lookup('k8s', kind='Service', label_selector='app=galaxy') }}"
# Use parameters from a YAML config
- name: Load config from the Ansible controller filesystem
set_fact:
config: "{{ lookup('file', 'service.yml') | from_yaml }}"
- name: Using the config (loaded from a file in prior task), fetch the latest version of the object
set_fact:
service: "{{ lookup('k8s', resource_definition=config) }}"
- name: Use a config from the local filesystem
set_fact:
service: "{{ lookup('k8s', src='service.yml') }}"
"""
RETURN = """
_list:
description:
- One or more object definitions returned from the API.
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: The REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
"""
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.k8s.common import K8sAnsibleMixin
from ansible.errors import AnsibleError
try:
from openshift.dynamic import DynamicClient
from openshift.dynamic.exceptions import NotFoundError
HAS_K8S_MODULE_HELPER = True
k8s_import_exception = None
except ImportError as e:
HAS_K8S_MODULE_HELPER = False
k8s_import_exception = e
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
class KubernetesLookup(K8sAnsibleMixin):
def __init__(self):
if not HAS_K8S_MODULE_HELPER:
raise Exception(
"Requires the OpenShift Python client. Try `pip install openshift`. Detail: {0}".format(k8s_import_exception)
)
if not HAS_YAML:
raise Exception(
"Requires PyYAML. Try `pip install PyYAML`"
)
self.kind = None
self.name = None
self.namespace = None
self.api_version = None
self.label_selector = None
self.field_selector = None
self.include_uninitialized = None
self.resource_definition = None
self.helper = None
self.connection = {}
def fail(self, msg=None):
raise AnsibleError(msg)
def run(self, terms, variables=None, **kwargs):
self.params = kwargs
self.client = self.get_api_client()
cluster_info = kwargs.get('cluster_info')
if cluster_info == 'version':
return [self.client.version]
if cluster_info == 'api_groups':
return [self.client.resources.api_groups]
self.kind = kwargs.get('kind')
self.name = kwargs.get('resource_name')
self.namespace = kwargs.get('namespace')
self.api_version = kwargs.get('api_version', 'v1')
self.label_selector = kwargs.get('label_selector')
self.field_selector = kwargs.get('field_selector')
self.include_uninitialized = kwargs.get('include_uninitialized', False)
resource_definition = kwargs.get('resource_definition')
src = kwargs.get('src')
if src:
resource_definition = self.load_resource_definitions(src)[0]
if resource_definition:
self.kind = resource_definition.get('kind', self.kind)
self.api_version = resource_definition.get('apiVersion', self.api_version)
self.name = resource_definition.get('metadata', {}).get('name', self.name)
self.namespace = resource_definition.get('metadata', {}).get('namespace', self.namespace)
if not self.kind:
raise AnsibleError(
"Error: no Kind specified. Use the 'kind' parameter, or provide an object YAML configuration "
"using the 'resource_definition' parameter."
)
resource = self.find_resource(self.kind, self.api_version, fail=True)
try:
k8s_obj = resource.get(name=self.name, namespace=self.namespace, label_selector=self.label_selector, field_selector=self.field_selector)
except NotFoundError:
return []
if self.name:
return [k8s_obj.to_dict()]
return k8s_obj.to_dict().get('items')
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
return KubernetesLookup().run(terms, variables=variables, **kwargs)

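The C(cluster_info) option handled at the top of C(run()) has no entry in the EXAMPLES block above; a minimal sketch:
- name: Fetch the cluster version information
  set_fact:
    cluster_version: "{{ lookup('k8s', cluster_info='version') }}"
- name: Fetch the available API groups
  set_fact:
    api_groups: "{{ lookup('k8s', cluster_info='api_groups') }}"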
@ -1,23 +0,0 @@
Wait tests
----------
Wait tests require at least one node, and don't work on the normal k8s
openshift-origin container as provided by `ansible-test --docker -v k8s`.
Minikube, Kubernetes from Docker, or any other Kubernetes service will
suffice.
If kubectl is already using the right config file and context, you can
just do:
```
cd test/integration/targets/k8s
./runme.sh -vv
```
Otherwise, set one or both of `K8S_AUTH_KUBECONFIG` and `K8S_AUTH_CONTEXT`
and use the same command.

@ -1,2 +0,0 @@
cloud/openshift
shippable/cloud/group1

@ -1,32 +0,0 @@
recreate_crd_default_merge_expectation: recreate_crd is not failed
k8s_pod_metadata:
labels:
app: "{{ k8s_pod_name }}"
k8s_pod_spec:
containers:
- image: "{{ k8s_pod_image }}"
imagePullPolicy: Always
name: "{{ k8s_pod_name }}"
command: "{{ k8s_pod_command }}"
readinessProbe:
initialDelaySeconds: 15
exec:
command:
- /bin/true
resources:
limits:
cpu: "100m"
memory: "100Mi"
ports: "{{ k8s_pod_ports }}"
k8s_pod_command: []
k8s_pod_ports: []
k8s_pod_template:
metadata: "{{ k8s_pod_metadata }}"
spec: "{{ k8s_pod_spec }}"
k8s_openshift: yes

@ -1,20 +0,0 @@
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
name: acme-crt
spec:
secretName: acme-crt-secret
dnsNames:
- foo.example.com
- bar.example.com
acme:
config:
- ingressClass: nginx
domains:
- foo.example.com
- bar.example.com
issuerRef:
name: letsencrypt-prod
# We can reference ClusterIssuers by changing the kind here.
# The default value is Issuer (i.e. a locally namespaced Issuer)
kind: Issuer

@ -1,21 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: kuard
name: kuard
namespace: default
spec:
replicas: 3
selector:
matchLabels:
app: kuard
unwanted: value
template:
metadata:
labels:
app: kuard
spec:
containers:
- image: gcr.io/kuar-demo/kuard-amd64:1
name: kuard

@ -1,20 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: kuard
name: kuard
namespace: default
spec:
replicas: hello
selector:
matchLabels:
app: kuard
template:
metadata:
labels:
app: kuard
spec:
containers:
- image: gcr.io/kuar-demo/kuard-amd64:1
name: kuard

@ -1,14 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: certificates.certmanager.k8s.io
spec:
group: certmanager.k8s.io
version: v1alpha1
scope: Namespaced
names:
kind: Certificate
plural: certificates
shortNames:
- cert
- certs

@ -1,2 +0,0 @@
dependencies:
- setup_remote_tmp_dir

@ -1,68 +0,0 @@
- block:
- name: Ensure that append_hash namespace exists
k8s:
kind: Namespace
name: append-hash
- name: create k8s_resource variable
set_fact:
k8s_resource:
metadata:
name: config-map-test
namespace: append-hash
apiVersion: v1
kind: ConfigMap
data:
hello: world
- name: Create config map
k8s:
definition: "{{ k8s_resource }}"
append_hash: yes
register: k8s_configmap1
- name: check configmap is created with a hash
assert:
that:
- k8s_configmap1 is changed
- k8s_configmap1.result.metadata.name != 'config-map-test'
- k8s_configmap1.result.metadata.name[:-10] == 'config-map-test-'
- name: recreate same config map
k8s:
definition: "{{ k8s_resource }}"
append_hash: yes
register: k8s_configmap2
- name: check configmaps are different
assert:
that:
- k8s_configmap2 is not changed
- k8s_configmap1.result.metadata.name == k8s_configmap2.result.metadata.name
- name: add key to config map
k8s:
definition:
metadata:
name: config-map-test
namespace: append-hash
apiVersion: v1
kind: ConfigMap
data:
hello: world
another: value
append_hash: yes
register: k8s_configmap3
- name: check configmaps are different
assert:
that:
- k8s_configmap3 is changed
- k8s_configmap1.result.metadata.name != k8s_configmap3.result.metadata.name
always:
- name: ensure that namespace is removed
k8s:
kind: Namespace
name: append-hash
state: absent

@ -1,277 +0,0 @@
- block:
- python_requirements_info:
dependencies:
- openshift
- kubernetes
- set_fact:
apply_namespace: apply
- name: ensure namespace exists
k8s:
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ apply_namespace }}"
- name: add a configmap
k8s:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
definition:
kind: ConfigMap
apiVersion: v1
data:
one: "1"
two: "2"
three: "3"
apply: yes
register: k8s_configmap
- name: check configmap was created
assert:
that:
- k8s_configmap is changed
- k8s_configmap.result.metadata.annotations|default(False)
- name: add same configmap again
k8s:
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
data:
one: "1"
two: "2"
three: "3"
apply: yes
register: k8s_configmap_2
- name: check nothing changed
assert:
that:
- k8s_configmap_2 is not changed
- name: add same configmap again with check mode on
k8s:
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
data:
one: "1"
two: "2"
three: "3"
apply: yes
check_mode: yes
register: k8s_configmap_check
- name: check nothing changed
assert:
that:
- k8s_configmap_check is not changed
- name: add same configmap again but using name and namespace args
k8s:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
definition:
kind: ConfigMap
apiVersion: v1
data:
one: "1"
two: "2"
three: "3"
apply: yes
register: k8s_configmap_2a
- name: check nothing changed
assert:
that:
- k8s_configmap_2a is not changed
- name: update configmap
k8s:
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
data:
one: "1"
three: "3"
four: "4"
apply: yes
register: k8s_configmap_3
- name: ensure that configmap has been correctly updated
assert:
that:
- k8s_configmap_3 is changed
- "'four' in k8s_configmap_3.result.data"
- "'two' not in k8s_configmap_3.result.data"
- name: add a service
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8080
targetPort: 8080
type: NodePort
apply: yes
register: k8s_service
- name: add exactly same service
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8080
targetPort: 8080
type: NodePort
apply: yes
register: k8s_service_2
- name: check nothing changed
assert:
that:
- k8s_service_2 is not changed
- name: change service ports
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8081
targetPort: 8081
type: NodePort
apply: yes
register: k8s_service_3
- name: check ports are correct
assert:
that:
- k8s_service_3 is changed
- k8s_service_3.result.spec.ports | length == 1
- k8s_service_3.result.spec.ports[0].port == 8081
- name: insert new service port
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: mesh
port: 8080
targetPort: 8080
- name: http
port: 8081
targetPort: 8081
type: NodePort
apply: yes
register: k8s_service_4
- name: check ports are correct
assert:
that:
- k8s_service_4 is changed
- k8s_service_4.result.spec.ports | length == 2
- k8s_service_4.result.spec.ports[0].port == 8080
- k8s_service_4.result.spec.ports[1].port == 8081
- name: remove new service port (check mode)
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8081
targetPort: 8081
type: NodePort
apply: yes
check_mode: yes
register: k8s_service_check
- name: check ports are correct
assert:
that:
- k8s_service_check is changed
- k8s_service_check.result.spec.ports | length == 1
- k8s_service_check.result.spec.ports[0].port == 8081
- name: remove new service port
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8081
targetPort: 8081
type: NodePort
apply: yes
register: k8s_service_5
- name: check ports are correct
assert:
that:
- k8s_service_5 is changed
- k8s_service_5.result.spec.ports | length == 1
- k8s_service_5.result.spec.ports[0].port == 8081
always:
- name: remove namespace
k8s:
kind: Namespace
name: "{{ apply_namespace }}"
state: absent

@ -1,71 +0,0 @@
# TODO: This is the only way I could get the kubeconfig, I don't know why. Running the lookup outside of debug seems to return an empty string
#- debug: msg={{ lookup('env', 'K8S_AUTH_KUBECONFIG') }}
# register: kubeconfig
# Kubernetes resources
- block:
- name: Create a namespace
k8s:
name: crd
kind: Namespace
- name: install custom resource definitions
k8s:
definition: "{{ lookup('file', role_path + '/files/setup-crd.yml') }}"
- name: pause 5 seconds to avoid race condition
pause:
seconds: 5
- name: create custom resource definition
k8s:
definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
namespace: crd
apply: "{{ create_crd_with_apply | default(omit) }}"
register: create_crd
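# default(omit) drops the apply parameter entirely when create_crd_with_apply
# is undefined, so the module falls back to its own default rather than
# receiving an empty value.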
- name: patch custom resource definition
k8s:
definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
namespace: crd
register: recreate_crd
ignore_errors: yes
- name: assert that recreating crd is as expected
assert:
that:
- recreate_crd_default_merge_expectation
- block:
- name: recreate custom resource definition with merge_type
k8s:
definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
merge_type: merge
namespace: crd
register: recreate_crd_with_merge
- name: recreate custom resource definition with merge_type list
k8s:
definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
merge_type:
- strategic-merge
- merge
namespace: crd
register: recreate_crd_with_merge_list
when: recreate_crd is successful
- name: remove crd
k8s:
definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
namespace: crd
state: absent
always:
- name: remove crd namespace
k8s:
kind: Namespace
name: crd
state: absent
ignore_errors: yes

@ -1,101 +0,0 @@
- name: ensure that there are actually some nodes
k8s_info:
kind: Node
register: nodes
- block:
- set_fact:
delete_namespace: delete
- name: ensure namespace exists
k8s:
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ delete_namespace }}"
- name: add a daemonset
k8s:
definition:
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: delete-daemonset
namespace: "{{ delete_namespace }}"
spec:
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: 180
vars:
k8s_pod_name: delete-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
register: ds
- name: check that daemonset wait worked
assert:
that:
- ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
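# Note (not part of the original test): the extensions/v1beta1 workload APIs
# used here were removed in Kubernetes 1.16; on current clusters the same
# DaemonSet manifest would only change the apiVersion:
#   apiVersion: apps/v1
#   kind: DaemonSet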
- name: check if pods exist
k8s_info:
namespace: "{{ delete_namespace }}"
kind: Pod
label_selectors:
- "app={{ k8s_pod_name }}"
vars:
k8s_pod_name: delete-ds
register: pods_create
- name: assert that there are pods
assert:
that:
- pods_create.resources
- name: remove the daemonset
k8s:
kind: DaemonSet
name: delete-daemonset
namespace: "{{ delete_namespace }}"
state: absent
wait: yes
- name: show status of pods
k8s_info:
namespace: "{{ delete_namespace }}"
kind: Pod
label_selectors:
- "app={{ k8s_pod_name }}"
vars:
k8s_pod_name: delete-ds
- name: wait for background deletion
pause:
seconds: 30
- name: check if pods still exist
k8s_info:
namespace: "{{ delete_namespace }}"
kind: Pod
label_selectors:
- "app={{ k8s_pod_name }}"
vars:
k8s_pod_name: delete-ds
register: pods_delete
- name: assert that deleting the daemonset deleted the pods
assert:
that:
- not pods_delete.resources
always:
- name: remove namespace
k8s:
kind: Namespace
name: "{{ delete_namespace }}"
state: absent
when: (nodes.resources | length) > 0

@ -1,375 +0,0 @@
# TODO: This is the only way I could get the kubeconfig; I don't know why, but running the lookup outside of debug seems to return an empty string
#- debug: msg={{ lookup('env', 'K8S_AUTH_KUBECONFIG') }}
# register: kubeconfig
# Kubernetes resources
- include_tasks: delete.yml
- include_tasks: apply.yml
- include_tasks: waiter.yml
- block:
- name: Create a namespace
k8s:
name: testing
kind: Namespace
register: output
- name: show output
debug:
var: output
- name: Setting validate_certs to true causes a failure
k8s:
name: testing
kind: Namespace
validate_certs: yes
ignore_errors: yes
register: output
- name: assert that validate_certs caused a failure (and therefore was correctly translated to verify_ssl)
assert:
that:
- output is failed
- name: k8s_info works with empty resources
k8s_info:
kind: Deployment
namespace: testing
api_version: extensions/v1beta1
register: k8s_info
- name: assert that k8s_info is in correct format
assert:
that:
- "'resources' in k8s_info"
- not k8s_info.resources
- name: Create a service
k8s:
state: present
resource_definition: &svc
apiVersion: v1
kind: Service
metadata:
name: web
namespace: testing
labels:
app: galaxy
service: web
spec:
selector:
app: galaxy
service: web
ports:
- protocol: TCP
targetPort: 8000
name: port-8000-tcp
port: 8000
register: output
- name: show output
debug:
var: output
- name: Create the service again
k8s:
state: present
resource_definition: *svc
register: output
- name: Service creation should be idempotent
assert:
that: not output.changed
- name: Create a ConfigMap
k8s:
kind: ConfigMap
name: test-force-update
namespace: testing
definition:
data:
key: value
- name: Force update ConfigMap
k8s:
kind: ConfigMap
name: test-force-update
namespace: testing
definition:
data:
key: newvalue
force: yes
- name: Create PVC
k8s:
state: present
inline: &pvc
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: elastic-volume
namespace: testing
spec:
resources:
requests:
storage: 5Gi
accessModes:
- ReadWriteOnce
register: output
- name: Show output
debug:
var: output
- name: Create the PVC again
k8s:
state: present
inline: *pvc
register: output
- name: PVC creation should be idempotent
assert:
that: not output.changed
- name: Create deployment
k8s:
state: present
inline: &deployment
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: elastic
labels:
app: galaxy
service: elastic
namespace: testing
spec:
template:
metadata:
labels:
app: galaxy
service: elastic
spec:
containers:
- name: elastic
volumeMounts:
- mountPath: /usr/share/elasticsearch/data
name: elastic-volume
command: ['elasticsearch']
image: 'ansible/galaxy-elasticsearch:2.4.6'
volumes:
- name: elastic-volume
persistentVolumeClaim:
claimName: elastic-volume
replicas: 1
strategy:
type: RollingUpdate
register: output
- name: Show output
debug:
var: output
- name: Create deployment again
k8s:
state: present
inline: *deployment
register: output
- name: Deployment creation should be idempotent
assert:
that: not output.changed
- debug:
var: k8s_openshift
- include: openshift.yml
when: k8s_openshift | bool
### Type tests
- name: Create a namespace from a string
k8s:
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing1
- name: Namespace should exist
k8s_info:
kind: Namespace
api_version: v1
name: testing1
register: k8s_info_testing1
failed_when: not k8s_info_testing1.resources or k8s_info_testing1.resources[0].status.phase != "Active"
- name: Create resources from a multidocument yaml string
k8s:
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing2
---
kind: Namespace
apiVersion: v1
metadata:
name: testing3
- name: Lookup namespaces
k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing2
- testing3
register: k8s_namespaces
- name: Resources should exist
assert:
that: item.resources[0].status.phase == 'Active'
loop: "{{ k8s_namespaces.results }}"
- name: Delete resources from a multidocument yaml string
k8s:
state: absent
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing2
---
kind: Namespace
apiVersion: v1
metadata:
name: testing3
- name: Lookup namespaces
k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing2
- testing3
register: k8s_namespaces
- name: Resources should not exist
assert:
that:
- not item.resources or item.resources[0].status.phase == "Terminating"
loop: "{{ k8s_namespaces.results }}"
- name: Create resources from a list
k8s:
definition:
- kind: Namespace
apiVersion: v1
metadata:
name: testing4
- kind: Namespace
apiVersion: v1
metadata:
name: testing5
- name: Lookup namespaces
k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing4
- testing5
register: k8s_namespaces
- name: Resources should exist
assert:
that: item.resources[0].status.phase == 'Active'
loop: "{{ k8s_namespaces.results }}"
- name: Delete resources from a list
k8s:
state: absent
definition:
- kind: Namespace
apiVersion: v1
metadata:
name: testing4
- kind: Namespace
apiVersion: v1
metadata:
name: testing5
- k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing4
- testing5
register: k8s_info
- name: Resources should be absent or terminating
assert:
that: not item.resources or item.resources[0].status.phase == "Terminating"
loop: "{{ k8s_info.results }}"
- name: Create resources from a yaml string ending with ---
k8s:
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing6
---
- name: Namespace should exist
k8s_info:
kind: Namespace
api_version: v1
name: testing6
register: k8s_info_testing6
failed_when: not k8s_info_testing6.resources or k8s_info_testing6.resources[0].status.phase != "Active"
- include_tasks: crd.yml
- include_tasks: lists.yml
- include_tasks: append_hash.yml
always:
- name: Delete all namespaces
k8s:
state: absent
definition:
- kind: Namespace
apiVersion: v1
metadata:
name: testing
- kind: Namespace
apiVersion: v1
metadata:
name: testing1
- kind: Namespace
apiVersion: v1
metadata:
name: testing2
- kind: Namespace
apiVersion: v1
metadata:
name: testing3
- kind: Namespace
apiVersion: v1
metadata:
name: testing4
- kind: Namespace
apiVersion: v1
metadata:
name: testing5
- kind: Namespace
apiVersion: v1
metadata:
name: testing6
ignore_errors: yes

@ -1,140 +0,0 @@
---
- name: Ensure testing1 namespace exists
k8s:
api_version: v1
kind: Namespace
name: testing1
- block:
- name: Create configmaps
k8s:
namespace: testing1
definition:
apiVersion: v1
kind: ConfigMapList
items: '{{ configmaps }}'
- name: Get ConfigMaps
k8s_info:
api_version: v1
kind: ConfigMap
namespace: testing1
label_selectors:
- app=test
register: cms
- name: All three configmaps should exist
assert:
that: item.data.a is defined
with_items: '{{ cms.resources }}'
- name: Delete configmaps
k8s:
state: absent
namespace: testing1
definition:
apiVersion: v1
kind: ConfigMapList
items: '{{ configmaps }}'
- name: Get ConfigMaps
k8s_info:
api_version: v1
kind: ConfigMap
namespace: testing1
label_selectors:
- app=test
register: cms
- name: None of the three configmaps should remain
assert:
that: not cms.resources
vars:
configmaps:
- metadata:
name: list-example-1
labels:
app: test
data:
a: first
- metadata:
name: list-example-2
labels:
app: test
data:
a: second
- metadata:
name: list-example-3
labels:
app: test
data:
a: third
- block:
- name: Create list of arbitrary resources
k8s:
namespace: testing1
definition:
apiVersion: v1
kind: List
namespace: testing1
items: '{{ resources }}'
- name: Get the created resources
k8s_info:
api_version: '{{ item.apiVersion }}'
kind: '{{ item.kind }}'
namespace: testing1
name: '{{ item.metadata.name }}'
register: list_resources
with_items: '{{ resources }}'
- name: All resources should exist
assert:
that: ((list_resources.results | sum(attribute="resources", start=[])) | length) == (resources | length)
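# sum(attribute="resources", start=[]) concatenates the per-item resources
# lists from the loop results into one flat list, whose length must match the
# number of requested resources.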
- name: Delete list of arbitrary resources
k8s:
state: absent
namespace: testing1
definition:
apiVersion: v1
kind: List
namespace: testing1
items: '{{ resources }}'
- name: Get the resources
k8s_info:
api_version: '{{ item.apiVersion }}'
kind: '{{ item.kind }}'
namespace: testing1
name: '{{ item.metadata.name }}'
register: list_resources
with_items: '{{ resources }}'
- name: The resources should not exist
assert:
that: not ((list_resources.results | sum(attribute="resources", start=[])) | length)
vars:
resources:
- apiVersion: v1
kind: ConfigMap
metadata:
name: list-example-4
data:
key: value
- apiVersion: v1
kind: Service
metadata:
name: list-example-svc
labels:
app: test
spec:
selector:
app: test
ports:
- protocol: TCP
targetPort: 8000
name: port-8000-tcp
port: 8000

@ -1,92 +0,0 @@
- set_fact:
virtualenv: "{{ remote_tmp_dir }}/virtualenv"
virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"
- set_fact:
virtualenv_interpreter: "{{ virtualenv }}/bin/python"
- pip:
name: virtualenv
# Test graceful failure for missing kubernetes-validate
- pip:
name:
- openshift>=0.9.2
- coverage
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: no
- include_tasks: validate_not_installed.yml
vars:
ansible_python_interpreter: "{{ virtualenv_interpreter }}"
- file:
path: "{{ virtualenv }}"
state: absent
no_log: yes
# Test validate with kubernetes-validate
- pip:
name:
- kubernetes-validate==1.12.0
- openshift>=0.9.2
- coverage
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: no
- include_tasks: validate_installed.yml
vars:
ansible_python_interpreter: "{{ virtualenv_interpreter }}"
playbook_namespace: ansible-test-k8s-validate
- file:
path: "{{ virtualenv }}"
state: absent
no_log: yes
# Test graceful failure for older versions of openshift
- pip:
name:
- openshift==0.6.0
- kubernetes==6.0.0
- coverage
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: no
- include_tasks: older_openshift_fail.yml
vars:
ansible_python_interpreter: "{{ virtualenv_interpreter }}"
recreate_crd_default_merge_expectation: recreate_crd is failed
playbook_namespace: ansible-test-k8s-older-openshift
- file:
path: "{{ virtualenv }}"
state: absent
no_log: yes
# Run full test suite
- pip:
name:
- openshift>=0.9.2
- coverage
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: no
- include_tasks: full_test.yml
vars:
ansible_python_interpreter: "{{ virtualenv_interpreter }}"
create_crd_with_apply: no
playbook_namespace: ansible-test-k8s-full
- file:
path: "{{ virtualenv }}"
state: absent
no_log: yes

@ -1,69 +0,0 @@
- python_requirements_info:
dependencies:
- openshift==0.6.0
- kubernetes==6.0.0
# append_hash
- name: use append_hash with ConfigMap
k8s:
definition:
metadata:
name: config-map-test
namespace: "{{ playbook_namespace }}"
apiVersion: v1
kind: ConfigMap
data:
hello: world
append_hash: yes
ignore_errors: yes
register: k8s_append_hash
- name: assert that append_hash fails gracefully
assert:
that:
- k8s_append_hash is failed
- "'Failed to import the required Python library (openshift >= 0.7.2)' in k8s_append_hash.msg"
- "'. This is required for append_hash.' in k8s_append_hash.msg"
# validate
- name: attempt to use validate with older openshift
k8s:
definition:
metadata:
name: config-map-test
namespace: "{{ playbook_namespace }}"
apiVersion: v1
kind: ConfigMap
data:
hello: world
validate:
fail_on_error: yes
ignore_errors: yes
register: k8s_validate
- name: assert that validate fails gracefully
assert:
that:
- k8s_validate is failed
- "k8s_validate.msg == 'openshift >= 0.8.0 is required for validate'"
# apply
- name: attempt to use apply with older openshift
k8s:
definition:
metadata:
name: config-map-test
namespace: "{{ playbook_namespace }}"
apiVersion: v1
kind: ConfigMap
data:
hello: world
apply: yes
ignore_errors: yes
register: k8s_apply
- name: assert that apply fails gracefully
assert:
that:
- k8s_apply is failed
- "k8s_apply.msg.startswith('Failed to import the required Python library (openshift >= 0.9.2)')"

@ -1,61 +0,0 @@
# OpenShift Resources
- name: Create a project
k8s:
name: testing
kind: Project
api_version: v1
apply: no
register: output
- name: show output
debug:
var: output
- name: Create deployment config
k8s:
state: present
inline: &dc
apiVersion: v1
kind: DeploymentConfig
metadata:
name: elastic
labels:
app: galaxy
service: elastic
namespace: testing
spec:
template:
metadata:
labels:
app: galaxy
service: elastic
spec:
containers:
- name: elastic
volumeMounts:
- mountPath: /usr/share/elasticsearch/data
name: elastic-volume
command: ['elasticsearch']
image: 'ansible/galaxy-elasticsearch:2.4.6'
volumes:
- name: elastic-volume
persistentVolumeClaim:
claimName: elastic-volume
replicas: 1
strategy:
type: Rolling
register: output
- name: Show output
debug:
var: output
- name: Create deployment config again
k8s:
state: present
inline: *dc
register: output
- name: DC creation should be idempotent
assert:
that: not output.changed

@ -1,125 +0,0 @@
- block:
- name: Create a namespace
k8s:
name: "{{ playbook_namespace }}"
kind: Namespace
- copy:
src: files
dest: "{{ remote_tmp_dir }}"
- name: incredibly simple ConfigMap
k8s:
definition:
apiVersion: v1
kind: ConfigMap
metadata:
name: hello
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
register: k8s_with_validate
- name: assert that k8s_with_validate succeeds
assert:
that:
- k8s_with_validate is successful
- name: extra property does not fail without strict
k8s:
src: "{{ remote_tmp_dir }}/files/kuard-extra-property.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
strict: no
- name: extra property fails with strict
k8s:
src: "{{ remote_tmp_dir }}/files/kuard-extra-property.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
strict: yes
ignore_errors: yes
register: extra_property
- name: check that extra property fails with strict
assert:
that:
- extra_property is failed
- name: invalid type fails at validation stage
k8s:
src: "{{ remote_tmp_dir }}/files/kuard-invalid-type.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
strict: no
ignore_errors: yes
register: invalid_type
- name: check that invalid type fails
assert:
that:
- invalid_type is failed
- name: invalid type fails with warnings when fail_on_error is False
k8s:
src: "{{ remote_tmp_dir }}/files/kuard-invalid-type.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: no
strict: no
ignore_errors: yes
register: invalid_type_no_fail
- name: check that invalid type still fails when fail_on_error is false
assert:
that:
- invalid_type_no_fail is failed
- name: setup custom resource definition
k8s:
src: "{{ remote_tmp_dir }}/files/setup-crd.yml"
- name: wait a few seconds
pause:
seconds: 5
- name: add custom resource definition
k8s:
src: "{{ remote_tmp_dir }}/files/crd-resource.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
strict: yes
register: unknown_kind
- name: check that unknown kind warns
assert:
that:
- unknown_kind is successful
- "'warnings' in unknown_kind"
always:
- name: remove custom resource
k8s:
definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
namespace: "{{ playbook_namespace }}"
state: absent
ignore_errors: yes
- name: remove custom resource definitions
k8s:
definition: "{{ lookup('file', role_path + '/files/setup-crd.yml') }}"
state: absent
- name: Delete namespace
k8s:
state: absent
definition:
- kind: Namespace
apiVersion: v1
metadata:
name: "{{ playbook_namespace }}"
ignore_errors: yes

@ -1,23 +0,0 @@
- python_requirements_info:
dependencies:
- openshift
- kubernetes
- kubernetes-validate
- k8s:
definition:
apiVersion: v1
kind: ConfigMap
metadata:
name: hello
namespace: default
validate:
fail_on_error: yes
ignore_errors: yes
register: k8s_no_validate
- name: assert that k8s_no_validate fails gracefully
assert:
that:
- k8s_no_validate is failed
- "k8s_no_validate.msg == 'kubernetes-validate python library is required to validate resources'"

@ -1,355 +0,0 @@
- name: ensure that there are actually some nodes
k8s_info:
kind: Node
register: nodes
- block:
- set_fact:
wait_namespace: wait
- name: ensure namespace exists
k8s:
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ wait_namespace }}"
- name: add a simple pod
k8s:
definition:
apiVersion: v1
kind: Pod
metadata:
name: "{{ k8s_pod_name }}"
namespace: "{{ wait_namespace }}"
spec: "{{ k8s_pod_spec }}"
wait: yes
vars:
k8s_pod_name: wait-pod
k8s_pod_image: alpine:3.8
k8s_pod_command:
- sleep
- "10000"
register: wait_pod
ignore_errors: yes
- name: assert that pod creation succeeded
assert:
that:
- wait_pod is successful
- name: add a daemonset
k8s:
definition:
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: wait-daemonset
namespace: "{{ wait_namespace }}"
spec:
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_sleep: 3
wait_timeout: 180
vars:
k8s_pod_name: wait-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
register: ds
- name: check that daemonset wait worked
assert:
that:
- ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
- name: update a daemonset in check_mode
k8s:
definition:
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: wait-daemonset
namespace: "{{ wait_namespace }}"
spec:
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
updateStrategy:
type: RollingUpdate
template: "{{ k8s_pod_template }}"
wait: yes
wait_sleep: 3
wait_timeout: 180
vars:
k8s_pod_name: wait-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
check_mode: yes
register: update_ds_check_mode
- name: check that check_mode returned changed
assert:
that:
- update_ds_check_mode is changed
- name: update a daemonset
k8s:
definition:
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: wait-daemonset
namespace: "{{ wait_namespace }}"
spec:
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
updateStrategy:
type: RollingUpdate
template: "{{ k8s_pod_template }}"
wait: yes
wait_sleep: 3
wait_timeout: 180
vars:
k8s_pod_name: wait-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:3
register: ds
- name: get updated pods
k8s_info:
api_version: v1
kind: Pod
namespace: "{{ wait_namespace }}"
label_selectors:
- app=wait-ds
register: updated_ds_pods
- name: check that daemonset wait worked
assert:
that:
- ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
- updated_ds_pods.resources[0].spec.containers[0].image.endswith(":3")
- name: add a crashing pod
k8s:
definition:
apiVersion: v1
kind: Pod
metadata:
name: "{{ k8s_pod_name }}"
namespace: "{{ wait_namespace }}"
spec: "{{ k8s_pod_spec }}"
wait: yes
wait_sleep: 1
wait_timeout: 30
vars:
k8s_pod_name: wait-crash-pod
k8s_pod_image: alpine:3.8
k8s_pod_command:
- /bin/false
register: crash_pod
ignore_errors: yes
- name: check that task failed
assert:
that:
- crash_pod is failed
- name: use a non-existent image
k8s:
definition:
apiVersion: v1
kind: Pod
metadata:
name: "{{ k8s_pod_name }}"
namespace: "{{ wait_namespace }}"
spec: "{{ k8s_pod_spec }}"
wait: yes
wait_sleep: 1
wait_timeout: 30
vars:
k8s_pod_name: wait-no-image-pod
k8s_pod_image: i_made_this_up:and_this_too
register: no_image_pod
ignore_errors: yes
- name: check that task failed
assert:
that:
- no_image_pod is failed
- name: add a deployment
k8s:
definition:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: wait-deploy
namespace: "{{ wait_namespace }}"
spec:
replicas: 3
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
vars:
k8s_pod_name: wait-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
register: deploy
- name: check that deployment wait worked
assert:
that:
- deploy.result.status.availableReplicas == deploy.result.status.replicas
- name: update a deployment
k8s:
definition:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: wait-deploy
namespace: "{{ wait_namespace }}"
spec:
replicas: 3
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
vars:
k8s_pod_name: wait-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
register: update_deploy
- name: get updated pods
k8s_info:
api_version: v1
kind: Pod
namespace: "{{ wait_namespace }}"
label_selectors:
- app=wait-deploy
register: updated_deploy_pods
- name: check that deployment wait worked
assert:
that:
- deploy.result.status.availableReplicas == deploy.result.status.replicas
- updated_deploy_pods.resources[0].spec.containers[0].image.endswith(":2")
- name: pause a deployment
k8s:
definition:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: wait-deploy
namespace: "{{ wait_namespace }}"
spec:
paused: True
apply: no
wait: yes
wait_condition:
type: Progressing
status: Unknown
reason: DeploymentPaused
register: pause_deploy
- name: check that paused deployment wait worked
assert:
that:
- condition.reason == "DeploymentPaused"
- condition.status == "Unknown"
vars:
condition: '{{ pause_deploy.result.status.conditions | json_query("[?type==`Progressing`]") | first }}'
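# The json_query filter selects the condition whose type is Progressing from
# status.conditions; first unwraps the single match checked above.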
- name: add a service based on the deployment
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: wait-svc
namespace: "{{ wait_namespace }}"
spec:
selector:
app: "{{ k8s_pod_name }}"
ports:
- port: 8080
targetPort: 8080
protocol: TCP
wait: yes
vars:
k8s_pod_name: wait-deploy
register: service
- name: assert that waiting for service works
assert:
that:
- service is successful
- name: add a crashing deployment
k8s:
definition:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: wait-crash-deploy
namespace: "{{ wait_namespace }}"
spec:
replicas: 3
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
vars:
k8s_pod_name: wait-crash-deploy
k8s_pod_image: alpine:3.8
k8s_pod_command:
- /bin/false
register: wait_crash_deploy
ignore_errors: yes
- name: check that task failed
assert:
that:
- wait_crash_deploy is failed
- name: remove Pod with very short timeout
k8s:
api_version: v1
kind: Pod
name: wait-pod
namespace: "{{ wait_namespace }}"
state: absent
wait: yes
wait_sleep: 2
wait_timeout: 5
ignore_errors: yes
register: short_wait_remove_pod
- name: check that task failed
assert:
that:
- short_wait_remove_pod is failed
always:
- name: remove namespace
k8s:
kind: Namespace
name: "{{ wait_namespace }}"
state: absent
when: (nodes.resources | length) > 0

@ -96,9 +96,6 @@ lib/ansible/module_utils/ismount.py future-import-boilerplate
lib/ansible/module_utils/ismount.py metaclass-boilerplate
lib/ansible/module_utils/json_utils.py future-import-boilerplate
lib/ansible/module_utils/json_utils.py metaclass-boilerplate
lib/ansible/module_utils/k8s/common.py metaclass-boilerplate
lib/ansible/module_utils/k8s/raw.py metaclass-boilerplate
lib/ansible/module_utils/k8s/scale.py metaclass-boilerplate
lib/ansible/module_utils/net_tools/netbox/netbox_utils.py future-import-boilerplate
lib/ansible/module_utils/netapp.py future-import-boilerplate
lib/ansible/module_utils/netapp.py metaclass-boilerplate
@ -1677,26 +1674,6 @@ lib/ansible/modules/cloud/vmware_httpapi/vmware_cis_category_info.py validate-mo
lib/ansible/modules/cloud/vmware_httpapi/vmware_cis_category_info.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/vmware_httpapi/vmware_core_info.py validate-modules:doc-required-mismatch
lib/ansible/modules/cloud/vmware_httpapi/vmware_core_info.py validate-modules:parameter-list-no-elements
lib/ansible/modules/clustering/k8s/k8s.py validate-modules:doc-default-does-not-match-spec
lib/ansible/modules/clustering/k8s/k8s.py validate-modules:doc-missing-type
lib/ansible/modules/clustering/k8s/k8s.py validate-modules:doc-required-mismatch
lib/ansible/modules/clustering/k8s/k8s.py validate-modules:parameter-list-no-elements
lib/ansible/modules/clustering/k8s/k8s.py validate-modules:parameter-type-not-in-doc
lib/ansible/modules/clustering/k8s/k8s.py validate-modules:return-syntax-error
lib/ansible/modules/clustering/k8s/k8s_auth.py validate-modules:doc-missing-type
lib/ansible/modules/clustering/k8s/k8s_auth.py validate-modules:parameter-type-not-in-doc
lib/ansible/modules/clustering/k8s/k8s_info.py validate-modules:doc-missing-type
lib/ansible/modules/clustering/k8s/k8s_info.py validate-modules:parameter-list-no-elements
lib/ansible/modules/clustering/k8s/k8s_info.py validate-modules:parameter-type-not-in-doc
lib/ansible/modules/clustering/k8s/k8s_scale.py validate-modules:doc-missing-type
lib/ansible/modules/clustering/k8s/k8s_scale.py validate-modules:doc-required-mismatch
lib/ansible/modules/clustering/k8s/k8s_scale.py validate-modules:parameter-type-not-in-doc
lib/ansible/modules/clustering/k8s/k8s_scale.py validate-modules:return-syntax-error
lib/ansible/modules/clustering/k8s/k8s_service.py validate-modules:doc-missing-type
lib/ansible/modules/clustering/k8s/k8s_service.py validate-modules:mutually_exclusive-unknown
lib/ansible/modules/clustering/k8s/k8s_service.py validate-modules:parameter-list-no-elements
lib/ansible/modules/clustering/k8s/k8s_service.py validate-modules:parameter-type-not-in-doc
lib/ansible/modules/clustering/k8s/k8s_service.py validate-modules:return-syntax-error
lib/ansible/modules/commands/command.py validate-modules:doc-missing-type
lib/ansible/modules/commands/command.py validate-modules:nonexistent-parameter-documented
lib/ansible/modules/commands/command.py validate-modules:parameter-list-no-elements
@ -4287,16 +4264,6 @@ lib/ansible/plugins/doc_fragments/iosxr.py future-import-boilerplate
lib/ansible/plugins/doc_fragments/iosxr.py metaclass-boilerplate
lib/ansible/plugins/doc_fragments/junos.py future-import-boilerplate
lib/ansible/plugins/doc_fragments/junos.py metaclass-boilerplate
lib/ansible/plugins/doc_fragments/k8s_auth_options.py future-import-boilerplate
lib/ansible/plugins/doc_fragments/k8s_auth_options.py metaclass-boilerplate
lib/ansible/plugins/doc_fragments/k8s_name_options.py future-import-boilerplate
lib/ansible/plugins/doc_fragments/k8s_name_options.py metaclass-boilerplate
lib/ansible/plugins/doc_fragments/k8s_resource_options.py future-import-boilerplate
lib/ansible/plugins/doc_fragments/k8s_resource_options.py metaclass-boilerplate
lib/ansible/plugins/doc_fragments/k8s_scale_options.py future-import-boilerplate
lib/ansible/plugins/doc_fragments/k8s_scale_options.py metaclass-boilerplate
lib/ansible/plugins/doc_fragments/k8s_state_options.py future-import-boilerplate
lib/ansible/plugins/doc_fragments/k8s_state_options.py metaclass-boilerplate
lib/ansible/plugins/doc_fragments/meraki.py future-import-boilerplate
lib/ansible/plugins/doc_fragments/meraki.py metaclass-boilerplate
lib/ansible/plugins/doc_fragments/netapp.py future-import-boilerplate
