Various intentional tests (#72485)

* Add tests for argspec choices type=list

* Add explicit interpreter discovery tests to validate modules returning ansible_facts still set interp

* Add explicit tests for missing_required_lib

* Add explicit tests for recursive_diff

* ci_complete ci_coverage

* Update data to cover more code/tests

* ci_complete ci_coverage

* Add argspec tests for aliases, and no_log

* Forgotten file

* ci_complete ci_coverage

* Add argspec tests for type int

* ci_complete ci_coverage

* Remove incidental_k8s

* ci_complete ci_coverage

* fix missing newline

* Remove incidental_sts_assume_role

* ci_complete ci_coverage

(cherry picked from commit c8590c7482)
pull/72548/head
Matt Martz 4 years ago committed by Rick Elrod
parent 65470f48bd
commit 1f5afcd072

@ -97,7 +97,35 @@ def main():
'foo',
'bar',
],
}
},
'list_choices': {
'type': 'list',
'choices': [
'foo',
'bar',
'baz',
],
},
'primary': {
'type': 'str',
'aliases': [
'alias',
],
},
'password': {
'type': 'str',
'no_log': True,
},
'not_a_password': {
'type': 'str',
'no_log': False,
},
'maybe_password': {
'type': 'str',
},
'int': {
'type': 'int',
},
},
required_if=(
('state', 'present', ('path', 'content'), True),

@ -277,6 +277,71 @@
register: argspec_choices_bad_choice
ignore_errors: true
- argspec:
required: value
required_one_of_one: value
list_choices:
- bar
- baz
- argspec:
required: value
required_one_of_one: value
list_choices:
- bar
- baz
- qux
register: argspec_list_choices_bad_choice
ignore_errors: true
- argspec:
required: value
required_one_of_one: value
primary: foo
register: argspec_aliases_primary
- argspec:
required: value
required_one_of_one: value
alias: foo
register: argspec_aliases_alias
- argspec:
required: value
required_one_of_one: value
primary: foo
alias: foo
register: argspec_aliases_both
- argspec:
required: value
required_one_of_one: value
primary: foo
alias: bar
register: argspec_aliases_both_different
- command: >-
ansible localhost -m argspec
-a 'required=value required_one_of_one=value primary=foo alias=bar'
environment:
ANSIBLE_LIBRARY: '{{ role_path }}/library'
register: argspec_aliases_both_warning
- command: ansible localhost -m import_role -a 'role=argspec tasks_from=password_no_log.yml'
register: argspec_password_no_log
- argspec:
required: value
required_one_of_one: value
int: 1
- argspec:
required: value
required_one_of_one: value
int: foo
register: argspec_int_invalid
ignore_errors: true
- assert:
that:
- argspec_required_fail is failed
@ -333,3 +398,22 @@
- argspec_choices_with_strings_like_bools_false.choices_with_strings_like_bools == 'off'
- argspec_choices_bad_choice is failed
- argspec_list_choices_bad_choice is failed
- argspec_aliases_primary.primary == 'foo'
- argspec_aliases_primary.alias is undefined
- argspec_aliases_alias.primary == 'foo'
- argspec_aliases_alias.alias == 'foo'
- argspec_aliases_both.primary == 'foo'
- argspec_aliases_both.alias == 'foo'
- argspec_aliases_both_different.primary == 'bar'
- argspec_aliases_both_different.alias == 'bar'
- '"[WARNING]: Both option primary and its alias alias are set." in argspec_aliases_both_warning.stderr'
- '"Module did not set no_log for maybe_password" in argspec_password_no_log.stderr'
- '"Module did not set no_log for password" not in argspec_password_no_log.stderr'
- '"Module did not set no_log for not_a_password" not in argspec_password_no_log.stderr'
- argspec_password_no_log.stdout|regex_findall('VALUE_SPECIFIED_IN_NO_LOG_PARAMETER')|length == 1
- argspec_int_invalid is failed

@ -0,0 +1,14 @@
# Exercise the argspec test module's no_log handling. The three parameters
# are all plain 'str' options that differ only in their no_log setting
# (True / False / unset); the caller greps the run's output for
# "Module did not set no_log" warnings and censored values.

# 'password' declares no_log=True: its value is censored
# (VALUE_SPECIFIED_IN_NO_LOG_PARAMETER) and no warning is expected.
- argspec:
    required: value
    required_one_of_one: value
    password: foo

# 'not_a_password' declares an explicit no_log=False: no censoring and no
# warning expected despite the secret-looking value.
- argspec:
    required: value
    required_one_of_one: value
    not_a_password: foo

# 'maybe_password' leaves no_log unset: per the suite's assertions, this is
# the only parameter expected to produce a "did not set no_log" warning.
- argspec:
    required: value
    required_one_of_one: value
    maybe_password: foo

@ -1,23 +0,0 @@
Wait tests
----------
Wait tests require at least one node, and don't work on the normal K8s
openshift-origin container as provided by `ansible-test --docker -v k8s`.
minikube, Kubernetes from Docker, or any other Kubernetes service will
suffice.
If kubectl is already using the right config file and context, you can
just do
```
cd test/integration/targets/k8s
./runme.sh -vv
```
Otherwise, set one or both of `K8S_AUTH_KUBECONFIG` and `K8S_AUTH_CONTEXT`
and use the same command.

@ -1,2 +0,0 @@
# ansible-test aliases for this target: requires the openshift cloud
# provider plugin and runs in the incidental cloud test group.
cloud/openshift
shippable/cloud/incidental

@ -1,32 +0,0 @@
# Default expectation used by crd.yml: re-applying the same CRD resource
# normally succeeds. main.yml overrides this to "recreate_crd is failed"
# for the older-openshift run.
recreate_crd_default_merge_expectation: recreate_crd is not failed
# Reusable pod building blocks. k8s_pod_name and k8s_pod_image are supplied
# per-task via task-level vars; command and ports default to empty below.
k8s_pod_metadata:
  labels:
    app: "{{ k8s_pod_name }}"
k8s_pod_spec:
  containers:
    - image: "{{ k8s_pod_image }}"
      imagePullPolicy: Always
      name: "{{ k8s_pod_name }}"
      command: "{{ k8s_pod_command }}"
      # Trivial exec probe; delay gives slow images time to start.
      readinessProbe:
        initialDelaySeconds: 15
        exec:
          command:
            - /bin/true
      resources:
        limits:
          cpu: "100m"
          memory: "100Mi"
      ports: "{{ k8s_pod_ports }}"
k8s_pod_command: []
k8s_pod_ports: []
# Convenience template combining the metadata and spec above, consumed by
# the DaemonSet tasks in delete.yml (and similar task files).
k8s_pod_template:
  metadata: "{{ k8s_pod_metadata }}"
  spec: "{{ k8s_pod_spec }}"
k8s_openshift: yes

@ -1,20 +0,0 @@
# Fixture: a cert-manager Certificate custom resource, used by crd.yml and
# validate_installed.yml to exercise the k8s module against a custom kind
# registered by files/setup-crd.yml.
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
  name: acme-crt
spec:
  secretName: acme-crt-secret
  dnsNames:
    - foo.example.com
    - bar.example.com
  acme:
    config:
      - ingressClass: nginx
        domains:
          - foo.example.com
          - bar.example.com
  issuerRef:
    name: letsencrypt-prod
    # We can reference ClusterIssuers by changing the kind here.
    # The default value is Issuer (i.e. a locally namespaced Issuer)
    kind: Issuer

@ -1,21 +0,0 @@
# Fixture for the kubernetes-validate tests: a Deployment carrying a
# property the schema does not define ('unwanted' under spec.selector).
# The suite expects this to pass with validate.strict=no and fail with
# validate.strict=yes.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: kuard
  name: kuard
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: kuard
    unwanted: value  # deliberately not part of the Deployment schema
  template:
    metadata:
      labels:
        app: kuard
    spec:
      containers:
        - image: gcr.io/kuar-demo/kuard-amd64:1
          name: kuard

@ -1,20 +0,0 @@
# Fixture for the kubernetes-validate tests: a Deployment whose 'replicas'
# is a string instead of an integer, so schema validation rejects it even
# without strict mode (see validate_installed.yml).
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: kuard
  name: kuard
  namespace: default
spec:
  replicas: hello  # deliberately the wrong type; must be an integer
  selector:
    matchLabels:
      app: kuard
  template:
    metadata:
      labels:
        app: kuard
    spec:
      containers:
        - image: gcr.io/kuar-demo/kuard-amd64:1
          name: kuard

@ -1,14 +0,0 @@
# Fixture: registers the cert-manager Certificate CRD so that
# files/crd-resource.yml can be created as a namespaced custom resource.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: certificates.certmanager.k8s.io
spec:
  group: certmanager.k8s.io
  version: v1alpha1
  scope: Namespaced
  names:
    kind: Certificate
    plural: certificates
    shortNames:
      - cert
      - certs

@ -1,2 +0,0 @@
# Role dependencies: setup_remote_tmp_dir supplies remote_tmp_dir, used by
# the tasks to stage fixture files and the throw-away virtualenv on the
# target host.
dependencies:
  - setup_remote_tmp_dir

@ -1,68 +0,0 @@
# Exercise k8s append_hash: ConfigMaps created with append_hash=yes get a
# content-derived suffix appended to their name, so identical data resolves
# to the same object and changed data creates a new one.
- block:
    - name: Ensure that append_hash namespace exists
      k8s:
        kind: Namespace
        name: append-hash
    - name: create k8s_resource variable
      set_fact:
        k8s_resource:
          metadata:
            name: config-map-test
            namespace: append-hash
          apiVersion: v1
          kind: ConfigMap
          data:
            hello: world
    - name: Create config map
      k8s:
        definition: "{{ k8s_resource }}"
        append_hash: yes
      register: k8s_configmap1
    - name: check configmap is created with a hash
      assert:
        that:
          - k8s_configmap1 is changed
          # the [:-10] slice implies a 10-character hash suffix after the
          # base name and trailing dash
          - k8s_configmap1.result.metadata.name != 'config-map-test'
          - k8s_configmap1.result.metadata.name[:-10] == 'config-map-test-'
    - name: recreate same config map
      k8s:
        definition: "{{ k8s_resource }}"
        append_hash: yes
      register: k8s_configmap2
    # NOTE(review): task name says "different" but the assertions check the
    # two runs produced the SAME (unchanged) object.
    - name: check configmaps are different
      assert:
        that:
          - k8s_configmap2 is not changed
          - k8s_configmap1.result.metadata.name == k8s_configmap2.result.metadata.name
    - name: add key to config map
      k8s:
        definition:
          metadata:
            name: config-map-test
            namespace: append-hash
          apiVersion: v1
          kind: ConfigMap
          data:
            hello: world
            another: value
        append_hash: yes
      register: k8s_configmap3
    - name: check configmaps are different
      assert:
        that:
          # changed data hashes to a new suffix, hence a new object
          - k8s_configmap3 is changed
          - k8s_configmap1.result.metadata.name != k8s_configmap3.result.metadata.name
  always:
    - name: ensure that namespace is removed
      k8s:
        kind: Namespace
        name: append-hash
        state: absent

@ -1,277 +0,0 @@
- block:
- python_requirements_info:
dependencies:
- openshift
- kubernetes
- set_fact:
apply_namespace: apply
- name: ensure namespace exists
k8s:
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ apply_namespace }}"
- name: add a configmap
k8s:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
definition:
kind: ConfigMap
apiVersion: v1
data:
one: "1"
two: "2"
three: "3"
apply: yes
register: k8s_configmap
- name: check configmap was created
assert:
that:
- k8s_configmap is changed
- k8s_configmap.result.metadata.annotations|default(False)
- name: add same configmap again
k8s:
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
data:
one: "1"
two: "2"
three: "3"
apply: yes
register: k8s_configmap_2
- name: check nothing changed
assert:
that:
- k8s_configmap_2 is not changed
- name: add same configmap again with check mode on
k8s:
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
data:
one: "1"
two: "2"
three: "3"
apply: yes
check_mode: yes
register: k8s_configmap_check
- name: check nothing changed
assert:
that:
- k8s_configmap_check is not changed
- name: add same configmap again but using name and namespace args
k8s:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
definition:
kind: ConfigMap
apiVersion: v1
data:
one: "1"
two: "2"
three: "3"
apply: yes
register: k8s_configmap_2a
- name: check nothing changed
assert:
that:
- k8s_configmap_2a is not changed
- name: update configmap
k8s:
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
data:
one: "1"
three: "3"
four: "4"
apply: yes
register: k8s_configmap_3
- name: ensure that configmap has been correctly updated
assert:
that:
- k8s_configmap_3 is changed
- "'four' in k8s_configmap_3.result.data"
- "'two' not in k8s_configmap_3.result.data"
- name: add a service
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8080
targetPort: 8080
type: NodePort
apply: yes
register: k8s_service
- name: add exactly same service
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8080
targetPort: 8080
type: NodePort
apply: yes
register: k8s_service_2
- name: check nothing changed
assert:
that:
- k8s_service_2 is not changed
- name: change service ports
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8081
targetPort: 8081
type: NodePort
apply: yes
register: k8s_service_3
- name: check ports are correct
assert:
that:
- k8s_service_3 is changed
- k8s_service_3.result.spec.ports | length == 1
- k8s_service_3.result.spec.ports[0].port == 8081
- name: insert new service port
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: mesh
port: 8080
targetPort: 8080
- name: http
port: 8081
targetPort: 8081
type: NodePort
apply: yes
register: k8s_service_4
- name: check ports are correct
assert:
that:
- k8s_service_4 is changed
- k8s_service_4.result.spec.ports | length == 2
- k8s_service_4.result.spec.ports[0].port == 8080
- k8s_service_4.result.spec.ports[1].port == 8081
- name: remove new service port (check mode)
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8081
targetPort: 8081
type: NodePort
apply: yes
check_mode: yes
register: k8s_service_check
- name: check ports are correct
assert:
that:
- k8s_service_check is changed
- k8s_service_check.result.spec.ports | length == 1
- k8s_service_check.result.spec.ports[0].port == 8081
- name: remove new service port
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8081
targetPort: 8081
type: NodePort
apply: yes
register: k8s_service_5
- name: check ports are correct
assert:
that:
- k8s_service_5 is changed
- k8s_service_5.result.spec.ports | length == 1
- k8s_service_5.result.spec.ports[0].port == 8081
always:
- name: remove namespace
k8s:
kind: Namespace
name: "{{ apply_namespace }}"
state: absent

@ -1,71 +0,0 @@
# TODO: This is the only way I could get the kubeconfig, I don't know why. Running the lookup outside of debug seems to return an empty string
#- debug: msg={{ lookup('env', 'K8S_AUTH_KUBECONFIG') }}
#  register: kubeconfig
# Kubernetes resources
# CRD lifecycle: install the CRD, create a resource of the new kind, patch
# it (the expected outcome is parameterized via
# recreate_crd_default_merge_expectation, overridden for older openshift
# clients in main.yml), then clean up.
- block:
    - name: Create a namespace
      k8s:
        name: crd
        kind: Namespace
    - name: install custom resource definitions
      k8s:
        definition: "{{ lookup('file', role_path + '/files/setup-crd.yml') }}"
    - name: pause 5 seconds to avoid race condition
      pause:
        seconds: 5
    - name: create custom resource definition
      k8s:
        definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
        namespace: crd
        apply: "{{ create_crd_with_apply | default(omit) }}"
      register: create_crd
    - name: patch custom resource definition
      k8s:
        definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
        namespace: crd
      register: recreate_crd
      ignore_errors: yes
    - name: assert that recreating crd is as expected
      assert:
        that:
          - recreate_crd_default_merge_expectation
    # merge_type variants only make sense when the plain recreate above
    # succeeded (i.e. the installed client supports it)
    - block:
        - name: recreate custom resource definition with merge_type
          k8s:
            definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
            merge_type: merge
            namespace: crd
          register: recreate_crd_with_merge
        - name: recreate custom resource definition with merge_type list
          k8s:
            definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
            merge_type:
              - strategic-merge
              - merge
            namespace: crd
          register: recreate_crd_with_merge_list
      when: recreate_crd is successful
    - name: remove crd
      k8s:
        definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
        namespace: crd
        state: absent
  always:
    - name: remove crd namespace
      k8s:
        kind: Namespace
        name: crd
        state: absent
      ignore_errors: yes

@ -1,101 +0,0 @@
# DaemonSet deletion semantics: removing a DaemonSet with wait=yes should
# also remove the pods it scheduled. DaemonSets need at least one node, so
# the whole block is skipped on an empty cluster.
- name: ensure that there are actually some nodes
  k8s_info:
    kind: Node
  register: nodes
- block:
    - set_fact:
        delete_namespace: delete
    - name: ensure namespace exists
      k8s:
        definition:
          apiVersion: v1
          kind: Namespace
          metadata:
            name: "{{ delete_namespace }}"
    - name: add a daemonset
      k8s:
        definition:
          apiVersion: extensions/v1beta1
          kind: DaemonSet
          metadata:
            name: delete-daemonset
            namespace: "{{ delete_namespace }}"
          spec:
            selector:
              matchLabels:
                app: "{{ k8s_pod_name }}"
            # pod template comes from the role defaults
            template: "{{ k8s_pod_template }}"
        wait: yes
        wait_timeout: 180
      vars:
        k8s_pod_name: delete-ds
        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
      register: ds
    - name: check that daemonset wait worked
      assert:
        that:
          - ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
    - name: check if pods exist
      k8s_info:
        namespace: "{{ delete_namespace }}"
        kind: Pod
        label_selectors:
          - "app={{ k8s_pod_name }}"
      vars:
        k8s_pod_name: delete-ds
      register: pods_create
    - name: assert that there are pods
      assert:
        that:
          - pods_create.resources
    - name: remove the daemonset
      k8s:
        kind: DaemonSet
        name: delete-daemonset
        namespace: "{{ delete_namespace }}"
        state: absent
        wait: yes
    - name: show status of pods
      k8s_info:
        namespace: "{{ delete_namespace }}"
        kind: Pod
        label_selectors:
          - "app={{ k8s_pod_name }}"
      vars:
        k8s_pod_name: delete-ds
    # pod cleanup happens asynchronously after the owner is deleted, so
    # give the garbage collector time before checking
    - name: wait for background deletion
      pause:
        seconds: 30
    - name: check if pods still exist
      k8s_info:
        namespace: "{{ delete_namespace }}"
        kind: Pod
        label_selectors:
          - "app={{ k8s_pod_name }}"
      vars:
        k8s_pod_name: delete-ds
      register: pods_delete
    - name: assert that deleting the daemonset deleted the pods
      assert:
        that:
          - not pods_delete.resources
  always:
    - name: remove namespace
      k8s:
        kind: Namespace
        name: "{{ delete_namespace }}"
        state: absent
  when: (nodes.resources | length) > 0

@ -1,375 +0,0 @@
# TODO: This is the only way I could get the kubeconfig, I don't know why. Running the lookup outside of debug seems to return an empty string
#- debug: msg={{ lookup('env', 'K8S_AUTH_KUBECONFIG') }}
# register: kubeconfig
# Kubernetes resources
- include_tasks: delete.yml
- include_tasks: apply.yml
- include_tasks: waiter.yml
- block:
- name: Create a namespace
k8s:
name: testing
kind: Namespace
register: output
- name: show output
debug:
var: output
- name: Setting validate_certs to true causes a failure
k8s:
name: testing
kind: Namespace
validate_certs: yes
ignore_errors: yes
register: output
- name: assert that validate_certs caused a failure (and therefore was correctly translated to verify_ssl)
assert:
that:
- output is failed
- name: k8s_info works with empty resources
k8s_info:
kind: Deployment
namespace: testing
api_version: extensions/v1beta1
register: k8s_info
- name: assert that k8s_info is in correct format
assert:
that:
- "'resources' in k8s_info"
- not k8s_info.resources
- name: Create a service
k8s:
state: present
resource_definition: &svc
apiVersion: v1
kind: Service
metadata:
name: web
namespace: testing
labels:
app: galaxy
service: web
spec:
selector:
app: galaxy
service: web
ports:
- protocol: TCP
targetPort: 8000
name: port-8000-tcp
port: 8000
register: output
- name: show output
debug:
var: output
- name: Create the service again
k8s:
state: present
resource_definition: *svc
register: output
- name: Service creation should be idempotent
assert:
that: not output.changed
- name: Create a ConfigMap
k8s:
kind: ConfigMap
name: test-force-update
namespace: testing
definition:
data:
key: value
- name: Force update ConfigMap
k8s:
kind: ConfigMap
name: test-force-update
namespace: testing
definition:
data:
key: newvalue
force: yes
- name: Create PVC
k8s:
state: present
inline: &pvc
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: elastic-volume
namespace: testing
spec:
resources:
requests:
storage: 5Gi
accessModes:
- ReadWriteOnce
- name: Show output
debug:
var: output
- name: Create the PVC again
k8s:
state: present
inline: *pvc
- name: PVC creation should be idempotent
assert:
that: not output.changed
- name: Create deployment
k8s:
state: present
inline: &deployment
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: elastic
labels:
app: galaxy
service: elastic
namespace: testing
spec:
template:
metadata:
labels:
app: galaxy
service: elastic
spec:
containers:
- name: elastic
volumeMounts:
- mountPath: /usr/share/elasticsearch/data
name: elastic-volume
command: ['elasticsearch']
image: 'ansible/galaxy-elasticsearch:2.4.6'
volumes:
- name: elastic-volume
persistentVolumeClaim:
claimName: elastic-volume
replicas: 1
strategy:
type: RollingUpdate
register: output
- name: Show output
debug:
var: output
- name: Create deployment again
k8s:
state: present
inline: *deployment
register: output
- name: Deployment creation should be idempotent
assert:
that: not output.changed
- debug:
var: k8s_openshift
- include: openshift.yml
when: k8s_openshift | bool
### Type tests
- name: Create a namespace from a string
k8s:
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing1
- name: Namespace should exist
k8s_info:
kind: Namespace
api_version: v1
name: testing1
register: k8s_info_testing1
failed_when: not k8s_info_testing1.resources or k8s_info_testing1.resources[0].status.phase != "Active"
- name: Create resources from a multidocument yaml string
k8s:
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing2
---
kind: Namespace
apiVersion: v1
metadata:
name: testing3
- name: Lookup namespaces
k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing2
- testing3
register: k8s_namespaces
- name: Resources should exist
assert:
that: item.resources[0].status.phase == 'Active'
loop: "{{ k8s_namespaces.results }}"
- name: Delete resources from a multidocument yaml string
k8s:
state: absent
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing2
---
kind: Namespace
apiVersion: v1
metadata:
name: testing3
- name: Lookup namespaces
k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing2
- testing3
register: k8s_namespaces
- name: Resources should not exist
assert:
that:
- not item.resources or item.resources[0].status.phase == "Terminating"
loop: "{{ k8s_namespaces.results }}"
- name: Create resources from a list
k8s:
definition:
- kind: Namespace
apiVersion: v1
metadata:
name: testing4
- kind: Namespace
apiVersion: v1
metadata:
name: testing5
- name: Lookup namespaces
k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing4
- testing5
register: k8s_namespaces
- name: Resources should exist
assert:
that: item.resources[0].status.phase == 'Active'
loop: "{{ k8s_namespaces.results }}"
- name: Delete resources from a list
k8s:
state: absent
definition:
- kind: Namespace
apiVersion: v1
metadata:
name: testing4
- kind: Namespace
apiVersion: v1
metadata:
name: testing5
- k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing4
- testing5
register: k8s_info
- name: Resources are terminating if still in results
assert:
that: not item.resources or item.resources[0].status.phase == "Terminating"
loop: "{{ k8s_info.results }}"
- name: Create resources from a yaml string ending with ---
k8s:
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing6
---
- name: Namespace should exist
k8s_info:
kind: Namespace
api_version: v1
name: testing6
register: k8s_info_testing6
failed_when: not k8s_info_testing6.resources or k8s_info_testing6.resources[0].status.phase != "Active"
- include_tasks: crd.yml
- include_tasks: lists.yml
- include_tasks: append_hash.yml
always:
- name: Delete all namespaces
k8s:
state: absent
definition:
- kind: Namespace
apiVersion: v1
metadata:
name: testing
- kind: Namespace
apiVersion: v1
metadata:
name: testing1
- kind: Namespace
apiVersion: v1
metadata:
name: testing2
- kind: Namespace
apiVersion: v1
metadata:
name: testing3
- kind: Namespace
apiVersion: v1
metadata:
name: testing4
- kind: Namespace
apiVersion: v1
metadata:
name: testing5
- kind: Namespace
apiVersion: v1
metadata:
name: testing6
ignore_errors: yes

@ -1,140 +0,0 @@
---
- name: Ensure testing1 namespace exists
k8s:
api_version: v1
kind: Namespace
name: testing1
- block:
- name: Create configmaps
k8s:
namespace: testing1
definition:
apiVersion: v1
kind: ConfigMapList
items: '{{ configmaps }}'
- name: Get ConfigMaps
k8s_info:
api_version: v1
kind: ConfigMap
namespace: testing1
label_selectors:
- app=test
register: cms
- name: All three configmaps should exist
assert:
that: item.data.a is defined
with_items: '{{ cms.resources }}'
- name: Delete configmaps
k8s:
state: absent
namespace: testing1
definition:
apiVersion: v1
kind: ConfigMapList
items: '{{ configmaps }}'
- name: Get ConfigMaps
k8s_info:
api_version: v1
kind: ConfigMap
namespace: testing1
label_selectors:
- app=test
register: cms
- name: All three configmaps should not exist
assert:
that: not cms.resources
vars:
configmaps:
- metadata:
name: list-example-1
labels:
app: test
data:
a: first
- metadata:
name: list-example-2
labels:
app: test
data:
a: second
- metadata:
name: list-example-3
labels:
app: test
data:
a: third
- block:
- name: Create list of arbitrary resources
k8s:
namespace: testing1
definition:
apiVersion: v1
kind: List
namespace: testing1
items: '{{ resources }}'
- name: Get the created resources
k8s_info:
api_version: '{{ item.apiVersion }}'
kind: '{{ item.kind }}'
namespace: testing1
name: '{{ item.metadata.name }}'
register: list_resources
with_items: '{{ resources }}'
- name: All resources should exist
assert:
that: ((list_resources.results | sum(attribute="resources", start=[])) | length) == (resources | length)
- name: Delete list of arbitrary resources
k8s:
state: absent
namespace: testing1
definition:
apiVersion: v1
kind: List
namespace: testing1
items: '{{ resources }}'
- name: Get the resources
k8s_info:
api_version: '{{ item.apiVersion }}'
kind: '{{ item.kind }}'
namespace: testing1
name: '{{ item.metadata.name }}'
register: list_resources
with_items: '{{ resources }}'
- name: The resources should not exist
assert:
that: not ((list_resources.results | sum(attribute="resources", start=[])) | length)
vars:
resources:
- apiVersion: v1
kind: ConfigMap
metadata:
name: list-example-4
data:
key: value
- apiVersion: v1
kind: Service
metadata:
name: list-example-svc
labels:
app: test
spec:
selector:
app: test
ports:
- protocol: TCP
targetPort: 8000
name: port-8000-tcp
port: 8000

@ -1,92 +0,0 @@
# Driver for the suite: builds a sequence of throw-away virtualenvs with
# different openshift/kubernetes client versions, runs the matching task
# file inside each via ansible_python_interpreter, then removes the env.
- set_fact:
    virtualenv: "{{ remote_tmp_dir }}/virtualenv"
    virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"
- set_fact:
    virtualenv_interpreter: "{{ virtualenv }}/bin/python"
- pip:
    name: virtualenv
# Test graceful failure for missing kubernetes-validate
- pip:
    name:
      - openshift>=0.9.2
      - coverage<5
    virtualenv: "{{ virtualenv }}"
    virtualenv_command: "{{ virtualenv_command }}"
    virtualenv_site_packages: no
- include_tasks: validate_not_installed.yml
  vars:
    ansible_python_interpreter: "{{ virtualenv_interpreter }}"
# NOTE(review): no_log on the cleanup presumably suppresses noisy output
# from removing the virtualenv tree -- confirm intent
- file:
    path: "{{ virtualenv }}"
    state: absent
  no_log: yes
# Test validate with kubernetes-validate
- pip:
    name:
      - kubernetes-validate==1.12.0
      - openshift>=0.9.2
      - coverage<5
    virtualenv: "{{ virtualenv }}"
    virtualenv_command: "{{ virtualenv_command }}"
    virtualenv_site_packages: no
- include_tasks: validate_installed.yml
  vars:
    ansible_python_interpreter: "{{ virtualenv_interpreter }}"
    playbook_namespace: ansible-test-k8s-validate
- file:
    path: "{{ virtualenv }}"
    state: absent
  no_log: yes
# Test graceful failure for older versions of openshift
- pip:
    name:
      - openshift==0.6.0
      - kubernetes==6.0.0
      - coverage<5
    virtualenv: "{{ virtualenv }}"
    virtualenv_command: "{{ virtualenv_command }}"
    virtualenv_site_packages: no
- include_tasks: older_openshift_fail.yml
  vars:
    ansible_python_interpreter: "{{ virtualenv_interpreter }}"
    # old client cannot patch the CRD, so flip the crd.yml expectation
    recreate_crd_default_merge_expectation: recreate_crd is failed
    playbook_namespace: ansible-test-k8s-older-openshift
- file:
    path: "{{ virtualenv }}"
    state: absent
  no_log: yes
# Run full test suite
- pip:
    name:
      - openshift>=0.9.2
      - coverage<5
    virtualenv: "{{ virtualenv }}"
    virtualenv_command: "{{ virtualenv_command }}"
    virtualenv_site_packages: no
- include_tasks: full_test.yml
  vars:
    ansible_python_interpreter: "{{ virtualenv_interpreter }}"
    create_crd_with_apply: no
    playbook_namespace: ansible-test-k8s-full
- file:
    path: "{{ virtualenv }}"
    state: absent
  no_log: yes

@ -1,69 +0,0 @@
# Runs in a virtualenv pinned to openshift==0.6.0 / kubernetes==6.0.0 (see
# main.yml): features that need a newer client must fail gracefully with a
# clear version message rather than crash.
- python_requirements_info:
    dependencies:
      - openshift==0.6.0
      - kubernetes==6.0.0
# append_hash
- name: use append_hash with ConfigMap
  k8s:
    definition:
      metadata:
        name: config-map-test
        namespace: "{{ playbook_namespace }}"
      apiVersion: v1
      kind: ConfigMap
      data:
        hello: world
    append_hash: yes
  ignore_errors: yes
  register: k8s_append_hash
- name: assert that append_hash fails gracefully
  assert:
    that:
      - k8s_append_hash is failed
      - "'Failed to import the required Python library (openshift >= 0.7.2)' in k8s_append_hash.msg"
      - "'. This is required for append_hash.' in k8s_append_hash.msg"
# validate
- name: attempt to use validate with older openshift
  k8s:
    definition:
      metadata:
        name: config-map-test
        namespace: "{{ playbook_namespace }}"
      apiVersion: v1
      kind: ConfigMap
      data:
        hello: world
    validate:
      fail_on_error: yes
  ignore_errors: yes
  register: k8s_validate
- name: assert that validate fails gracefully
  assert:
    that:
      - k8s_validate is failed
      - "k8s_validate.msg == 'openshift >= 0.8.0 is required for validate'"
# apply
- name: attempt to use apply with older openshift
  k8s:
    definition:
      metadata:
        name: config-map-test
        namespace: "{{ playbook_namespace }}"
      apiVersion: v1
      kind: ConfigMap
      data:
        hello: world
    apply: yes
  ignore_errors: yes
  register: k8s_apply
- name: assert that apply fails gracefully
  assert:
    that:
      - k8s_apply is failed
      - "k8s_apply.msg.startswith('Failed to import the required Python library (openshift >= 0.9.2)')"

@ -1,61 +0,0 @@
# OpenShift Resources
# Included from full_test.yml only when k8s_openshift is true: covers the
# OpenShift-specific Project and DeploymentConfig kinds, and checks that
# re-creating an identical DC reports no change.
- name: Create a project
  k8s:
    name: testing
    kind: Project
    api_version: v1
    apply: no
  register: output
- name: show output
  debug:
    var: output
- name: Create deployment config
  k8s:
    state: present
    # anchored so the idempotency check below can reuse the exact definition
    inline: &dc
      apiVersion: v1
      kind: DeploymentConfig
      metadata:
        name: elastic
        labels:
          app: galaxy
          service: elastic
        namespace: testing
      spec:
        template:
          metadata:
            labels:
              app: galaxy
              service: elastic
          spec:
            containers:
              - name: elastic
                volumeMounts:
                  - mountPath: /usr/share/elasticsearch/data
                    name: elastic-volume
                command: ['elasticsearch']
                image: 'ansible/galaxy-elasticsearch:2.4.6'
            volumes:
              - name: elastic-volume
                persistentVolumeClaim:
                  claimName: elastic-volume
        replicas: 1
        strategy:
          type: Rolling
  register: output
- name: Show output
  debug:
    var: output
- name: Create deployment config again
  k8s:
    state: present
    inline: *dc
  register: output
- name: DC creation should be idempotent
  assert:
    that: not output.changed

@ -1,125 +0,0 @@
- block:
- name: Create a namespace
k8s:
name: "{{ playbook_namespace }}"
kind: Namespace
- copy:
src: files
dest: "{{ remote_tmp_dir }}"
- name: incredibly simple ConfigMap
k8s:
definition:
apiVersion: v1
kind: ConfigMap
metadata:
name: hello
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
register: k8s_with_validate
- name: assert that k8s_with_validate succeeds
assert:
that:
- k8s_with_validate is successful
- name: extra property does not fail without strict
k8s:
src: "{{ remote_tmp_dir }}/files/kuard-extra-property.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
strict: no
- name: extra property fails with strict
k8s:
src: "{{ remote_tmp_dir }}/files/kuard-extra-property.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
strict: yes
ignore_errors: yes
register: extra_property
- name: check that extra property fails with strict
assert:
that:
- extra_property is failed
- name: invalid type fails at validation stage
k8s:
src: "{{ remote_tmp_dir }}/files/kuard-invalid-type.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
strict: no
ignore_errors: yes
register: invalid_type
- name: check that invalid type fails
assert:
that:
- invalid_type is failed
- name: invalid type fails with warnings when fail_on_error is False
k8s:
src: "{{ remote_tmp_dir }}/files/kuard-invalid-type.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: no
strict: no
ignore_errors: yes
register: invalid_type_no_fail
- name: check that invalid type fails
assert:
that:
- invalid_type_no_fail is failed
- name: setup custom resource definition
k8s:
src: "{{ remote_tmp_dir }}/files/setup-crd.yml"
- name: wait a few seconds
pause:
seconds: 5
- name: add custom resource definition
k8s:
src: "{{ remote_tmp_dir }}/files/crd-resource.yml"
namespace: "{{ playbook_namespace }}"
validate:
fail_on_error: yes
strict: yes
register: unknown_kind
- name: check that unknown kind warns
assert:
that:
- unknown_kind is successful
- "'warnings' in unknown_kind"
always:
- name: remove custom resource
k8s:
definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
namespace: "{{ playbook_namespace }}"
state: absent
ignore_errors: yes
- name: remove custom resource definitions
k8s:
definition: "{{ lookup('file', role_path + '/files/setup-crd.yml') }}"
state: absent
- name: Delete namespace
k8s:
state: absent
definition:
- kind: Namespace
apiVersion: v1
metadata:
name: "{{ playbook_namespace }}"
ignore_errors: yes

@ -1,23 +0,0 @@
# Runs in a virtualenv WITHOUT kubernetes-validate (see main.yml):
# requesting validation must fail gracefully with a clear message.
- python_requirements_info:
    dependencies:
      - openshift
      - kubernetes
      - kubernetes-validate
- k8s:
    definition:
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: hello
        namespace: default
    validate:
      fail_on_error: yes
  ignore_errors: yes
  register: k8s_no_validate
- name: assert that k8s_no_validate fails gracefully
  assert:
    that:
      - k8s_no_validate is failed
      - "k8s_no_validate.msg == 'kubernetes-validate python library is required to validate resources'"

@ -1,355 +0,0 @@
- name: ensure that there are actually some nodes
k8s_info:
kind: Node
register: nodes
- block:
- set_fact:
wait_namespace: wait
- name: ensure namespace exists
k8s:
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ wait_namespace }}"
- name: add a simple pod
k8s:
definition:
apiVersion: v1
kind: Pod
metadata:
name: "{{ k8s_pod_name }}"
namespace: "{{ wait_namespace }}"
spec: "{{ k8s_pod_spec }}"
wait: yes
vars:
k8s_pod_name: wait-pod
k8s_pod_image: alpine:3.8
k8s_pod_command:
- sleep
- "10000"
register: wait_pod
ignore_errors: yes
- name: assert that pod creation succeeded
assert:
that:
- wait_pod is successful
- name: add a daemonset
k8s:
definition:
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: wait-daemonset
namespace: "{{ wait_namespace }}"
spec:
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_sleep: 3
wait_timeout: 180
vars:
k8s_pod_name: wait-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
register: ds
- name: check that daemonset wait worked
assert:
that:
- ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
- name: update a daemonset in check_mode
k8s:
definition:
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: wait-daemonset
namespace: "{{ wait_namespace }}"
spec:
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
updateStrategy:
type: RollingUpdate
template: "{{ k8s_pod_template }}"
wait: yes
wait_sleep: 3
wait_timeout: 180
vars:
k8s_pod_name: wait-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
register: update_ds_check_mode
- name: check that check_mode returned changed
assert:
that:
- update_ds_check_mode is changed
- name: update a daemonset
k8s:
definition:
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: wait-daemonset
namespace: "{{ wait_namespace }}"
spec:
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
updateStrategy:
type: RollingUpdate
template: "{{ k8s_pod_template }}"
wait: yes
wait_sleep: 3
wait_timeout: 180
vars:
k8s_pod_name: wait-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:3
register: ds
- name: get updated pods
k8s_info:
api_version: v1
kind: Pod
namespace: "{{ wait_namespace }}"
label_selectors:
- app=wait-ds
register: updated_ds_pods
- name: check that daemonset wait worked
assert:
that:
- ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
- updated_ds_pods.resources[0].spec.containers[0].image.endswith(":3")
- name: add a crashing pod
k8s:
definition:
apiVersion: v1
kind: Pod
metadata:
name: "{{ k8s_pod_name }}"
namespace: "{{ wait_namespace }}"
spec: "{{ k8s_pod_spec }}"
wait: yes
wait_sleep: 1
wait_timeout: 30
vars:
k8s_pod_name: wait-crash-pod
k8s_pod_image: alpine:3.8
k8s_pod_command:
- /bin/false
register: crash_pod
ignore_errors: yes
- name: check that task failed
assert:
that:
- crash_pod is failed
- name: use a non-existent image
k8s:
definition:
apiVersion: v1
kind: Pod
metadata:
name: "{{ k8s_pod_name }}"
namespace: "{{ wait_namespace }}"
spec: "{{ k8s_pod_spec }}"
wait: yes
wait_sleep: 1
wait_timeout: 30
vars:
k8s_pod_name: wait-no-image-pod
k8s_pod_image: i_made_this_up:and_this_too
register: no_image_pod
ignore_errors: yes
- name: check that task failed
assert:
that:
- no_image_pod is failed
- name: add a deployment
k8s:
definition:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: wait-deploy
namespace: "{{ wait_namespace }}"
spec:
replicas: 3
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
vars:
k8s_pod_name: wait-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
register: deploy
- name: check that deployment wait worked
assert:
that:
- deploy.result.status.availableReplicas == deploy.result.status.replicas
- name: update a deployment
k8s:
definition:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: wait-deploy
namespace: "{{ wait_namespace }}"
spec:
replicas: 3
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
vars:
k8s_pod_name: wait-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
register: update_deploy
- name: get updated pods
k8s_info:
api_version: v1
kind: Pod
namespace: "{{ wait_namespace }}"
label_selectors:
- app=wait-deploy
register: updated_deploy_pods
- name: check that deployment wait worked
assert:
that:
- deploy.result.status.availableReplicas == deploy.result.status.replicas
- updated_deploy_pods.resources[0].spec.containers[0].image.endswith(":2")
- name: pause a deployment
k8s:
definition:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: wait-deploy
namespace: "{{ wait_namespace }}"
spec:
paused: True
apply: no
wait: yes
wait_condition:
type: Progressing
status: Unknown
reason: DeploymentPaused
register: pause_deploy
- name: check that paused deployment wait worked
assert:
that:
- condition.reason == "DeploymentPaused"
- condition.status == "Unknown"
vars:
condition: '{{ pause_deploy.result.status.conditions | json_query("[?type==`Progressing`]") | first }}'
- name: add a service based on the deployment
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: wait-svc
namespace: "{{ wait_namespace }}"
spec:
selector:
app: "{{ k8s_pod_name }}"
ports:
- port: 8080
targetPort: 8080
protocol: TCP
wait: yes
vars:
k8s_pod_name: wait-deploy
register: service
- name: assert that waiting for service works
assert:
that:
- service is successful
- name: add a crashing deployment
k8s:
definition:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: wait-crash-deploy
namespace: "{{ wait_namespace }}"
spec:
replicas: 3
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
vars:
k8s_pod_name: wait-crash-deploy
k8s_pod_image: alpine:3.8
k8s_pod_command:
- /bin/false
register: wait_crash_deploy
ignore_errors: yes
- name: check that task failed
assert:
that:
- wait_crash_deploy is failed
- name: remove Pod with very short timeout
k8s:
api_version: v1
kind: Pod
name: wait-pod
namespace: "{{ wait_namespace }}"
state: absent
wait: yes
wait_sleep: 2
wait_timeout: 5
ignore_errors: yes
register: short_wait_remove_pod
- name: check that task failed
assert:
that:
- short_wait_remove_pod is failed
always:
- name: remove namespace
k8s:
kind: Namespace
name: "{{ wait_namespace }}"
state: absent
when: (nodes.resources | length) > 0

@ -1,2 +0,0 @@
cloud/aws
shippable/aws/incidental

@ -1,384 +0,0 @@
---
# tasks file for sts_assume_role
- block:
# ============================================================
# TODO create simple ansible sts_get_caller_identity module
- blockinfile:
path: "{{ output_dir }}/sts.py"
create: yes
block: |
#!/usr/bin/env python
import boto3
sts = boto3.client('sts')
response = sts.get_caller_identity()
print(response['Account'])
- name: get the aws account id
command: "{{ ansible_python.executable }} '{{ output_dir }}/sts.py'"
environment:
AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
AWS_SESSION_TOKEN: "{{ security_token }}"
register: result
- name: register account id
set_fact:
aws_account: "{{ result.stdout | replace('\n', '') }}"
# ============================================================
- name: create test iam role
iam_role:
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
name: "ansible-test-sts-{{ resource_prefix }}"
assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}"
create_instance_profile: False
managed_policy:
- arn:aws:iam::aws:policy/IAMReadOnlyAccess
state: present
register: test_role
# ============================================================
- name: pause to ensure role exists before using
pause:
seconds: 30
# ============================================================
- name: test with no parameters
sts_assume_role:
register: result
ignore_errors: true
- name: assert with no parameters
assert:
that:
- 'result.failed'
- "'missing required arguments:' in result.msg"
# ============================================================
- name: test with empty parameters
sts_assume_role:
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region}}"
role_arn:
role_session_name:
policy:
duration_seconds:
external_id:
mfa_token:
mfa_serial_number:
register: result
ignore_errors: true
- name: assert with empty parameters
assert:
that:
- 'result.failed'
- "'Missing required parameter in input:' in result.msg"
when: result.module_stderr is not defined
- name: assert with empty parameters
assert:
that:
- 'result.failed'
- "'Member must have length greater than or equal to 20' in result.module_stderr"
when: result.module_stderr is defined
# ============================================================
- name: test with only 'role_arn' parameter
sts_assume_role:
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
role_arn: "{{ test_role.iam_role.arn }}"
register: result
ignore_errors: true
- name: assert with only 'role_arn' parameter
assert:
that:
- 'result.failed'
- "'missing required arguments: role_session_name' in result.msg"
# ============================================================
- name: test with only 'role_session_name' parameter
sts_assume_role:
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
role_session_name: "AnsibleTest"
register: result
ignore_errors: true
- name: assert with only 'role_session_name' parameter
assert:
that:
- 'result.failed'
- "'missing required arguments: role_arn' in result.msg"
# ============================================================
- name: test assume role with invalid policy
sts_assume_role:
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region }}"
role_arn: "{{ test_role.iam_role.arn }}"
role_session_name: "AnsibleTest"
policy: "invalid policy"
register: result
ignore_errors: true
- name: assert assume role with invalid policy
assert:
that:
- 'result.failed'
- "'The policy is not in the valid JSON format.' in result.msg"
when: result.module_stderr is not defined
- name: assert assume role with invalid policy
assert:
that:
- 'result.failed'
- "'The policy is not in the valid JSON format.' in result.module_stderr"
when: result.module_stderr is defined
# ============================================================
- name: test assume role with invalid duration seconds
sts_assume_role:
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region}}"
role_arn: "{{ test_role.iam_role.arn }}"
role_session_name: AnsibleTest
duration_seconds: invalid duration
register: result
ignore_errors: true
- name: assert assume role with invalid duration seconds
assert:
that:
- result is failed
- 'result.msg is search("argument \w+ is of type <.*> and we were unable to convert to int: <.*> cannot be converted to an int")'
# ============================================================
- name: test assume role with invalid external id
sts_assume_role:
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region}}"
role_arn: "{{ test_role.iam_role.arn }}"
role_session_name: AnsibleTest
external_id: invalid external id
register: result
ignore_errors: true
- name: assert assume role with invalid external id
assert:
that:
- 'result.failed'
- "'Member must satisfy regular expression pattern:' in result.msg"
when: result.module_stderr is not defined
- name: assert assume role with invalid external id
assert:
that:
- 'result.failed'
- "'Member must satisfy regular expression pattern:' in result.module_stderr"
when: result.module_stderr is defined
# ============================================================
- name: test assume role with invalid mfa serial number
sts_assume_role:
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region}}"
role_arn: "{{ test_role.iam_role.arn }}"
role_session_name: AnsibleTest
mfa_serial_number: invalid serial number
register: result
ignore_errors: true
- name: assert assume role with invalid mfa serial number
assert:
that:
- 'result.failed'
- "'Member must satisfy regular expression pattern:' in result.msg"
when: result.module_stderr is not defined
- name: assert assume role with invalid mfa serial number
assert:
that:
- 'result.failed'
- "'Member must satisfy regular expression pattern:' in result.module_stderr"
when: result.module_stderr is defined
# ============================================================
- name: test assume role with invalid mfa token code
sts_assume_role:
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region}}"
role_arn: "{{ test_role.iam_role.arn }}"
role_session_name: AnsibleTest
mfa_token: invalid token code
register: result
ignore_errors: true
- name: assert assume role with invalid mfa token code
assert:
that:
- 'result.failed'
- "'Member must satisfy regular expression pattern:' in result.msg"
when: result.module_stderr is not defined
- name: assert assume role with invalid mfa token code
assert:
that:
- 'result.failed'
- "'Member must satisfy regular expression pattern:' in result.module_stderr"
when: result.module_stderr is defined
# ============================================================
- name: test assume role with invalid role_arn
sts_assume_role:
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region}}"
role_arn: invalid role arn
role_session_name: AnsibleTest
register: result
ignore_errors: true
- name: assert assume role with invalid role_arn
assert:
that:
- result.failed
- "'Invalid length for parameter RoleArn' in result.msg"
when: result.module_stderr is not defined
- name: assert assume role with invalid role_arn
assert:
that:
- 'result.failed'
- "'Member must have length greater than or equal to 20' in result.module_stderr"
when: result.module_stderr is defined
# ============================================================
- name: test assume not existing sts role
sts_assume_role:
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region}}"
role_arn: "arn:aws:iam::123456789:role/non-existing-role"
role_session_name: "AnsibleTest"
register: result
ignore_errors: true
- name: assert assume not existing sts role
assert:
that:
- 'result.failed'
- "'is not authorized to perform: sts:AssumeRole' in result.msg"
when: result.module_stderr is not defined
- name: assert assume not existing sts role
assert:
that:
- 'result.failed'
- "'is not authorized to perform: sts:AssumeRole' in result.msg"
when: result.module_stderr is defined
# ============================================================
- name: test assume role
sts_assume_role:
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region }}"
role_arn: "{{ test_role.iam_role.arn }}"
role_session_name: AnsibleTest
register: assumed_role
- name: assert assume role
assert:
that:
- 'not assumed_role.failed'
- "'sts_creds' in assumed_role"
- "'access_key' in assumed_role.sts_creds"
- "'secret_key' in assumed_role.sts_creds"
- "'session_token' in assumed_role.sts_creds"
# ============================================================
- name: test that assumed credentials have IAM read-only access
iam_role:
aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
security_token: "{{ assumed_role.sts_creds.session_token }}"
region: "{{ aws_region}}"
name: "ansible-test-sts-{{ resource_prefix }}"
assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}"
create_instance_profile: False
state: present
register: result
- name: assert assumed role with privileged action (expect changed=false)
assert:
that:
- 'not result.failed'
- 'not result.changed'
- "'iam_role' in result"
# ============================================================
- name: test assumed role with unprivileged action
iam_role:
aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
security_token: "{{ assumed_role.sts_creds.session_token }}"
region: "{{ aws_region}}"
name: "ansible-test-sts-{{ resource_prefix }}-new"
assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}"
state: present
register: result
ignore_errors: true
- name: assert assumed role with unprivileged action (expect changed=false)
assert:
that:
- 'result.failed'
- "'is not authorized to perform: iam:CreateRole' in result.msg"
# runs on Python2
when: result.module_stderr is not defined
- name: assert assumed role with unprivileged action (expect changed=false)
assert:
that:
- 'result.failed'
- "'is not authorized to perform: iam:CreateRole' in result.module_stderr"
# runs on Python3
when: result.module_stderr is defined
# ============================================================
always:
- name: delete test iam role
iam_role:
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
name: "ansible-test-sts-{{ resource_prefix }}"
assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}"
managed_policy:
- arn:aws:iam::aws:policy/IAMReadOnlyAccess
state: absent

@ -1,12 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::{{ aws_account }}:root"
},
"Action": "sts:AssumeRole"
}
]
}

@ -31,6 +31,18 @@
test_echo_module:
register: echoout
- name: clear facts to force interpreter discovery to run again
meta: clear_facts
- name: get the interpreter being used on the target to execute modules with ansible_facts
vars:
# keep this set so we can verify we didn't repeat discovery
ansible_python_interpreter: auto
test_echo_module:
facts:
sandwich: ham
register: echoout_with_facts
- when: distro == 'macosx'
block:
- name: Get the sys.executable for the macos discovered interpreter, as it may be different than the actual path
@ -50,7 +62,8 @@
- echoout.running_python_interpreter == normalized_discovered_interpreter
# verify that discovery didn't run again (if it did, we'd have the fact in the result)
- echoout.ansible_facts is not defined or echoout.ansible_facts.discovered_interpreter_python is not defined
- echoout_with_facts.ansible_facts is defined
- echoout_with_facts.running_python_interpreter == normalized_discovered_interpreter
- name: test that auto_legacy gives a dep warning when /usr/bin/python present but != auto result
block:

@ -0,0 +1,37 @@
#!/usr/bin/python
# Copyright: (c) 2020, Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
try:
import ansible_missing_lib
HAS_LIB = True
except ImportError as e:
HAS_LIB = False
def main():
    """Demonstrate missing_required_lib() output for an absent library.

    The two boolean parameters toggle the optional ``url`` and ``reason``
    extras that missing_required_lib() folds into its failure message.
    """
    module = AnsibleModule({
        'url': {'type': 'bool'},
        'reason': {'type': 'bool'},
    })

    extras = {}
    if module.params['url']:
        extras['url'] = 'https://github.com/ansible/ansible'
    if module.params['reason']:
        extras['reason'] = 'for fun'

    if not HAS_LIB:
        module.fail_json(msg=missing_required_lib('ansible_missing_lib', **extras))


if __name__ == '__main__':
    main()

@ -0,0 +1,5 @@
#!/usr/bin/env bash
# Run the integration playbook for this target against the test inventory.
# -eux: abort on error, treat unset vars as errors, trace commands.
set -eux

# Make the sibling directories resolvable as roles.
export ANSIBLE_ROLES_PATH=../
ansible-playbook -i ../../inventory runme.yml -e "output_dir=${OUTPUT_DIR}" -v "$@"

@ -0,0 +1,57 @@
- hosts: localhost
gather_facts: false
tasks:
- command: ansible localhost -m import_role -a role=missing_required_lib -e url=true -e reason=true
register: missing_required_lib_all
failed_when: missing_required_lib_all.rc == 0
- command: ansible localhost -m import_role -a role=missing_required_lib
register: missing_required_lib_none
failed_when: missing_required_lib_none.rc == 0
- command: ansible localhost -m import_role -a role=missing_required_lib -e url=true
register: missing_required_lib_url
failed_when: missing_required_lib_url.rc == 0
- command: ansible localhost -m import_role -a role=missing_required_lib -e reason=true
register: missing_required_lib_reason
failed_when: missing_required_lib_reason.rc == 0
- assert:
that:
- missing_required_lib_all.stdout is search(expected_all)
- missing_required_lib_none.stdout is search(expected_none)
- missing_required_lib_url.stdout is search(expected_url)
- missing_required_lib_reason.stdout is search(expected_reason)
vars:
expected_all: >-
Failed to import the required Python library \(ansible_missing_lib\) on
\S+'s Python \S+\.
This is required for fun\. See https://github.com/ansible/ansible for
more info\. Please read the module documentation and install it in the
appropriate location\. If the required library is installed, but Ansible
is using the wrong Python interpreter, please consult the documentation
on ansible_python_interpreter
expected_none: >-
Failed to import the required Python library \(ansible_missing_lib\) on
\S+'s Python \S+\.
Please read the module documentation and install it in the
appropriate location\. If the required library is installed, but Ansible
is using the wrong Python interpreter, please consult the documentation
on ansible_python_interpreter
expected_url: >-
Failed to import the required Python library \(ansible_missing_lib\) on
\S+'s Python \S+\.
See https://github.com/ansible/ansible for
more info\. Please read the module documentation and install it in the
appropriate location\. If the required library is installed, but Ansible
is using the wrong Python interpreter, please consult the documentation
on ansible_python_interpreter
expected_reason: >-
Failed to import the required Python library \(ansible_missing_lib\) on
\S+'s Python \S+\.
This is required for fun\.
Please read the module documentation and install it in the
appropriate location\. If the required library is installed, but Ansible
is using the wrong Python interpreter, please consult the documentation
on ansible_python_interpreter

@ -0,0 +1,3 @@
- missing_required_lib:
url: '{{ url|default(omit) }}'
reason: '{{ reason|default(omit) }}'

@ -0,0 +1,29 @@
#!/usr/bin/python
# Copyright: (c) 2020, Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.dict_transformations import recursive_diff
def main():
    """Expose recursive_diff() of two dicts as an Ansible module result."""
    argument_spec = {
        'a': {'type': 'dict'},
        'b': {'type': 'dict'},
    }
    module = AnsibleModule(argument_spec)

    params = module.params
    diff = recursive_diff(params['a'], params['b'])
    module.exit_json(the_diff=diff)


if __name__ == '__main__':
    main()

@ -0,0 +1,34 @@
- hosts: testhost
gather_facts: no
tasks:
- test_recursive_diff:
a:
foo:
bar:
- baz:
qux: ham_sandwich
b:
foo:
bar:
- baz:
qux: turkey_sandwich
register: recursive_diff_diff
- test_recursive_diff:
a:
foo:
bar:
- baz:
qux: ham_sandwich
b:
foo:
bar:
- baz:
qux: ham_sandwich
register: recursive_diff_same
- assert:
that:
- recursive_diff_diff.the_diff is not none
- recursive_diff_diff.the_diff|length == 2
- recursive_diff_same.the_diff is none

@ -6,3 +6,5 @@ ANSIBLE_ROLES_PATH=../ ansible-playbook module_utils_basic_setcwd.yml -i ../../i
ansible-playbook module_utils_test.yml -i ../../inventory -v "$@"
ANSIBLE_MODULE_UTILS=other_mu_dir ansible-playbook module_utils_envvar.yml -i ../../inventory -v "$@"
ansible-playbook module_utils_common_dict_transformation.yml -i ../../inventory "$@"

@ -333,8 +333,6 @@ test/support/integration/plugins/module_utils/compat/ipaddress.py metaclass-boil
test/support/integration/plugins/module_utils/compat/ipaddress.py no-unicode-literals
test/support/integration/plugins/module_utils/database.py future-import-boilerplate
test/support/integration/plugins/module_utils/database.py metaclass-boilerplate
test/support/integration/plugins/module_utils/k8s/common.py metaclass-boilerplate
test/support/integration/plugins/module_utils/k8s/raw.py metaclass-boilerplate
test/support/integration/plugins/module_utils/mysql.py future-import-boilerplate
test/support/integration/plugins/module_utils/mysql.py metaclass-boilerplate
test/support/integration/plugins/module_utils/network/common/utils.py future-import-boilerplate

@ -1,290 +0,0 @@
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import copy
import json
import os
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.dict_transformations import recursive_diff
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils._text import to_native
K8S_IMP_ERR = None
try:
import kubernetes
import openshift
from openshift.dynamic import DynamicClient
from openshift.dynamic.exceptions import ResourceNotFoundError, ResourceNotUniqueError
HAS_K8S_MODULE_HELPER = True
k8s_import_exception = None
except ImportError as e:
HAS_K8S_MODULE_HELPER = False
k8s_import_exception = e
K8S_IMP_ERR = traceback.format_exc()
YAML_IMP_ERR = None
try:
import yaml
HAS_YAML = True
except ImportError:
YAML_IMP_ERR = traceback.format_exc()
HAS_YAML = False
try:
import urllib3
urllib3.disable_warnings()
except ImportError:
pass
def list_dict_str(value):
    """Validate that *value* is a list, dict, or string.

    Used as a callable ``type`` in an Ansible argspec (see
    ``COMMON_ARG_SPEC['resource_definition']``) so the option accepts any
    of the three forms unchanged.

    :param value: the parameter value to validate
    :return: *value*, unchanged, when it is a list, dict, or string
    :raises TypeError: for any other type; AnsibleModule surfaces this as
        an invalid-parameter failure
    """
    if isinstance(value, (list, dict)):
        return value
    if isinstance(value, string_types):
        return value
    # Name the offending type so the resulting module failure is actionable
    # (previously this raised a bare TypeError with no message).
    raise TypeError('{0} is not a list, dict, or string'.format(type(value).__name__))
# NOTE(review): argspec attributes that AnsibleModule does not understand;
# consumers presumably strip these before building a module -- usage is not
# visible in this file.
ARG_ATTRIBUTES_BLACKLIST = ('property_path',)

# Options shared by every k8s module: desired state, resource identity,
# and the places a resource definition may come from (inline or file).
COMMON_ARG_SPEC = {
    'state': {
        'default': 'present',
        'choices': ['present', 'absent'],
    },
    'force': {
        'type': 'bool',
        'default': False,
    },
    'resource_definition': {
        # Custom validator: accepts a list, dict, or string (see list_dict_str).
        'type': list_dict_str,
        'aliases': ['definition', 'inline']
    },
    'src': {
        'type': 'path',
    },
    'kind': {},
    'name': {},
    'namespace': {},
    'api_version': {
        'default': 'v1',
        'aliases': ['api', 'version'],
    },
}

# Options used to authenticate against the Kubernetes/OpenShift API.
# Secrets are marked no_log so they never appear in module output.
AUTH_ARG_SPEC = {
    'kubeconfig': {
        'type': 'path',
    },
    'context': {},
    'host': {},
    'api_key': {
        'no_log': True,
    },
    'username': {},
    'password': {
        'no_log': True,
    },
    'validate_certs': {
        'type': 'bool',
        'aliases': ['verify_ssl'],
    },
    'ca_cert': {
        'type': 'path',
        'aliases': ['ssl_ca_cert'],
    },
    'client_cert': {
        'type': 'path',
        'aliases': ['cert_file'],
    },
    'client_key': {
        'type': 'path',
        'aliases': ['key_file'],
    },
    'proxy': {},
    'persist_config': {
        'type': 'bool',
    },
}

# Map kubernetes-client parameters to ansible parameters
# (key = name the kubernetes client expects, value = the ansible option name).
AUTH_ARG_MAP = {
    'kubeconfig': 'kubeconfig',
    'context': 'context',
    'host': 'host',
    'api_key': 'api_key',
    'username': 'username',
    'password': 'password',
    'verify_ssl': 'validate_certs',
    'ssl_ca_cert': 'ca_cert',
    'cert_file': 'client_cert',
    'key_file': 'client_key',
    'proxy': 'proxy',
    'persist_config': 'persist_config',
}
class K8sAnsibleMixin(object):
    """Shared helpers for k8s modules: argspec assembly, API-client
    construction, resource lookup, and resource-definition loading.

    Expects the consuming class to provide ``params``, ``client`` and a
    ``fail(msg=...)`` method (see KubernetesAnsibleModule below).
    """

    # Lazily-built, cached merge of COMMON_ARG_SPEC + AUTH_ARG_SPEC.
    _argspec_cache = None

    @property
    def argspec(self):
        """
        Introspect the model properties, and return an Ansible module arg_spec dict.
        :return: dict
        """
        if self._argspec_cache:
            return self._argspec_cache
        argument_spec = copy.deepcopy(COMMON_ARG_SPEC)
        argument_spec.update(copy.deepcopy(AUTH_ARG_SPEC))
        self._argspec_cache = argument_spec
        return self._argspec_cache

    def get_api_client(self, **auth_params):
        """Build an openshift DynamicClient from explicit auth values,
        module params, or K8S_AUTH_* environment variables (in that order
        of precedence per option).

        :param auth_params: optional explicit auth values; falls back to
            ``self.params`` when empty.
        :return: openshift.dynamic.DynamicClient
        """
        auth_params = auth_params or getattr(self, 'params', {})
        auth = {}

        # If authorization variables aren't defined, look for them in environment variables
        for true_name, arg_name in AUTH_ARG_MAP.items():
            if auth_params.get(arg_name) is None:
                # Accept either the ansible-side or kubernetes-side name in the env.
                env_value = os.getenv('K8S_AUTH_{0}'.format(arg_name.upper()), None) or os.getenv('K8S_AUTH_{0}'.format(true_name.upper()), None)
                if env_value is not None:
                    if AUTH_ARG_SPEC[arg_name].get('type') == 'bool':
                        # Env vars are strings; coerce to a boolean.
                        env_value = env_value.lower() not in ['0', 'false', 'no']
                    auth[true_name] = env_value
            else:
                auth[true_name] = auth_params[arg_name]

        def auth_set(*names):
            # True when every named auth value is present and truthy.
            return all([auth.get(name) for name in names])

        if auth_set('username', 'password', 'host') or auth_set('api_key', 'host'):
            # We have enough in the parameters to authenticate, no need to load incluster or kubeconfig
            pass
        elif auth_set('kubeconfig') or auth_set('context'):
            kubernetes.config.load_kube_config(auth.get('kubeconfig'), auth.get('context'), persist_config=auth.get('persist_config'))
        else:
            # First try to do incluster config, then kubeconfig
            try:
                kubernetes.config.load_incluster_config()
            except kubernetes.config.ConfigException:
                kubernetes.config.load_kube_config(auth.get('kubeconfig'), auth.get('context'), persist_config=auth.get('persist_config'))

        # Override any values in the default configuration with Ansible parameters
        configuration = kubernetes.client.Configuration()
        for key, value in iteritems(auth):
            if key in AUTH_ARG_MAP.keys() and value is not None:
                if key == 'api_key':
                    # The kubernetes client expects api_key as a header dict.
                    setattr(configuration, key, {'authorization': "Bearer {0}".format(value)})
                else:
                    setattr(configuration, key, value)

        # NOTE(review): set_default mutates process-global client state.
        kubernetes.client.Configuration.set_default(configuration)
        return DynamicClient(kubernetes.client.ApiClient(configuration))

    def find_resource(self, kind, api_version, fail=False):
        """Resolve *kind* within *api_version*, trying kind, name, and
        singular name, then falling back to short names.

        :return: the matching resource, or None when nothing matched and
            ``fail`` is False (``fail=True`` raises via self.fail()).
        """
        for attribute in ['kind', 'name', 'singular_name']:
            try:
                return self.client.resources.get(**{'api_version': api_version, attribute: kind})
            except (ResourceNotFoundError, ResourceNotUniqueError):
                pass
        try:
            return self.client.resources.get(api_version=api_version, short_names=[kind])
        except (ResourceNotFoundError, ResourceNotUniqueError):
            if fail:
                self.fail(msg='Failed to find exact match for {0}.{1} by [kind, name, singularName, shortNames]'.format(api_version, kind))

    def kubernetes_facts(self, kind, api_version, name=None, namespace=None, label_selectors=None, field_selectors=None):
        """Return ``dict(resources=[...])`` for the matching objects, or an
        empty list when the kind or the named object does not exist.

        NOTE(review): label_selectors/field_selectors default to None but are
        passed straight to ','.join(); callers appear to always supply lists
        -- confirm before relying on the defaults.
        """
        resource = self.find_resource(kind, api_version)
        if not resource:
            return dict(resources=[])
        try:
            result = resource.get(name=name,
                                  namespace=namespace,
                                  label_selector=','.join(label_selectors),
                                  field_selector=','.join(field_selectors)).to_dict()
        except openshift.dynamic.exceptions.NotFoundError:
            return dict(resources=[])
        if 'items' in result:
            # List response: unwrap the items.
            return dict(resources=result['items'])
        else:
            # Single object: wrap it for a uniform return shape.
            return dict(resources=[result])

    def remove_aliases(self):
        """
        The helper doesn't know what to do with aliased keys
        """
        for k, v in iteritems(self.argspec):
            if 'aliases' in v:
                for alias in v['aliases']:
                    if alias in self.params:
                        self.params.pop(alias)

    def load_resource_definitions(self, src):
        """ Load the requested src path """
        result = None
        path = os.path.normpath(src)
        if not os.path.exists(path):
            self.fail(msg="Error accessing {0}. Does the file exist?".format(path))
        try:
            # safe_load_all: the file may contain multiple YAML documents.
            with open(path, 'r') as f:
                result = list(yaml.safe_load_all(f))
        except (IOError, yaml.YAMLError) as exc:
            self.fail(msg="Error loading resource_definition: {0}".format(exc))
        return result

    @staticmethod
    def diff_objects(existing, new):
        """Return ``(match, diff)`` where *match* is True when the two
        objects are recursively equal, and *diff* carries 'before'/'after'
        keys otherwise.
        """
        result = dict()
        diff = recursive_diff(existing, new)
        if diff:
            result['before'] = diff[0]
            result['after'] = diff[1]
        return not diff, result
class KubernetesAnsibleModule(AnsibleModule, K8sAnsibleMixin):
    """AnsibleModule base class for k8s modules.

    Fails fast at construction time when the openshift or PyYAML libraries
    are missing; subclasses implement execute_module().
    """

    # Populated by subclasses while processing parameters.
    resource_definition = None
    api_version = None
    kind = None

    def __init__(self, *args, **kwargs):
        # The mixin's cached argspec (common + auth options) becomes the
        # module's argument_spec.
        kwargs['argument_spec'] = self.argspec
        AnsibleModule.__init__(self, *args, **kwargs)
        if not HAS_K8S_MODULE_HELPER:
            # fail_json exits the module, so the attribute access below is
            # only reached when openshift imported successfully.
            self.fail_json(msg=missing_required_lib('openshift'), exception=K8S_IMP_ERR,
                           error=to_native(k8s_import_exception))
        self.openshift_version = openshift.__version__
        if not HAS_YAML:
            self.fail_json(msg=missing_required_lib("PyYAML"), exception=YAML_IMP_ERR)

    def execute_module(self):
        """Subclass hook: perform the module's actual work."""
        raise NotImplementedError()

    def fail(self, msg=None):
        """Uniform failure entry point used by the mixin helpers."""
        self.fail_json(msg=msg)

@ -1,519 +0,0 @@
#
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import copy
from datetime import datetime
from distutils.version import LooseVersion
import time
import sys
import traceback
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.k8s.common import AUTH_ARG_SPEC, COMMON_ARG_SPEC
from ansible.module_utils.six import string_types
from ansible.module_utils.k8s.common import KubernetesAnsibleModule
from ansible.module_utils.common.dict_transformations import dict_merge
try:
import yaml
from openshift.dynamic.exceptions import DynamicApiError, NotFoundError, ConflictError, ForbiddenError, KubernetesValidateMissing
import urllib3
except ImportError:
# Exceptions handled in common
pass
try:
import kubernetes_validate
HAS_KUBERNETES_VALIDATE = True
except ImportError:
HAS_KUBERNETES_VALIDATE = False
K8S_CONFIG_HASH_IMP_ERR = None
try:
from openshift.helper.hashes import generate_hash
HAS_K8S_CONFIG_HASH = True
except ImportError:
K8S_CONFIG_HASH_IMP_ERR = traceback.format_exc()
HAS_K8S_CONFIG_HASH = False
HAS_K8S_APPLY = None
try:
from openshift.dynamic.apply import apply_object
HAS_K8S_APPLY = True
except ImportError:
HAS_K8S_APPLY = False
class KubernetesRawModule(KubernetesAnsibleModule):
@property
def validate_spec(self):
    """Argument spec for the nested 'validate' option."""
    return {
        'fail_on_error': {'type': 'bool'},
        'version': {},
        'strict': {'type': 'bool', 'default': True},
    }
@property
def condition_spec(self):
    """Argument spec for the nested 'wait_condition' option."""
    return {
        'type': {},
        'status': {'default': True, 'choices': [True, False, "Unknown"]},
        'reason': {},
    }
@property
def argspec(self):
    """Build the module argument spec from the shared k8s specs."""
    spec = copy.deepcopy(COMMON_ARG_SPEC)
    spec.update(copy.deepcopy(AUTH_ARG_SPEC))
    spec.update({
        'merge_type': {'type': 'list', 'choices': ['json', 'merge', 'strategic-merge']},
        'wait': {'type': 'bool', 'default': False},
        'wait_sleep': {'type': 'int', 'default': 5},
        'wait_timeout': {'type': 'int', 'default': 120},
        'wait_condition': {'type': 'dict', 'default': None, 'options': self.condition_spec},
        'validate': {'type': 'dict', 'default': None, 'options': self.validate_spec},
        'append_hash': {'type': 'bool', 'default': False},
        'apply': {'type': 'bool', 'default': False},
    })
    return spec
def __init__(self, k8s_kind=None, *args, **kwargs):
    """Initialize the module, gate optional features on the installed
    openshift client version and build self.resource_definitions.

    :param k8s_kind: optional kind override used by kind-specific modules.
    """
    self.client = None
    self.warnings = []

    mutually_exclusive = [
        ('resource_definition', 'src'),
        ('merge_type', 'apply'),
    ]

    KubernetesAnsibleModule.__init__(self, *args,
                                     mutually_exclusive=mutually_exclusive,
                                     supports_check_mode=True,
                                     **kwargs)
    self.kind = k8s_kind or self.params.get('kind')
    self.api_version = self.params.get('api_version')
    self.name = self.params.get('name')
    self.namespace = self.params.get('namespace')
    resource_definition = self.params.get('resource_definition')
    validate = self.params.get('validate')
    # Each optional feature below requires a minimum openshift client version.
    if validate:
        if LooseVersion(self.openshift_version) < LooseVersion("0.8.0"):
            self.fail_json(msg="openshift >= 0.8.0 is required for validate")
    self.append_hash = self.params.get('append_hash')
    if self.append_hash:
        if not HAS_K8S_CONFIG_HASH:
            self.fail_json(msg=missing_required_lib("openshift >= 0.7.2", reason="for append_hash"),
                           exception=K8S_CONFIG_HASH_IMP_ERR)
    if self.params['merge_type']:
        if LooseVersion(self.openshift_version) < LooseVersion("0.6.2"):
            self.fail_json(msg=missing_required_lib("openshift >= 0.6.2", reason="for merge_type"))
    self.apply = self.params.get('apply', False)
    if self.apply:
        if not HAS_K8S_APPLY:
            self.fail_json(msg=missing_required_lib("openshift >= 0.9.2", reason="for apply"))

    if resource_definition:
        # resource_definition may be a YAML string, a list of dicts, or a dict.
        if isinstance(resource_definition, string_types):
            try:
                self.resource_definitions = yaml.safe_load_all(resource_definition)
            except (IOError, yaml.YAMLError) as exc:
                self.fail(msg="Error loading resource_definition: {0}".format(exc))
        elif isinstance(resource_definition, list):
            self.resource_definitions = resource_definition
        else:
            self.resource_definitions = [resource_definition]
    src = self.params.get('src')
    if src:
        self.resource_definitions = self.load_resource_definitions(src)
    try:
        # Drop empty documents (e.g. a trailing '---' in a YAML stream).
        self.resource_definitions = [item for item in self.resource_definitions if item]
    except AttributeError:
        pass

    if not resource_definition and not src:
        # Build a minimal definition from the discrete kind/name/... options.
        implicit_definition = dict(
            kind=self.kind,
            apiVersion=self.api_version,
            metadata=dict(name=self.name)
        )
        if self.namespace:
            implicit_definition['metadata']['namespace'] = self.namespace
        self.resource_definitions = [implicit_definition]
def flatten_list_kind(self, list_resource, definitions):
    """Expand a *List kind into (resource, definition) tuples for its items."""
    parent_api_version = list_resource.group_version if list_resource else None
    # 'PodList' -> 'Pod': strip the trailing 'List' to get the member kind.
    parent_kind = list_resource.kind[:-4] if list_resource else None
    flattened = []
    for item in definitions.get('items', []):
        resource = self.find_resource(item.get('kind', parent_kind), item.get('apiVersion', parent_api_version), fail=True)
        flattened.append((resource, self.set_defaults(resource, item)))
    return flattened
def execute_module(self):
    """Run the module: resolve every definition to an API resource,
    perform the requested action on each and exit with aggregated results."""
    changed = False
    results = []
    try:
        self.client = self.get_api_client()
    # Hopefully the kubernetes client will provide its own exception class one day
    except (urllib3.exceptions.RequestError) as e:
        self.fail_json(msg="Couldn't connect to Kubernetes: %s" % str(e))

    flattened_definitions = []
    for definition in self.resource_definitions:
        kind = definition.get('kind', self.kind)
        api_version = definition.get('apiVersion', self.api_version)
        if kind.endswith('List'):
            # *List kinds are expanded into their member resources.
            resource = self.find_resource(kind, api_version, fail=False)
            flattened_definitions.extend(self.flatten_list_kind(resource, definition))
        else:
            resource = self.find_resource(kind, api_version, fail=True)
            flattened_definitions.append((resource, definition))

    for (resource, definition) in flattened_definitions:
        kind = definition.get('kind', self.kind)
        api_version = definition.get('apiVersion', self.api_version)
        definition = self.set_defaults(resource, definition)
        # Warnings are per-definition; reset before validating each one.
        self.warnings = []
        if self.params['validate'] is not None:
            self.warnings = self.validate(definition)

        result = self.perform_action(resource, definition)
        result['warnings'] = self.warnings
        changed = changed or result['changed']
        results.append(result)

    if len(results) == 1:
        # Single definition: return its result at the top level.
        self.exit_json(**results[0])

    self.exit_json(**{
        'changed': changed,
        'result': {
            'results': results
        }
    })
def validate(self, resource):
    """Validate *resource* with kubernetes-validate; return warning strings.

    Fails the module when errors are found and validate.fail_on_error is set;
    otherwise errors are demoted to warnings.
    """
    def _prepend_resource_info(resource, msg):
        # Prefix messages with kind/name so multi-resource output is readable.
        return "%s %s: %s" % (resource['kind'], resource['metadata']['name'], msg)

    try:
        warnings, errors = self.client.validate(resource, self.params['validate'].get('version'), self.params['validate'].get('strict'))
    except KubernetesValidateMissing:
        self.fail_json(msg="kubernetes-validate python library is required to validate resources")

    if errors and self.params['validate']['fail_on_error']:
        self.fail_json(msg="\n".join([_prepend_resource_info(resource, error) for error in errors]))
    else:
        return [_prepend_resource_info(resource, msg) for msg in warnings + errors]
def set_defaults(self, resource, definition):
    """Fill kind/apiVersion/name/namespace defaults into *definition*."""
    definition['kind'] = resource.kind
    definition['apiVersion'] = resource.group_version
    metadata = definition.setdefault('metadata', {})
    if self.name and not metadata.get('name'):
        metadata['name'] = self.name
    # Only namespaced resources receive a default namespace.
    if resource.namespaced and self.namespace and not metadata.get('namespace'):
        metadata['namespace'] = self.namespace
    return definition
def perform_action(self, resource, definition):
    """Reconcile one definition against the cluster.

    Dispatches on state/apply/force to delete, apply, create, replace or
    patch the resource, and returns a result dict with 'changed',
    'result', 'method' and (where computed) 'diff'/'duration'.
    """
    result = {'changed': False, 'result': {}}
    state = self.params.get('state', None)
    force = self.params.get('force', False)
    name = definition['metadata'].get('name')
    namespace = definition['metadata'].get('namespace')
    existing = None
    wait = self.params.get('wait')
    wait_sleep = self.params.get('wait_sleep')
    wait_timeout = self.params.get('wait_timeout')
    wait_condition = None
    if self.params.get('wait_condition') and self.params['wait_condition'].get('type'):
        wait_condition = self.params['wait_condition']

    self.remove_aliases()

    try:
        # ignore append_hash for resources other than ConfigMap and Secret
        if self.append_hash and definition['kind'] in ['ConfigMap', 'Secret']:
            name = '%s-%s' % (name, generate_hash(definition))
            definition['metadata']['name'] = name
        params = dict(name=name)
        if namespace:
            params['namespace'] = namespace
        existing = resource.get(**params)
    except NotFoundError:
        # Remove traceback so that it doesn't show up in later failures
        try:
            sys.exc_clear()
        except AttributeError:
            # no sys.exc_clear on python3
            pass
    except ForbiddenError as exc:
        # Reading a nonexistent Project is forbidden; fall back to a
        # ProjectRequest, which is the OpenShift way to create one.
        if definition['kind'] in ['Project', 'ProjectRequest'] and state != 'absent':
            return self.create_project_request(definition)
        self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body),
                       error=exc.status, status=exc.status, reason=exc.reason)
    except DynamicApiError as exc:
        self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body),
                       error=exc.status, status=exc.status, reason=exc.reason)

    if state == 'absent':
        result['method'] = "delete"
        if not existing:
            # The object already does not exist
            return result
        else:
            # Delete the object
            result['changed'] = True
            if not self.check_mode:
                try:
                    k8s_obj = resource.delete(**params)
                    result['result'] = k8s_obj.to_dict()
                except DynamicApiError as exc:
                    self.fail_json(msg="Failed to delete object: {0}".format(exc.body),
                                   error=exc.status, status=exc.status, reason=exc.reason)
                if wait:
                    success, resource, duration = self.wait(resource, definition, wait_sleep, wait_timeout, 'absent')
                    result['duration'] = duration
                    if not success:
                        self.fail_json(msg="Resource deletion timed out", **result)
            return result
    else:
        if self.apply:
            if self.check_mode:
                ignored, k8s_obj = apply_object(resource, definition)
            else:
                try:
                    k8s_obj = resource.apply(definition, namespace=namespace).to_dict()
                except DynamicApiError as exc:
                    msg = "Failed to apply object: {0}".format(exc.body)
                    if self.warnings:
                        msg += "\n" + "\n ".join(self.warnings)
                    self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason)
            success = True
            result['result'] = k8s_obj
            if wait:
                success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
            if existing:
                existing = existing.to_dict()
            else:
                existing = {}
            match, diffs = self.diff_objects(existing, result['result'])
            result['changed'] = not match
            result['diff'] = diffs
            result['method'] = 'apply'
            if not success:
                self.fail_json(msg="Resource apply timed out", **result)
            return result

        if not existing:
            if self.check_mode:
                k8s_obj = definition
            else:
                try:
                    k8s_obj = resource.create(definition, namespace=namespace).to_dict()
                except ConflictError:
                    # Some resources, like ProjectRequests, can't be created multiple times,
                    # because the resources that they create don't match their kind
                    # In this case we'll mark it as unchanged and warn the user
                    self.warn("{0} was not found, but creating it returned a 409 Conflict error. This can happen \
if the resource you are creating does not directly create a resource of the same kind.".format(name))
                    return result
                except DynamicApiError as exc:
                    msg = "Failed to create object: {0}".format(exc.body)
                    if self.warnings:
                        msg += "\n" + "\n ".join(self.warnings)
                    self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason)
            success = True
            result['result'] = k8s_obj
            # Never wait in check mode: the object was not actually created.
            if wait and not self.check_mode:
                success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
            result['changed'] = True
            result['method'] = 'create'
            if not success:
                self.fail_json(msg="Resource creation timed out", **result)
            return result

        match = False
        diffs = []

        if existing and force:
            if self.check_mode:
                k8s_obj = definition
            else:
                try:
                    k8s_obj = resource.replace(definition, name=name, namespace=namespace, append_hash=self.append_hash).to_dict()
                except DynamicApiError as exc:
                    msg = "Failed to replace object: {0}".format(exc.body)
                    if self.warnings:
                        msg += "\n" + "\n ".join(self.warnings)
                    self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason)
            match, diffs = self.diff_objects(existing.to_dict(), k8s_obj)
            success = True
            result['result'] = k8s_obj
            if wait:
                success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
                # Re-diff against the post-wait state of the object.
                match, diffs = self.diff_objects(existing.to_dict(), result['result'])
            result['changed'] = not match
            result['method'] = 'replace'
            result['diff'] = diffs
            if not success:
                self.fail_json(msg="Resource replacement timed out", **result)
            return result

        # Differences exist between the existing obj and requested params
        if self.check_mode:
            k8s_obj = dict_merge(existing.to_dict(), definition)
        else:
            if LooseVersion(self.openshift_version) < LooseVersion("0.6.2"):
                k8s_obj, error = self.patch_resource(resource, definition, existing, name,
                                                     namespace)
            else:
                # Try each requested merge type until one succeeds.
                for merge_type in self.params['merge_type'] or ['strategic-merge', 'merge']:
                    k8s_obj, error = self.patch_resource(resource, definition, existing, name,
                                                         namespace, merge_type=merge_type)
                    if not error:
                        break
            if error:
                self.fail_json(**error)
        success = True
        result['result'] = k8s_obj
        if wait:
            success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
        match, diffs = self.diff_objects(existing.to_dict(), result['result'])
        result['changed'] = not match
        result['method'] = 'patch'
        result['diff'] = diffs

        if not success:
            self.fail_json(msg="Resource update timed out", **result)
        return result
def patch_resource(self, resource, definition, existing, name, namespace, merge_type=None):
    """Patch *existing* with *definition*; return (patched_obj, error_dict).

    On success error_dict is empty; on failure patched_obj is None and
    error_dict carries fail_json-ready details.

    :param merge_type: optional patch strategy ('json', 'merge',
        'strategic-merge'); selects the request content type.
    """
    params = dict(name=name, namespace=namespace)
    if merge_type:
        params['content_type'] = 'application/{0}-patch+json'.format(merge_type)
    try:
        k8s_obj = resource.patch(definition, **params).to_dict()
    except DynamicApiError as exc:
        msg = "Failed to patch object: {0}".format(exc.body)
        if self.warnings:
            msg += "\n" + "\n ".join(self.warnings)
        error = dict(msg=msg, error=exc.status, status=exc.status, reason=exc.reason, warnings=self.warnings)
        return None, error
    # Dead code removed: the previous version computed an unused
    # diff_objects() result and an unused `error = {}` before returning.
    return k8s_obj, {}
def create_project_request(self, definition):
    """Create an OpenShift ProjectRequest from the given Project definition."""
    definition['kind'] = 'ProjectRequest'
    result = {'changed': False, 'result': {}}
    resource = self.find_resource('ProjectRequest', definition['apiVersion'], fail=True)
    if self.check_mode:
        result['changed'] = True
        result['method'] = 'create'
        return result
    try:
        k8s_obj = resource.create(definition)
    except DynamicApiError as exc:
        self.fail_json(msg="Failed to create object: {0}".format(exc.body),
                       error=exc.status, status=exc.status, reason=exc.reason)
    result['result'] = k8s_obj.to_dict()
    result['changed'] = True
    result['method'] = 'create'
    return result
def _wait_for(self, resource, name, namespace, predicate, sleep, timeout, state):
    """Poll the resource until *predicate* is satisfied or *timeout* elapses.

    Returns (success, resource_dict_or_None, elapsed_seconds).
    """
    start = datetime.now()

    def _wait_for_elapsed():
        # NOTE(review): .seconds ignores the days component of the delta —
        # fine for sub-day timeouts, which is the practical range here.
        return (datetime.now() - start).seconds

    response = None
    while _wait_for_elapsed() < timeout:
        try:
            response = resource.get(name=name, namespace=namespace)
            if predicate(response):
                if response:
                    return True, response.to_dict(), _wait_for_elapsed()
                else:
                    return True, {}, _wait_for_elapsed()
            time.sleep(sleep)
        except NotFoundError:
            # A missing resource is success when we are waiting for deletion.
            if state == 'absent':
                return True, {}, _wait_for_elapsed()
    if response:
        response = response.to_dict()
    return False, response, _wait_for_elapsed()
def wait(self, resource, definition, sleep, timeout, state='present', condition=None):
    """Wait for *definition* to reach the desired state.

    Chooses a readiness predicate by kind (Deployment/DaemonSet/Pod), a
    custom status-condition check, or absence, then polls via _wait_for.
    Returns the (success, resource_dict, elapsed) tuple from _wait_for.
    """
    def _deployment_ready(deployment):
        # FIXME: frustratingly bool(deployment.status) is True even if status is empty
        # Furthermore deployment.status.availableReplicas == deployment.status.replicas == None if status is empty
        return (deployment.status and deployment.status.replicas is not None and
                deployment.status.availableReplicas == deployment.status.replicas and
                deployment.status.observedGeneration == deployment.metadata.generation)

    def _pod_ready(pod):
        return (pod.status and pod.status.containerStatuses is not None and
                all([container.ready for container in pod.status.containerStatuses]))

    def _daemonset_ready(daemonset):
        return (daemonset.status and daemonset.status.desiredNumberScheduled is not None and
                daemonset.status.numberReady == daemonset.status.desiredNumberScheduled and
                daemonset.status.observedGeneration == daemonset.metadata.generation)

    def _custom_condition(resource):
        if not resource.status or not resource.status.conditions:
            return False
        match = [x for x in resource.status.conditions if x.type == condition['type']]
        if not match:
            return False
        # There should never be more than one condition of a specific type
        match = match[0]
        if match.status == 'Unknown':
            if match.status == condition['status']:
                if 'reason' not in condition:
                    return True
                # NOTE(review): when the argspec supplies reason=None the key
                # is still present, so this branch returns False even though
                # status matched — looks unintended; confirm against the
                # condition_spec defaults before relying on it.
                if condition['reason']:
                    return match.reason == condition['reason']
                return False
        status = True if match.status == 'True' else False
        if status == condition['status']:
            if condition.get('reason'):
                return match.reason == condition['reason']
            return True
        return False

    def _resource_absent(resource):
        return not resource

    waiter = dict(
        Deployment=_deployment_ready,
        DaemonSet=_daemonset_ready,
        Pod=_pod_ready
    )
    kind = definition['kind']
    if state == 'present' and not condition:
        # Unknown kinds fall through to "exists" (identity predicate).
        predicate = waiter.get(kind, lambda x: x)
    elif state == 'present' and condition:
        predicate = _custom_condition
    else:
        predicate = _resource_absent
    return self._wait_for(resource, definition['metadata']['name'], definition['metadata'].get('namespace'), predicate, sleep, timeout, state)

@ -1,674 +0,0 @@
#!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam_role
short_description: Manage AWS IAM roles
description:
- Manage AWS IAM roles.
version_added: "2.3"
author: "Rob White (@wimnat)"
options:
path:
description:
- The path to the role. For more information about paths, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html).
default: "/"
type: str
name:
description:
- The name of the role to create.
required: true
type: str
description:
description:
- Provides a description of the role.
version_added: "2.5"
type: str
boundary:
description:
- The ARN of an IAM managed policy to use to restrict the permissions this role can pass on to IAM roles/users that it creates.
- Boundaries cannot be set on Instance Profiles, as such if this option is specified then I(create_instance_profile) must be C(false).
- This is intended for roles/users that have permissions to create new IAM objects.
- For more information on boundaries, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html).
- Requires botocore 1.10.57 or above.
aliases: [boundary_policy_arn]
version_added: "2.7"
type: str
assume_role_policy_document:
description:
- The trust relationship policy document that grants an entity permission to assume the role.
- This parameter is required when I(state=present).
type: json
managed_policies:
description:
- A list of managed policy ARNs or, since Ansible 2.4, a list of either managed policy ARNs or friendly names.
- To remove all policies set I(purge_policies=true) and I(managed_policies=[None]).
- To embed an inline policy, use M(iam_policy).
aliases: ['managed_policy']
type: list
max_session_duration:
description:
- The maximum duration (in seconds) of a session when assuming the role.
- Valid values are between 1 and 12 hours (3600 and 43200 seconds).
version_added: "2.10"
type: int
purge_policies:
description:
- When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached.
- By default I(purge_policies=true). In Ansible 2.14 this will be changed to I(purge_policies=false).
version_added: "2.5"
type: bool
aliases: ['purge_policy', 'purge_managed_policies']
state:
description:
- Create or remove the IAM role.
default: present
choices: [ present, absent ]
type: str
create_instance_profile:
description:
- Creates an IAM instance profile along with the role.
default: true
version_added: "2.5"
type: bool
delete_instance_profile:
description:
- When I(delete_instance_profile=true) and I(state=absent) deleting a role will also delete the instance
profile created with the same I(name) as the role.
- Only applies when I(state=absent).
default: false
version_added: "2.10"
type: bool
tags:
description:
- Tag dict to apply to the queue.
- Requires botocore 1.12.46 or above.
version_added: "2.10"
type: dict
purge_tags:
description:
- Remove tags not listed in I(tags) when tags is specified.
default: true
version_added: "2.10"
type: bool
requirements: [ botocore, boto3 ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Create a role with description and tags
iam_role:
name: mynewrole
assume_role_policy_document: "{{ lookup('file','policy.json') }}"
description: This is My New Role
tags:
env: dev
- name: "Create a role and attach a managed policy called 'PowerUserAccess'"
iam_role:
name: mynewrole
assume_role_policy_document: "{{ lookup('file','policy.json') }}"
managed_policies:
- arn:aws:iam::aws:policy/PowerUserAccess
- name: Keep the role created above but remove all managed policies
iam_role:
name: mynewrole
assume_role_policy_document: "{{ lookup('file','policy.json') }}"
managed_policies: []
- name: Delete the role
iam_role:
name: mynewrole
assume_role_policy_document: "{{ lookup('file', 'policy.json') }}"
state: absent
'''
RETURN = '''
iam_role:
description: dictionary containing the IAM Role data
returned: success
type: complex
contains:
path:
description: the path to the role
type: str
returned: always
sample: /
role_name:
description: the friendly name that identifies the role
type: str
returned: always
sample: myrole
role_id:
description: the stable and unique string identifying the role
type: str
returned: always
sample: ABCDEFF4EZ4ABCDEFV4ZC
arn:
description: the Amazon Resource Name (ARN) specifying the role
type: str
returned: always
sample: "arn:aws:iam::1234567890:role/mynewrole"
create_date:
description: the date and time, in ISO 8601 date-time format, when the role was created
type: str
returned: always
sample: "2016-08-14T04:36:28+00:00"
assume_role_policy_document:
description: the policy that grants an entity permission to assume the role
type: str
returned: always
sample: {
'statement': [
{
'action': 'sts:AssumeRole',
'effect': 'Allow',
'principal': {
'service': 'ec2.amazonaws.com'
},
'sid': ''
}
],
'version': '2012-10-17'
}
attached_policies:
description: a list of dicts containing the name and ARN of the managed IAM policies attached to the role
type: list
returned: always
sample: [
{
'policy_arn': 'arn:aws:iam::aws:policy/PowerUserAccess',
'policy_name': 'PowerUserAccess'
}
]
tags:
description: role tags
type: dict
returned: always
sample: '{"Env": "Prod"}'
'''
import json
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies
from ansible.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict, compare_aws_tags
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # caught by AnsibleAWSModule
def compare_assume_role_policy_doc(current_policy_doc, new_policy_doc):
    """Return True when the two assume-role policy documents are equivalent.

    :param current_policy_doc: policy document as returned by IAM (dict)
    :param new_policy_doc: desired policy document as a JSON string
    """
    # compare_policies returns a truthy value when the policies DIFFER,
    # so equality is simply its negation — no if/else needed.
    return not compare_policies(current_policy_doc, json.loads(new_policy_doc))
@AWSRetry.jittered_backoff()
def _list_policies(connection):
    # Paginate through every managed policy visible to the account so that
    # friendly names can later be resolved to ARNs.
    paginator = connection.get_paginator('list_policies')
    return paginator.paginate().build_full_result()['Policies']
def convert_friendly_names_to_arns(connection, module, policy_names):
    """Resolve a mixed list of policy ARNs/friendly names to a list of ARNs.

    Returns the input unchanged when every entry is already an ARN;
    otherwise looks the names up against all account policies and fails
    the module when a name cannot be resolved.
    """
    # Fast path: nothing to resolve (readable all(...) instead of the
    # previous double-negative `not any([not ...])` list comprehension).
    if all(policy.startswith('arn:') for policy in policy_names):
        return policy_names
    allpolicies = {}
    for policy in _list_policies(connection):
        # Index by both name and ARN so either input form maps to the ARN.
        allpolicies[policy['PolicyName']] = policy['Arn']
        allpolicies[policy['Arn']] = policy['Arn']
    try:
        return [allpolicies[policy] for policy in policy_names]
    except KeyError as e:
        module.fail_json_aws(e, msg="Couldn't find policy")
def attach_policies(connection, module, policies_to_attach, params):
    """Attach each ARN in *policies_to_attach* to the role; return changed."""
    changed = False
    role_name = params['RoleName']
    for policy_arn in policies_to_attach:
        try:
            # Check mode still reports the change without touching the API.
            if not module.check_mode:
                connection.attach_role_policy(RoleName=role_name, PolicyArn=policy_arn, aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Unable to attach policy {0} to role {1}".format(policy_arn, role_name))
        changed = True
    return changed
def remove_policies(connection, module, policies_to_remove, params):
    """Detach each ARN in *policies_to_remove* from the role; return changed."""
    changed = False
    role_name = params['RoleName']
    for policy_arn in policies_to_remove:
        try:
            # Check mode still reports the change without touching the API.
            if not module.check_mode:
                connection.detach_role_policy(RoleName=role_name, PolicyArn=policy_arn, aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Unable to detach policy {0} from {1}".format(policy_arn, role_name))
        changed = True
    return changed
def generate_create_params(module):
    """Translate module options into boto3 create_role parameters."""
    params = {
        'Path': module.params.get('path'),
        'RoleName': module.params.get('name'),
        'AssumeRolePolicyDocument': module.params.get('assume_role_policy_document'),
    }
    # Optional parameters are only included when the user supplied them.
    description = module.params.get('description')
    if description is not None:
        params['Description'] = description
    max_session_duration = module.params.get('max_session_duration')
    if max_session_duration is not None:
        params['MaxSessionDuration'] = max_session_duration
    boundary = module.params.get('boundary')
    if boundary is not None:
        params['PermissionsBoundary'] = boundary
    tags = module.params.get('tags')
    if tags is not None:
        params['Tags'] = ansible_dict_to_boto3_tag_list(tags)
    return params
def create_basic_role(connection, module, params):
    """
    Perform the Role creation.
    Assumes tests for the role existing have already been performed.
    """
    try:
        if not module.check_mode:
            role = connection.create_role(aws_retry=True, **params)
            # 'Description' is documented as key of the role returned by create_role
            # but appears to be an AWS bug (the value is not returned using the AWS CLI either).
            # Get the role after creating it.
            role = get_role_with_backoff(connection, module, params['RoleName'])
        else:
            # Fabricate a placeholder role so later steps can run in check mode.
            role = {'MadeInCheckMode': True}
            role['AssumeRolePolicyDocument'] = json.loads(params['AssumeRolePolicyDocument'])
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to create role")
    return role
def update_role_assumed_policy(connection, module, params, role):
    """Sync the role's trust policy; return True when a change was needed."""
    # Nothing to do when the documents already match.
    if compare_assume_role_policy_doc(role['AssumeRolePolicyDocument'], params['AssumeRolePolicyDocument']):
        return False
    if module.check_mode:
        return True
    try:
        # Round-trip through json to normalise the document formatting.
        connection.update_assume_role_policy(
            RoleName=params['RoleName'],
            PolicyDocument=json.dumps(json.loads(params['AssumeRolePolicyDocument'])),
            aws_retry=True)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to update assume role policy for role {0}".format(params['RoleName']))
    return True
def update_role_description(connection, module, params, role):
    """Sync the role description; return True when an update is needed."""
    desired = params.get('Description')
    # No desired value, or already in sync: nothing to do.
    if desired is None or role.get('Description') == desired:
        return False
    if module.check_mode:
        return True
    try:
        connection.update_role_description(RoleName=params['RoleName'], Description=desired, aws_retry=True)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to update description for role {0}".format(params['RoleName']))
    return True
def update_role_max_session_duration(connection, module, params, role):
    """Sync MaxSessionDuration; return True when an update is needed."""
    desired = params.get('MaxSessionDuration')
    # No desired value, or already in sync: nothing to do.
    if desired is None or role.get('MaxSessionDuration') == desired:
        return False
    if module.check_mode:
        return True
    try:
        connection.update_role(RoleName=params['RoleName'], MaxSessionDuration=desired, aws_retry=True)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to update maximum session duration for role {0}".format(params['RoleName']))
    return True
def update_role_permissions_boundary(connection, module, params, role):
    """Sync the permissions boundary; return True when an update is needed."""
    desired = params.get('PermissionsBoundary')
    if desired is None:
        return False
    current = role.get('PermissionsBoundary', {}).get('PermissionsBoundaryArn', '')
    if desired == current:
        return False
    if module.check_mode:
        return True
    if desired == '':
        # An empty string requests removal of the boundary.
        try:
            connection.delete_role_permissions_boundary(RoleName=params['RoleName'], aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Unable to remove permission boundary for role {0}".format(params['RoleName']))
    else:
        try:
            connection.put_role_permissions_boundary(RoleName=params['RoleName'], PermissionsBoundary=desired, aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Unable to update permission boundary for role {0}".format(params['RoleName']))
    return True
def update_managed_policies(connection, module, params, role, managed_policies, purge_policies):
    """Attach/detach managed policies so the role matches *managed_policies*.

    Returns True when any attachment changed. A managed_policies of None
    means "leave attachments alone"; [None] means "remove everything".
    """
    # Check Managed Policies
    if managed_policies is None:
        return False

    # If we're manipulating a fake role
    if role.get('MadeInCheckMode', False):
        # Pretend the policies were attached so check-mode output looks right.
        role['AttachedPolicies'] = list(map(lambda x: {'PolicyArn': x, 'PolicyName': x.split(':')[5]}, managed_policies))
        return True

    # Get list of current attached managed policies
    current_attached_policies = get_attached_policy_list(connection, module, params['RoleName'])
    current_attached_policies_arn_list = [policy['PolicyArn'] for policy in current_attached_policies]

    # [None] is the documented sentinel for "detach every policy".
    if len(managed_policies) == 1 and managed_policies[0] is None:
        managed_policies = []

    policies_to_remove = set(current_attached_policies_arn_list) - set(managed_policies)
    policies_to_attach = set(managed_policies) - set(current_attached_policies_arn_list)

    changed = False

    if purge_policies:
        changed |= remove_policies(connection, module, policies_to_remove, params)

    changed |= attach_policies(connection, module, policies_to_attach, params)

    return changed
def create_or_update_role(connection, module):
    """Main 'state=present' entry point: create the role if missing,
    otherwise converge each attribute, then exit the module with role facts."""
    params = generate_create_params(module)
    role_name = params['RoleName']
    create_instance_profile = module.params.get('create_instance_profile')
    purge_policies = module.params.get('purge_policies')
    # An unset purge_policies currently behaves as True.
    if purge_policies is None:
        purge_policies = True
    managed_policies = module.params.get('managed_policies')
    if managed_policies:
        # Attempt to list the policies early so we don't leave things behind if we can't find them.
        managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)

    changed = False

    # Get role
    role = get_role(connection, module, role_name)

    # If role is None, create it
    if role is None:
        role = create_basic_role(connection, module, params)
        changed = True
    else:
        changed |= update_role_tags(connection, module, params, role)
        changed |= update_role_assumed_policy(connection, module, params, role)
        changed |= update_role_description(connection, module, params, role)
        changed |= update_role_max_session_duration(connection, module, params, role)
        changed |= update_role_permissions_boundary(connection, module, params, role)

    if create_instance_profile:
        changed |= create_instance_profiles(connection, module, params, role)

    changed |= update_managed_policies(connection, module, params, role, managed_policies, purge_policies)

    # Get the role again
    if not role.get('MadeInCheckMode', False):
        role = get_role(connection, module, params['RoleName'])
        role['AttachedPolicies'] = get_attached_policy_list(connection, module, params['RoleName'])
        role['tags'] = get_role_tags(connection, module)

    # NOTE(review): the role is returned both under 'iam_role' and flattened
    # at the top level — presumably for backwards compatibility; confirm
    # before collapsing to a single form.
    module.exit_json(
        changed=changed, iam_role=camel_dict_to_snake_dict(role, ignore_list=['tags']),
        **camel_dict_to_snake_dict(role, ignore_list=['tags']))
def create_instance_profiles(connection, module, params, role):
    """Ensure an instance profile named after the role exists and holds the role.

    Returns True when a profile was (or, in check mode, would be) created,
    False when nothing needed doing.  AWS failures exit the module.
    """
    # A role that only exists virtually (created in check mode) gets no profile.
    if role.get('MadeInCheckMode', False):
        return False

    role_name = params['RoleName']

    # Fetch the profiles currently attached to the role.
    try:
        existing_profiles = connection.list_instance_profiles_for_role(RoleName=role_name, aws_retry=True)['InstanceProfiles']
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name))

    # A profile with the role's name already exists - nothing to do.
    if any(profile['InstanceProfileName'] == role_name for profile in existing_profiles):
        return False

    if module.check_mode:
        return True

    # Create the profile; tolerate a concurrent creation by someone else.
    try:
        connection.create_instance_profile(InstanceProfileName=role_name, Path=params['Path'], aws_retry=True)
    except ClientError as e:
        if e.response['Error']['Code'] == 'EntityAlreadyExists':
            # Implies someone's changing things at the same time - treat as done.
            return False
        module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(role_name))
    except BotoCoreError as e:
        module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(role_name))

    # Attach the role to the freshly created profile.
    try:
        connection.add_role_to_instance_profile(InstanceProfileName=role_name, RoleName=role_name, aws_retry=True)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to attach role {0} to instance profile {0}".format(role_name))

    return True
def remove_instance_profiles(connection, module, role_params, role):
    """Detach the role from all of its instance profiles.

    When delete_instance_profile is set, the profile that shares the role's
    name (the one this module auto-creates) is also deleted.  Honours check
    mode; AWS failures exit the module.
    """
    role_name = module.params.get('name')
    delete_profiles = module.params.get("delete_instance_profile")

    try:
        instance_profiles = connection.list_instance_profiles_for_role(aws_retry=True, **role_params)['InstanceProfiles']
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name))

    # Remove the role from the instance profile(s)
    for profile in instance_profiles:
        profile_name = profile['InstanceProfileName']
        try:
            if not module.check_mode:
                connection.remove_role_from_instance_profile(aws_retry=True, InstanceProfileName=profile_name, **role_params)
                # Only the profile named after the role (the auto-created one)
                # is ever deleted, and only when the user asked for it.
                if profile_name == role_name:
                    if delete_profiles:
                        try:
                            connection.delete_instance_profile(InstanceProfileName=profile_name, aws_retry=True)
                        except (BotoCoreError, ClientError) as e:
                            module.fail_json_aws(e, msg="Unable to remove instance profile {0}".format(profile_name))
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Unable to remove role {0} from instance profile {1}".format(role_name, profile_name))
def destroy_role(connection, module):
    """Delete the role named in the module params, if it exists.

    Detaches instance profiles, managed policies and the permissions boundary
    first, since IAM refuses to delete a role that still has any of them.
    Always exits the module via exit_json - this function never returns.
    """
    role_name = module.params.get('name')
    role = get_role(connection, module, role_name)
    role_params = dict()
    role_params['RoleName'] = role_name
    boundary_params = dict(role_params)
    # An empty PermissionsBoundary makes update_role_permissions_boundary
    # strip any existing boundary from the role.
    boundary_params['PermissionsBoundary'] = ''

    if role is None:
        module.exit_json(changed=False)

    # Before we try to delete the role we need to remove any
    # - attached instance profiles
    # - attached managed policies
    # - permissions boundary
    remove_instance_profiles(connection, module, role_params, role)
    update_managed_policies(connection, module, role_params, role, [], True)
    update_role_permissions_boundary(connection, module, boundary_params, role)

    try:
        if not module.check_mode:
            connection.delete_role(aws_retry=True, **role_params)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to delete role")

    module.exit_json(changed=True)
def get_role_with_backoff(connection, module, name):
    """Look up an IAM role, retrying while AWS reports NoSuchEntity.

    Useful right after creation, when IAM's eventual consistency may not yet
    surface the role.  Any failure after the retries exits the module.
    """
    retry_decorator = AWSRetry.jittered_backoff(catch_extra_error_codes=['NoSuchEntity'])
    try:
        response = retry_decorator(connection.get_role)(RoleName=name)
        return response['Role']
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
def get_role(connection, module, name):
    """Return the role dict for *name*, or None when the role does not exist.

    Any AWS failure other than NoSuchEntity exits the module.
    """
    try:
        return connection.get_role(RoleName=name, aws_retry=True)['Role']
    except BotoCoreError as e:
        module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
    except ClientError as e:
        # A missing role is an expected outcome, not an error.
        if e.response['Error']['Code'] != 'NoSuchEntity':
            module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
        return None
def get_attached_policy_list(connection, module, name):
    """Return the list of managed policies attached to role *name*.

    AWS failures exit the module.
    """
    try:
        response = connection.list_attached_role_policies(RoleName=name, aws_retry=True)
    except (ClientError, BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name))
    return response['AttachedPolicies']
def get_role_tags(connection, module):
    """Return the role's tags as a plain dict.

    Older botocore clients have no list_role_tags method; in that case an
    empty dict is returned rather than failing.  AWS errors exit the module.
    """
    role_name = module.params.get('name')
    # Tagging support depends on the botocore version in use.
    if not hasattr(connection, 'list_role_tags'):
        return {}
    try:
        tag_list = connection.list_role_tags(RoleName=role_name, aws_retry=True)['Tags']
    except (ClientError, BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to list tags for role {0}".format(role_name))
    return boto3_tag_list_to_ansible_dict(tag_list)
def update_role_tags(connection, module, params, role):
    """Reconcile the role's tags with the requested ones.

    Honours purge_tags and check mode.  Returns True when any tag was (or
    would be) added or removed, False otherwise.
    """
    desired_tags = params.get('Tags')
    if desired_tags is None:
        # No tags requested at all - leave the existing tags untouched.
        return False
    desired_tags = boto3_tag_list_to_ansible_dict(desired_tags)

    role_name = module.params.get('name')
    purge_tags = module.params.get('purge_tags')

    # Best effort: an unreadable tag list is treated as empty.
    try:
        current_tags = boto3_tag_list_to_ansible_dict(connection.list_role_tags(RoleName=role_name, aws_retry=True)['Tags'])
    except (ClientError, KeyError):
        current_tags = {}

    to_add, to_remove = compare_aws_tags(current_tags, desired_tags, purge_tags=purge_tags)

    if not module.check_mode:
        try:
            if to_remove:
                connection.untag_role(RoleName=role_name, TagKeys=to_remove, aws_retry=True)
            if to_add:
                connection.tag_role(RoleName=role_name, Tags=ansible_dict_to_boto3_tag_list(to_add), aws_retry=True)
        except (ClientError, BotoCoreError) as e:
            module.fail_json_aws(e, msg='Unable to set tags for role %s' % role_name)

    return bool(to_add) or bool(to_remove)
def main():
    """Module entry point for iam_role.

    Parses and validates arguments (boundary/tag version requirements,
    max_session_duration range, path format) and dispatches on state.
    """
    argument_spec = dict(
        name=dict(type='str', required=True),
        path=dict(type='str', default="/"),
        assume_role_policy_document=dict(type='json'),
        managed_policies=dict(type='list', aliases=['managed_policy']),
        max_session_duration=dict(type='int'),
        state=dict(type='str', choices=['present', 'absent'], default='present'),
        description=dict(type='str'),
        boundary=dict(type='str', aliases=['boundary_policy_arn']),
        create_instance_profile=dict(type='bool', default=True),
        delete_instance_profile=dict(type='bool', default=False),
        purge_policies=dict(type='bool', aliases=['purge_policy', 'purge_managed_policies']),
        tags=dict(type='dict'),
        purge_tags=dict(type='bool', default=True),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=[('state', 'present', ['assume_role_policy_document'])],
                              supports_check_mode=True)

    # purge_policies deliberately has no argspec default: None means "not set
    # by the user", which triggers this deprecation warning while keeping the
    # legacy behaviour (True) in create_or_update_role.
    # Fixed typo in the user-facing message: "explicity" -> "explicitly".
    if module.params.get('purge_policies') is None:
        module.deprecate('In Ansible 2.14 the default value of purge_policies will change from true to false.'
                         ' To maintain the existing behaviour explicitly set purge_policies=true',
                         version='2.14', collection_name='ansible.builtin')

    if module.params.get('boundary'):
        if module.params.get('create_instance_profile'):
            module.fail_json(msg="When using a boundary policy, `create_instance_profile` must be set to `false`.")
        if not module.params.get('boundary').startswith('arn:aws:iam'):
            module.fail_json(msg="Boundary policy must be an ARN")
    # Tagging and permissions-boundary support need minimum botocore versions.
    if module.params.get('tags') is not None and not module.botocore_at_least('1.12.46'):
        module.fail_json(msg="When managing tags botocore must be at least v1.12.46. "
                             "Current versions: boto3-{boto3_version} botocore-{botocore_version}".format(**module._gather_versions()))
    if module.params.get('boundary') is not None and not module.botocore_at_least('1.10.57'):
        module.fail_json(msg="When using a boundary policy, botocore must be at least v1.10.57. "
                             "Current versions: boto3-{boto3_version} botocore-{botocore_version}".format(**module._gather_versions()))
    if module.params.get('max_session_duration'):
        max_session_duration = module.params.get('max_session_duration')
        if max_session_duration < 3600 or max_session_duration > 43200:
            module.fail_json(msg="max_session_duration must be between 1 and 12 hours (3600 and 43200 seconds)")
    if module.params.get('path'):
        path = module.params.get('path')
        if not path.endswith('/') or not path.startswith('/'):
            module.fail_json(msg="path must begin and end with /")

    connection = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())

    state = module.params.get("state")

    if state == 'present':
        create_or_update_role(connection, module)
    else:
        destroy_role(connection, module)


if __name__ == '__main__':
    main()

@ -1,274 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Chris Houseknecht <@chouseknecht>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: k8s
short_description: Manage Kubernetes (K8s) objects
version_added: "2.6"
author:
- "Chris Houseknecht (@chouseknecht)"
- "Fabian von Feilitzsch (@fabianvf)"
description:
- Use the OpenShift Python client to perform CRUD operations on K8s objects.
- Pass the object definition from a source file or inline. See examples for reading
files and using Jinja templates or vault-encrypted files.
- Access to the full range of K8s APIs.
- Use the M(k8s_info) module to obtain a list of items about an object of type C(kind)
- Authenticate using either a config file, certificates, password or token.
- Supports check mode.
extends_documentation_fragment:
- k8s_state_options
- k8s_name_options
- k8s_resource_options
- k8s_auth_options
notes:
- If your OpenShift Python library is not 0.9.0 or newer and you are trying to
remove an item from an associative array/dictionary, for example a label or
an annotation, you will need to explicitly set the value of the item to be
removed to `null`. Simply deleting the entry in the dictionary will not
remove it from openshift or kubernetes.
options:
merge_type:
description:
- Whether to override the default patch merge approach with a specific type. By default, the strategic
merge will typically be used.
- For example, Custom Resource Definitions typically aren't updatable by the usual strategic merge. You may
want to use C(merge) if you see "strategic merge patch format is not supported"
- See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)
- Requires openshift >= 0.6.2
- If more than one merge_type is given, the merge_types will be tried in order
- If openshift >= 0.6.2, this defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
on resource kinds that combine Custom Resources and built-in resources. For openshift < 0.6.2, the default
is simply C(strategic-merge).
- mutually exclusive with C(apply)
choices:
- json
- merge
- strategic-merge
type: list
version_added: "2.7"
wait:
description:
- Whether to wait for certain resource kinds to end up in the desired state. By default the module exits once Kubernetes has
received the request
- Implemented for C(state=present) for C(Deployment), C(DaemonSet) and C(Pod), and for C(state=absent) for all resource kinds.
- For resource kinds without an implementation, C(wait) returns immediately unless C(wait_condition) is set.
default: no
type: bool
version_added: "2.8"
wait_sleep:
description:
- Number of seconds to sleep between checks.
default: 5
version_added: "2.9"
wait_timeout:
description:
- How long in seconds to wait for the resource to end up in the desired state. Ignored if C(wait) is not set.
default: 120
version_added: "2.8"
wait_condition:
description:
- Specifies a custom condition on the status to wait for. Ignored if C(wait) is not set or is set to False.
suboptions:
type:
description:
- The type of condition to wait for. For example, the C(Pod) resource will set the C(Ready) condition (among others)
- Required if you are specifying a C(wait_condition). If left empty, the C(wait_condition) field will be ignored.
- The possible types for a condition are specific to each resource type in Kubernetes. See the API documentation of the status field
for a given resource to see possible choices.
status:
description:
- The value of the status field in your desired condition.
- For example, if a C(Deployment) is paused, the C(Progressing) C(type) will have the C(Unknown) status.
choices:
- True
- False
- Unknown
reason:
description:
- The value of the reason field in your desired condition
- For example, if a C(Deployment) is paused, The C(Progressing) C(type) will have the C(DeploymentPaused) reason.
- The possible reasons in a condition are specific to each resource type in Kubernetes. See the API documentation of the status field
for a given resource to see possible choices.
version_added: "2.8"
validate:
description:
- how (if at all) to validate the resource definition against the kubernetes schema.
Requires the kubernetes-validate python module
suboptions:
fail_on_error:
description: whether to fail on validation errors.
required: yes
type: bool
version:
description: version of Kubernetes to validate against. defaults to Kubernetes server version
strict:
description: whether to fail when passing unexpected properties
default: no
type: bool
version_added: "2.8"
append_hash:
description:
- Whether to append a hash to a resource name for immutability purposes
- Applies only to ConfigMap and Secret resources
- The parameter will be silently ignored for other resource kinds
- The full definition of an object is needed to generate the hash - this means that deleting an object created with append_hash
will only work if the same object is passed with state=absent (alternatively, just use state=absent with the name including
the generated hash and append_hash=no)
type: bool
version_added: "2.8"
apply:
description:
- C(apply) compares the desired resource definition with the previously supplied resource definition,
ignoring properties that are automatically generated
- C(apply) works better with Services than 'force=yes'
- mutually exclusive with C(merge_type)
type: bool
version_added: "2.9"
requirements:
- "python >= 2.7"
- "openshift >= 0.6"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
- name: Create a k8s namespace
k8s:
name: testing
api_version: v1
kind: Namespace
state: present
- name: Create a Service object from an inline definition
k8s:
state: present
definition:
apiVersion: v1
kind: Service
metadata:
name: web
namespace: testing
labels:
app: galaxy
service: web
spec:
selector:
app: galaxy
service: web
ports:
- protocol: TCP
targetPort: 8000
name: port-8000-tcp
port: 8000
- name: Remove an existing Service object
k8s:
state: absent
api_version: v1
kind: Service
namespace: testing
name: web
# Passing the object definition from a file
- name: Create a Deployment by reading the definition from a local file
k8s:
state: present
src: /testing/deployment.yml
- name: >-
Read definition file from the Ansible controller file system.
If the definition file has been encrypted with Ansible Vault it will automatically be decrypted.
k8s:
state: present
definition: "{{ lookup('file', '/testing/deployment.yml') | from_yaml }}"
- name: Read definition file from the Ansible controller file system after Jinja templating
k8s:
state: present
definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
- name: fail on validation errors
k8s:
state: present
definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
validate:
fail_on_error: yes
- name: warn on validation errors, check for unexpected properties
k8s:
state: present
definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
validate:
fail_on_error: no
strict: yes
'''
RETURN = '''
result:
description:
- The created, patched, or otherwise present object. Will be empty in the case of a deletion.
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
items:
description: Returned only when multiple yaml documents are passed to src or resource_definition
returned: when resource_definition or src contains list of objects
type: list
duration:
description: elapsed time of task in seconds
returned: when C(wait) is true
type: int
sample: 48
'''
from ansible.module_utils.k8s.raw import KubernetesRawModule
def main():
    """Module entry point for k8s: delegate all work to KubernetesRawModule."""
    KubernetesRawModule().execute_module()


if __name__ == '__main__':
    main()

@ -1,180 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Will Thames <@willthames>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: k8s_info
short_description: Describe Kubernetes (K8s) objects
version_added: "2.7"
author:
- "Will Thames (@willthames)"
description:
- Use the OpenShift Python client to perform read operations on K8s objects.
- Access to the full range of K8s APIs.
- Authenticate using either a config file, certificates, password or token.
- Supports check mode.
- This module was called C(k8s_facts) before Ansible 2.9. The usage did not change.
options:
api_version:
description:
- Use to specify the API version. in conjunction with I(kind), I(name), and I(namespace) to identify a
specific object.
default: v1
aliases:
- api
- version
kind:
description:
- Use to specify an object model. Use in conjunction with I(api_version), I(name), and I(namespace) to identify a
specific object.
required: yes
name:
description:
- Use to specify an object name. Use in conjunction with I(api_version), I(kind) and I(namespace) to identify a
specific object.
namespace:
description:
- Use to specify an object namespace. Use in conjunction with I(api_version), I(kind), and I(name)
to identify a specific object.
label_selectors:
description: List of label selectors to use to filter results
field_selectors:
description: List of field selectors to use to filter results
extends_documentation_fragment:
- k8s_auth_options
requirements:
- "python >= 2.7"
- "openshift >= 0.6"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
- name: Get an existing Service object
k8s_info:
api_version: v1
kind: Service
name: web
namespace: testing
register: web_service
- name: Get a list of all service objects
k8s_info:
api_version: v1
kind: Service
namespace: testing
register: service_list
- name: Get a list of all pods from any namespace
k8s_info:
kind: Pod
register: pod_list
- name: Search for all Pods labelled app=web
k8s_info:
kind: Pod
label_selectors:
- app = web
- tier in (dev, test)
- name: Search for all running pods
k8s_info:
kind: Pod
field_selectors:
- status.phase=Running
'''
RETURN = '''
resources:
description:
- The object(s) that exists
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: dict
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: dict
status:
description: Current status details for the object.
returned: success
type: dict
'''
from ansible.module_utils.k8s.common import KubernetesAnsibleModule, AUTH_ARG_SPEC
import copy
class KubernetesInfoModule(KubernetesAnsibleModule):
    """Ansible module implementation for read-only Kubernetes object queries."""

    def __init__(self, *args, **kwargs):
        """Initialise with check mode supported (the module never mutates)."""
        KubernetesAnsibleModule.__init__(self, *args,
                                         supports_check_mode=True,
                                         **kwargs)
        # Support invocation under the legacy name with a deprecation warning.
        if self._name == 'k8s_facts':
            self.deprecate("The 'k8s_facts' module has been renamed to 'k8s_info'",
                           version='2.13', collection_name='ansible.builtin')

    def execute_module(self):
        """Query the cluster and exit with the matching resources (changed=False)."""
        self.client = self.get_api_client()
        self.exit_json(changed=False,
                       **self.kubernetes_facts(self.params['kind'],
                                               self.params['api_version'],
                                               self.params['name'],
                                               self.params['namespace'],
                                               self.params['label_selectors'],
                                               self.params['field_selectors']))

    @property
    def argspec(self):
        """Argument spec: shared auth options plus object-identification options.

        AUTH_ARG_SPEC is deep-copied so mutating the result cannot corrupt the
        shared module-level constant.
        """
        args = copy.deepcopy(AUTH_ARG_SPEC)
        args.update(
            dict(
                kind=dict(required=True),
                api_version=dict(default='v1', aliases=['api', 'version']),
                name=dict(),
                namespace=dict(),
                label_selectors=dict(type='list', default=[]),
                field_selectors=dict(type='list', default=[]),
            )
        )
        return args
def main():
    """Module entry point for k8s_info: run the info module implementation."""
    KubernetesInfoModule().execute_module()


if __name__ == '__main__':
    main()

@ -1,176 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
module: python_requirements_info
short_description: Show python path and assert dependency versions
description:
- Get info about available Python requirements on the target host, including listing required libraries and gathering versions.
- This module was called C(python_requirements_facts) before Ansible 2.9. The usage did not change.
version_added: "2.7"
options:
dependencies:
description: >
A list of version-likes or module names to check for installation.
Supported operators: <, >, <=, >=, or ==. The bare module name like
I(ansible), the module with a specific version like I(boto3==1.6.1), or a
partial version like I(requests>2) are all valid specifications.
author:
- Will Thames (@willthames)
- Ryan Scott Brown (@ryansb)
'''
EXAMPLES = '''
- name: show python lib/site paths
python_requirements_info:
- name: check for modern boto3 and botocore versions
python_requirements_info:
dependencies:
- boto3>1.6
- botocore<2
'''
RETURN = '''
python:
description: path to python version used
returned: always
type: str
sample: /usr/local/opt/python@2/bin/python2.7
python_version:
description: version of python
returned: always
type: str
sample: "2.7.15 (default, May 1 2018, 16:44:08)\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]"
python_system_path:
description: List of paths python is looking for modules in
returned: always
type: list
sample:
- /usr/local/opt/python@2/site-packages/
- /usr/lib/python/site-packages/
- /usr/lib/python/site-packages/
valid:
description: A dictionary of dependencies that matched their desired versions. If no version was specified, then I(desired) will be null
returned: always
type: dict
sample:
boto3:
desired: null
installed: 1.7.60
botocore:
desired: botocore<2
installed: 1.10.60
mismatched:
description: A dictionary of dependencies that did not satisfy the desired version
returned: always
type: dict
sample:
botocore:
desired: botocore>2
installed: 1.10.60
not_found:
description: A list of packages that could not be imported at all, and are not installed
returned: always
type: list
sample:
- boto4
- requests
'''
import re
import sys
import operator
HAS_DISTUTILS = False
try:
import pkg_resources
from distutils.version import LooseVersion
HAS_DISTUTILS = True
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
# Map of comparison operators (as written in a requirement spec such as
# 'boto3>=1.6') to the corresponding two-argument comparison functions.
operations = {
    '<=': operator.le,
    '>=': operator.ge,
    '<': operator.lt,
    '>': operator.gt,
    '==': operator.eq,
}
def main():
    """Check the target host's Python environment against requested dependencies.

    Exits with the interpreter path, version and sys.path, plus 'valid',
    'mismatched' (both dicts keyed by package name) and 'not_found' (list)
    describing each requested dependency specification.
    """
    module = AnsibleModule(
        argument_spec=dict(
            dependencies=dict(type='list')
        ),
        supports_check_mode=True,
    )
    # Support invocation under the legacy name with a deprecation warning.
    if module._name == 'python_requirements_facts':
        module.deprecate("The 'python_requirements_facts' module has been renamed to 'python_requirements_info'",
                         version='2.13', collection_name='ansible.builtin')
    if not HAS_DISTUTILS:
        module.fail_json(
            msg='Could not import "distutils" and "pkg_resources" libraries to introspect python environment.',
            python=sys.executable,
            python_version=sys.version,
            python_system_path=sys.path,
        )
    # name, optional operator, optional version: 'boto3', 'boto3==1.6.1', 'requests>2'
    pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(==|[><]=?)?([0-9.]+)?$')

    results = dict(
        not_found=[],
        mismatched={},
        valid={},
    )

    for dep in (module.params.get('dependencies') or []):
        match = pkg_dep_re.match(dep)
        if match is None:
            module.fail_json(msg="Failed to parse version requirement '{0}'. Must be formatted like 'ansible>2.6'".format(dep))
        pkg, op, version = match.groups()
        if op is not None and op not in operations:
            module.fail_json(msg="Failed to parse version requirement '{0}'. Operator must be one of >, <, <=, >=, or ==".format(dep))
        try:
            existing = pkg_resources.get_distribution(pkg).version
        except pkg_resources.DistributionNotFound:
            # not there
            results['not_found'].append(pkg)
            continue
        if op is None and version is None:
            results['valid'][pkg] = {
                'installed': existing,
                'desired': None,
            }
        elif operations[op](LooseVersion(existing), LooseVersion(version)):
            results['valid'][pkg] = {
                'installed': existing,
                'desired': dep,
            }
        else:
            # BUG FIX: key by package name instead of replacing the whole
            # 'mismatched' dict, which clobbered earlier mismatches and broke
            # the documented per-package return shape.
            results['mismatched'][pkg] = {
                'installed': existing,
                'desired': dep,
            }

    module.exit_json(
        python=sys.executable,
        python_version=sys.version,
        python_system_path=sys.path,
        **results
    )


if __name__ == '__main__':
    main()

@ -1,180 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: sts_assume_role
short_description: Assume a role using AWS Security Token Service and obtain temporary credentials
description:
- Assume a role using AWS Security Token Service and obtain temporary credentials.
version_added: "2.0"
author:
- Boris Ekelchik (@bekelchik)
- Marek Piatek (@piontas)
options:
role_arn:
description:
- The Amazon Resource Name (ARN) of the role that the caller is
assuming U(https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs).
required: true
type: str
role_session_name:
description:
- Name of the role's session - will be used by CloudTrail.
required: true
type: str
policy:
description:
- Supplemental policy to use in addition to assumed role's policies.
type: str
duration_seconds:
description:
- The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 43200 seconds (12 hours).
- The max depends on the IAM role's sessions duration setting.
- By default, the value is set to 3600 seconds.
type: int
external_id:
description:
- A unique identifier that is used by third parties to assume a role in their customers' accounts.
type: str
mfa_serial_number:
description:
- The identification number of the MFA device that is associated with the user who is making the AssumeRole call.
type: str
mfa_token:
description:
- The value provided by the MFA device, if the trust policy of the role being assumed requires MFA.
type: str
notes:
- In order to use the assumed role in a following playbook task you must pass the access_key, access_secret and access_token.
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto3
- botocore
- python >= 2.6
'''
RETURN = '''
sts_creds:
description: The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token
returned: always
type: dict
sample:
access_key: XXXXXXXXXXXXXXXXXXXX
expiration: 2017-11-11T11:11:11+00:00
secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
sts_user:
description: The Amazon Resource Name (ARN) and the assumed role ID
returned: always
type: dict
sample:
assumed_role_id: arn:aws:sts::123456789012:assumed-role/demo/Bob
arn: ARO123EXAMPLE123:Bob
changed:
description: True if obtaining the credentials succeeds
type: bool
returned: always
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Assume an existing role (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
- sts_assume_role:
role_arn: "arn:aws:iam::123456789012:role/someRole"
role_session_name: "someRoleSession"
register: assumed_role
# Use the assumed role above to tag an instance in account 123456789012
- ec2_tag:
aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
security_token: "{{ assumed_role.sts_creds.session_token }}"
resource: i-xyzxyz01
state: present
tags:
MyNewTag: value
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
try:
from botocore.exceptions import ClientError, ParamValidationError
except ImportError:
pass # caught by AnsibleAWSModule
def _parse_response(response):
    """Split an STS AssumeRole response into (credentials, user) dicts.

    Returns a tuple (sts_cred, sts_user): the temporary credentials re-keyed
    to snake_case, and the assumed-role user converted to snake_case.  Missing
    fields come back as None / empty.
    """
    credentials = response.get('Credentials', {})
    assumed_user = response.get('AssumedRoleUser', {})
    # Map our snake_case output keys to the CamelCase keys STS returns.
    key_map = (
        ('access_key', 'AccessKeyId'),
        ('secret_key', 'SecretAccessKey'),
        ('session_token', 'SessionToken'),
        ('expiration', 'Expiration'),
    )
    sts_cred = dict((ours, credentials.get(theirs)) for ours, theirs in key_map)
    return sts_cred, camel_dict_to_snake_dict(assumed_user)
def assume_role_policy(connection, module):
    """Call STS AssumeRole with the module's parameters and exit with the result.

    Parameters the user did not supply are omitted from the API call so boto3
    applies its own defaults.  Always exits the module - never returns.
    """
    # Map AssumeRole API parameter names to the module's option names.
    param_map = {
        'RoleArn': 'role_arn',
        'RoleSessionName': 'role_session_name',
        'Policy': 'policy',
        'DurationSeconds': 'duration_seconds',
        'ExternalId': 'external_id',
        'SerialNumber': 'mfa_serial_number',
        'TokenCode': 'mfa_token',
    }
    kwargs = {}
    for api_name, option_name in param_map.items():
        value = module.params.get(option_name)
        if value is not None:
            kwargs[api_name] = value

    changed = False
    try:
        response = connection.assume_role(**kwargs)
        changed = True
    except (ClientError, ParamValidationError) as e:
        module.fail_json_aws(e)

    sts_cred, sts_user = _parse_response(response)
    module.exit_json(changed=changed, sts_creds=sts_cred, sts_user=sts_user)
def main():
    """Module entry point for sts_assume_role: build the spec and run AssumeRole."""
    argument_spec = dict(
        role_arn=dict(required=True),
        role_session_name=dict(required=True),
        duration_seconds=dict(required=False, default=None, type='int'),
        external_id=dict(required=False, default=None),
        policy=dict(required=False, default=None),
        mfa_serial_number=dict(required=False, default=None),
        # NOTE(review): mfa_token is a one-time code; consider no_log=True.
        mfa_token=dict(required=False, default=None)
    )

    module = AnsibleAWSModule(argument_spec=argument_spec)

    connection = module.client('sts')

    assume_role_policy(connection, module)


if __name__ == '__main__':
    main()
Loading…
Cancel
Save