Add test case for k8s cascading deletes (#55987)

* Add test case for non-cascading deletes

Deleting a DaemonSet currently does not delete its associated pods,
even though it should (the delete should cascade)

* Add coverage module when using pip

Otherwise the tests fail, apparently because the coverage module is
missing from the test virtualenv
pull/57154/head
Will Thames 6 years ago committed by jctanner
parent 5008e1d479
commit ac1895453f

@ -1,15 +1,15 @@
recreate_crd_default_merge_expectation: recreate_crd is not failed recreate_crd_default_merge_expectation: recreate_crd is not failed
wait_pod_metadata: k8s_pod_metadata:
labels: labels:
app: "{{ wait_pod_name }}" app: "{{ k8s_pod_name }}"
wait_pod_spec: k8s_pod_spec:
containers: containers:
- image: "{{ wait_pod_image }}" - image: "{{ k8s_pod_image }}"
imagePullPolicy: Always imagePullPolicy: Always
name: "{{ wait_pod_name }}" name: "{{ k8s_pod_name }}"
command: "{{ wait_pod_command }}" command: "{{ k8s_pod_command }}"
readinessProbe: readinessProbe:
initialDelaySeconds: 15 initialDelaySeconds: 15
exec: exec:
@ -19,14 +19,14 @@ wait_pod_spec:
limits: limits:
cpu: "100m" cpu: "100m"
memory: "100Mi" memory: "100Mi"
ports: "{{ wait_pod_ports }}" ports: "{{ k8s_pod_ports }}"
wait_pod_command: [] k8s_pod_command: []
wait_pod_ports: [] k8s_pod_ports: []
wait_pod_template: k8s_pod_template:
metadata: "{{ wait_pod_metadata }}" metadata: "{{ k8s_pod_metadata }}"
spec: "{{ wait_pod_spec }}" spec: "{{ k8s_pod_spec }}"
k8s_openshift: yes k8s_openshift: yes

@ -0,0 +1,101 @@
- name: ensure that there are actually some nodes
k8s_facts:
kind: Node
register: nodes
- block:
- set_fact:
delete_namespace: delete
- name: ensure namespace exists
k8s:
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ delete_namespace }}"
- name: add a daemonset
k8s:
definition:
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: delete-daemonset
namespace: "{{ delete_namespace }}"
spec:
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
wait_timeout: 180
vars:
k8s_pod_name: delete-ds
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
register: ds
- name: check that daemonset wait worked
assert:
that:
- ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
- name: check if pods exist
k8s_facts:
namespace: "{{ delete_namespace }}"
kind: Pod
label_selectors:
- "app={{ k8s_pod_name }}"
vars:
k8s_pod_name: delete-ds
register: pods_create
- name: assert that there are pods
assert:
that:
- pods_create.resources
- name: remove the daemonset
k8s:
kind: DaemonSet
name: delete-daemonset
namespace: "{{ delete_namespace }}"
state: absent
wait: yes
- name: show status of pods
k8s_facts:
namespace: "{{ delete_namespace }}"
kind: Pod
label_selectors:
- "app={{ k8s_pod_name }}"
vars:
k8s_pod_name: delete-ds
- name: wait for background deletion
pause:
seconds: 30
- name: check if pods still exist
k8s_facts:
namespace: "{{ delete_namespace }}"
kind: Pod
label_selectors:
- "app={{ k8s_pod_name }}"
vars:
k8s_pod_name: delete-ds
register: pods_delete
- name: assert that deleting the daemonset deleted the pods
assert:
that:
- not pods_delete.resources
always:
- name: remove namespace
k8s:
kind: Namespace
name: "{{ delete_namespace }}"
state: absent
when: (nodes.resources | length) > 0

@ -4,6 +4,7 @@
# Kubernetes resources # Kubernetes resources
- include_tasks: delete.yml
- include_tasks: waiter.yml - include_tasks: waiter.yml
- block: - block:

@ -12,10 +12,11 @@
- pip: - pip:
name: name:
- openshift==0.8.1 - openshift==0.8.8
- coverage
virtualenv: "{{ virtualenv }}" virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}" virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: yes virtualenv_site_packages: no
- include_tasks: validate_not_installed.yml - include_tasks: validate_not_installed.yml
vars: vars:
@ -29,11 +30,12 @@
- pip: - pip:
name: name:
- openshift==0.8.1 - openshift==0.8.8
- kubernetes-validate==1.12.0 - kubernetes-validate==1.12.0
- coverage
virtualenv: "{{ virtualenv }}" virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}" virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: yes virtualenv_site_packages: no
- include_tasks: validate_installed.yml - include_tasks: validate_installed.yml
vars: vars:
@ -50,9 +52,10 @@
name: name:
- openshift==0.6.0 - openshift==0.6.0
- kubernetes==6.0.0 - kubernetes==6.0.0
- coverage
virtualenv: "{{ virtualenv }}" virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}" virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: yes virtualenv_site_packages: no
- include_tasks: older_openshift_fail.yml - include_tasks: older_openshift_fail.yml
vars: vars:
@ -68,10 +71,11 @@
- pip: - pip:
name: name:
- openshift==0.8.1 - openshift==0.8.8
- coverage
virtualenv: "{{ virtualenv }}" virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}" virtualenv_command: "{{ virtualenv_command }}"
virtualenv_site_packages: yes virtualenv_site_packages: no
- include_tasks: full_test.yml - include_tasks: full_test.yml
vars: vars:

@ -25,9 +25,6 @@
- "'Failed to import the required Python library (openshift >= 0.7.2)' in k8s_append_hash.msg" - "'Failed to import the required Python library (openshift >= 0.7.2)' in k8s_append_hash.msg"
- "'. This is required for append_hash.' in k8s_append_hash.msg" - "'. This is required for append_hash.' in k8s_append_hash.msg"
# merge_type
- include_tasks: crd.yml
# validate # validate
- name: attempt to use validate with older openshift - name: attempt to use validate with older openshift
k8s: k8s:

@ -1,3 +1,9 @@
- python_requirements_facts:
dependencies:
- openshift
- kubernetes
- kubernetes-validate
- k8s: - k8s:
definition: definition:
apiVersion: v1 apiVersion: v1

@ -6,6 +6,7 @@
- block: - block:
- set_fact: - set_fact:
wait_namespace: wait wait_namespace: wait
- name: ensure namespace exists - name: ensure namespace exists
k8s: k8s:
definition: definition:
@ -20,14 +21,14 @@
apiVersion: v1 apiVersion: v1
kind: Pod kind: Pod
metadata: metadata:
name: "{{ wait_pod_name }}" name: "{{ k8s_pod_name }}"
namespace: "{{ wait_namespace }}" namespace: "{{ wait_namespace }}"
spec: "{{ wait_pod_spec }}" spec: "{{ k8s_pod_spec }}"
wait: yes wait: yes
vars: vars:
wait_pod_name: wait-pod k8s_pod_name: wait-pod
wait_pod_image: alpine:3.8 k8s_pod_image: alpine:3.8
wait_pod_command: k8s_pod_command:
- sleep - sleep
- "10000" - "10000"
register: wait_pod register: wait_pod
@ -49,13 +50,13 @@
spec: spec:
selector: selector:
matchLabels: matchLabels:
app: "{{ wait_pod_name }}" app: "{{ k8s_pod_name }}"
template: "{{ wait_pod_template }}" template: "{{ k8s_pod_template }}"
wait: yes wait: yes
wait_timeout: 180 wait_timeout: 180
vars: vars:
wait_pod_name: wait-ds k8s_pod_name: wait-ds
wait_pod_image: gcr.io/kuar-demo/kuard-amd64:1 k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
register: ds register: ds
- name: check that daemonset wait worked - name: check that daemonset wait worked
@ -74,15 +75,15 @@
spec: spec:
selector: selector:
matchLabels: matchLabels:
app: "{{ wait_pod_name }}" app: "{{ k8s_pod_name }}"
updateStrategy: updateStrategy:
type: RollingUpdate type: RollingUpdate
template: "{{ wait_pod_template }}" template: "{{ k8s_pod_template }}"
wait: yes wait: yes
wait_timeout: 180 wait_timeout: 180
vars: vars:
wait_pod_name: wait-ds k8s_pod_name: wait-ds
wait_pod_image: gcr.io/kuar-demo/kuard-amd64:2 k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
register: update_ds_check_mode register: update_ds_check_mode
- name: check that check_mode returned changed - name: check that check_mode returned changed
@ -101,15 +102,15 @@
spec: spec:
selector: selector:
matchLabels: matchLabels:
app: "{{ wait_pod_name }}" app: "{{ k8s_pod_name }}"
updateStrategy: updateStrategy:
type: RollingUpdate type: RollingUpdate
template: "{{ wait_pod_template }}" template: "{{ k8s_pod_template }}"
wait: yes wait: yes
wait_timeout: 180 wait_timeout: 180
vars: vars:
wait_pod_name: wait-ds k8s_pod_name: wait-ds
wait_pod_image: gcr.io/kuar-demo/kuard-amd64:2 k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:3
register: ds register: ds
- name: get updated pods - name: get updated pods
@ -125,7 +126,7 @@
assert: assert:
that: that:
- ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled - ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
- updated_ds_pods.resources[0].spec.containers[0].image.endswith(":2") - updated_ds_pods.resources[0].spec.containers[0].image.endswith(":3")
- name: add a crashing pod - name: add a crashing pod
k8s: k8s:
@ -133,15 +134,15 @@
apiVersion: v1 apiVersion: v1
kind: Pod kind: Pod
metadata: metadata:
name: "{{ wait_pod_name }}" name: "{{ k8s_pod_name }}"
namespace: "{{ wait_namespace }}" namespace: "{{ wait_namespace }}"
spec: "{{ wait_pod_spec }}" spec: "{{ k8s_pod_spec }}"
wait: yes wait: yes
wait_timeout: 30 wait_timeout: 30
vars: vars:
wait_pod_name: wait-crash-pod k8s_pod_name: wait-crash-pod
wait_pod_image: alpine:3.8 k8s_pod_image: alpine:3.8
wait_pod_command: k8s_pod_command:
- /bin/false - /bin/false
register: crash_pod register: crash_pod
ignore_errors: yes ignore_errors: yes
@ -157,14 +158,14 @@
apiVersion: v1 apiVersion: v1
kind: Pod kind: Pod
metadata: metadata:
name: "{{ wait_pod_name }}" name: "{{ k8s_pod_name }}"
namespace: "{{ wait_namespace }}" namespace: "{{ wait_namespace }}"
spec: "{{ wait_pod_spec }}" spec: "{{ k8s_pod_spec }}"
wait: yes wait: yes
wait_timeout: 30 wait_timeout: 30
vars: vars:
wait_pod_name: wait-no-image-pod k8s_pod_name: wait-no-image-pod
wait_pod_image: i_made_this_up:and_this_too k8s_pod_image: i_made_this_up:and_this_too
register: no_image_pod register: no_image_pod
ignore_errors: yes ignore_errors: yes
@ -185,13 +186,13 @@
replicas: 3 replicas: 3
selector: selector:
matchLabels: matchLabels:
app: "{{ wait_pod_name }}" app: "{{ k8s_pod_name }}"
template: "{{ wait_pod_template }}" template: "{{ k8s_pod_template }}"
wait: yes wait: yes
vars: vars:
wait_pod_name: wait-deploy k8s_pod_name: wait-deploy
wait_pod_image: gcr.io/kuar-demo/kuard-amd64:1 k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
wait_pod_ports: k8s_pod_ports:
- containerPort: 8080 - containerPort: 8080
name: http name: http
protocol: TCP protocol: TCP
@ -215,13 +216,13 @@
replicas: 3 replicas: 3
selector: selector:
matchLabels: matchLabels:
app: "{{ wait_pod_name }}" app: "{{ k8s_pod_name }}"
template: "{{ wait_pod_template }}" template: "{{ k8s_pod_template }}"
wait: yes wait: yes
vars: vars:
wait_pod_name: wait-deploy k8s_pod_name: wait-deploy
wait_pod_image: gcr.io/kuar-demo/kuard-amd64:2 k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
wait_pod_ports: k8s_pod_ports:
- containerPort: 8080 - containerPort: 8080
name: http name: http
protocol: TCP protocol: TCP
@ -277,14 +278,14 @@
namespace: "{{ wait_namespace }}" namespace: "{{ wait_namespace }}"
spec: spec:
selector: selector:
app: "{{ wait_pod_name }}" app: "{{ k8s_pod_name }}"
ports: ports:
- port: 8080 - port: 8080
targetPort: 8080 targetPort: 8080
protocol: TCP protocol: TCP
wait: yes wait: yes
vars: vars:
wait_pod_name: wait-deploy k8s_pod_name: wait-deploy
register: service register: service
- name: assert that waiting for service works - name: assert that waiting for service works
@ -304,13 +305,13 @@
replicas: 3 replicas: 3
selector: selector:
matchLabels: matchLabels:
app: "{{ wait_pod_name }}" app: "{{ k8s_pod_name }}"
template: "{{ wait_pod_template }}" template: "{{ k8s_pod_template }}"
wait: yes wait: yes
vars: vars:
wait_pod_name: wait-crash-deploy k8s_pod_name: wait-crash-deploy
wait_pod_image: alpine:3.8 k8s_pod_image: alpine:3.8
wait_pod_command: k8s_pod_command:
- /bin/false - /bin/false
register: wait_crash_deploy register: wait_crash_deploy
ignore_errors: yes ignore_errors: yes

Loading…
Cancel
Save