Second batch of incidental integration tests. (#67765)

* Update incidental test aliases.

* Rewrite target references for renamed targets.

* Add incidental tests to CI.

* Update sanity tests for incidental cloud tests.

* Initial copy of incidental tests.

* Copy contrib files into test.

* Update paths in test.

* Add support plugins.

* Update plugin to work around missing deps.

* Update sanity ignores.

* Fix matrix entries.

* Remove debug echo.
pull/67772/head
Matt Clay 5 years ago committed by GitHub
parent a19ae28326
commit e3591223a0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -165,6 +165,9 @@ matrix:
- env: T=aws/2.7/4
- env: T=aws/3.6/4
- env: T=i/aws/2.7/1
- env: T=i/aws/3.6/1
- env: T=azure/2.7/1
- env: T=azure/3.6/1
@ -198,29 +201,42 @@ matrix:
- env: T=azure/2.7/11
- env: T=azure/3.6/11
- env: T=i/azure/2.7/1
- env: T=i/azure/3.6/1
- env: T=vcenter/2.7/1
- env: T=vcenter/3.6/1
- env: T=vcenter/2.7/2
- env: T=vcenter/3.6/2
- env: T=i/vcenter//1
- env: T=cs/2.7/1
- env: T=cs/3.6/1
- env: T=cs/2.7/2
- env: T=cs/3.6/2
- env: T=i/cs//1
- env: T=tower/2.7/1
- env: T=tower/3.6/1
- env: T=i/tower//1
- env: T=cloud/2.7/1
- env: T=cloud/3.6/1
- env: T=i/cloud//1
- env: T=hcloud/2.7/1
- env: T=hcloud/3.6/1
- env: T=hcloud/2.7/2
- env: T=hcloud/3.6/2
- env: T=i/hcloud//1
branches:
except:
- "*-patch-*"

@ -0,0 +1,2 @@
cloud/aws
shippable/aws/incidental

@ -0,0 +1,10 @@
---
# defaults file for aws_codebuild
# IAM role names have to be less than 64 characters
# The 8 digit identifier at the end of resource_prefix helps determine during
# which test something was created and allows tests to be run in parallel
# Shippable resource_prefixes are in the format shippable-123456-123, so in those cases
# we need both sets of digits to keep the resource name unique
unique_id: "{{ resource_prefix | regex_search('(\\d+-?)(\\d+)$') }}"
# Role name stays under the 64-character IAM limit because unique_id keeps
# only the trailing digits of resource_prefix (see note above).
iam_role_name: "ansible-test-sts-{{ unique_id }}-codebuild-service-role"

@ -0,0 +1,12 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "codebuild.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}

@ -0,0 +1,119 @@
---
# tasks file for aws_codebuild
- name: Run aws_codebuild integration tests.
block:
# ==================== preparations ========================================
- name: set connection information for all tasks
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region }}"
no_log: yes
- name: create IAM role needed for CodeBuild
iam_role:
name: "{{ iam_role_name }}"
description: Role with permissions for CodeBuild actions.
assume_role_policy_document: "{{ lookup('file', 'codebuild_iam_trust_policy.json') }}"
state: present
<<: *aws_connection_info
register: codebuild_iam_role
- name: Set variable with aws account id
set_fact:
aws_account_id: "{{ codebuild_iam_role.iam_role.arn.split(':')[4] }}"
# ================== integration test ==========================================
- name: create CodeBuild project
aws_codebuild:
name: "{{ resource_prefix }}-test-ansible-codebuild"
description: Build project for testing the Ansible aws_codebuild module
service_role: "{{ codebuild_iam_role.iam_role.arn }}"
timeout_in_minutes: 30
source:
type: CODEPIPELINE
buildspec: ''
artifacts:
namespace_type: NONE
packaging: NONE
type: CODEPIPELINE
name: test
environment:
compute_type: BUILD_GENERAL1_SMALL
privileged_mode: true
image: 'aws/codebuild/docker:17.09.0'
type: LINUX_CONTAINER
environment_variables:
- { name: 'FOO_ENV', value: 'other' }
tags:
- { key: 'purpose', value: 'ansible-test' }
state: present
<<: *aws_connection_info
register: output
retries: 10
delay: 5
until: output is success
- assert:
that:
- "output.project.description == 'Build project for testing the Ansible aws_codebuild module'"
- name: idempotence check rerunning same Codebuild task
aws_codebuild:
name: "{{ resource_prefix }}-test-ansible-codebuild"
description: Build project for testing the Ansible aws_codebuild module
service_role: "{{ codebuild_iam_role.iam_role.arn }}"
timeout_in_minutes: 30
source:
type: CODEPIPELINE
buildspec: ''
artifacts:
namespace_type: NONE
packaging: NONE
type: CODEPIPELINE
name: test
encryption_key: 'arn:aws:kms:{{ aws_region }}:{{ aws_account_id }}:alias/aws/s3'
environment:
compute_type: BUILD_GENERAL1_SMALL
privileged_mode: true
image: 'aws/codebuild/docker:17.09.0'
type: LINUX_CONTAINER
environment_variables:
- { name: 'FOO_ENV', value: 'other' }
tags:
- { key: 'purpose', value: 'ansible-test' }
state: present
<<: *aws_connection_info
register: rerun_test_output
- assert:
that:
- "rerun_test_output.project.created == output.project.created"
- name: delete CodeBuild project
aws_codebuild:
name: "{{ output.project.name }}"
source:
type: CODEPIPELINE
buildspec: ''
artifacts: {}
state: absent
<<: *aws_connection_info
async: 300
# ============================== cleanup ======================================
always:
- name: cleanup IAM role created for CodeBuild test
iam_role:
name: "{{ iam_role_name }}"
state: absent
<<: *aws_connection_info

@ -0,0 +1,4 @@
# the random_num is generated in a set_fact task at the start of the testsuite
state_machine_name: "{{ resource_prefix }}_step_functions_state_machine_ansible_test_{{ random_num }}"
# NOTE(review): IAM role names are limited to 64 characters; this embeds the
# full resource_prefix (unlike the codebuild role, which trims it) — assumes
# the CI prefix stays short enough, TODO confirm.
step_functions_role_name: "ansible-test-sts-{{ resource_prefix }}-step_functions-role"
# Name given to the state machine execution started during the test.
execution_name: "{{ resource_prefix }}_sfn_execution"

@ -0,0 +1,15 @@
{
"StartAt": "HelloWorld",
"States": {
"HelloWorld": {
"Type": "Pass",
"Result": "Some other result",
"Next": "Wait"
},
"Wait": {
"Type": "Wait",
"Seconds": 30,
"End": true
}
}
}

@ -0,0 +1,10 @@
{
"StartAt": "HelloWorld",
"States": {
"HelloWorld": {
"Type": "Pass",
"Result": "Hello World!",
"End": true
}
}
}

@ -0,0 +1,12 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "states.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}

@ -0,0 +1,300 @@
---
- name: Integration test for AWS Step Function state machine module
module_defaults:
group/aws:
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token | default(omit) }}"
region: "{{ aws_region }}"
block:
# ==== Setup ==================================================
- name: Create IAM service role needed for Step Functions
iam_role:
name: "{{ step_functions_role_name }}"
description: Role with permissions for AWS Step Functions actions.
assume_role_policy_document: "{{ lookup('file', 'state_machines_iam_trust_policy.json') }}"
state: present
register: step_functions_role
- name: Pause a few seconds to ensure IAM role is available to next task
pause:
seconds: 10
# ==== Tests ===================================================
- name: Create a random component for state machine name
set_fact:
random_num: "{{ 999999999 | random }}"
- name: Create a new state machine -- check_mode
aws_step_functions_state_machine:
name: "{{ state_machine_name }}"
definition: "{{ lookup('file','state_machine.json') }}"
role_arn: "{{ step_functions_role.iam_role.arn }}"
tags:
project: helloWorld
state: present
register: creation_check
check_mode: yes
- assert:
that:
- creation_check.changed == True
- creation_check.output == 'State machine would be created.'
- name: Create a new state machine
aws_step_functions_state_machine:
name: "{{ state_machine_name }}"
definition: "{{ lookup('file','state_machine.json') }}"
role_arn: "{{ step_functions_role.iam_role.arn }}"
tags:
project: helloWorld
state: present
register: creation_output
- assert:
that:
- creation_output.changed == True
- name: Pause a few seconds to ensure state machine role is available
pause:
seconds: 5
- name: Idempotent rerun of same state function -- check_mode
aws_step_functions_state_machine:
name: "{{ state_machine_name }}"
definition: "{{ lookup('file','state_machine.json') }}"
role_arn: "{{ step_functions_role.iam_role.arn }}"
tags:
project: helloWorld
state: present
register: result
check_mode: yes
- assert:
that:
- result.changed == False
- result.output == 'State is up-to-date.'
- name: Idempotent rerun of same state function
aws_step_functions_state_machine:
name: "{{ state_machine_name }}"
definition: "{{ lookup('file','state_machine.json') }}"
role_arn: "{{ step_functions_role.iam_role.arn }}"
tags:
project: helloWorld
state: present
register: result
- assert:
that:
- result.changed == False
- name: Update an existing state machine -- check_mode
aws_step_functions_state_machine:
name: "{{ state_machine_name }}"
definition: "{{ lookup('file','alternative_state_machine.json') }}"
role_arn: "{{ step_functions_role.iam_role.arn }}"
tags:
differentTag: different_tag
state: present
register: update_check
check_mode: yes
- assert:
that:
- update_check.changed == True
- "update_check.output == 'State machine would be updated: {{ creation_output.state_machine_arn }}'"
- name: Update an existing state machine
aws_step_functions_state_machine:
name: "{{ state_machine_name }}"
definition: "{{ lookup('file','alternative_state_machine.json') }}"
role_arn: "{{ step_functions_role.iam_role.arn }}"
tags:
differentTag: different_tag
state: present
register: update_output
- assert:
that:
- update_output.changed == True
- update_output.state_machine_arn == creation_output.state_machine_arn
- name: Start execution of state machine -- check_mode
aws_step_functions_state_machine_execution:
name: "{{ execution_name }}"
execution_input: "{}"
state_machine_arn: "{{ creation_output.state_machine_arn }}"
register: start_execution_output
check_mode: yes
- assert:
that:
- start_execution_output.changed == True
- "start_execution_output.output == 'State machine execution would be started.'"
- name: Start execution of state machine
aws_step_functions_state_machine_execution:
name: "{{ execution_name }}"
execution_input: "{}"
state_machine_arn: "{{ creation_output.state_machine_arn }}"
register: start_execution_output
- assert:
that:
- start_execution_output.changed
- "'execution_arn' in start_execution_output"
- "'start_date' in start_execution_output"
- name: Start execution of state machine (check for idempotency) (check mode)
aws_step_functions_state_machine_execution:
name: "{{ execution_name }}"
execution_input: "{}"
state_machine_arn: "{{ creation_output.state_machine_arn }}"
register: start_execution_output_idem_check
check_mode: yes
- assert:
that:
- not start_execution_output_idem_check.changed
- "start_execution_output_idem_check.output == 'State machine execution already exists.'"
- name: Start execution of state machine (check for idempotency)
aws_step_functions_state_machine_execution:
name: "{{ execution_name }}"
execution_input: "{}"
state_machine_arn: "{{ creation_output.state_machine_arn }}"
register: start_execution_output_idem
- assert:
that:
- not start_execution_output_idem.changed
- name: Stop execution of state machine -- check_mode
aws_step_functions_state_machine_execution:
action: stop
execution_arn: "{{ start_execution_output.execution_arn }}"
cause: "cause of the failure"
error: "error code of the failure"
register: stop_execution_output
check_mode: yes
- assert:
that:
- stop_execution_output.changed
- "stop_execution_output.output == 'State machine execution would be stopped.'"
- name: Stop execution of state machine
aws_step_functions_state_machine_execution:
action: stop
execution_arn: "{{ start_execution_output.execution_arn }}"
cause: "cause of the failure"
error: "error code of the failure"
register: stop_execution_output
- assert:
that:
- stop_execution_output.changed
- "'stop_date' in stop_execution_output"
- name: Stop execution of state machine (check for idempotency)
aws_step_functions_state_machine_execution:
action: stop
execution_arn: "{{ start_execution_output.execution_arn }}"
cause: "cause of the failure"
error: "error code of the failure"
register: stop_execution_output
- assert:
that:
- not stop_execution_output.changed
- name: Try stopping a non-running execution -- check_mode
aws_step_functions_state_machine_execution:
action: stop
execution_arn: "{{ start_execution_output.execution_arn }}"
cause: "cause of the failure"
error: "error code of the failure"
register: stop_execution_output
check_mode: yes
- assert:
that:
- not stop_execution_output.changed
- "stop_execution_output.output == 'State machine execution is not running.'"
# The execution was already stopped above, so actually re-issuing the stop
# must report no change.  NOTE(review): this task previously carried a stray
# 'check_mode: yes' (copy/paste from the task above), so the real,
# non-check-mode idempotency path was never exercised.
- name: Try stopping a non-running execution
  aws_step_functions_state_machine_execution:
    action: stop
    execution_arn: "{{ start_execution_output.execution_arn }}"
    cause: "cause of the failure"
    error: "error code of the failure"
  register: stop_execution_output
- assert:
    that:
      - not stop_execution_output.changed
- name: Start execution of state machine with the same execution name
aws_step_functions_state_machine_execution:
name: "{{ execution_name }}"
state_machine_arn: "{{ creation_output.state_machine_arn }}"
register: start_execution_output_again
- assert:
that:
- not start_execution_output_again.changed
- name: Remove state machine -- check_mode
aws_step_functions_state_machine:
name: "{{ state_machine_name }}"
state: absent
register: deletion_check
check_mode: yes
- assert:
that:
- deletion_check.changed == True
- "deletion_check.output == 'State machine would be deleted: {{ creation_output.state_machine_arn }}'"
- name: Remove state machine
aws_step_functions_state_machine:
name: "{{ state_machine_name }}"
state: absent
register: deletion_output
- assert:
that:
- deletion_output.changed == True
- deletion_output.state_machine_arn == creation_output.state_machine_arn
- name: Non-existent state machine is absent
aws_step_functions_state_machine:
name: "non_existing_state_machine"
state: absent
register: result
- assert:
that:
- result.changed == False
# ==== Cleanup ====================================================
always:
- name: Cleanup - delete state machine
aws_step_functions_state_machine:
name: "{{ state_machine_name }}"
state: absent
ignore_errors: true
- name: Cleanup - delete IAM role needed for Step Functions test
iam_role:
name: "{{ step_functions_role_name }}"
state: absent
ignore_errors: true

@ -0,0 +1,3 @@
cloud/azure
shippable/azure/incidental
destructive

@ -0,0 +1,131 @@
# Derive a short, unique prefix so generated Azure resource names stay within
# provider length limits and do not collide across parallel test runs.
- name: Fix resource prefix
  set_fact:
    fixed_resource_prefix: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
# NOTE(review): this task was previously also named "Fix resource prefix",
# making the two steps indistinguishable in run output.
- name: Set resource name facts derived from the fixed prefix
  set_fact:
    funcapp_name_basic: "fa{{ fixed_resource_prefix }}basic"
    funcapp_name_container: "fa{{ fixed_resource_prefix }}container"
    funcapp_name_params: "fa{{ fixed_resource_prefix }}params"
    storage_account_name: "sa{{ fixed_resource_prefix }}"
    plan_name: "ap{{ fixed_resource_prefix }}"
- name: create storage account for function apps
azure_rm_storageaccount:
resource_group: '{{ resource_group }}'
name: "{{ storage_account_name }}"
account_type: Standard_LRS
- name: create basic function app
azure_rm_functionapp:
resource_group: "{{ resource_group }}"
name: "{{ funcapp_name_basic }}"
storage_account: "{{ storage_account_name }}"
register: output
- name: assert the function was created
assert:
that: output.changed
- name: list facts for function
azure_rm_functionapp_info:
resource_group: '{{ resource_group }}'
name: "{{ funcapp_name_basic }}"
register: results
- name: assert the facts were retrieved
assert:
that:
- results.ansible_info.azure_functionapps|length == 1
- results.ansible_info.azure_functionapps[0].name == "{{ funcapp_name_basic }}"
- name: delete basic function app
azure_rm_functionapp:
resource_group: '{{ resource_group }}'
name: "{{ funcapp_name_basic }}"
state: absent
register: output
- name: assert the function was deleted
assert:
that: output.changed
- name: create a function with app settings
azure_rm_functionapp:
resource_group: '{{ resource_group }}'
name: "{{ funcapp_name_params }}"
storage_account: "{{ storage_account_name }}"
app_settings:
hello: world
things: more stuff
FUNCTIONS_EXTENSION_VERSION: "~2"
register: output
- name: assert the function with app settings was created
assert:
that: output.changed
- name: change app settings
azure_rm_functionapp:
resource_group: '{{ resource_group }}'
name: "{{ funcapp_name_params }}"
storage_account: "{{ storage_account_name }}"
app_settings:
hello: world
things: more stuff
FUNCTIONS_EXTENSION_VERSION: "~2"
another: one
register: output
- name: assert the function was changed
assert:
that: output.changed
- name: delete the function app
azure_rm_functionapp:
resource_group: '{{ resource_group }}'
name: "{{ funcapp_name_params }}"
state: absent
register: output
- name: assert the function was deleted
assert:
that: output.changed
- name: Create a linux app service plan
azure_rm_appserviceplan:
resource_group: "{{ resource_group }}"
name: "{{ plan_name }}"
sku: S1
is_linux: true
number_of_workers: 1
# NOTE(review): the task name previously interpolated '{{ function_app }}',
# a variable never defined in this file; use the fact that actually names
# the app being created.
- name: "Create azure function app {{ funcapp_name_container }}"
  azure_rm_functionapp:
    resource_group: "{{ resource_group }}"
    name: "{{ funcapp_name_container }}"
    storage_account: "{{ storage_account_name }}"
    # Run on the linux app service plan created in the previous task.
    plan:
      resource_group: "{{ resource_group }}"
      name: "{{ plan_name }}"
    container_settings:
      name: httpd
    app_settings:
      FUNCTIONS_EXTENSION_VERSION: "~2"
  register: output
- name: assert the function app was created
  assert:
    that: output.changed
- name: delete the function app
azure_rm_functionapp:
resource_group: '{{ resource_group }}'
name: "{{ funcapp_name_container }}"
state: absent
- name: delete storage account
azure_rm_storageaccount:
resource_group: '{{ resource_group }}'
name: "{{ storage_account_name }}"
state: absent

@ -0,0 +1,3 @@
cloud/azure
destructive
shippable/azure/incidental

@ -0,0 +1,640 @@
- name: Prepare random number
set_fact:
rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
run_once: yes
- name: Create instance of MariaDB Server -- check mode
azure_rm_mariadbserver:
resource_group: "{{ resource_group }}"
name: mariadbsrv{{ rpfx }}
sku:
name: B_Gen5_1
tier: Basic
location: westus2
storage_mb: 51200
version: 10.2
enforce_ssl: True
admin_username: zimxyz
admin_password: Testpasswordxyz12!
check_mode: yes
register: output
- name: Assert the resource instance is well created
assert:
that:
- output.changed
- name: Create instance of MariaDB Server
azure_rm_mariadbserver:
resource_group: "{{ resource_group }}"
name: mariadbsrv{{ rpfx }}
sku:
name: B_Gen5_1
tier: Basic
location: westus2
storage_mb: 51200
version: 10.2
enforce_ssl: True
admin_username: zimxyz
admin_password: Testpasswordxyz12!
register: output
- name: Assert the resource instance is well created
assert:
that:
- output.changed
- output.state == 'Ready'
- name: Create again instance of MariaDB Server
azure_rm_mariadbserver:
resource_group: "{{ resource_group }}"
name: mariadbsrv{{ rpfx }}
sku:
name: B_Gen5_1
tier: Basic
location: westus2
storage_mb: 51200
version: 10.2
enforce_ssl: True
admin_username: zimxyz
admin_password: Testpasswordxyz12!
register: output
- name: Assert the state has not changed
assert:
that:
- output.changed == false
- output.state == 'Ready'
- name: Update instance of MariaDB Server, change storage size
azure_rm_mariadbserver:
resource_group: "{{ resource_group }}"
name: mariadbsrv{{ rpfx }}
sku:
name: B_Gen5_1
tier: Basic
location: westus2
storage_mb: 128000
version: 10.2
enforce_ssl: True
admin_username: zimxyz
admin_password: Testpasswordxyz12!
register: output
- name: Assert the state has not changed
assert:
that:
- output.changed
- output.state == 'Ready'
- debug:
var: output
- name: Gather facts MariaDB Server
azure_rm_mariadbserver_facts:
resource_group: "{{ resource_group }}"
name: mariadbsrv{{ rpfx }}
register: output
- name: Assert that storage size is correct
assert:
that:
- output.servers[0]['storage_mb'] == 128000
- name: Create second instance of MariaDB Server
azure_rm_mariadbserver:
resource_group: "{{ resource_group }}"
name: mariadbsrv{{ rpfx }}second
sku:
name: B_Gen5_1
tier: Basic
location: westus2
storage_mb: 51200
version: 10.2
enforce_ssl: True
admin_username: zimxyz
admin_password: Testpasswordxyz12!
tags:
aaa: bbb
- name: Create second instance of MariaDB Server
azure_rm_mariadbserver:
resource_group: "{{ resource_group }}"
name: mariadbsrv{{ rpfx }}second
sku:
name: B_Gen5_1
tier: Basic
location: westus2
storage_mb: 51200
version: 10.2
enforce_ssl: True
admin_username: zimxyz
admin_password: Testpasswordxyz12!
tags:
ccc: ddd
- name: Gather facts MariaDB Server
azure_rm_mariadbserver_facts:
resource_group: "{{ resource_group }}"
name: mariadbsrv{{ rpfx }}second
register: output
- name: Assert that facts are returned
assert:
that:
- output.changed == False
- output.servers[0]['id'] != None
- output.servers[0]['name'] != None
- output.servers[0]['location'] != None
- output.servers[0]['sku']['name'] != None
- output.servers[0]['sku']['tier'] != None
- output.servers[0]['sku']['capacity'] != None
- output.servers[0]['version'] != None
- output.servers[0]['user_visible_state'] != None
- output.servers[0]['fully_qualified_domain_name'] != None
- output.servers[0]['tags']['aaa'] == 'bbb'
- output.servers[0]['tags']['ccc'] == 'ddd'
- name: Gather facts MariaDB Server
azure_rm_mariadbserver_facts:
resource_group: "{{ resource_group }}"
register: output
- name: Assert that facts are returned
assert:
that:
- output.changed == False
- output.servers[0]['id'] != None
- output.servers[0]['name'] != None
- output.servers[0]['location'] != None
- output.servers[0]['sku']['name'] != None
- output.servers[0]['sku']['tier'] != None
- output.servers[0]['sku']['capacity'] != None
- output.servers[0]['version'] != None
- output.servers[0]['user_visible_state'] != None
- output.servers[0]['fully_qualified_domain_name'] != None
- output.servers[1]['id'] != None
- output.servers[1]['name'] != None
- output.servers[1]['location'] != None
- output.servers[1]['sku']['name'] != None
- output.servers[1]['sku']['tier'] != None
- output.servers[1]['sku']['capacity'] != None
- output.servers[1]['version'] != None
- output.servers[1]['user_visible_state'] != None
- output.servers[1]['fully_qualified_domain_name'] != None
#
# azure_rm_mariadbdatabase tests below
#
- name: Create instance of MariaDB Database -- check mode
azure_rm_mariadbdatabase:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: testdatabase
check_mode: yes
register: output
- name: Assert the resource instance is well created
assert:
that:
- output.changed
- name: Create instance of MariaDB Database
azure_rm_mariadbdatabase:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: testdatabase
collation: latin1_swedish_ci
charset: latin1
register: output
- name: Assert the resource instance is well created
assert:
that:
- output.changed
- output.name == 'testdatabase'
- name: Create again instance of MariaDB Database
azure_rm_mariadbdatabase:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: testdatabase
collation: latin1_swedish_ci
charset: latin1
register: output
- name: Assert the state has not changed
assert:
that:
- output.changed == false
- output.name == 'testdatabase'
- name: Try to update database without force_update
azure_rm_mariadbdatabase:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: testdatabase
collation: latin1_czech_ci
charset: latin1
ignore_errors: yes
register: output
- name: Assert that nothing has changed
assert:
that:
- output.changed == False
- name: Update instance of database using force_update
azure_rm_mariadbdatabase:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: testdatabase
collation: latin1_czech_ci
charset: latin1
force_update: yes
register: output
- name: Assert the state has changed
assert:
that:
- output.changed
- output.name == 'testdatabase'
- name: Create second instance of MariaDB Database
azure_rm_mariadbdatabase:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: testdatabase2
- name: Gather facts MariaDB Database
azure_rm_mariadbdatabase_facts:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: testdatabase
register: output
- name: Assert that facts are returned
assert:
that:
- output.changed == False
- output.databases[0]['server_name'] != None
- output.databases[0]['name'] != None
- output.databases[0]['charset'] != None
- output.databases[0]['collation'] != None
- name: Gather facts MariaDB Database
azure_rm_mariadbdatabase_facts:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
register: output
- name: Assert that facts are returned
assert:
that:
- output.changed == False
- output.databases[0]['server_name'] != None
- output.databases[0]['name'] != None
- output.databases[0]['charset'] != None
- output.databases[0]['collation'] != None
- output.databases[1]['server_name'] != None
- output.databases[1]['name'] != None
- output.databases[1]['charset'] != None
- output.databases[1]['collation'] != None
- name: Delete instance of MariaDB Database -- check mode
azure_rm_mariadbdatabase:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: testdatabase
state: absent
check_mode: yes
register: output
- name: Assert the state has changed
assert:
that:
- output.changed
- name: Delete instance of MariaDB Database
azure_rm_mariadbdatabase:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: testdatabase
state: absent
register: output
- name: Assert the state has changed
assert:
that:
- output.changed
- name: Delete unexisting instance of MariaDB Database
azure_rm_mariadbdatabase:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: testdatabase
state: absent
register: output
- name: Assert the state has changed
assert:
that:
- output.changed == false
#
# azure_rm_firewallrule tests below
#
- name: Create instance of Firewall Rule -- check mode
azure_rm_mariadbfirewallrule:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: firewallrule{{ rpfx }}
start_ip_address: 172.28.10.136
end_ip_address: 172.28.10.138
check_mode: yes
register: output
- name: Assert the resource instance is well created
assert:
that:
- output.changed
- name: Create instance of Firewall Rule
azure_rm_mariadbfirewallrule:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: firewallrule{{ rpfx }}
start_ip_address: 172.28.10.136
end_ip_address: 172.28.10.138
register: output
- name: Assert the resource instance is well created
assert:
that:
- output.changed
- name: Create again instance of Firewall Rule
azure_rm_mariadbfirewallrule:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: firewallrule{{ rpfx }}
start_ip_address: 172.28.10.136
end_ip_address: 172.28.10.138
register: output
- name: Assert the state has not changed
assert:
that:
- output.changed == false
- name: Delete instance of Firewall Rule -- check mode
azure_rm_mariadbfirewallrule:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: firewallrule{{ rpfx }}
state: absent
check_mode: yes
register: output
- name: Assert the state has changed
assert:
that:
- output.changed
- name: Create instance of Firewall Rule -- second
azure_rm_mariadbfirewallrule:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: firewallrule{{ rpfx }}second
start_ip_address: 172.28.10.136
end_ip_address: 172.28.10.138
register: output
- name: Assert the state has changed
assert:
that:
- output.changed
- name: Gather facts MariaDB Firewall Rule
azure_rm_mariadbfirewallrule_facts:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: firewallrule{{ rpfx }}
register: output
- name: Assert that facts are returned
assert:
that:
- output.changed == False
- output.rules[0].id != None
- output.rules[0].server_name != None
- output.rules[0].name != None
- output.rules[0].start_ip_address != None
- output.rules[0].end_ip_address != None
- "output.rules | length == 1"
# List all firewall rules on the server (no name given) — both rules created
# above must be returned with fully populated fields.
- name: Gather facts MariaDB Firewall Rule
  azure_rm_mariadbfirewallrule_facts:
    resource_group: "{{ resource_group }}"
    server_name: mariadbsrv{{ rpfx }}
  register: output
- name: Assert that facts are returned
  assert:
    that:
      - output.changed == False
      - output.rules[0].id != None
      - output.rules[0].server_name != None
      - output.rules[0].name != None
      - output.rules[0].start_ip_address != None
      - output.rules[0].end_ip_address != None
      - output.rules[1].id != None
      # NOTE(review): the server_name check below was previously missing for
      # the second rule, unlike the parallel checks on rules[0].
      - output.rules[1].server_name != None
      - output.rules[1].name != None
      - output.rules[1].start_ip_address != None
      - output.rules[1].end_ip_address != None
      - "output.rules | length == 2"
- name: Delete instance of Firewall Rule
azure_rm_mariadbfirewallrule:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: firewallrule{{ rpfx }}
state: absent
register: output
- name: Assert the state has changed
assert:
that:
- output.changed
- name: Delete unexisting instance of Firewall Rule
azure_rm_mariadbfirewallrule:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: firewallrule{{ rpfx }}
state: absent
register: output
- name: Assert the state has changed
assert:
that:
- output.changed == false
- name: Delete instance of Firewall Rule - second
azure_rm_mariadbfirewallrule:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: firewallrule{{ rpfx }}second
state: absent
- name: Gather facts MariaDB Firewall Rule
azure_rm_mariadbfirewallrule_facts:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: firewallrule{{ rpfx }}
register: output
- name: Assert that empty list was returned
assert:
that:
- output.changed == False
- "output.rules | length == 0"
#
# configuration
#
- name: Create instance of Configuration -- check mode
azure_rm_mariadbconfiguration:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: event_scheduler
value: "ON"
check_mode: yes
register: output
- name: Assert that change was registered
assert:
that:
- output.changed
# NOTE(review): the first task below gathers facts but was previously named
# "Try to delete default configuraion" (mis-copied from the delete task);
# the repeated "configuraion" typos are also corrected.
- name: Gather facts of default event_scheduler configuration
  azure_rm_mariadbconfiguration_facts:
    resource_group: "{{ resource_group }}"
    server_name: mariadbsrv{{ rpfx }}
    name: event_scheduler
  register: output
- name: Get facts of event_scheduler
  debug:
    var: output
# Deleting (resetting) a setting already at its default must be a no-op.
- name: Try to delete default configuration
  azure_rm_mariadbconfiguration:
    resource_group: "{{ resource_group }}"
    server_name: mariadbsrv{{ rpfx }}
    name: event_scheduler
    state: absent
  register: output
- name: Assert that resetting an already-default value reports no change
  assert:
    that:
      - not output.changed
- name: Try to change default configuration
  azure_rm_mariadbconfiguration:
    resource_group: "{{ resource_group }}"
    server_name: mariadbsrv{{ rpfx }}
    name: event_scheduler
    value: "ON"
  register: output
- name: Assert that change was registered
  assert:
    that:
      - output.changed
- name: Try to change default configuration -- idempotent
azure_rm_mariadbconfiguration:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: event_scheduler
value: "ON"
register: output
- name: Assert that change was registered
assert:
that:
- not output.changed
- name: Try to reset configuration
azure_rm_mariadbconfiguration:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: event_scheduler
state: absent
register: output
- name: Assert that change was registered
assert:
that:
- output.changed
- name: Try to reset configuration -- idempotent
azure_rm_mariadbconfiguration:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: event_scheduler
state: absent
register: output
- name: Assert that change was registered
assert:
that:
- not output.changed
- name: Gather facts MariaDB Configuration
azure_rm_mariadbconfiguration_facts:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
name: event_scheduler
register: output
- name: Assert that facts are returned
assert:
that:
- output.changed == False
- output.settings[0].id != None
- output.settings[0].name != None
- output.settings[0].value != None
- output.settings[0].description != None
- output.settings[0].source != None
- output.settings | length == 1
- name: Gather facts MariaDB Configuration
azure_rm_mariadbconfiguration_facts:
resource_group: "{{ resource_group }}"
server_name: mariadbsrv{{ rpfx }}
register: output
- name: Assert that facts are returned
assert:
that:
- output.changed == False
- output.settings[0].id != None
- output.settings[0].name != None
- output.settings[0].value != None
- output.settings[0].description != None
- output.settings[0].source != None
- output.settings | length > 1
#
# clean up azure_rm_mariadbserver test
#
- name: Delete instance of MariaDB Server -- check mode
azure_rm_mariadbserver:
resource_group: "{{ resource_group }}"
name: mariadbsrv{{ rpfx }}
state: absent
check_mode: yes
register: output
- name: Assert the state has changed
assert:
that:
- output.changed
- name: Delete instance of MariaDB Server
azure_rm_mariadbserver:
resource_group: "{{ resource_group }}"
name: mariadbsrv{{ rpfx }}
state: absent
register: output
- name: Assert the state has changed
assert:
that:
- output.changed
- name: Delete unexisting instance of MariaDB Server
azure_rm_mariadbserver:
resource_group: "{{ resource_group }}"
name: mariadbsrv{{ rpfx }}
state: absent
register: output
- name: Assert the state has changed
assert:
that:
- output.changed == false
- name: Delete second instance of MariaDB Server
azure_rm_mariadbserver:
resource_group: "{{ resource_group }}"
name: mariadbsrv{{ rpfx }}second
state: absent
async: 400
poll: 0

@ -0,0 +1,3 @@
cloud/azure
destructive
shippable/azure/incidental

@ -0,0 +1,158 @@
- name: Prepare random number
set_fact:
nsgname: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
storageaccountname: "stacc{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
dbname: "mdb{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
run_once: yes
- name: Call REST API
azure_rm_resource:
api_version: '2018-02-01'
resource_group: "{{ resource_group }}"
provider: network
resource_type: networksecuritygroups
resource_name: "{{ nsgname }}"
body:
location: eastus
idempotency: yes
register: output
- name: Assert that something has changed
assert:
that: output.changed
- name: Call REST API
azure_rm_resource:
api_version: '2018-02-01'
resource_group: "{{ resource_group }}"
provider: network
resource_type: networksecuritygroups
resource_name: "{{ nsgname }}"
body:
location: eastus
idempotency: yes
register: output
- name: Assert that nothing has changed
assert:
that: not output.changed
- name: Call REST API
azure_rm_resource:
api_version: '2018-02-01'
resource_group: "{{ resource_group }}"
provider: network
resource_type: networksecuritygroups
resource_name: "{{ nsgname }}"
body:
location: eastus
tags:
a: "abc"
b: "cde"
idempotency: yes
register: output
- name: Assert that something has changed
assert:
that: output.changed
- name: Try to get information about account
azure_rm_resource_facts:
api_version: '2018-02-01'
resource_group: "{{ resource_group }}"
provider: network
resource_type: networksecuritygroups
resource_name: "{{ nsgname }}"
register: output
- name: Assert value was returned
assert:
that:
- not output.changed
- output.response[0]['name'] != None
- output.response | length == 1
- name: Try to query a list
azure_rm_resource_facts:
api_version: '2018-02-01'
resource_group: "{{ resource_group }}"
provider: network
resource_type: networksecuritygroups
register: output
- name: Assert value was returned
assert:
that:
- not output.changed
- output.response[0]['name'] != None
- output.response | length >= 1
- name: Try to query a list - same without API version
azure_rm_resource_facts:
resource_group: "{{ resource_group }}"
provider: network
resource_type: networksecuritygroups
register: output
- name: Assert value was returned
assert:
that:
- not output.changed
- output.response[0]['name'] != None
- output.response | length >= 1
- name: Query all the resources in the resource group
azure_rm_resource_facts:
resource_group: "{{ resource_group }}"
resource_type: resources
register: output
- name: Assert value was returned
assert:
that:
- not output.changed
- output.response | length >= 1
- name: Create storage account that requires LRO polling
azure_rm_resource:
polling_timeout: 600
polling_interval: 60
api_version: '2018-07-01'
resource_group: "{{ resource_group }}"
provider: Storage
resource_type: storageAccounts
resource_name: "{{ storageaccountname }}"
body:
sku:
name: Standard_GRS
kind: Storage
location: eastus
register: output
# Verify the LRO-created storage account returned by azure_rm_resource.
# Use bare variable references in the conditional: wrapping conditionals in
# "{{ }}" delimiters is discouraged and triggers an Ansible warning.
- name: Assert that storage was successfully created
  assert:
    that:
      - output.response.name == storageaccountname
- name: Try to storage keys -- special case when subresource part has no name
azure_rm_resource:
resource_group: "{{ resource_group }}"
provider: storage
resource_type: storageAccounts
resource_name: "{{ storageaccountname }}"
subresource:
- type: listkeys
api_version: '2018-03-01-preview'
method: POST
register: keys
- name: Assert that key was returned
assert:
that: keys['response']['keys'][0]['value'] | length > 0
- name: Delete storage - without API version
azure_rm_resource:
polling_timeout: 600
polling_interval: 60
method: DELETE
resource_group: "{{ resource_group }}"
provider: Storage
resource_type: storageAccounts
resource_name: "{{ storageaccountname }}"

@ -0,0 +1,3 @@
cloud/azure
shippable/azure/incidental
destructive

@ -0,0 +1,434 @@
- name: Fix resource prefix
set_fact:
linux_app_plan_resource_group: "{{ resource_group_secondary }}"
win_app_name: "{{ (resource_prefix | replace('-','x'))[-8:] }}{{ 1000 | random}}winapp"
win_plan_name: "{{ (resource_prefix | replace('-','x'))[-8:] }}winplan"
linux_plan_name: "{{ (resource_group_secondary | replace('-','x'))[-8:] }}linplan"
slot1_name: "stage1"
- name: Create a windows web app with non-exist app service plan
azure_rm_webapp:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}1"
plan:
resource_group: "{{ resource_group }}"
name: "{{ win_plan_name }}"
is_linux: false
sku: S1
- name: Create a windows web app with existing app service plan
azure_rm_webapp:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}2"
plan: "{{ win_plan_name }}"
register: output
- name: stop the web app
azure_rm_webapp:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}2"
plan: "{{ win_plan_name }}"
app_state: stopped
register: output
- name: assert output changed
assert:
that:
output.changed
# enable after webapp_facts merged
# - name: get the web app
# azure_rm_webapp_facts:
# resource_group: "{{ resource_group }}"
# name: "{{ win_app_name }}2"
# register: stopped
# - name: assert web app is stopped
# assert:
# that:
# - stopped.properties.state == "Stopped"
- name: Create a windows web app with existing app service plan, try to update some root level params
azure_rm_webapp:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}3"
plan: "{{ win_plan_name }}"
dns_registration: true
https_only: true
tags:
testwebapptag: test
register: output
- name: get web app with resource group and tag
azure_rm_webapp_facts:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}3"
tags:
- testwebapptag
register: output
- assert:
that:
- output.webapps | length == 1
- name: Create a win web app with java run time specific
azure_rm_webapp:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}4"
plan: "{{ win_plan_name }}"
frameworks:
- name: "java"
version: "1.8"
settings:
java_container: "Tomcat"
java_container_version: "8.0"
app_settings:
testkey: "testvalue"
register: output
- name: assert the web app was created
assert:
that: output.changed
- name: get web app with name
azure_rm_webapp_facts:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}4"
register: output
- assert:
that:
- output.webapps | length == 1
- output.webapps[0].app_settings | length == 1
- output.webapps[0].frameworks | length > 1 # there's default frameworks eg net_framework
- name: Update app settings and framework
azure_rm_webapp:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}4"
plan: "{{ win_plan_name }}"
frameworks:
- name: "java"
version: "1.7"
settings:
java_container: "Tomcat"
java_container_version: "8.5"
app_settings:
testkey2: "testvalue2"
register: output
- name: Assert the web app was updated
assert:
that:
- output.changed
- name: get web app with name
azure_rm_webapp_facts:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}4"
register: output
- name: Assert updating
assert:
that:
- output.webapps[0].app_settings | length == 2
- output.webapps[0].app_settings['testkey'] == 'testvalue'
- output.webapps[0].app_settings['testkey2'] == 'testvalue2'
- name: get web app with return publishing profile
azure_rm_webapp_facts:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}4"
return_publish_profile: true
register: output
- assert:
that:
- output.webapps | length == 1
- output.webapps[0].publishing_username != ""
- output.webapps[0].publishing_password != ""
- name: Purge all existing app settings
azure_rm_webapp:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}4"
plan: "{{ win_plan_name }}"
purge_app_settings: true
register: output
- name: Assert the web app was updated
assert:
that: output.changed
- name: Create a win web app with python run time and php run time
azure_rm_webapp:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}5"
plan: "{{ win_plan_name }}"
frameworks:
- name: "python"
version: "2.7"
- name: node
version: "6.6"
- name: "php"
version: "7.0"
register: output
- name: Assert the web app was created
assert:
that: output.changed
- name: Create a docker web app with some app settings
azure_rm_webapp:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}6"
plan:
resource_group: "{{ linux_app_plan_resource_group }}"
name: "{{ linux_plan_name }}"
is_linux: true
sku: S1
number_of_workers: 1
container_settings:
name: "ansible/ansible:ubuntu1404"
register: output
- name: Assert the web app was created
assert:
that: output.changed
- name: Create a docker web app with private acr registry
azure_rm_webapp:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}7"
plan:
resource_group: "{{ linux_app_plan_resource_group }}"
name: "{{ linux_plan_name }}"
container_settings:
name: "ansible/ansible:ubuntu1404"
registry_server_url: test.io
registry_server_user: user
registry_server_password: password
register: output
- name: Assert the web app was created
assert:
that: output.changed
- name: Create a linux web app with nodejs framework
azure_rm_webapp:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}8"
plan:
resource_group: "{{ linux_app_plan_resource_group }}"
name: "{{ linux_plan_name }}"
frameworks:
- name: node
version: "6.6"
register: output
- name: Should be idempotent with linux web app created
azure_rm_webapp:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}8"
plan:
resource_group: "{{ linux_app_plan_resource_group }}"
name: "{{ linux_plan_name }}"
frameworks:
- name: node
version: "6.6"
register: output
- assert:
that: not output.changed
- name: Update nodejs framework
azure_rm_webapp:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}8"
plan:
resource_group: "{{ linux_app_plan_resource_group }}"
name: "{{ linux_plan_name }}"
frameworks:
- name: node
version: "6.9"
register: output
- name: Assert the web app was created
assert:
that: output.changed
- name: Create a linux web app with deployment source github
azure_rm_webapp:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}10"
plan:
resource_group: "{{ linux_app_plan_resource_group }}"
name: "{{ linux_plan_name }}"
deployment_source:
url: "https://github.com/test/test"
branch: master
scm_type: GitHub
register: output
- name: Assert the web app was created
assert:
that: output.changed
- name: Delete web app
azure_rm_webapp:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}8"
state: absent
register: output
- name: Assert the web app was deleted
assert:
that: output.changed
- name: assert error that java is mutually exclusive with frameworks
azure_rm_webapp:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}11"
plan: "{{ win_plan_name }}"
frameworks:
- name: "python"
version: "2.7"
- name: "java"
version: "1.8"
register: fail_win_java_version_mutual_exclusive
failed_when: 'fail_win_java_version_mutual_exclusive.msg != "Java is mutually exclusive with other frameworks."'
- name: assert error when linux web app, only can specify one framework
azure_rm_webapp:
resource_group: "{{ resource_group }}"
name: "{{ linux_plan_name }}12"
plan:
resource_group: "{{ linux_app_plan_resource_group }}"
name: "{{ linux_plan_name }}"
frameworks:
- name: "python"
version: "2.7"
- name: "node"
version: "6.6"
register: fail_linux_one_framework_only
failed_when: fail_linux_one_framework_only.msg != "Can specify one framework only for Linux web app."
- name: Create a linux web app with java tomcat container
azure_rm_webapp:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}13"
plan:
resource_group: "{{ linux_app_plan_resource_group }}"
name: "{{ linux_plan_name }}"
frameworks:
- name: java
version: "8"
settings:
java_container: "tomcat"
java_container_version: "8.5"
register: output
- name: Assert the web app was created
assert:
that: output.changed
- name: Get facts with publish profile
azure_rm_webapp_facts:
resource_group: "{{ resource_group }}"
name: "{{ win_app_name }}13"
no_log: true
register: facts
- name: Assert publish profile returned
assert:
that:
- facts.webapps[0].ftp_publish_url != ''
- name: Create a webapp slot (Check mode)
azure_rm_webappslot:
resource_group: "{{ resource_group }}"
webapp_name: "{{ win_app_name }}13"
name: "{{ slot1_name }}"
configuration_source: "{{ win_app_name }}13"
app_settings:
testkey: testvalue
check_mode: yes
register: output
- name: Assert slot check mode creation
assert:
that:
- output.changed
- name: Create a webapp slot
azure_rm_webappslot:
resource_group: "{{ resource_group }}"
webapp_name: "{{ win_app_name }}13"
name: "{{ slot1_name }}"
configuration_source: "{{ win_app_name }}13"
app_settings:
testkey: testvalueslot
register: output
- name: Assert slot creation
assert:
that:
- output.changed
- name: Update webapp slot (idempotence)
azure_rm_webappslot:
resource_group: "{{ resource_group }}"
webapp_name: "{{ win_app_name }}13"
name: "{{ slot1_name }}"
app_settings:
testkey: testvalueslot
register: output
- name: Assert idempotence
assert:
that:
- not output.changed
- name: Update webapp slot
azure_rm_webappslot:
resource_group: "{{ resource_group }}"
webapp_name: "{{ win_app_name }}13"
name: "{{ slot1_name }}"
frameworks:
- name: "node"
version: "10.1"
app_settings:
testkey: testvalue2
register: output
- name: Assert updating
assert:
that:
- output.changed
- name: Swap webapp slot
azure_rm_webappslot:
resource_group: "{{ resource_group }}"
webapp_name: "{{ win_app_name }}13"
name: "{{ slot1_name }}"
swap:
action: swap
register: output
- name: Assert swap
assert:
that:
- output.changed
- name: Stop webapp slot
azure_rm_webappslot:
resource_group: "{{ resource_group }}"
webapp_name: "{{ win_app_name }}13"
name: "{{ slot1_name }}"
app_state: stopped
register: output
- name: Assert stopped
assert:
that:
- output.changed

@ -0,0 +1,2 @@
cloud/aws
shippable/aws/incidental

@ -0,0 +1,8 @@
# Defaults for the incidental cloudformation integration test.
# Stack and VPC names are derived from resource_prefix so that parallel
# CI runs do not collide on resource names.
stack_name: "{{ resource_prefix }}"
vpc_name: '{{ resource_prefix }}-vpc'
# Seed the "random" filter with the prefix so the generated CIDRs are
# stable for a given test invocation (same seed -> same second octet).
vpc_seed: '{{ resource_prefix }}'
vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16'
subnet_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24'
# Amazon Linux 2 AMI name pattern used to look up the latest image.
ec2_ami_name: 'amzn2-ami-hvm-2.*-x86_64-gp2'

@ -0,0 +1,37 @@
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Create an Amazon EC2 instance.",
"Parameters" : {
"InstanceType" : {
"Description" : "EC2 instance type",
"Type" : "String",
"Default" : "t3.nano",
"AllowedValues" : [ "t3.micro", "t3.nano"]
},
"ImageId" : {
"Type" : "String"
},
"SubnetId" : {
"Type" : "String"
}
},
"Resources" : {
"EC2Instance" : {
"Type" : "AWS::EC2::Instance",
"Properties" : {
"InstanceType" : { "Ref" : "InstanceType" },
"ImageId" : { "Ref" : "ImageId" },
"SubnetId": { "Ref" : "SubnetId" }
}
}
},
"Outputs" : {
"InstanceId" : {
"Value" : { "Ref" : "EC2Instance" }
}
}
}

@ -0,0 +1,463 @@
---
- module_defaults:
group/aws:
aws_access_key: '{{ aws_access_key | default(omit) }}'
aws_secret_key: '{{ aws_secret_key | default(omit) }}'
security_token: '{{ security_token | default(omit) }}'
region: '{{ aws_region | default(omit) }}'
block:
# ==== Env setup ==========================================================
- name: list available AZs
aws_az_info:
register: region_azs
- name: pick an AZ for testing
set_fact:
availability_zone: "{{ region_azs.availability_zones[0].zone_name }}"
- name: Create a test VPC
ec2_vpc_net:
name: "{{ vpc_name }}"
cidr_block: "{{ vpc_cidr }}"
tags:
Name: Cloudformation testing
register: testing_vpc
- name: Create a test subnet
ec2_vpc_subnet:
vpc_id: "{{ testing_vpc.vpc.id }}"
cidr: "{{ subnet_cidr }}"
az: "{{ availability_zone }}"
register: testing_subnet
- name: Find AMI to use
ec2_ami_info:
owners: 'amazon'
filters:
name: '{{ ec2_ami_name }}'
register: ec2_amis
- name: Set fact with latest AMI
vars:
latest_ami: '{{ ec2_amis.images | sort(attribute="creation_date") | last }}'
set_fact:
ec2_ami_image: '{{ latest_ami.image_id }}'
# ==== Cloudformation tests ===============================================
# 1. Basic stack creation (check mode, actual run and idempotency)
# 2. Tags
# 3. cloudformation_info tests (basic + all_facts)
# 4. termination_protection
# 5. create_changeset + changeset_name
# There is still scope to add tests for -
# 1. capabilities
# 2. stack_policy
# 3. on_create_failure (covered in unit tests)
# 4. Passing in a role
# 5. nested stacks?
- name: create a cloudformation stack (check mode)
cloudformation:
stack_name: "{{ stack_name }}"
template_body: "{{ lookup('file','cf_template.json') }}"
template_parameters:
InstanceType: "t3.nano"
ImageId: "{{ ec2_ami_image }}"
SubnetId: "{{ testing_subnet.subnet.id }}"
tags:
Stack: "{{ stack_name }}"
test: "{{ resource_prefix }}"
register: cf_stack
check_mode: yes
- name: check task return attributes
assert:
that:
- cf_stack.changed
- "'msg' in cf_stack and 'New stack would be created' in cf_stack.msg"
- name: create a cloudformation stack
cloudformation:
stack_name: "{{ stack_name }}"
template_body: "{{ lookup('file','cf_template.json') }}"
template_parameters:
InstanceType: "t3.nano"
ImageId: "{{ ec2_ami_image }}"
SubnetId: "{{ testing_subnet.subnet.id }}"
tags:
Stack: "{{ stack_name }}"
test: "{{ resource_prefix }}"
register: cf_stack
- name: check task return attributes
assert:
that:
- cf_stack.changed
- "'events' in cf_stack"
- "'output' in cf_stack and 'Stack CREATE complete' in cf_stack.output"
- "'stack_outputs' in cf_stack and 'InstanceId' in cf_stack.stack_outputs"
- "'stack_resources' in cf_stack"
- name: create a cloudformation stack (check mode) (idempotent)
cloudformation:
stack_name: "{{ stack_name }}"
template_body: "{{ lookup('file','cf_template.json') }}"
template_parameters:
InstanceType: "t3.nano"
ImageId: "{{ ec2_ami_image }}"
SubnetId: "{{ testing_subnet.subnet.id }}"
tags:
Stack: "{{ stack_name }}"
test: "{{ resource_prefix }}"
register: cf_stack
check_mode: yes
- name: check task return attributes
assert:
that:
- not cf_stack.changed
- name: create a cloudformation stack (idempotent)
cloudformation:
stack_name: "{{ stack_name }}"
template_body: "{{ lookup('file','cf_template.json') }}"
template_parameters:
InstanceType: "t3.nano"
ImageId: "{{ ec2_ami_image }}"
SubnetId: "{{ testing_subnet.subnet.id }}"
tags:
Stack: "{{ stack_name }}"
test: "{{ resource_prefix }}"
register: cf_stack
- name: check task return attributes
assert:
that:
- not cf_stack.changed
- "'output' in cf_stack and 'Stack is already up-to-date.' in cf_stack.output"
- "'stack_outputs' in cf_stack and 'InstanceId' in cf_stack.stack_outputs"
- "'stack_resources' in cf_stack"
- name: get stack details
cloudformation_info:
stack_name: "{{ stack_name }}"
register: stack_info
- name: assert stack info
assert:
that:
- "'cloudformation' in stack_info"
- "stack_info.cloudformation | length == 1"
- "stack_name in stack_info.cloudformation"
- "'stack_description' in stack_info.cloudformation[stack_name]"
- "'stack_outputs' in stack_info.cloudformation[stack_name]"
- "'stack_parameters' in stack_info.cloudformation[stack_name]"
- "'stack_tags' in stack_info.cloudformation[stack_name]"
- "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name"
- name: get stack details (checkmode)
cloudformation_info:
stack_name: "{{ stack_name }}"
register: stack_info
check_mode: yes
- name: assert stack info
assert:
that:
- "'cloudformation' in stack_info"
- "stack_info.cloudformation | length == 1"
- "stack_name in stack_info.cloudformation"
- "'stack_description' in stack_info.cloudformation[stack_name]"
- "'stack_outputs' in stack_info.cloudformation[stack_name]"
- "'stack_parameters' in stack_info.cloudformation[stack_name]"
- "'stack_tags' in stack_info.cloudformation[stack_name]"
- "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name"
- name: get stack details (all_facts)
cloudformation_info:
stack_name: "{{ stack_name }}"
all_facts: yes
register: stack_info
- name: assert stack info
assert:
that:
- "'stack_events' in stack_info.cloudformation[stack_name]"
- "'stack_policy' in stack_info.cloudformation[stack_name]"
- "'stack_resource_list' in stack_info.cloudformation[stack_name]"
- "'stack_resources' in stack_info.cloudformation[stack_name]"
- "'stack_template' in stack_info.cloudformation[stack_name]"
- name: get stack details (all_facts) (checkmode)
cloudformation_info:
stack_name: "{{ stack_name }}"
all_facts: yes
register: stack_info
check_mode: yes
- name: assert stack info
assert:
that:
- "'stack_events' in stack_info.cloudformation[stack_name]"
- "'stack_policy' in stack_info.cloudformation[stack_name]"
- "'stack_resource_list' in stack_info.cloudformation[stack_name]"
- "'stack_resources' in stack_info.cloudformation[stack_name]"
- "'stack_template' in stack_info.cloudformation[stack_name]"
# ==== Cloudformation tests (create changeset) ============================
# try to create a changeset by changing instance type
- name: create a changeset
cloudformation:
stack_name: "{{ stack_name }}"
create_changeset: yes
changeset_name: "test-changeset"
template_body: "{{ lookup('file','cf_template.json') }}"
template_parameters:
InstanceType: "t3.micro"
ImageId: "{{ ec2_ami_image }}"
SubnetId: "{{ testing_subnet.subnet.id }}"
tags:
Stack: "{{ stack_name }}"
test: "{{ resource_prefix }}"
register: create_changeset_result
- name: assert changeset created
assert:
that:
- "create_changeset_result.changed"
- "'change_set_id' in create_changeset_result"
- "'Stack CREATE_CHANGESET complete' in create_changeset_result.output"
# Retrieve the stack including its change sets (live run and check mode) and
# verify cloudformation_info exposes the change-set data.
- name: get stack details with changesets
  cloudformation_info:
    stack_name: "{{ stack_name }}"
    # canonical lowercase boolean ("True" is inconsistent with the rest of
    # the file and flagged by yamllint's truthy rule)
    stack_change_sets: true
  register: stack_info
- name: assert changesets in info
  assert:
    that:
      - "'stack_change_sets' in stack_info.cloudformation[stack_name]"
- name: get stack details with changesets (checkmode)
  cloudformation_info:
    stack_name: "{{ stack_name }}"
    stack_change_sets: true
  register: stack_info
  check_mode: yes
- name: assert changesets in info
  assert:
    that:
      - "'stack_change_sets' in stack_info.cloudformation[stack_name]"
# try to create an empty changeset by passing in unchanged template
- name: create a changeset
cloudformation:
stack_name: "{{ stack_name }}"
create_changeset: yes
template_body: "{{ lookup('file','cf_template.json') }}"
template_parameters:
InstanceType: "t3.nano"
ImageId: "{{ ec2_ami_image }}"
SubnetId: "{{ testing_subnet.subnet.id }}"
tags:
Stack: "{{ stack_name }}"
test: "{{ resource_prefix }}"
register: create_changeset_result
- name: assert changeset created
assert:
that:
- "not create_changeset_result.changed"
- "'The created Change Set did not contain any changes to this stack and was deleted.' in create_changeset_result.output"
# ==== Cloudformation tests (termination_protection) ======================
- name: set termination protection to true
cloudformation:
stack_name: "{{ stack_name }}"
termination_protection: yes
template_body: "{{ lookup('file','cf_template.json') }}"
template_parameters:
InstanceType: "t3.nano"
ImageId: "{{ ec2_ami_image }}"
SubnetId: "{{ testing_subnet.subnet.id }}"
tags:
Stack: "{{ stack_name }}"
test: "{{ resource_prefix }}"
register: cf_stack
# This fails - #65592
# - name: check task return attributes
# assert:
# that:
# - cf_stack.changed
- name: get stack details
cloudformation_info:
stack_name: "{{ stack_name }}"
register: stack_info
- name: assert stack info
assert:
that:
- "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
- name: get stack details (checkmode)
cloudformation_info:
stack_name: "{{ stack_name }}"
register: stack_info
check_mode: yes
- name: assert stack info
assert:
that:
- "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
- name: set termination protection to false
cloudformation:
stack_name: "{{ stack_name }}"
termination_protection: no
template_body: "{{ lookup('file','cf_template.json') }}"
template_parameters:
InstanceType: "t3.nano"
ImageId: "{{ ec2_ami_image }}"
SubnetId: "{{ testing_subnet.subnet.id }}"
tags:
Stack: "{{ stack_name }}"
test: "{{ resource_prefix }}"
register: cf_stack
# This fails - #65592
# - name: check task return attributes
# assert:
# that:
# - cf_stack.changed
- name: get stack details
cloudformation_info:
stack_name: "{{ stack_name }}"
register: stack_info
- name: assert stack info
assert:
that:
- "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
- name: get stack details (checkmode)
cloudformation_info:
stack_name: "{{ stack_name }}"
register: stack_info
check_mode: yes
- name: assert stack info
assert:
that:
- "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
# ==== Cloudformation tests (delete stack tests) ==========================
- name: delete cloudformation stack (check mode)
cloudformation:
stack_name: "{{ stack_name }}"
state: absent
check_mode: yes
register: cf_stack
- name: check task return attributes
assert:
that:
- cf_stack.changed
- "'msg' in cf_stack and 'Stack would be deleted' in cf_stack.msg"
- name: delete cloudformation stack
cloudformation:
stack_name: "{{ stack_name }}"
state: absent
register: cf_stack
- name: check task return attributes
assert:
that:
- cf_stack.changed
- "'output' in cf_stack and 'Stack Deleted' in cf_stack.output"
- name: delete cloudformation stack (check mode) (idempotent)
cloudformation:
stack_name: "{{ stack_name }}"
state: absent
check_mode: yes
register: cf_stack
- name: check task return attributes
assert:
that:
- not cf_stack.changed
- "'msg' in cf_stack"
- >-
"Stack doesn't exist" in cf_stack.msg
- name: delete cloudformation stack (idempotent)
cloudformation:
stack_name: "{{ stack_name }}"
state: absent
register: cf_stack
- name: check task return attributes
assert:
that:
- not cf_stack.changed
- "'output' in cf_stack and 'Stack not found.' in cf_stack.output"
- name: get stack details
cloudformation_info:
stack_name: "{{ stack_name }}"
register: stack_info
- name: assert stack info
assert:
that:
- "not stack_info.cloudformation"
- name: get stack details (checkmode)
cloudformation_info:
stack_name: "{{ stack_name }}"
register: stack_info
check_mode: yes
- name: assert stack info
assert:
that:
- "not stack_info.cloudformation"
# ==== Cleanup ============================================================
always:
- name: delete stack
cloudformation:
stack_name: "{{ stack_name }}"
state: absent
ignore_errors: yes
- name: Delete test subnet
ec2_vpc_subnet:
vpc_id: "{{ testing_vpc.vpc.id }}"
cidr: "{{ subnet_cidr }}"
state: absent
ignore_errors: yes
- name: Delete test VPC
ec2_vpc_net:
name: "{{ vpc_name }}"
cidr_block: "{{ vpc_cidr }}"
state: absent
ignore_errors: yes

@ -0,0 +1,6 @@
---
# Common variables shared by the incidental CloudStack (cs_*) tests.
# Unique per-run prefix derived from the current timestamp so that
# concurrently running test jobs do not collide on resource names.
cs_resource_prefix: "cs-{{ (ansible_date_time.iso8601_micro | to_uuid).split('-')[0] }}"
# Template, service offering and zone names as provided by the
# CloudStack simulator sandbox.
cs_common_template: CentOS 5.6 (64-bit) no GUI (Simulator)
cs_common_service_offering: Small Instance
cs_common_zone_adv: Sandbox-simulator-advanced
cs_common_zone_basic: Sandbox-simulator-basic

@ -0,0 +1,3 @@
---
# Role dependencies: pull in the shared CloudStack test variables.
dependencies:
  - incidental_cs_common

@ -0,0 +1,303 @@
- name: pre-setup
cs_role:
name: "testRole"
register: testRole
- name: verify pre-setup
assert:
that:
- testRole is successful
- name: setup
cs_role_permission:
name: "fakeRolePerm"
role: "{{ testRole.id }}"
state: absent
register: roleperm
- name: verify setup
assert:
that:
- roleperm is successful
- name: setup2
cs_role_permission:
name: "fakeRolePerm2"
role: "{{ testRole.id }}"
state: absent
register: roleperm2
- name: verify setup2
assert:
that:
- roleperm2 is successful
- name: test fail if missing name
cs_role_permission:
role: "{{ testRole.id }}"
register: roleperm
ignore_errors: true
- name: verify results of fail if missing name
assert:
that:
- roleperm is failed
- 'roleperm.msg == "missing required arguments: name"'
- name: test fail if missing role
cs_role_permission:
name: "fakeRolePerm"
register: roleperm
ignore_errors: true
- name: verify results of fail if missing role
assert:
that:
- roleperm is failed
- 'roleperm.msg == "missing required arguments: role"'
- name: test fail if role does not exist
cs_role_permission:
name: "fakeRolePerm"
role: "testtest"
register: roleperm
ignore_errors: true
- name: verify results of fail if role does not exist
assert:
that:
- roleperm is failed
- roleperm.msg == "Role 'testtest' not found"
# Negative test: an invalid value for the "state" parameter must make the
# module fail with the standard choices error message.
# (Fixes "incorrcect" -> "incorrect" typo in the task names.)
- name: test fail if state is incorrect
  cs_role_permission:
    state: badstate
    role: "{{ testRole.id }}"
    name: "fakeRolePerm"
    permission: allow
  register: roleperm
  ignore_errors: true
- name: verify results of fail if state is incorrect
  assert:
    that:
      - roleperm is failed
      - 'roleperm.msg == "value of state must be one of: present, absent, got: badstate"'
- name: test create role permission in check mode
cs_role_permission:
role: "{{ testRole.id }}"
name: "fakeRolePerm"
permission: allow
description: "fakeRolePerm description"
register: roleperm
check_mode: yes
- name: verify results of role permission in check mode
assert:
that:
- roleperm is successful
- roleperm is changed
- name: test create role permission
cs_role_permission:
role: "{{ testRole.id }}"
name: "fakeRolePerm"
permission: allow
description: "fakeRolePerm description"
register: roleperm
- name: verify results of role permission
assert:
that:
- roleperm is successful
- roleperm is changed
- roleperm.name == "fakeRolePerm"
- roleperm.permission == "allow"
- roleperm.description == "fakeRolePerm description"
- name: test create role permission idempotency
cs_role_permission:
role: "{{ testRole.id }}"
name: "fakeRolePerm"
permission: allow
description: "fakeRolePerm description"
register: roleperm
- name: verify results of role permission idempotency
assert:
that:
- roleperm is successful
- roleperm is not changed
- roleperm.name == "fakeRolePerm"
- roleperm.permission == "allow"
- roleperm.description == "fakeRolePerm description"
- name: test update role permission in check_mode
cs_role_permission:
role: "{{ testRole.id }}"
name: "fakeRolePerm"
permission: deny
description: "fakeRolePerm description"
register: roleperm
check_mode: yes
- name: verify results of update role permission in check mode
assert:
that:
- roleperm is successful
- roleperm is changed
- roleperm.name == "fakeRolePerm"
- roleperm.permission == "allow"
- roleperm.description == "fakeRolePerm description"
- name: test update role permission
cs_role_permission:
role: "{{ testRole.id }}"
name: "fakeRolePerm"
permission: deny
description: "fakeRolePerm description"
register: roleperm
- name: verify results of update role permission
assert:
that:
- roleperm is successful
- roleperm is changed
- roleperm.name == "fakeRolePerm"
- roleperm.permission == "deny"
- roleperm.description == "fakeRolePerm description"
- name: test update role permission idempotency
cs_role_permission:
role: "{{ testRole.id }}"
name: "fakeRolePerm"
permission: deny
description: "fakeRolePerm description"
register: roleperm
- name: verify results of update role permission idempotency
assert:
that:
- roleperm is successful
- roleperm is not changed
- roleperm.name == "fakeRolePerm"
- roleperm.permission == "deny"
- roleperm.description == "fakeRolePerm description"
- name: test create a second role permission
cs_role_permission:
role: "{{ testRole.id }}"
name: "fakeRolePerm2"
permission: allow
register: roleperm2
- name: verify results of create a second role permission
assert:
that:
- roleperm2 is successful
- roleperm2 is changed
- roleperm2.name == "fakeRolePerm2"
- name: test update rules order in check_mode
cs_role_permission:
role: "{{ testRole.id }}"
name: "fakeRolePerm"
parent: "{{ roleperm2.id }}"
register: roleperm
check_mode: yes
- name: verify results of update rule order check mode
assert:
that:
- roleperm is successful
- roleperm is changed
- roleperm.name == "fakeRolePerm"
- name: test update rules order
cs_role_permission:
role: "{{ testRole.id }}"
name: "fakeRolePerm"
parent: "{{ roleperm2.id }}"
register: roleperm
- name: verify results of update rule order
assert:
that:
- roleperm is successful
- roleperm is changed
- roleperm.name == "fakeRolePerm"
- name: test update rules order to the top of the list
cs_role_permission:
role: "{{ testRole.id }}"
name: "fakeRolePerm"
parent: 0
register: roleperm
- name: verify results of update rule order to the top of the list
assert:
that:
- roleperm is successful
- roleperm is changed
- roleperm.name == "fakeRolePerm"
- name: test update rules order with parent NAME
cs_role_permission:
role: "{{ testRole.id }}"
name: "fakeRolePerm"
parent: "{{ roleperm2.name }}"
register: roleperm
- name: verify results of update rule order with parent NAME
assert:
that:
- roleperm is successful
- roleperm is changed
- roleperm.name == "fakeRolePerm"
- name: test fail if permission AND parent args are present
cs_role_permission:
role: "{{ testRole.id }}"
name: "fakeRolePerm"
permission: allow
parent: 0
register: roleperm
ignore_errors: true
- name: verify results of fail if permission AND parent args are present
assert:
that:
- roleperm is failed
- 'roleperm.msg == "parameters are mutually exclusive: permission|parent"'
- name: test fail if parent does not exist
cs_role_permission:
role: "{{ testRole.id }}"
name: "fakeRolePerm"
parent: "badParent"
register: roleperm
ignore_errors: true
- name: verify results of fail if parent does not exist
assert:
that:
- roleperm is failed
- roleperm.msg == "Parent rule 'badParent' not found"
- name: test remove role permission in check_mode
cs_role_permission:
role: "{{ testRole.id }}"
name: "fakeRolePerm"
state: absent
register: roleperm
check_mode: yes
- name: verify results of rename role permission in check_mode
assert:
that:
- roleperm is successful
- roleperm is changed
- name: test remove role permission
cs_role_permission:
role: "{{ testRole.id }}"
name: "fakeRolePerm"
state: absent
register: roleperm
- name: verify results of remove role permission
assert:
that:
- roleperm is successful
- roleperm is changed
- name: remove second role permission
cs_role_permission:
role: "{{ testRole.id }}"
name: "fakeRolePerm2"
state: absent
register: roleperm
- name: verify results of remove second role permission
assert:
that:
- roleperm is successful
- roleperm is changed

@ -0,0 +1,3 @@
---
dependencies:
- incidental_cs_common

@ -0,0 +1,223 @@
---
- name: setup service offering
cs_service_offering:
name: Micro
state: absent
register: so
- name: verify setup service offering
assert:
that:
- so is successful
- name: create service offering in check mode
cs_service_offering:
name: Micro
display_text: Micro 512mb 1cpu
cpu_number: 1
cpu_speed: 2198
memory: 512
host_tags: eco
storage_tags:
- eco
- backup
storage_type: local
register: so
check_mode: true
- name: verify create service offering in check mode
assert:
that:
- so is changed
- name: create service offering
cs_service_offering:
name: Micro
display_text: Micro 512mb 1cpu
cpu_number: 1
cpu_speed: 2198
memory: 512
host_tags: eco
storage_tags:
- eco
- backup
storage_type: local
register: so
- name: verify create service offering
assert:
that:
- so is changed
- so.name == "Micro"
- so.display_text == "Micro 512mb 1cpu"
- so.cpu_number == 1
- so.cpu_speed == 2198
- so.memory == 512
- so.host_tags == ['eco']
- so.storage_tags == ['eco', 'backup']
- so.storage_type == "local"
- name: create service offering idempotence
cs_service_offering:
name: Micro
display_text: Micro 512mb 1cpu
cpu_number: 1
cpu_speed: 2198
memory: 512
host_tags: eco
storage_tags:
- eco
- backup
storage_type: local
register: so
- name: verify create service offering idempotence
assert:
that:
- so is not changed
- so.name == "Micro"
- so.display_text == "Micro 512mb 1cpu"
- so.cpu_number == 1
- so.cpu_speed == 2198
- so.memory == 512
- so.host_tags == ['eco']
- so.storage_tags == ['eco', 'backup']
- so.storage_type == "local"
- name: update service offering in check mode
  cs_service_offering:
    name: Micro
    display_text: Micro RAM 512MB 1vCPU
  register: so
  check_mode: true
# verify name fixed: this validates the check-mode *update* above
# (values still reflect the pre-update state, since check mode changes nothing)
- name: verify update service offering in check mode
  assert:
    that:
      - so is changed
      - so.name == "Micro"
      - so.display_text == "Micro 512mb 1cpu"
      - so.cpu_number == 1
      - so.cpu_speed == 2198
      - so.memory == 512
      - so.host_tags == ['eco']
      - so.storage_tags == ['eco', 'backup']
      - so.storage_type == "local"
- name: update service offering
  cs_service_offering:
    name: Micro
    display_text: Micro RAM 512MB 1vCPU
  register: so
# typo fixed in verify name: "offerin" -> "offering"
- name: verify update service offering
  assert:
    that:
      - so is changed
      - so.name == "Micro"
      - so.display_text == "Micro RAM 512MB 1vCPU"
      - so.cpu_number == 1
      - so.cpu_speed == 2198
      - so.memory == 512
      - so.host_tags == ['eco']
      - so.storage_tags == ['eco', 'backup']
      - so.storage_type == "local"
- name: update service offering idempotence
cs_service_offering:
name: Micro
display_text: Micro RAM 512MB 1vCPU
register: so
- name: verify update service offering idempotence
assert:
that:
- so is not changed
- so.name == "Micro"
- so.display_text == "Micro RAM 512MB 1vCPU"
- so.cpu_number == 1
- so.cpu_speed == 2198
- so.memory == 512
- so.host_tags == ['eco']
- so.storage_tags == ['eco', 'backup']
- so.storage_type == "local"
- name: remove service offering in check mode
cs_service_offering:
name: Micro
state: absent
check_mode: true
register: so
- name: verify remove service offering in check mode
assert:
that:
- so is changed
- so.name == "Micro"
- so.display_text == "Micro RAM 512MB 1vCPU"
- so.cpu_number == 1
- so.cpu_speed == 2198
- so.memory == 512
- so.host_tags == ['eco']
- so.storage_tags == ['eco', 'backup']
- so.storage_type == "local"
- name: remove service offering
cs_service_offering:
name: Micro
state: absent
register: so
- name: verify remove service offering
assert:
that:
- so is changed
- so.name == "Micro"
- so.display_text == "Micro RAM 512MB 1vCPU"
- so.cpu_number == 1
- so.cpu_speed == 2198
- so.memory == 512
- so.host_tags == ['eco']
- so.storage_tags == ['eco', 'backup']
- so.storage_type == "local"
- name: remove service offering idempotence
cs_service_offering:
name: Micro
state: absent
register: so
- name: verify remove service offering idempotence
assert:
that:
- so is not changed
- name: create custom service offering
cs_service_offering:
name: custom
display_text: custom offer
is_customized: yes
host_tags: eco
storage_tags:
- eco
- backup
storage_type: local
register: so
- name: verify create custom service offering
assert:
that:
- so is changed
- so.name == "custom"
- so.display_text == "custom offer"
- so.is_customized == True
- so.cpu_number is not defined
- so.cpu_speed is not defined
- so.memory is not defined
- so.host_tags == ['eco']
- so.storage_tags == ['eco', 'backup']
- so.storage_type == "local"
- name: remove custom service offering
cs_service_offering:
name: custom
state: absent
register: so
- name: verify remove service offering
assert:
that:
- so is changed
- so.name == "custom"
- so.display_text == "custom offer"
- so.host_tags == ['eco']
- so.storage_tags == ['eco', 'backup']
- so.storage_type == "local"

@ -0,0 +1,3 @@
---
- import_tasks: guest_vm_service_offering.yml
- import_tasks: system_vm_service_offering.yml

@ -0,0 +1,151 @@
---
- name: setup system offering
cs_service_offering:
name: System Offering for Ansible
is_system: true
state: absent
register: so
- name: verify setup system offering
assert:
that:
- so is successful
# system_vm_type is deliberately omitted so the module must fail
- name: fail missing storage type and is_system
  cs_service_offering:
    name: System Offering for Ansible
    cpu_number: 1
    cpu_speed: 500
    memory: 512
    host_tag: perf
    storage_tag: perf
    storage_type: shared
    offer_ha: true
    limit_cpu_usage: false
    is_system: true
  register: so
  ignore_errors: true
# verify name fixed: it was copied from the next (check mode) test by mistake
- name: verify fail missing storage type and is_system
  assert:
    that:
      - so is failed
      - so.msg.startswith('missing required arguments:')
- name: create system service offering in check mode
cs_service_offering:
name: System Offering for Ansible
cpu_number: 1
cpu_speed: 500
memory: 512
host_tag: perf
storage_tag: perf
storage_type: shared
offer_ha: true
limit_cpu_usage: false
system_vm_type: domainrouter
is_system: true
register: so
check_mode: true
- name: verify create system service offering in check mode
assert:
that:
- so is changed
- name: create system service offering
cs_service_offering:
name: System Offering for Ansible
cpu_number: 1
cpu_speed: 500
memory: 512
host_tag: perf
storage_tag: perf
storage_type: shared
offer_ha: true
limit_cpu_usage: false
system_vm_type: domainrouter
is_system: true
register: so
- name: verify create system service offering
assert:
that:
- so is changed
- so.name == "System Offering for Ansible"
- so.display_text == "System Offering for Ansible"
- so.cpu_number == 1
- so.cpu_speed == 500
- so.memory == 512
- so.host_tags == ['perf']
- so.storage_tags == ['perf']
- so.storage_type == "shared"
- so.offer_ha == true
- so.limit_cpu_usage == false
- so.system_vm_type == "domainrouter"
- so.is_system == true
- name: create system service offering idempotence
cs_service_offering:
name: System Offering for Ansible
cpu_number: 1
cpu_speed: 500
memory: 512
host_tag: perf
storage_tag: perf
storage_type: shared
offer_ha: true
limit_cpu_usage: false
system_vm_type: domainrouter
is_system: true
register: so
- name: verify create system service offering idempotence
assert:
that:
- so is not changed
- so.name == "System Offering for Ansible"
- so.display_text == "System Offering for Ansible"
- so.cpu_number == 1
- so.cpu_speed == 500
- so.memory == 512
- so.host_tags == ['perf']
- so.storage_tags == ['perf']
- so.storage_type == "shared"
- so.offer_ha == true
- so.limit_cpu_usage == false
- so.system_vm_type == "domainrouter"
- so.is_system == true
- name: remove system service offering in check mode
cs_service_offering:
name: System Offering for Ansible
is_system: true
state: absent
check_mode: true
register: so
- name: verify remove system service offering in check mode
assert:
that:
- so is changed
- so.name == "System Offering for Ansible"
- so.is_system == true
- name: remove system service offering
cs_service_offering:
name: System Offering for Ansible
is_system: true
state: absent
register: so
- name: verify remove system service offering
assert:
that:
- so is changed
- so.name == "System Offering for Ansible"
- so.is_system == true
- name: remove system service offering idempotence
cs_service_offering:
name: System Offering for Ansible
is_system: true
state: absent
register: so
- name: verify remove system service offering idempotence
assert:
that:
- so is not changed

@ -0,0 +1,2 @@
cloud/aws
shippable/aws/incidental

@ -0,0 +1,17 @@
[tests]
# Sorted fastest to slowest
version_fail_wrapper
ebs_optimized
block_devices
cpu_options
default_vpc_tests
external_resource_attach
instance_no_wait
iam_instance_role
termination_protection
tags_and_vpc_settings
checkmode_tests
[all:vars]
ansible_connection=local
ansible_python_interpreter="{{ ansible_playbook_python }}"

@ -0,0 +1,43 @@
---
# Beware: most of our tests here are run in parallel.
# To add new tests you'll need to add a new host to the inventory and a matching
# '{{ inventory_hostname }}'.yml file in roles/ec2_instance/tasks/
# Prepare the VPC and figure out which AMI to use
- hosts: all
gather_facts: no
tasks:
- module_defaults:
group/aws:
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token | default(omit) }}"
region: "{{ aws_region }}"
vars:
# We can't just use "run_once" because the facts don't propagate when
# running an 'include' that was run_once
setup_run_once: yes
block:
- include_role:
name: 'ec2_instance'
tasks_from: find_ami.yml
- include_role:
name: 'ec2_instance'
tasks_from: env_setup.yml
rescue:
- include_role:
name: 'ec2_instance'
tasks_from: env_cleanup.yml
run_once: yes
- fail:
msg: 'Environment preparation failed'
run_once: yes
# VPC should get cleaned up once all hosts have run
- hosts: all
gather_facts: no
strategy: free
#serial: 10
roles:
- ec2_instance

@ -0,0 +1,3 @@
dependencies:
- incidental_setup_ec2
- setup_remote_tmp_dir

@ -0,0 +1,14 @@
---
# defaults file for ec2_instance
ec2_instance_owner: 'integration-run-{{ resource_prefix }}'
ec2_instance_type: 't3.micro'
ec2_instance_tag_TestId: '{{ resource_prefix }}-{{ inventory_hostname }}'
ec2_ami_name: 'amzn2-ami-hvm-2.*-x86_64-gp2'
vpc_name: '{{ resource_prefix }}-vpc'
vpc_seed: '{{ resource_prefix }}'
vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16'
subnet_a_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24'
subnet_a_startswith: '10.{{ 256 | random(seed=vpc_seed) }}.32.'
subnet_b_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.33.0/24'
subnet_b_startswith: '10.{{ 256 | random(seed=vpc_seed) }}.33.'

@ -0,0 +1,13 @@
{
"Version": "2008-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}

@ -0,0 +1,82 @@
- block:
- name: "New instance with an extra block device"
ec2_instance:
state: present
name: "{{ resource_prefix }}-test-ebs-vols"
image_id: "{{ ec2_ami_image }}"
vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
volumes:
- device_name: /dev/sdb
ebs:
volume_size: 20
delete_on_termination: true
volume_type: standard
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
instance_type: "{{ ec2_instance_type }}"
wait: true
register: block_device_instances
- name: "Gather instance info"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-test-ebs-vols"
register: block_device_instances_info
- assert:
that:
- block_device_instances is not failed
- block_device_instances is changed
- block_device_instances_info.instances[0].block_device_mappings[0]
- block_device_instances_info.instances[0].block_device_mappings[1]
- block_device_instances_info.instances[0].block_device_mappings[1].device_name == '/dev/sdb'
- name: "New instance with an extra block device (check mode)"
ec2_instance:
state: present
name: "{{ resource_prefix }}-test-ebs-vols-checkmode"
image_id: "{{ ec2_ami_image }}"
vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
volumes:
- device_name: /dev/sdb
ebs:
volume_size: 20
delete_on_termination: true
volume_type: standard
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
instance_type: "{{ ec2_instance_type }}"
check_mode: yes
- name: "fact presented ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-test-ebs-vols"
"instance-state-name": "running"
register: presented_instance_fact
- name: "fact checkmode ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-test-ebs-vols-checkmode"
register: checkmode_instance_fact
- name: "Confirm whether the check mode is working normally."
assert:
that:
- "{{ presented_instance_fact.instances | length }} > 0"
- "{{ checkmode_instance_fact.instances | length }} == 0"
- name: "Terminate instances"
ec2_instance:
state: absent
instance_ids: "{{ block_device_instances.instance_ids }}"
always:
- name: "Terminate block_devices instances"
ec2_instance:
state: absent
filters:
"tag:TestId": "{{ ec2_instance_tag_TestId }}"
wait: yes
ignore_errors: yes

@ -0,0 +1,172 @@
- block:
- name: "Make basic instance"
ec2_instance:
state: present
name: "{{ resource_prefix }}-checkmode-comparison"
image_id: "{{ ec2_ami_image }}"
security_groups: "{{ sg.group_id }}"
instance_type: "{{ ec2_instance_type }}"
vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
wait: false
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
register: basic_instance
- name: "Make basic instance (check mode)"
ec2_instance:
state: present
name: "{{ resource_prefix }}-checkmode-comparison-checkmode"
image_id: "{{ ec2_ami_image }}"
security_groups: "{{ sg.group_id }}"
instance_type: "{{ ec2_instance_type }}"
vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
check_mode: yes
- name: "fact presented ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-checkmode-comparison"
register: presented_instance_fact
- name: "fact checkmode ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-checkmode-comparison-checkmode"
register: checkmode_instance_fact
- name: "Confirm whether the check mode is working normally."
assert:
that:
- "{{ presented_instance_fact.instances | length }} > 0"
- "{{ checkmode_instance_fact.instances | length }} == 0"
- name: "Stop instance (check mode)"
ec2_instance:
state: stopped
name: "{{ resource_prefix }}-checkmode-comparison"
vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
check_mode: yes
- name: "fact ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-checkmode-comparison"
register: confirm_checkmode_stopinstance_fact
- name: "Verify that it was not stopped."
assert:
that:
- '"{{ confirm_checkmode_stopinstance_fact.instances[0].state.name }}" != "stopped"'
- name: "Stop instance."
ec2_instance:
state: stopped
name: "{{ resource_prefix }}-checkmode-comparison"
vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
register: instance_stop
until: not instance_stop.failed
retries: 10
- name: "fact stopped ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-checkmode-comparison"
register: confirm_stopinstance_fact
- name: "Verify that it was stopped."
assert:
that:
- '"{{ confirm_stopinstance_fact.instances[0].state.name }}" in ["stopped", "stopping"]'
- name: "Running instance in check mode."
ec2_instance:
state: running
name: "{{ resource_prefix }}-checkmode-comparison"
vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
check_mode: yes
- name: "fact ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-checkmode-comparison"
register: confirm_checkmode_runninginstance_fact
- name: "Verify that it was not running."
assert:
that:
- '"{{ confirm_checkmode_runninginstance_fact.instances[0].state.name }}" != "running"'
- name: "Running instance."
ec2_instance:
state: running
name: "{{ resource_prefix }}-checkmode-comparison"
vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
- name: "fact ec2 instance."
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-checkmode-comparison"
register: confirm_runninginstance_fact
- name: "Verify that it was running."
assert:
that:
- '"{{ confirm_runninginstance_fact.instances[0].state.name }}" == "running"'
- name: "Terminate instance in check mode."
ec2_instance:
state: absent
name: "{{ resource_prefix }}-checkmode-comparison"
vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
check_mode: yes
- name: "fact ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-checkmode-comparison"
register: confirm_checkmode_terminatedinstance_fact
- name: "Verify that it was not terminated,"
assert:
that:
- '"{{ confirm_checkmode_terminatedinstance_fact.instances[0].state.name }}" != "terminated"'
- name: "Terminate instance."
ec2_instance:
state: absent
name: "{{ resource_prefix }}-checkmode-comparison"
vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
- name: "fact ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-checkmode-comparison"
register: confirm_terminatedinstance_fact
- name: "Verify that it was terminated,"
assert:
that:
- '"{{ confirm_terminatedinstance_fact.instances[0].state.name }}" == "terminated"'
always:
- name: "Terminate checkmode instances"
ec2_instance:
state: absent
filters:
"tag:TestId": "{{ ec2_instance_tag_TestId }}"
wait: yes
ignore_errors: yes

@ -0,0 +1,86 @@
- block:
- name: "create t3.nano instance with cpu_options"
ec2_instance:
state: present
name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core"
image_id: "{{ ec2_ami_image }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
instance_type: t3.nano
cpu_options:
core_count: 1
threads_per_core: 1
wait: false
register: instance_creation
- name: "instance with cpu_options created with the right options"
assert:
that:
- instance_creation is success
- instance_creation is changed
- name: "modify cpu_options on existing instance (warning displayed)"
ec2_instance:
state: present
name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core"
image_id: "{{ ec2_ami_image }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
instance_type: t3.nano
cpu_options:
core_count: 1
threads_per_core: 2
wait: false
register: cpu_options_update
ignore_errors: yes
- name: "fact presented ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-test-t3nano-1-threads-per-core"
register: presented_instance_fact
- name: "modify cpu_options has no effect on existing instance"
assert:
that:
- cpu_options_update is success
- cpu_options_update is not changed
- "{{ presented_instance_fact.instances | length }} > 0"
- "'{{ presented_instance_fact.instances.0.state.name }}' in ['running','pending']"
- "{{ presented_instance_fact.instances.0.cpu_options.core_count }} == 1"
- "{{ presented_instance_fact.instances.0.cpu_options.threads_per_core }} == 1"
- name: "create t3.nano instance with cpu_options(check mode)"
ec2_instance:
name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core-checkmode"
image_id: "{{ ec2_ami_image }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
instance_type: t3.nano
cpu_options:
core_count: 1
threads_per_core: 1
check_mode: yes
- name: "fact checkmode ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-test-t3nano-1-threads-per-core-checkmode"
register: checkmode_instance_fact
- name: "Confirm existence of instance id."
assert:
that:
- "{{ checkmode_instance_fact.instances | length }} == 0"
always:
- name: "Terminate cpu_options instances"
ec2_instance:
state: absent
filters:
"tag:TestId": "{{ ec2_instance_tag_TestId }}"
wait: yes
ignore_errors: yes

@ -0,0 +1,57 @@
- block:
- name: "Make instance in a default subnet of the VPC"
ec2_instance:
state: present
name: "{{ resource_prefix }}-test-default-vpc"
image_id: "{{ ec2_ami_image }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
security_group: "default"
instance_type: "{{ ec2_instance_type }}"
wait: false
register: in_default_vpc
- name: "Make instance in a default subnet of the VPC(check mode)"
ec2_instance:
state: present
name: "{{ resource_prefix }}-test-default-vpc-checkmode"
image_id: "{{ ec2_ami_image }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
security_group: "default"
instance_type: "{{ ec2_instance_type }}"
check_mode: yes
- name: "fact presented ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-test-default-vpc"
register: presented_instance_fact
- name: "fact checkmode ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-test-default-vpc-checkmode"
register: checkmode_instance_fact
- name: "Confirm whether the check mode is working normally."
assert:
that:
- "{{ presented_instance_fact.instances | length }} > 0"
- "{{ checkmode_instance_fact.instances | length }} == 0"
- name: "Terminate instances"
ec2_instance:
state: absent
instance_ids: "{{ in_default_vpc.instance_ids }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
always:
- name: "Terminate vpc_tests instances"
ec2_instance:
state: absent
filters:
"tag:TestId": "{{ ec2_instance_tag_TestId }}"
wait: yes
ignore_errors: yes

@ -0,0 +1,41 @@
- block:
- name: "Make EBS optimized instance in the testing subnet of the test VPC"
ec2_instance:
state: present
name: "{{ resource_prefix }}-test-ebs-optimized-instance-in-vpc"
image_id: "{{ ec2_ami_image }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
security_groups: "{{ sg.group_id }}"
vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
ebs_optimized: true
instance_type: t3.nano
wait: false
register: ebs_opt_in_vpc
- name: "Get ec2 instance info"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-test-ebs-optimized-instance-in-vpc"
register: ebs_opt_instance_info
- name: "Assert instance is ebs_optimized"
assert:
that:
- "{{ ebs_opt_instance_info.instances.0.ebs_optimized }}"
- name: "Terminate instances"
ec2_instance:
state: absent
instance_ids: "{{ ebs_opt_in_vpc.instance_ids }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
always:
- name: "Terminate ebs_optimzed instances"
ec2_instance:
state: absent
filters:
"tag:TestId": "{{ ec2_instance_tag_TestId }}"
wait: yes
ignore_errors: yes

@ -0,0 +1,93 @@
- name: "remove Instances"
ec2_instance:
state: absent
filters:
vpc-id: "{{ testing_vpc.vpc.id }}"
wait: yes
ignore_errors: yes
retries: 10
- name: "remove ENIs"
ec2_eni_info:
filters:
vpc-id: "{{ testing_vpc.vpc.id }}"
register: enis
- name: "delete all ENIs"
  ec2_eni:
    state: absent
    eni_id: "{{ item.id }}"
  # register is required here: the until condition below tests this task's
  # result; without it 'removed' refers to a stale/undefined variable and the
  # retry loop never reflects the ENI deletion outcome
  register: removed
  until: removed is not failed
  with_items: "{{ enis.network_interfaces }}"
  ignore_errors: yes
  retries: 10
- name: "remove the security group"
ec2_group:
state: absent
name: "{{ resource_prefix }}-sg"
description: a security group for ansible tests
vpc_id: "{{ testing_vpc.vpc.id }}"
register: removed
until: removed is not failed
ignore_errors: yes
retries: 10
- name: "remove routing rules"
ec2_vpc_route_table:
state: absent
vpc_id: "{{ testing_vpc.vpc.id }}"
tags:
created: "{{ resource_prefix }}-route"
routes:
- dest: 0.0.0.0/0
gateway_id: "{{ igw.gateway_id }}"
subnets:
- "{{ testing_subnet_a.subnet.id }}"
- "{{ testing_subnet_b.subnet.id }}"
register: removed
until: removed is not failed
ignore_errors: yes
retries: 10
- name: "remove internet gateway"
ec2_vpc_igw:
state: absent
vpc_id: "{{ testing_vpc.vpc.id }}"
register: removed
until: removed is not failed
ignore_errors: yes
retries: 10
- name: "remove subnet A"
ec2_vpc_subnet:
state: absent
vpc_id: "{{ testing_vpc.vpc.id }}"
cidr: "{{ subnet_a_cidr }}"
register: removed
until: removed is not failed
ignore_errors: yes
retries: 10
- name: "remove subnet B"
ec2_vpc_subnet:
state: absent
vpc_id: "{{ testing_vpc.vpc.id }}"
cidr: "{{ subnet_b_cidr }}"
register: removed
until: removed is not failed
ignore_errors: yes
retries: 10
- name: "remove the VPC"
ec2_vpc_net:
state: absent
name: "{{ vpc_name }}"
cidr_block: "{{ vpc_cidr }}"
tags:
Name: Ansible Testing VPC
tenancy: default
register: removed
until: removed is not failed
ignore_errors: yes
retries: 10

@ -0,0 +1,79 @@
- run_once: '{{ setup_run_once | default("no") | bool }}'
block:
- name: "fetch AZ availability"
aws_az_info:
register: az_info
- name: "Assert that we have multiple AZs available to us"
assert:
that: az_info.availability_zones | length >= 2
- name: "pick AZs"
set_fact:
subnet_a_az: '{{ az_info.availability_zones[0].zone_name }}'
subnet_b_az: '{{ az_info.availability_zones[1].zone_name }}'
- name: "Create VPC for use in testing"
ec2_vpc_net:
state: present
name: "{{ vpc_name }}"
cidr_block: "{{ vpc_cidr }}"
tags:
Name: Ansible ec2_instance Testing VPC
tenancy: default
register: testing_vpc
- name: "Create internet gateway for use in testing"
ec2_vpc_igw:
state: present
vpc_id: "{{ testing_vpc.vpc.id }}"
register: igw
- name: "Create default subnet in zone A"
ec2_vpc_subnet:
state: present
vpc_id: "{{ testing_vpc.vpc.id }}"
cidr: "{{ subnet_a_cidr }}"
az: "{{ subnet_a_az }}"
resource_tags:
Name: "{{ resource_prefix }}-subnet-a"
register: testing_subnet_a
- name: "Create secondary subnet in zone B"
ec2_vpc_subnet:
state: present
vpc_id: "{{ testing_vpc.vpc.id }}"
cidr: "{{ subnet_b_cidr }}"
az: "{{ subnet_b_az }}"
resource_tags:
Name: "{{ resource_prefix }}-subnet-b"
register: testing_subnet_b
- name: "create routing rules"
ec2_vpc_route_table:
state: present
vpc_id: "{{ testing_vpc.vpc.id }}"
tags:
created: "{{ resource_prefix }}-route"
routes:
- dest: 0.0.0.0/0
gateway_id: "{{ igw.gateway_id }}"
subnets:
- "{{ testing_subnet_a.subnet.id }}"
- "{{ testing_subnet_b.subnet.id }}"
- name: "create a security group with the vpc"
ec2_group:
state: present
name: "{{ resource_prefix }}-sg"
description: a security group for ansible tests
vpc_id: "{{ testing_vpc.vpc.id }}"
rules:
- proto: tcp
from_port: 22
to_port: 22
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
register: sg

@ -0,0 +1,129 @@
- block:
# Make custom ENIs and attach via the `network` parameter
- ec2_eni:
state: present
delete_on_termination: true
subnet_id: "{{ testing_subnet_b.subnet.id }}"
security_groups:
- "{{ sg.group_id }}"
register: eni_a
- ec2_eni:
state: present
delete_on_termination: true
subnet_id: "{{ testing_subnet_b.subnet.id }}"
security_groups:
- "{{ sg.group_id }}"
register: eni_b
- ec2_eni:
state: present
delete_on_termination: true
subnet_id: "{{ testing_subnet_b.subnet.id }}"
security_groups:
- "{{ sg.group_id }}"
register: eni_c
- ec2_key:
name: "{{ resource_prefix }}_test_key"
- name: "Make instance in the testing subnet created in the test VPC"
ec2_instance:
state: present
name: "{{ resource_prefix }}-test-eni-vpc"
key_name: "{{ resource_prefix }}_test_key"
network:
interfaces:
- id: "{{ eni_a.interface.id }}"
image_id: "{{ ec2_ami_image }}"
availability_zone: '{{ subnet_b_az }}'
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
instance_type: "{{ ec2_instance_type }}"
wait: false
register: in_test_vpc
- name: "Gather {{ resource_prefix }}-test-eni-vpc info"
ec2_instance_info:
filters:
"tag:Name": '{{ resource_prefix }}-test-eni-vpc'
register: in_test_vpc_instance
- assert:
that:
- 'in_test_vpc_instance.instances.0.key_name == "{{ resource_prefix }}_test_key"'
- '(in_test_vpc_instance.instances.0.network_interfaces | length) == 1'
- name: "Add a second interface"
ec2_instance:
state: present
name: "{{ resource_prefix }}-test-eni-vpc"
network:
interfaces:
- id: "{{ eni_a.interface.id }}"
- id: "{{ eni_b.interface.id }}"
image_id: "{{ ec2_ami_image }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
instance_type: "{{ ec2_instance_type }}"
wait: false
register: add_interface
until: add_interface is not failed
ignore_errors: yes
retries: 10
- name: "Make instance in the testing subnet created in the test VPC(check mode)"
ec2_instance:
state: present
name: "{{ resource_prefix }}-test-eni-vpc-checkmode"
key_name: "{{ resource_prefix }}_test_key"
network:
interfaces:
- id: "{{ eni_c.interface.id }}"
image_id: "{{ ec2_ami_image }}"
availability_zone: '{{ subnet_b_az }}'
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
instance_type: "{{ ec2_instance_type }}"
check_mode: yes
- name: "fact presented ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-test-eni-vpc"
register: presented_instance_fact
- name: "fact checkmode ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-test-eni-vpc-checkmode"
register: checkmode_instance_fact
- name: "Confirm existence of instance id."
assert:
that:
- "{{ presented_instance_fact.instances | length }} > 0"
- "{{ checkmode_instance_fact.instances | length }} == 0"
always:
- name: "Terminate external_resource_attach instances"
ec2_instance:
state: absent
filters:
"tag:TestId": "{{ ec2_instance_tag_TestId }}"
wait: yes
ignore_errors: yes
- ec2_key:
state: absent
name: "{{ resource_prefix }}_test_key"
ignore_errors: yes
- ec2_eni:
state: absent
eni_id: '{{ item.interface.id }}'
ignore_errors: yes
with_items:
- '{{ eni_a }}'
- '{{ eni_b }}'
- '{{ eni_c }}'

@ -0,0 +1,15 @@
- run_once: '{{ setup_run_once | default("no") | bool }}'
block:
- name: "Find AMI to use"
run_once: yes
ec2_ami_info:
owners: 'amazon'
filters:
name: '{{ ec2_ami_name }}'
register: ec2_amis
- name: "Set fact with latest AMI"
run_once: yes
vars:
latest_ami: '{{ ec2_amis.images | sort(attribute="creation_date") | last }}'
set_fact:
ec2_ami_image: '{{ latest_ami.image_id }}'

@ -0,0 +1,127 @@
- block:
- name: "Create IAM role for test"
iam_role:
state: present
name: "ansible-test-sts-{{ resource_prefix }}-test-policy"
assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
create_instance_profile: yes
managed_policy:
- AmazonEC2ContainerServiceRole
register: iam_role
- name: "Create second IAM role for test"
iam_role:
state: present
name: "ansible-test-sts-{{ resource_prefix }}-test-policy-2"
assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
create_instance_profile: yes
managed_policy:
- AmazonEC2ContainerServiceRole
register: iam_role_2
- name: "wait 10 seconds for roles to become available"
wait_for:
timeout: 10
delegate_to: localhost
- name: "Make instance with an instance_role"
ec2_instance:
state: present
name: "{{ resource_prefix }}-test-instance-role"
image_id: "{{ ec2_ami_image }}"
security_groups: "{{ sg.group_id }}"
instance_type: "{{ ec2_instance_type }}"
instance_role: "ansible-test-sts-{{ resource_prefix }}-test-policy"
vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
register: instance_with_role
- assert:
that:
- 'instance_with_role.instances[0].iam_instance_profile.arn == iam_role.arn.replace(":role/", ":instance-profile/")'
- name: "Make instance with an instance_role(check mode)"
ec2_instance:
state: present
name: "{{ resource_prefix }}-test-instance-role-checkmode"
image_id: "{{ ec2_ami_image }}"
security_groups: "{{ sg.group_id }}"
instance_type: "{{ ec2_instance_type }}"
instance_role: "{{ iam_role.arn.replace(':role/', ':instance-profile/') }}"
vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
check_mode: yes
- name: "fact presented ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-test-instance-role"
register: presented_instance_fact
- name: "fact checkmode ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-test-instance-role-checkmode"
register: checkmode_instance_fact
- name: "Confirm whether the check mode is working normally."
assert:
that:
- "{{ presented_instance_fact.instances | length }} > 0"
- "{{ checkmode_instance_fact.instances | length }} == 0"
- name: "Update instance with new instance_role"
ec2_instance:
state: present
name: "{{ resource_prefix }}-test-instance-role"
image_id: "{{ ec2_ami_image }}"
security_groups: "{{ sg.group_id }}"
instance_type: "{{ ec2_instance_type }}"
instance_role: "{{ iam_role_2.arn.replace(':role/', ':instance-profile/') }}"
vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
register: instance_with_updated_role
- name: "wait 10 seconds for role update to complete"
wait_for:
timeout: 10
delegate_to: localhost
- name: "fact checkmode ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-test-instance-role"
register: updates_instance_info
- assert:
that:
- 'updates_instance_info.instances[0].iam_instance_profile.arn == iam_role_2.arn.replace(":role/", ":instance-profile/")'
- 'updates_instance_info.instances[0].instance_id == instance_with_role.instances[0].instance_id'
always:
- name: "Terminate iam_instance_role instances"
ec2_instance:
state: absent
filters:
"tag:TestId": "{{ ec2_instance_tag_TestId }}"
wait: yes
ignore_errors: yes
- name: "Delete IAM role for test"
iam_role:
state: absent
name: "{{ item }}"
assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
create_instance_profile: yes
managed_policy:
- AmazonEC2ContainerServiceRole
loop:
- "ansible-test-sts-{{ resource_prefix }}-test-policy"
- "ansible-test-sts-{{ resource_prefix }}-test-policy-2"
register: removed
until: removed is not failed
ignore_errors: yes
retries: 10

@ -0,0 +1,68 @@
# Tests for ec2_instance with wait=false: the module must return immediately
# with instance IDs (but no full 'instances' facts), in both real and check
# mode, and check mode must not create anything.
- block:
    - name: "New instance and don't wait for it to complete"
      ec2_instance:
        state: present
        name: "{{ resource_prefix }}-test-no-wait"
        image_id: "{{ ec2_ami_image }}"
        vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
        tags:
          TestId: "{{ ec2_instance_tag_TestId }}"
        wait: false
        instance_type: "{{ ec2_instance_type }}"
      register: in_test_vpc
    - assert:
        that:
          - in_test_vpc is not failed
          - in_test_vpc is changed
          # With wait=false only the IDs are returned, not full instance facts.
          - in_test_vpc.instances is not defined
          - in_test_vpc.instance_ids is defined
          - in_test_vpc.instance_ids | length > 0
    - name: "New instance and don't wait for it to complete ( check mode )"
      ec2_instance:
        state: present
        name: "{{ resource_prefix }}-test-no-wait-checkmode"
        image_id: "{{ ec2_ami_image }}"
        vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
        tags:
          TestId: "{{ ec2_instance_tag_TestId }}"
        wait: false
        instance_type: "{{ ec2_instance_type }}"
      check_mode: yes
    # Creation is async (wait=false), so retry until the instance is visible.
    - name: "Facts for ec2 test instance"
      ec2_instance_info:
        filters:
          "tag:Name": "{{ resource_prefix }}-test-no-wait"
      register: real_instance_fact
      until: real_instance_fact.instances | length > 0
      retries: 10
    - name: "Facts for checkmode ec2 test instance"
      ec2_instance_info:
        filters:
          "tag:Name": "{{ resource_prefix }}-test-no-wait-checkmode"
      register: checkmode_instance_fact
    # Plain expressions in 'that' (no "{{ }}" delimiters) assert the same
    # conditions without triggering Ansible's "conditional statements should
    # not include jinja2 templating delimiters" warning.
    - name: "Confirm whether the check mode is working normally."
      assert:
        that:
          - real_instance_fact.instances | length > 0
          - checkmode_instance_fact.instances | length == 0
    - name: "Terminate instances"
      ec2_instance:
        state: absent
        instance_ids: "{{ in_test_vpc.instance_ids }}"
        tags:
          TestId: "{{ ec2_instance_tag_TestId }}"
  always:
    # Safety net: remove anything tagged by this test run even if a task failed.
    - name: "Terminate instance_no_wait instances"
      ec2_instance:
        state: absent
        filters:
          "tag:TestId": "{{ ec2_instance_tag_TestId }}"
        wait: yes
      ignore_errors: yes

@ -0,0 +1,48 @@
---
# Entry point for the ec2_instance test role: each inventory host runs the
# task file named after it ('{{ inventory_hostname }}.yml'), which is how the
# individual test scenarios run in parallel.
#
# Beware: most of our tests here are run in parallel.
# To add new tests you'll need to add a new host to the inventory and a matching
# '{{ inventory_hostname }}'.yml file in roles/ec2_instance/tasks/
#
# Please make sure you tag your instances with
#   tags:
#     "tag:TestId": "{{ ec2_instance_tag_TestId }}"
# And delete them based off that tag at the end of your specific set of tests
#
# ###############################################################################
#
# A Note about ec2 environment variable name preference:
#  - EC2_URL -> AWS_URL
#  - EC2_ACCESS_KEY -> AWS_ACCESS_KEY_ID -> AWS_ACCESS_KEY
#  - EC2_SECRET_KEY -> AWS_SECRET_ACCESS_KEY -> AWS_SECRET_KEY
#  - EC2_REGION -> AWS_REGION
#
- name: "Wrap up all tests and setup AWS credentials"
  # Inject credentials/region into every AWS module invoked below so the
  # per-scenario task files don't have to repeat them.
  module_defaults:
    group/aws:
      aws_access_key: "{{ aws_access_key }}"
      aws_secret_key: "{{ aws_secret_key }}"
      security_token: "{{ security_token | default(omit) }}"
      region: "{{ aws_region }}"
  block:
    # Timestamps bracket each host's run to help diagnose slow scenarios.
    - debug:
        msg: "{{ inventory_hostname }} start: {{ lookup('pipe','date') }}"
    - include_tasks: '{{ inventory_hostname }}.yml'
    - debug:
        msg: "{{ inventory_hostname }} finish: {{ lookup('pipe','date') }}"
  always:
    # Mark this host finished so the play can tell when every host is done.
    - set_fact:
        _role_complete: True
    # map(extract) yields an undefined value for hosts that haven't set
    # _role_complete yet; select("defined") drops those, so length == number
    # of hosts that completed.
    - vars:
        completed_hosts: '{{ ansible_play_hosts_all | map("extract", hostvars, "_role_complete") | list | select("defined") | list | length }}'
        hosts_in_play: '{{ ansible_play_hosts_all | length }}'
      debug:
        msg: "{{ completed_hosts }} of {{ hosts_in_play }} complete"
    # Shared AWS resources are torn down only once the last host finishes,
    # and only when cleanup is enabled.
    - include_tasks: env_cleanup.yml
      vars:
        completed_hosts: '{{ ansible_play_hosts_all | map("extract", hostvars, "_role_complete") | list | select("defined") | list | length }}'
        hosts_in_play: '{{ ansible_play_hosts_all | length }}'
      when:
        - aws_cleanup
        - completed_hosts == hosts_in_play

@ -0,0 +1,158 @@
- block:
- name: "Make instance in the testing subnet created in the test VPC"
ec2_instance:
state: present
name: "{{ resource_prefix }}-test-basic-vpc-create"
image_id: "{{ ec2_ami_image }}"
user_data: |
#cloud-config
package_upgrade: true
package_update: true
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
Something: else
security_groups: "{{ sg.group_id }}"
network:
source_dest_check: false
vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
instance_type: "{{ ec2_instance_type }}"
wait: false
register: in_test_vpc
- name: "Make instance in the testing subnet created in the test VPC(check mode)"
ec2_instance:
state: present
name: "{{ resource_prefix }}-test-basic-vpc-create-checkmode"
image_id: "{{ ec2_ami_image }}"
user_data: |
#cloud-config
package_upgrade: true
package_update: true
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
Something: else
security_groups: "{{ sg.group_id }}"
network:
source_dest_check: false
vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
instance_type: "{{ ec2_instance_type }}"
check_mode: yes
- name: "Try to re-make the instance, hopefully this shows changed=False"
ec2_instance:
state: present
name: "{{ resource_prefix }}-test-basic-vpc-create"
image_id: "{{ ec2_ami_image }}"
user_data: |
#cloud-config
package_upgrade: true
package_update: true
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
Something: else
security_groups: "{{ sg.group_id }}"
vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
instance_type: "{{ ec2_instance_type }}"
register: remake_in_test_vpc
- name: "Remaking the same instance resulted in no changes"
assert:
that: not remake_in_test_vpc.changed
- name: "check that instance IDs match anyway"
assert:
that: 'remake_in_test_vpc.instance_ids[0] == in_test_vpc.instance_ids[0]'
- name: "check that source_dest_check was set to false"
assert:
that: 'not remake_in_test_vpc.instances[0].source_dest_check'
- name: "fact presented ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-test-basic-vpc-create"
register: presented_instance_fact
- name: "fact checkmode ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-test-basic-vpc-create-checkmode"
register: checkmode_instance_fact
- name: "Confirm whether the check mode is working normally."
assert:
that:
- "{{ presented_instance_fact.instances | length }} > 0"
- "{{ checkmode_instance_fact.instances | length }} == 0"
- name: "Alter it by adding tags"
ec2_instance:
state: present
name: "{{ resource_prefix }}-test-basic-vpc-create"
image_id: "{{ ec2_ami_image }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
Another: thing
security_groups: "{{ sg.group_id }}"
vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
instance_type: "{{ ec2_instance_type }}"
register: add_another_tag
- ec2_instance_info:
instance_ids: "{{ add_another_tag.instance_ids }}"
register: check_tags
- name: "Remaking the same instance resulted in no changes"
assert:
that:
- check_tags.instances[0].tags.Another == 'thing'
- check_tags.instances[0].tags.Something == 'else'
- name: "Purge a tag"
ec2_instance:
state: present
name: "{{ resource_prefix }}-test-basic-vpc-create"
image_id: "{{ ec2_ami_image }}"
purge_tags: true
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
Another: thing
security_groups: "{{ sg.group_id }}"
vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
instance_type: "{{ ec2_instance_type }}"
- ec2_instance_info:
instance_ids: "{{ add_another_tag.instance_ids }}"
register: check_tags
- name: "Remaking the same instance resulted in no changes"
assert:
that:
- "'Something' not in check_tags.instances[0].tags"
- name: "check that subnet-default public IP rule was followed"
assert:
that:
- check_tags.instances[0].public_dns_name == ""
- check_tags.instances[0].private_ip_address.startswith(subnet_b_startswith)
- check_tags.instances[0].subnet_id == testing_subnet_b.subnet.id
- name: "check that tags were applied"
assert:
that:
- check_tags.instances[0].tags.Name.startswith(resource_prefix)
- "'{{ check_tags.instances[0].state.name }}' in ['pending', 'running']"
- name: "Terminate instance"
ec2_instance:
state: absent
filters:
"tag:TestId": "{{ ec2_instance_tag_TestId }}"
wait: false
register: result
- assert:
that: result.changed
always:
- name: "Terminate tags_and_vpc_settings instances"
ec2_instance:
state: absent
filters:
"tag:TestId": "{{ ec2_instance_tag_TestId }}"
wait: yes
ignore_errors: yes

@ -0,0 +1,101 @@
- block:
- name: "Make termination-protected instance in the testing subnet created in the test VPC"
ec2_instance:
state: running
name: "{{ resource_prefix }}-test-protected-instance-in-vpc"
image_id: "{{ ec2_ami_image }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
security_groups: "{{ sg.group_id }}"
vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
termination_protection: true
instance_type: "{{ ec2_instance_type }}"
wait: yes
register: in_test_vpc
- name: "Make termination-protected instance in the testing subnet created in the test VPC(check mode)"
ec2_instance:
state: running
name: "{{ resource_prefix }}-test-protected-instance-in-vpc-checkmode"
image_id: "{{ ec2_ami_image }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
security_groups: "{{ sg.group_id }}"
vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
termination_protection: true
instance_type: "{{ ec2_instance_type }}"
check_mode: yes
- name: "fact presented ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-test-protected-instance-in-vpc"
"instance-state-name": "running"
register: presented_instance_fact
- name: "fact checkmode ec2 instance"
ec2_instance_info:
filters:
"tag:Name": "{{ resource_prefix }}-test-protected-instance-in-vpc-checkmode"
register: checkmode_instance_fact
- name: "Confirm whether the check mode is working normally."
assert:
that:
- "{{ presented_instance_fact.instances | length }} > 0"
- "'{{ presented_instance_fact.instances.0.state.name }}' in ['running', 'pending']"
- "{{ checkmode_instance_fact.instances | length }} == 0"
- name: "Try to terminate the instance"
ec2_instance:
state: absent
name: "{{ resource_prefix }}-test-protected-instance-in-vpc"
image_id: "{{ ec2_ami_image }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
security_groups: "{{ sg.group_id }}"
vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
termination_protection: true
instance_type: "{{ ec2_instance_type }}"
register: bad_terminate
ignore_errors: yes
- name: "Cannot terminate protected instance"
assert:
that:
- bad_terminate is failed
- name: "Alter termination protection setting"
ec2_instance:
state: present
name: "{{ resource_prefix }}-test-protected-instance-in-vpc"
image_id: "{{ ec2_ami_image }}"
vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
termination_protection: false
instance_type: "{{ ec2_instance_type }}"
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
- name: "Try to terminate the instance again (should work)"
ec2_instance:
state: absent
name: "{{ resource_prefix }}-test-protected-instance-in-vpc"
image_id: "{{ ec2_ami_image }}"
vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
instance_type: "{{ ec2_instance_type }}"
wait: false
tags:
TestId: "{{ ec2_instance_tag_TestId }}"
register: terminate_results
- assert:
that: terminate_results is not failed
always:
- name: "Terminate termination_protection instances"
ec2_instance:
state: absent
filters:
"tag:TestId": "{{ ec2_instance_tag_TestId }}"
wait: yes
ignore_errors: yes

@ -0,0 +1,29 @@
# Runs under a virtualenv pinned to botocore<1.10.16 (see
# version_fail_wrapper.yml): requesting cpu_options with the old botocore must
# fail with the explicit version message rather than a traceback.
- block:
    - name: "create t3.nano with cpu options (fails gracefully)"
      ec2_instance:
        state: present
        name: "ansible-test-{{ resource_prefix | regex_search('([0-9]+)$') }}-ec2"
        image_id: "{{ ec2_ami_image }}"
        instance_type: "t3.nano"
        cpu_options:
          core_count: 1
          threads_per_core: 1
        tags:
          TestId: "{{ ec2_instance_tag_TestId }}"
      register: ec2_instance_cpu_options_creation
      ignore_errors: yes
    - name: "check that graceful error message is returned when creation with cpu_options and old botocore"
      assert:
        that:
          - ec2_instance_cpu_options_creation.failed
          - 'ec2_instance_cpu_options_creation.msg == "cpu_options is only supported with botocore >= 1.10.16"'
  always:
    # Clean up anything tagged by this test run, even on failure.
    - name: "Terminate version_fail instances"
      ec2_instance:
        state: absent
        filters:
          "tag:TestId": "{{ ec2_instance_tag_TestId }}"
        wait: yes
      ignore_errors: yes

@ -0,0 +1,30 @@
---
# Builds a throwaway virtualenv with an old botocore (<1.10.16), runs the
# version_fail tests under that interpreter, then removes the virtualenv.
- include_role:
    name: 'setup_remote_tmp_dir'
- set_fact:
    virtualenv: "{{ remote_tmp_dir }}/virtualenv"
    virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"
- set_fact:
    virtualenv_interpreter: "{{ virtualenv }}/bin/python"
- pip:
    name: "virtualenv"
# coverage is installed alongside the pinned botocore/boto3 — presumably so
# ansible-test coverage collection works in the isolated interpreter; confirm.
- pip:
    name:
      - 'botocore<1.10.16'
      - boto3
      - coverage
    virtualenv: "{{ virtualenv }}"
    virtualenv_command: "{{ virtualenv_command }}"
    virtualenv_site_packages: no
# Run the actual tests with the virtualenv's python.
- include_tasks: version_fail.yml
  vars:
    ansible_python_interpreter: "{{ virtualenv_interpreter }}"
- file:
    state: absent
    path: "{{ virtualenv }}"

@ -0,0 +1,12 @@
#!/usr/bin/env bash
#
# Beware: most of our tests here are run in parallel.
# To add new tests you'll need to add a new host to the inventory and a matching
# '{{ inventory_hostname }}'.yml file in roles/ec2_instance/tasks/
# -e: fail fast; -u: error on unset variables; -x: echo commands for CI logs.
set -eux
# Make the sibling roles next to this target resolvable by ansible-playbook.
export ANSIBLE_ROLES_PATH=../
# Forward any extra CLI arguments (e.g. -vvv) to ansible-playbook.
ansible-playbook main.yml -i inventory "$@"

@ -0,0 +1,2 @@
cloud/hcloud
shippable/hcloud/incidental

@ -0,0 +1,5 @@
# Copyright: (c) 2019, Hetzner Cloud GmbH <info@hetzner-cloud.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
---
# Defaults for the hcloud_server integration tests.
hcloud_prefix: "tests"
# Name of the single server that every test task in this target operates on.
hcloud_server_name: "{{hcloud_prefix}}-integration"

@ -0,0 +1,565 @@
# Copyright: (c) 2019, Hetzner Cloud GmbH <info@hetzner-cloud.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
---
- name: setup
hcloud_server:
name: "{{ hcloud_server_name }}"
state: absent
register: result
- name: verify setup
assert:
that:
- result is success
- name: test missing required parameters on create server
hcloud_server:
name: "{{ hcloud_server_name }}"
register: result
ignore_errors: yes
- name: verify fail test missing required parameters on create server
assert:
that:
- result is failed
- 'result.msg == "missing required arguments: server_type, image"'
- name: test create server with check mode
  hcloud_server:
    name: "{{ hcloud_server_name }}"
    server_type: cx11
    image: ubuntu-18.04
    state: present
  register: result
  check_mode: yes
# Renamed from "test create server server" (duplicated word) to match the
# test/verify naming pairs used throughout this file.
- name: verify create server with check mode
  assert:
    that:
      - result is changed
- name: test create server
hcloud_server:
name: "{{ hcloud_server_name}}"
server_type: cx11
image: ubuntu-18.04
state: started
register: main_server
- name: verify create server
assert:
that:
- main_server is changed
- main_server.hcloud_server.name == "{{ hcloud_server_name }}"
- main_server.hcloud_server.server_type == "cx11"
- main_server.hcloud_server.status == "running"
- main_server.root_password != ""
- name: test create server idempotence
hcloud_server:
name: "{{ hcloud_server_name }}"
state: started
register: result
- name: verify create server idempotence
assert:
that:
- result is not changed
- name: test stop server with check mode
hcloud_server:
name: "{{ hcloud_server_name }}"
state: stopped
register: result
check_mode: yes
- name: verify stop server with check mode
assert:
that:
- result is changed
- result.hcloud_server.status == "running"
- name: test stop server
hcloud_server:
name: "{{ hcloud_server_name }}"
state: stopped
register: result
- name: verify stop server
assert:
that:
- result is changed
- result.hcloud_server.status == "off"
- name: test start server with check mode
hcloud_server:
name: "{{ hcloud_server_name }}"
state: started
register: result
check_mode: true
- name: verify start server with check mode
assert:
that:
- result is changed
- name: test start server
hcloud_server:
name: "{{ hcloud_server_name }}"
state: started
register: result
- name: verify start server
assert:
that:
- result is changed
- result.hcloud_server.status == "running"
- name: test start server idempotence
hcloud_server:
name: "{{ hcloud_server_name }}"
state: started
register: result
- name: verify start server idempotence
assert:
that:
- result is not changed
- result.hcloud_server.status == "running"
- name: test stop server by its id
hcloud_server:
id: "{{ main_server.hcloud_server.id }}"
state: stopped
register: result
- name: verify stop server by its id
assert:
that:
- result is changed
- result.hcloud_server.status == "off"
- name: test resize server running without force
hcloud_server:
name: "{{ hcloud_server_name }}"
server_type: "cx21"
state: present
register: result
check_mode: true
- name: verify test resize server running without force
assert:
that:
- result is changed
- result.hcloud_server.server_type == "cx11"
- name: test resize server with check mode
hcloud_server:
name: "{{ hcloud_server_name }}"
server_type: "cx21"
state: stopped
register: result
check_mode: true
- name: verify resize server with check mode
assert:
that:
- result is changed
- name: test resize server without disk
hcloud_server:
name: "{{ hcloud_server_name }}"
server_type: "cx21"
state: stopped
register: result
- name: verify resize server without disk
assert:
that:
- result is changed
- result.hcloud_server.server_type == "cx21"
- name: test resize server idempotence
hcloud_server:
name: "{{ hcloud_server_name }}"
server_type: "cx21"
state: stopped
register: result
- name: verify resize server idempotence
assert:
that:
- result is not changed
- name: test resize server to smaller plan
hcloud_server:
name: "{{ hcloud_server_name }}"
server_type: "cx11"
state: stopped
register: result
- name: verify resize server to smaller plan
assert:
that:
- result is changed
- result.hcloud_server.server_type == "cx11"
- name: test resize server with disk
hcloud_server:
name: "{{ hcloud_server_name }}"
server_type: "cx21"
upgrade_disk: true
state: stopped
register: result
- name: verify resize server with disk
assert:
that:
- result is changed
- result.hcloud_server.server_type == "cx21"
- name: test enable backups with check mode
hcloud_server:
name: "{{ hcloud_server_name }}"
backups: true
state: stopped
register: result
check_mode: true
- name: verify enable backups with check mode
assert:
that:
- result is changed
- name: test enable backups
hcloud_server:
name: "{{ hcloud_server_name }}"
backups: true
state: stopped
register: result
- name: verify enable backups
assert:
that:
- result is changed
- result.hcloud_server.backup_window != ""
- name: test enable backups idempotence
hcloud_server:
name: "{{ hcloud_server_name }}"
backups: true
state: stopped
register: result
- name: verify enable backups idempotence
assert:
that:
- result is not changed
- result.hcloud_server.backup_window != ""
- name: test rebuild server
hcloud_server:
name: "{{ hcloud_server_name }}"
image: ubuntu-18.04
state: rebuild
register: result_after_test
- name: verify rebuild server
assert:
that:
- result_after_test is changed
- result.hcloud_server.id == result_after_test.hcloud_server.id
- name: test rebuild server with check mode
hcloud_server:
name: "{{ hcloud_server_name }}"
image: ubuntu-18.04
state: rebuild
register: result_after_test
check_mode: true
- name: verify rebuild server with check mode
assert:
that:
- result_after_test is changed
# "booth" -> "both": typo fixed in both task names below.
- name: test update server protection both protection arguments are required
  hcloud_server:
    name: "{{ hcloud_server_name }}"
    delete_protection: true
    state: present
  register: result_after_test
  ignore_errors: true
- name: verify update server protection both protection arguments are required
  assert:
    that:
      - result_after_test is failed
      - 'result_after_test.msg == "parameters are required together: delete_protection, rebuild_protection"'
- name: test update server protection fails if they are not the same
hcloud_server:
name: "{{ hcloud_server_name }}"
delete_protection: true
rebuild_protection: false
state: present
register: result_after_test
ignore_errors: true
- name: verify update server protection fails if they are not the same
assert:
that:
- result_after_test is failed
- name: test update server protection
hcloud_server:
name: "{{ hcloud_server_name }}"
delete_protection: true
rebuild_protection: true
state: present
register: result_after_test
ignore_errors: true
- name: verify update server protection
assert:
that:
- result_after_test is changed
- result_after_test.hcloud_server.delete_protection is sameas true
- result_after_test.hcloud_server.rebuild_protection is sameas true
- name: test server without protection set to be idempotent
hcloud_server:
name: "{{hcloud_server_name}}"
register: result_after_test
- name: verify test server without protection set to be idempotent
assert:
that:
- result_after_test is not changed
- result_after_test.hcloud_server.delete_protection is sameas true
- result_after_test.hcloud_server.rebuild_protection is sameas true
- name: test delete server fails if it is protected
hcloud_server:
name: "{{hcloud_server_name}}"
state: absent
ignore_errors: yes
register: result
- name: verify delete server fails if it is protected
assert:
that:
- result is failed
- 'result.msg == "server deletion is protected"'
- name: test rebuild server fails if it is protected
hcloud_server:
name: "{{hcloud_server_name}}"
image: ubuntu-18.04
state: rebuild
ignore_errors: yes
register: result
- name: verify rebuild server fails if it is protected
assert:
that:
- result is failed
- 'result.msg == "server rebuild is protected"'
- name: test remove server protection
hcloud_server:
name: "{{ hcloud_server_name }}"
delete_protection: false
rebuild_protection: false
state: present
register: result_after_test
ignore_errors: true
- name: verify remove server protection
assert:
that:
- result_after_test is changed
- result_after_test.hcloud_server.delete_protection is sameas false
- result_after_test.hcloud_server.rebuild_protection is sameas false
- name: absent server
hcloud_server:
name: "{{ hcloud_server_name }}"
state: absent
register: result
- name: verify absent server
assert:
that:
- result is success
- name: test create server with ssh key
hcloud_server:
name: "{{ hcloud_server_name}}"
server_type: cx11
image: "ubuntu-18.04"
ssh_keys:
- ci@ansible.hetzner.cloud
state: started
register: main_server
- name: verify create server with ssh key
assert:
that:
- main_server is changed
- main_server.hcloud_server.name == "{{ hcloud_server_name }}"
- main_server.hcloud_server.server_type == "cx11"
- main_server.hcloud_server.status == "running"
- main_server.root_password != ""
- name: absent server
hcloud_server:
name: "{{ hcloud_server_name }}"
state: absent
register: result
- name: verify absent server
assert:
that:
- result is success
- name: test create server with rescue_mode
hcloud_server:
name: "{{ hcloud_server_name}}"
server_type: cx11
image: "ubuntu-18.04"
ssh_keys:
- ci@ansible.hetzner.cloud
rescue_mode: "linux64"
state: started
register: main_server
- name: verify create server with rescue_mode
assert:
that:
- main_server is changed
- main_server.hcloud_server.name == "{{ hcloud_server_name }}"
- main_server.hcloud_server.server_type == "cx11"
- main_server.hcloud_server.status == "running"
- main_server.root_password != ""
- main_server.hcloud_server.rescue_enabled is sameas true
- name: absent server
hcloud_server:
name: "{{ hcloud_server_name }}"
state: absent
register: result
- name: verify absent server
assert:
that:
- result is success
- name: setup server
hcloud_server:
name: "{{ hcloud_server_name}}"
server_type: cx11
image: ubuntu-18.04
state: started
register: main_server
- name: verify setup server
assert:
that:
- main_server is changed
- main_server.hcloud_server.name == "{{ hcloud_server_name }}"
- main_server.hcloud_server.server_type == "cx11"
- main_server.hcloud_server.status == "running"
- main_server.root_password != ""
- name: test activate rescue mode with check_mode
hcloud_server:
name: "{{ hcloud_server_name }}"
rescue_mode: "linux64"
ssh_keys:
- ci@ansible.hetzner.cloud
state: present
register: main_server
check_mode: true
- name: verify activate rescue mode
assert:
that:
- main_server is changed
- name: test activate rescue mode
hcloud_server:
name: "{{ hcloud_server_name }}"
rescue_mode: "linux64"
ssh_keys:
- ci@ansible.hetzner.cloud
state: present
register: main_server
- name: verify activate rescue mode
assert:
that:
- main_server is changed
- main_server.hcloud_server.rescue_enabled is sameas true
- name: test disable rescue mode
  hcloud_server:
    name: "{{ hcloud_server_name }}"
    ssh_keys:
      - ci@ansible.hetzner.cloud
    state: present
  register: main_server
# Renamed from "verify activate rescue mode" (copy/paste leftover) — this
# assert checks that rescue mode was *disabled*.
- name: verify disable rescue mode
  assert:
    that:
      - main_server is changed
      - main_server.hcloud_server.rescue_enabled is sameas false
- name: test activate rescue mode without ssh keys
hcloud_server:
name: "{{ hcloud_server_name }}"
rescue_mode: "linux64"
state: present
register: main_server
- name: verify activate rescue mode without ssh keys
assert:
that:
- main_server is changed
- main_server.hcloud_server.rescue_enabled is sameas true
- name: cleanup
hcloud_server:
name: "{{ hcloud_server_name }}"
state: absent
register: result
- name: verify cleanup
assert:
that:
- result is success
- name: test create server with labels
hcloud_server:
name: "{{ hcloud_server_name}}"
server_type: cx11
image: "ubuntu-18.04"
ssh_keys:
- ci@ansible.hetzner.cloud
labels:
key: value
mylabel: "val123"
state: started
register: main_server
- name: verify create server with labels
assert:
that:
- main_server is changed
- main_server.hcloud_server.labels.key == "value"
- main_server.hcloud_server.labels.mylabel == "val123"
- name: test update server with labels
hcloud_server:
name: "{{ hcloud_server_name}}"
server_type: cx11
image: "ubuntu-18.04"
ssh_keys:
- ci@ansible.hetzner.cloud
labels:
key: other
mylabel: "val123"
state: started
register: main_server
- name: verify update server with labels
assert:
that:
- main_server is changed
- main_server.hcloud_server.labels.key == "other"
- main_server.hcloud_server.labels.mylabel == "val123"
- name: test update server with labels in other order
hcloud_server:
name: "{{ hcloud_server_name}}"
server_type: cx11
image: "ubuntu-18.04"
ssh_keys:
- ci@ansible.hetzner.cloud
labels:
mylabel: "val123"
key: other
state: started
register: main_server
- name: verify update server with labels in other order
assert:
that:
- main_server is not changed
- name: cleanup with labels
hcloud_server:
name: "{{ hcloud_server_name }}"
state: absent
register: result
- name: verify cleanup
assert:
that:
- result is success

@ -0,0 +1,11 @@
---
- hosts: 127.0.0.1
connection: local
gather_facts: no
vars:
template_name: "../templates/{{ template | default('inventory.yml') }}"
tasks:
- name: write inventory config file
copy:
dest: ../test.aws_ec2.yml
content: "{{ lookup('template', template_name) }}"

@ -0,0 +1,9 @@
---
- hosts: 127.0.0.1
connection: local
gather_facts: no
tasks:
- name: write inventory config file
copy:
dest: ../test.aws_ec2.yml
content: ""

@ -0,0 +1,64 @@
---
- hosts: 127.0.0.1
connection: local
gather_facts: no
environment: "{{ ansible_test.environment }}"
tasks:
- block:
# Create VPC, subnet, security group, and find image_id to create instance
- include_tasks: setup.yml
- name: assert group was populated with inventory but is empty
assert:
that:
- "'aws_ec2' in groups"
- "not groups.aws_ec2"
# Create new host, add it to inventory and then terminate it without updating the cache
- name: set connection information for all tasks
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: '{{ aws_access_key }}'
aws_secret_key: '{{ aws_secret_key }}'
security_token: '{{ security_token }}'
region: '{{ aws_region }}'
no_log: yes
- name: create a new host
ec2:
image: '{{ image_id }}'
exact_count: 1
count_tag:
Name: '{{ resource_prefix }}'
instance_tags:
Name: '{{ resource_prefix }}'
instance_type: t2.micro
wait: yes
group_id: '{{ sg_id }}'
vpc_subnet_id: '{{ subnet_id }}'
<<: *aws_connection_info
register: setup_instance
- meta: refresh_inventory
always:
- name: remove setup ec2 instance
ec2:
instance_type: t2.micro
instance_ids: '{{ setup_instance.instance_ids }}'
state: absent
wait: yes
instance_tags:
Name: '{{ resource_prefix }}'
group_id: '{{ sg_id }}'
vpc_subnet_id: '{{ subnet_id }}'
<<: *aws_connection_info
ignore_errors: yes
when: setup_instance is defined
- include_tasks: tear_down.yml

@ -0,0 +1,62 @@
# Shared setup for the aws_ec2 inventory-plugin tests: records the AWS
# connection info as a reusable anchor, resolves the AMI to launch, and
# creates the VPC / subnet / security group the test instance will use.
- name: set connection information for all tasks
  set_fact:
    aws_connection_info: &aws_connection_info
      aws_access_key: '{{ aws_access_key }}'
      aws_secret_key: '{{ aws_secret_key }}'
      security_token: '{{ security_token }}'
      region: '{{ aws_region }}'
  no_log: yes
- name: get image ID to create an instance
  ec2_ami_info:
    filters:
      architecture: x86_64
      # Owner pinned so the AMI lookup is deterministic — presumably the
      # Fedora project's AWS account (name filter below); confirm if updating.
      owner-id: '125523088429'
      virtualization-type: hvm
      root-device-type: ebs
      name: 'Fedora-Atomic-27*'
    <<: *aws_connection_info
  register: fedora_images
- set_fact:
    image_id: '{{ fedora_images.images.0.image_id }}'
- name: create a VPC to work in
  ec2_vpc_net:
    cidr_block: 10.10.0.0/24
    state: present
    name: '{{ resource_prefix }}_setup'
    resource_tags:
      Name: '{{ resource_prefix }}_setup'
    <<: *aws_connection_info
  register: setup_vpc
- set_fact:
    vpc_id: '{{ setup_vpc.vpc.id }}'
- name: create a subnet to use for creating an ec2 instance
  ec2_vpc_subnet:
    az: '{{ aws_region }}a'
    # NOTE(review): 'tags' is given a plain string here while the tag dict is
    # supplied via 'resource_tags' below — looks like a leftover; confirm
    # against the ec2_vpc_subnet module docs.
    tags: '{{ resource_prefix }}_setup'
    vpc_id: '{{ setup_vpc.vpc.id }}'
    cidr: 10.10.0.0/24
    state: present
    resource_tags:
      Name: '{{ resource_prefix }}_setup'
    <<: *aws_connection_info
  register: setup_subnet
- set_fact:
    subnet_id: '{{ setup_subnet.subnet.id }}'
- name: create a security group to use for creating an ec2 instance
  ec2_group:
    name: '{{ resource_prefix }}_setup'
    description: 'created by Ansible integration tests'
    state: present
    vpc_id: '{{ setup_vpc.vpc.id }}'
    <<: *aws_connection_info
  register: setup_sg
- set_fact:
    sg_id: '{{ setup_sg.group_id }}'

@ -0,0 +1,39 @@
# Removes the AWS resources created by setup.yml (security group, subnet,
# VPC).  Each removal ignores errors so a partially-failed setup can still be
# cleaned up as far as possible.
- name: set connection information for all tasks
  set_fact:
    aws_connection_info: &aws_connection_info
      aws_access_key: '{{ aws_access_key }}'
      aws_secret_key: '{{ aws_secret_key }}'
      security_token: '{{ security_token }}'
      region: '{{ aws_region }}'
  no_log: yes
- name: remove setup security group
  ec2_group:
    name: '{{ resource_prefix }}_setup'
    description: 'created by Ansible integration tests'
    state: absent
    vpc_id: '{{ vpc_id }}'
    <<: *aws_connection_info
  ignore_errors: yes
- name: remove setup subnet
  ec2_vpc_subnet:
    az: '{{ aws_region }}a'
    tags: '{{ resource_prefix }}_setup'
    vpc_id: '{{ vpc_id }}'
    cidr: 10.10.0.0/24
    state: absent
    resource_tags:
      Name: '{{ resource_prefix }}_setup'
    <<: *aws_connection_info
  ignore_errors: yes
- name: remove setup VPC
  ec2_vpc_net:
    cidr_block: 10.10.0.0/24
    state: absent
    name: '{{ resource_prefix }}_setup'
    resource_tags:
      Name: '{{ resource_prefix }}_setup'
    <<: *aws_connection_info
  ignore_errors: yes

@ -0,0 +1,9 @@
---
- hosts: 127.0.0.1
connection: local
gather_facts: no
tasks:
- name: assert inventory was not populated by aws_ec2 inventory plugin
assert:
that:
- "'aws_ec2' not in groups"

@ -0,0 +1,18 @@
---
- hosts: 127.0.0.1
connection: local
gather_facts: no
tasks:
- name: assert cache was used to populate inventory
assert:
that:
- "'aws_ec2' in groups"
- "groups.aws_ec2 | length == 1"
- meta: refresh_inventory
- name: assert refresh_inventory updated the cache
assert:
that:
- "'aws_ec2' in groups"
- "not groups.aws_ec2"

@ -0,0 +1,91 @@
---
- hosts: 127.0.0.1
connection: local
gather_facts: no
environment: "{{ ansible_test.environment }}"
tasks:
- block:
# Create VPC, subnet, security group, and find image_id to create instance
- include_tasks: setup.yml
- name: assert group was populated with inventory but is empty
assert:
that:
- "'aws_ec2' in groups"
- "not groups.aws_ec2"
# Create new host, refresh inventory, remove host, refresh inventory
- name: set connection information for all tasks
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: '{{ aws_access_key }}'
aws_secret_key: '{{ aws_secret_key }}'
security_token: '{{ security_token }}'
region: '{{ aws_region }}'
no_log: yes
- name: create a new host
ec2:
image: '{{ image_id }}'
exact_count: 1
count_tag:
Name: '{{ resource_prefix }}'
instance_tags:
Name: '{{ resource_prefix }}'
instance_type: t2.micro
wait: yes
group_id: '{{ sg_id }}'
vpc_subnet_id: '{{ subnet_id }}'
<<: *aws_connection_info
register: setup_instance
- meta: refresh_inventory
- name: assert group was populated with inventory and is no longer empty
assert:
that:
- "'aws_ec2' in groups"
- "groups.aws_ec2 | length == 1"
- "groups.aws_ec2.0 == '{{ resource_prefix }}'"
- name: remove setup ec2 instance
ec2:
instance_type: t2.micro
instance_ids: '{{ setup_instance.instance_ids }}'
state: absent
wait: yes
instance_tags:
Name: '{{ resource_prefix }}'
group_id: '{{ sg_id }}'
vpc_subnet_id: '{{ subnet_id }}'
<<: *aws_connection_info
- meta: refresh_inventory
- name: assert group was populated with inventory but is empty
assert:
that:
- "'aws_ec2' in groups"
- "not groups.aws_ec2"
always:
- name: remove setup ec2 instance
ec2:
instance_type: t2.micro
instance_ids: '{{ setup_instance.instance_ids }}'
state: absent
wait: yes
instance_tags:
Name: '{{ resource_prefix }}'
group_id: '{{ sg_id }}'
vpc_subnet_id: '{{ subnet_id }}'
<<: *aws_connection_info
ignore_errors: yes
when: setup_instance is defined
- include_tasks: tear_down.yml

@ -0,0 +1,79 @@
---
# Test for the aws_ec2 inventory plugin constructed options: verifies the
# keyed_groups, compose, and groups settings rendered from the
# inventory_with_constructed template.
- hosts: 127.0.0.1
  connection: local
  gather_facts: no
  environment: "{{ ansible_test.environment }}"
  tasks:
    - block:
        # Create VPC, subnet, security group, and find image_id to create instance
        - include_tasks: setup.yml
        # Create new host, refresh inventory
        - name: set connection information for all tasks
          set_fact:
            # anchored so every ec2 task below can merge the same credentials
            aws_connection_info: &aws_connection_info
              aws_access_key: '{{ aws_access_key }}'
              aws_secret_key: '{{ aws_secret_key }}'
              security_token: '{{ security_token }}'
              region: '{{ aws_region }}'
          no_log: yes
        - name: create a new host
          ec2:
            image: '{{ image_id }}'
            exact_count: 1
            count_tag:
              Name: '{{ resource_prefix }}'
            instance_tags:
              Name: '{{ resource_prefix }}'
              # tag1/tag2 feed the compose'd test_compose_var_sum hostvar
              tag1: value1
              tag2: value2
            instance_type: t2.micro
            wait: yes
            group_id: '{{ sg_id }}'
            vpc_subnet_id: '{{ subnet_id }}'
            <<: *aws_connection_info
          register: setup_instance
        - meta: refresh_inventory
        - name: register the keyed sg group name
          set_fact:
            sg_group_name: "security_groups_{{ sg_id | replace('-', '_') }}"
        - name: register one of the keyed tag groups name
          set_fact:
            tag_group_name: "tag_Name_{{ resource_prefix | replace('-', '_') }}"
        - name: assert the keyed groups and groups from constructed config were added to inventory and composite var added to hostvars
          assert:
            that:
              # There are 9 groups: all, ungrouped, aws_ec2, sg keyed group, 3 tag keyed group (one per tag), arch keyed group, constructed group
              - "groups | length == 9"
              - "groups[tag_group_name] | length == 1"
              - "groups[sg_group_name] | length == 1"
              - "groups.arch_x86_64 | length == 1"
              - "groups.tag_with_name_key | length == 1"
              - vars.hostvars[groups.aws_ec2.0]['test_compose_var_sum'] == 'value1value2'
      always:
        # best-effort cleanup of the test instance and VPC resources
        - name: remove setup ec2 instance
          ec2:
            instance_type: t2.micro
            instance_ids: '{{ setup_instance.instance_ids }}'
            state: absent
            wait: yes
            instance_tags:
              Name: '{{ resource_prefix }}'
            group_id: "{{ sg_id }}"
            vpc_subnet_id: "{{ subnet_id }}"
            <<: *aws_connection_info
          ignore_errors: yes
          when: setup_instance is defined
        - include_tasks: tear_down.yml

@ -0,0 +1,74 @@
# Tasks verifying that 'meta: refresh_inventory' reflects EC2 instance
# creation and removal in the aws_ec2 inventory group.
- name: test updating inventory
  block:
    - name: assert group was populated with inventory but is empty
      assert:
        that:
          - "'aws_ec2' in groups"
          - "not groups.aws_ec2"
    - name: set connection information for all tasks
      set_fact:
        # anchored so every ec2 task below can merge the same credentials
        aws_connection_info: &aws_connection_info
          aws_access_key: "{{ aws_access_key }}"
          aws_secret_key: "{{ aws_secret_key }}"
          security_token: "{{ security_token }}"
          region: "{{ aws_region }}"
      no_log: yes
    - name: create a new host
      ec2:
        # per-region AMI map supplied by the calling test
        image: "{{ images[aws_region] }}"
        exact_count: 1
        count_tag:
          Name: '{{ resource_prefix }}'
        instance_tags:
          Name: '{{ resource_prefix }}'
        instance_type: t2.micro
        wait: yes
        group_id: '{{ setup_sg.group_id }}'
        vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
        <<: *aws_connection_info
      register: setup_instance
    - meta: refresh_inventory
    - name: assert group was populated with inventory and is no longer empty
      assert:
        that:
          - "'aws_ec2' in groups"
          - "groups.aws_ec2 | length == 1"
          - "groups.aws_ec2.0 == '{{ resource_prefix }}'"
    - name: remove setup ec2 instance
      ec2:
        instance_type: t2.micro
        instance_ids: '{{ setup_instance.instance_ids }}'
        state: absent
        wait: yes
        instance_tags:
          Name: '{{ resource_prefix }}'
        group_id: '{{ setup_sg.group_id }}'
        vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
        <<: *aws_connection_info
    - meta: refresh_inventory
    - name: assert group was populated with inventory but is empty
      assert:
        that:
          - "'aws_ec2' in groups"
          - "not groups.aws_ec2"
  always:
    # best-effort cleanup; runs even if a task in the block above failed
    - name: remove setup ec2 instance
      ec2:
        instance_type: t2.micro
        instance_ids: '{{ setup_instance.instance_ids }}'
        state: absent
        wait: yes
        instance_tags:
          Name: '{{ resource_prefix }}'
        group_id: '{{ setup_sg.group_id }}'
        vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
        <<: *aws_connection_info
      ignore_errors: yes

@ -0,0 +1,35 @@
#!/usr/bin/env bash
# Integration tests for the aws_ec2 inventory plugin. Each scenario renders
# an inventory config from a template, runs a test playbook against it, and
# finally clears the config again.
set -eux
# ensure test config is empty
ansible-playbook playbooks/empty_inventory_config.yml "$@"
# restrict inventory parsing to the aws_ec2 plugin only
export ANSIBLE_INVENTORY_ENABLED=aws_ec2
# test with default inventory file
ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@"
export ANSIBLE_INVENTORY=test.aws_ec2.yml
# test empty inventory config
ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@"
# generate inventory config and test using it
ansible-playbook playbooks/create_inventory_config.yml "$@"
ansible-playbook playbooks/test_populating_inventory.yml "$@"
# generate inventory config with caching and test using it
ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.yml'" "$@"
ansible-playbook playbooks/populate_cache.yml "$@"
ansible-playbook playbooks/test_inventory_cache.yml "$@"
# remove inventory cache
rm -r aws_ec2_cache_dir/
# generate inventory config with constructed features and test using it
ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_constructed.yml'" "$@"
ansible-playbook playbooks/test_populating_inventory_with_constructed.yml "$@"
# cleanup inventory config
ansible-playbook playbooks/empty_inventory_config.yml "$@"

@ -0,0 +1,12 @@
# aws_ec2 inventory plugin config template; the Jinja expressions are filled
# in when the test renders this file via create_inventory_config.yml.
plugin: aws_ec2
aws_access_key_id: '{{ aws_access_key }}'
aws_secret_access_key: '{{ aws_secret_key }}'
aws_security_token: '{{ security_token }}'
# only inspect the region the test resources live in
regions:
  - '{{ aws_region }}'
# only match instances tagged with this test run's resource prefix
filters:
  tag:Name:
    - '{{ resource_prefix }}'
# hostname candidates; presumably tried in order of preference (tag:Name
# first, then private DNS name) -- see the aws_ec2 plugin docs to confirm
hostnames:
  - tag:Name
  - dns-name

@ -0,0 +1,12 @@
plugin: aws_ec2
cache: True
cache_plugin: jsonfile
cache_connection: aws_ec2_cache_dir
aws_access_key_id: '{{ aws_access_key }}'
aws_secret_access_key: '{{ aws_secret_key }}'
aws_security_token: '{{ security_token }}'
regions:
- '{{ aws_region }}'
filters:
tag:Name:
- '{{ resource_prefix }}'

@ -0,0 +1,20 @@
# aws_ec2 inventory plugin config template exercising the constructed
# features (keyed_groups, compose, groups); rendered via
# create_inventory_config.yml.
plugin: aws_ec2
aws_access_key_id: '{{ aws_access_key }}'
aws_secret_access_key: '{{ aws_secret_key }}'
aws_security_token: '{{ security_token }}'
regions:
  - '{{ aws_region }}'
filters:
  tag:Name:
    - '{{ resource_prefix }}'
# build groups from instance attributes
keyed_groups:
  # one group per security group id
  - key: 'security_groups|json_query("[].group_id")'
    prefix: 'security_groups'
  # one group per tag key/value pair
  - key: 'tags'
    prefix: 'tag'
  # one group per CPU architecture
  - prefix: 'arch'
    key: "architecture"
# extra hostvar composed from the instance's tags
compose:
  test_compose_var_sum: tags.tag1 + tags.tag2
# conditional group membership based on a Jinja expression
groups:
  tag_with_name_key: "'Name' in (tags | list)"

@ -0,0 +1,3 @@
shippable/cloud/incidental
cloud/foreman
destructive

@ -0,0 +1,5 @@
[defaults]
# generated by runme.sh before the playbooks run
inventory = test-config.foreman.yaml
[inventory]
# only allow the foreman inventory plugin to parse the config above
enable_plugins = foreman

@ -0,0 +1,31 @@
---
# Verify that the jsonfile inventory cache written by the preceding foreman
# inventory run (see runme.sh) contains the expected stub hosts.
- hosts: localhost
  vars:
    foreman_stub_host: "{{ lookup('env', 'FOREMAN_HOST') }}"
    foreman_stub_port: "{{ lookup('env', 'FOREMAN_PORT') }}"
    foreman_stub_api_path: /api/v2
    # the cache payload is keyed by the API URL that was queried
    cached_hosts_key: "http://{{ foreman_stub_host }}:{{ foreman_stub_port }}{{ foreman_stub_api_path }}/hosts"
  tasks:
    - name: verify a cache file was created
      find:
        path:
          - ./foreman_cache
      register: matching_files
    - name: assert exactly one cache file exists
      assert:
        that:
          - matching_files.matched == 1
    - name: read the cached inventory
      set_fact:
        contents: "{{ lookup('file', matching_files.files.0.path) }}"
    - name: extract all the host names
      set_fact:
        cached_hosts: "{{ contents[cached_hosts_key] | json_query('[*].name') }}"
    # 'loop' is a task keyword, so it must sit at task level rather than be
    # nested under the assert module args; the conditional uses a bare
    # expression instead of '{{ item }}' templating, which is redundant
    # inside 'that' and triggers warnings.
    - name: assert the expected hosts are present in the cache
      assert:
        that:
          - item in cached_hosts
      loop:
        - v6.example-780.com
        - c4.j1.y5.example-487.com

@ -0,0 +1,50 @@
#!/usr/bin/env bash
# Integration test for the foreman inventory plugin, run against a local
# Foreman API stub (FOREMAN_HOST/FOREMAN_PORT) with jsonfile caching enabled.
[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x
set -euo pipefail
# NOTE(review): export-then-unset looks redundant; presumably it guarantees
# these are defined before being cleared so the plugin config in ansible.cfg
# is used instead of inherited settings -- confirm
export ANSIBLE_INVENTORY
export ANSIBLE_PYTHON_INTERPRETER
unset ANSIBLE_INVENTORY
unset ANSIBLE_PYTHON_INTERPRETER
export ANSIBLE_CONFIG=ansible.cfg
export FOREMAN_HOST="${FOREMAN_HOST:-localhost}"
export FOREMAN_PORT="${FOREMAN_PORT:-8080}"
FOREMAN_CONFIG=test-config.foreman.yaml
# Set inventory caching environment variables to populate a jsonfile cache
export ANSIBLE_INVENTORY_CACHE=True
export ANSIBLE_INVENTORY_CACHE_PLUGIN=jsonfile
export ANSIBLE_INVENTORY_CACHE_CONNECTION=./foreman_cache
# flag for checking whether cleanup has already fired
_is_clean=
function _cleanup() {
[[ -n "$_is_clean" ]] && return # don't double-clean
echo Cleanup: removing $FOREMAN_CONFIG...
rm -vf "$FOREMAN_CONFIG"
unset ANSIBLE_CONFIG
unset FOREMAN_HOST
unset FOREMAN_PORT
unset FOREMAN_CONFIG
_is_clean=1
}
trap _cleanup INT TERM EXIT
# generate the plugin configuration pointing at the stub
cat > "$FOREMAN_CONFIG" <<FOREMAN_YAML
plugin: foreman
url: http://${FOREMAN_HOST}:${FOREMAN_PORT}
user: ansible-tester
password: secure
validate_certs: False
FOREMAN_YAML
# exercise the inventory plugin, then verify the cache it populated
ansible-playbook test_foreman_inventory.yml --connection=local "$@"
ansible-playbook inspect_cache.yml --connection=local "$@"
# remove inventory cache
rm -r ./foreman_cache

@ -0,0 +1,59 @@
---
# Exercise the foreman inventory plugin against the flask API stub: wait for
# the stub to come up, smoke-test it, then verify groups, host placement and
# hostvars sourced from the stub data.
- hosts: localhost
  vars:
    foreman_stub_host: "{{ lookup('env', 'FOREMAN_HOST') }}"
    foreman_stub_port: "{{ lookup('env', 'FOREMAN_PORT') }}"
    foreman_stub_api_path: /api/v2
    foreman_stub_host_uri: "http://{{ foreman_stub_host }}:{{ foreman_stub_port }}"
    foreman_stub_api_uri: "{{ foreman_stub_host_uri }}{{ foreman_stub_api_path }}"
    foreman_stub_heartbeat_uri: "{{ foreman_stub_host_uri }}/ping"
  tasks:
    - debug:
        msg: >-
          Foreman host: {{ foreman_stub_host }} |
          Foreman port: {{ foreman_stub_port }} |
          API path: {{ foreman_stub_api_path }} |
          Foreman API URL: {{ foreman_stub_api_uri }}
    - name: Wait for Foreman API stub to come up online
      wait_for:
        host: "{{ foreman_stub_host }}"
        port: "{{ foreman_stub_port }}"
        state: started
    # smoke test that flask app is serving
    - name: Smoke test HTTP response from Foreman stub
      uri:
        url: "{{ foreman_stub_heartbeat_uri }}"
        return_content: yes
      register: heartbeat_resp
      failed_when: >
        heartbeat_resp.json.status != 'ok' or heartbeat_resp.json.response != 'pong'
    #### Testing start
    # The conditionals below use bare expressions; wrapping loop vars in
    # '{{ }}' inside 'that' is redundant templating and triggers warnings.
    - name: >
        Check that there are 'foreman_pgagne_sats' and 'foreman_base'
        groups present in inventory
      assert:
        that:
          - item in groups
      with_items:
        - foreman_pgagne_sats
        - foreman_base
    - name: Check that host are in appropriate groups
      assert:
        that:
          - item.key in groups[item.value]
      with_dict:
        v6.example-780.com: foreman_base
        c4.j1.y5.example-487.com: ungrouped
    - name: Check host UUIDs
      assert:
        that:
          - hostvars[item.key]['foreman_subscription_facet_attributes']['uuid'] == item.value
      with_dict:
        v6.example-780.com: 2c72fa49-995a-4bbf-bda0-684c7048ad9f
        c4.j1.y5.example-487.com: 0a494b6e-7e90-4ed2-8edc-43a41436a242
    #### Testing end

@ -0,0 +1,3 @@
shippable/vcenter/incidental
cloud/vcenter
destructive

@ -0,0 +1,8 @@
[defaults]
# generated by runme.sh before ansible-inventory runs
inventory = test-config.vmware.yaml
[inventory]
# only allow the vmware_vm_inventory plugin to parse the config above
enable_plugins = vmware_vm_inventory
# cache results to ./inventory_cache with the jsonfile plugin; runme.sh
# asserts a vmware_vm_inventory_* cache file appears there
cache = True
cache_plugin = jsonfile
cache_connection = inventory_cache

@ -0,0 +1,83 @@
#!/usr/bin/env bash
# Integration test for the vmware_vm_inventory plugin against a VCSIM
# (vCenter simulator) instance reachable at VCENTER_HOSTNAME.
[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x
set -euo pipefail
# Required to differentiate between Python 2 and 3 environ
PYTHON=${ANSIBLE_TEST_PYTHON_INTERPRETER:-python}
export ANSIBLE_CONFIG=ansible.cfg
export VMWARE_SERVER="${VCENTER_HOSTNAME}"
export VMWARE_USERNAME="${VCENTER_USERNAME}"
export VMWARE_PASSWORD="${VCENTER_PASSWORD}"
port=5000
VMWARE_CONFIG=test-config.vmware.yaml
inventory_cache="$(pwd)/inventory_cache"
# generate the plugin configuration consumed via ansible.cfg
cat > "$VMWARE_CONFIG" <<VMWARE_YAML
plugin: vmware_vm_inventory
strict: False
validate_certs: False
with_tags: False
VMWARE_YAML
cleanup() {
    echo "Cleanup"
    if [ -f "${VMWARE_CONFIG}" ]; then
        rm -f "${VMWARE_CONFIG}"
    fi
    if [ -d "${inventory_cache}" ]; then
        echo "Removing ${inventory_cache}"
        rm -rf "${inventory_cache}"
    fi
    echo "Done"
    # NOTE: deliberately no 'exit 0' here -- an unconditional exit 0 in an
    # EXIT trap would mask the real exit status and report success even when
    # a test step failed.
}
trap cleanup INT TERM EXIT
echo "DEBUG: Using ${VCENTER_HOSTNAME} with username ${VCENTER_USERNAME} and password ${VCENTER_PASSWORD}"
echo "Kill all previous instances"
curl "http://${VCENTER_HOSTNAME}:${port}/killall" > /dev/null 2>&1
echo "Start new VCSIM server"
curl "http://${VCENTER_HOSTNAME}:${port}/spawn?datacenter=1&cluster=1&folder=0" > /dev/null 2>&1
echo "Debugging new instances"
curl "http://${VCENTER_HOSTNAME}:${port}/govc_find"
# Get inventory
ansible-inventory -i "${VMWARE_CONFIG}" --list
echo "Check if cache is working for inventory plugin"
if [ -z "$(find "${inventory_cache}" -maxdepth 1 -name 'vmware_vm_inventory_*' -print -quit)" ]; then
    echo "Cache directory not found. Please debug"
    exit 1
fi
echo "Cache is working"
# Get inventory using YAML
ansible-inventory -i "${VMWARE_CONFIG}" --list --yaml
# Install TOML for --toml. Under 'set -e -o pipefail' the previous
# 'cmd | grep ...; RESULT=$?' pattern aborted the script before $? could be
# inspected whenever toml was absent; test inside 'if' instead.
if ! ${PYTHON} -m pip freeze | grep -q toml; then
    echo "Installing TOML package"
    ${PYTHON} -m pip install toml
else
    echo "TOML package already exists, skipping installation"
fi
# Get inventory using TOML; same 'set -e'-safe pattern as above.
if ! ansible-inventory -i "${VMWARE_CONFIG}" --list --toml; then
    echo "Inventory plugin failed to list inventory host using --toml, please debug"
    exit 1
fi
# Test playbook with given inventory
ansible-playbook -i "${VMWARE_CONFIG}" test_vmware_vm_inventory.yml --connection=local "$@"

@ -0,0 +1,24 @@
# Test code for the vmware guest dynamic plugin module
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
---
# Fixed "Inventroy" typo; assert conditionals use bare expressions instead of
# '{{ item }}' templating, which is redundant inside 'that' and warned about.
- name: Test VMware Guest Dynamic Inventory Plugin
  hosts: localhost
  tasks:
    - name: store the vcenter container ip
      set_fact:
        vcsim: "{{ lookup('env', 'VCENTER_HOSTNAME') }}"
    - name: Check that there are 'all' and 'otherGuest' groups present in inventory
      assert:
        that:
          - item in groups
      with_items:
        - all
        - otherGuest
    - name: Check if Hostname and other details are populated in hostvars
      assert:
        that:
          - hostvars[item].name is defined
      with_items: "{{ groups['all'] }}"

@ -0,0 +1,23 @@
Wait tests
----------
Wait tests require at least one node, and don't work on the normal k8s
openshift-origin container as provided by `ansible-test --docker -v k8s`.
Minikube, Kubernetes from Docker, or any other Kubernetes service will
suffice.
If kubectl is already using the right config file and context, you can
just do
```
cd test/integration/targets/k8s
./runme.sh -vv
```
Otherwise, set one or both of `K8S_AUTH_KUBECONFIG` and `K8S_AUTH_CONTEXT`
and use the same command.

@ -0,0 +1,2 @@
cloud/openshift
shippable/cloud/incidental

@ -0,0 +1,32 @@
# Defaults for the incidental k8s test role.
# Jinja expression asserted by the crd tasks after recreating a CRD resource;
# callers may override it when a different outcome is expected.
recreate_crd_default_merge_expectation: recreate_crd is not failed
# Pod metadata/spec fragments assembled into k8s_pod_template below; the
# consuming tasks supply k8s_pod_name / k8s_pod_image (see delete.yml).
k8s_pod_metadata:
  labels:
    app: "{{ k8s_pod_name }}"
k8s_pod_spec:
  containers:
    - image: "{{ k8s_pod_image }}"
      imagePullPolicy: Always
      name: "{{ k8s_pod_name }}"
      command: "{{ k8s_pod_command }}"
      # reports ready 15s after start; /bin/true always succeeds
      readinessProbe:
        initialDelaySeconds: 15
        exec:
          command:
            - /bin/true
      resources:
        limits:
          cpu: "100m"
          memory: "100Mi"
      ports: "{{ k8s_pod_ports }}"
# no explicit command/ports by default; overridable per test
k8s_pod_command: []
k8s_pod_ports: []
k8s_pod_template:
  metadata: "{{ k8s_pod_metadata }}"
  spec: "{{ k8s_pod_spec }}"
# NOTE(review): 'yes' is YAML-1.1 truthy; canonical style would be 'true'
k8s_openshift: yes

@ -0,0 +1,20 @@
# Test fixture: a Certificate custom resource instance; the CRD it depends on
# is installed first from setup-crd.yml (see the crd tasks).
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
  name: acme-crt
spec:
  secretName: acme-crt-secret
  dnsNames:
    - foo.example.com
    - bar.example.com
  acme:
    config:
      - ingressClass: nginx
        domains:
          - foo.example.com
          - bar.example.com
  issuerRef:
    name: letsencrypt-prod
    # We can reference ClusterIssuers by changing the kind here.
    # The default value is Issuer (i.e. a locally namespaced Issuer)
    kind: Issuer

@ -0,0 +1,21 @@
# Test fixture: Deployment manifest.
# NOTE(review): selector.matchLabels carries an extra 'unwanted: value' key
# with no matching pod-template label; presumably this fixture is meant to be
# rejected or patched by a test -- confirm against the consuming test.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: kuard
  name: kuard
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: kuard
      unwanted: value
  template:
    metadata:
      labels:
        app: kuard
    spec:
      containers:
      - image: gcr.io/kuar-demo/kuard-amd64:1
        name: kuard

@ -0,0 +1,20 @@
# Test fixture: Deployment manifest with a non-integer replica count
# ('replicas: hello'); presumably used to exercise server-side validation
# failure handling -- confirm against the consuming test.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: kuard
  name: kuard
  namespace: default
spec:
  replicas: hello
  selector:
    matchLabels:
      app: kuard
  template:
    metadata:
      labels:
        app: kuard
    spec:
      containers:
      - image: gcr.io/kuar-demo/kuard-amd64:1
        name: kuard

@ -0,0 +1,14 @@
# CustomResourceDefinition used by the crd tasks: registers the Certificate
# resource type (certmanager.k8s.io/v1alpha1) that crd-resource.yml
# instantiates.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: certificates.certmanager.k8s.io
spec:
  group: certmanager.k8s.io
  version: v1alpha1
  scope: Namespaced
  names:
    kind: Certificate
    plural: certificates
    shortNames:
      - cert
      - certs

@ -0,0 +1,2 @@
dependencies:
- setup_remote_tmp_dir

@ -0,0 +1,68 @@
# Tests for the k8s module's append_hash option: creating a ConfigMap with
# append_hash appends a content-derived hash suffix to its name, identical
# content reuses the same name, and changed content yields a new one.
- block:
    - name: Ensure that append_hash namespace exists
      k8s:
        kind: Namespace
        name: append-hash
    - name: create k8s_resource variable
      set_fact:
        k8s_resource:
          metadata:
            name: config-map-test
            namespace: append-hash
          apiVersion: v1
          kind: ConfigMap
          data:
            hello: world
    - name: Create config map
      k8s:
        definition: "{{ k8s_resource }}"
        append_hash: yes
      register: k8s_configmap1
    - name: check configmap is created with a hash
      assert:
        that:
          - k8s_configmap1 is changed
          - k8s_configmap1.result.metadata.name != 'config-map-test'
          # stripping the 10-character hash suffix leaves the base name + '-'
          - k8s_configmap1.result.metadata.name[:-10] == 'config-map-test-'
    - name: recreate same config map
      k8s:
        definition: "{{ k8s_resource }}"
        append_hash: yes
      register: k8s_configmap2
    # identical content must hash to the same name (idempotent)
    - name: check configmaps are different
      assert:
        that:
          - k8s_configmap2 is not changed
          - k8s_configmap1.result.metadata.name == k8s_configmap2.result.metadata.name
    - name: add key to config map
      k8s:
        definition:
          metadata:
            name: config-map-test
            namespace: append-hash
          apiVersion: v1
          kind: ConfigMap
          data:
            hello: world
            another: value
        append_hash: yes
      register: k8s_configmap3
    # changed content must produce a different hashed name
    - name: check configmaps are different
      assert:
        that:
          - k8s_configmap3 is changed
          - k8s_configmap1.result.metadata.name != k8s_configmap3.result.metadata.name
  always:
    - name: ensure that namespace is removed
      k8s:
        kind: Namespace
        name: append-hash
        state: absent

@ -0,0 +1,277 @@
- block:
- python_requirements_info:
dependencies:
- openshift
- kubernetes
- set_fact:
apply_namespace: apply
- name: ensure namespace exists
k8s:
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ apply_namespace }}"
- name: add a configmap
k8s:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
definition:
kind: ConfigMap
apiVersion: v1
data:
one: "1"
two: "2"
three: "3"
apply: yes
register: k8s_configmap
- name: check configmap was created
assert:
that:
- k8s_configmap is changed
- k8s_configmap.result.metadata.annotations|default(False)
- name: add same configmap again
k8s:
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
data:
one: "1"
two: "2"
three: "3"
apply: yes
register: k8s_configmap_2
- name: check nothing changed
assert:
that:
- k8s_configmap_2 is not changed
- name: add same configmap again with check mode on
k8s:
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
data:
one: "1"
two: "2"
three: "3"
apply: yes
check_mode: yes
register: k8s_configmap_check
- name: check nothing changed
assert:
that:
- k8s_configmap_check is not changed
- name: add same configmap again but using name and namespace args
k8s:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
definition:
kind: ConfigMap
apiVersion: v1
data:
one: "1"
two: "2"
three: "3"
apply: yes
register: k8s_configmap_2a
- name: check nothing changed
assert:
that:
- k8s_configmap_2a is not changed
- name: update configmap
k8s:
definition:
kind: ConfigMap
apiVersion: v1
metadata:
name: "apply-configmap"
namespace: "{{ apply_namespace }}"
data:
one: "1"
three: "3"
four: "4"
apply: yes
register: k8s_configmap_3
- name: ensure that configmap has been correctly updated
assert:
that:
- k8s_configmap_3 is changed
- "'four' in k8s_configmap_3.result.data"
- "'two' not in k8s_configmap_3.result.data"
- name: add a service
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8080
targetPort: 8080
type: NodePort
apply: yes
register: k8s_service
- name: add exactly same service
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8080
targetPort: 8080
type: NodePort
apply: yes
register: k8s_service_2
- name: check nothing changed
assert:
that:
- k8s_service_2 is not changed
- name: change service ports
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8081
targetPort: 8081
type: NodePort
apply: yes
register: k8s_service_3
- name: check ports are correct
assert:
that:
- k8s_service_3 is changed
- k8s_service_3.result.spec.ports | length == 1
- k8s_service_3.result.spec.ports[0].port == 8081
- name: insert new service port
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: mesh
port: 8080
targetPort: 8080
- name: http
port: 8081
targetPort: 8081
type: NodePort
apply: yes
register: k8s_service_4
- name: check ports are correct
assert:
that:
- k8s_service_4 is changed
- k8s_service_4.result.spec.ports | length == 2
- k8s_service_4.result.spec.ports[0].port == 8080
- k8s_service_4.result.spec.ports[1].port == 8081
- name: remove new service port (check mode)
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8081
targetPort: 8081
type: NodePort
apply: yes
check_mode: yes
register: k8s_service_check
- name: check ports are correct
assert:
that:
- k8s_service_check is changed
- k8s_service_check.result.spec.ports | length == 1
- k8s_service_check.result.spec.ports[0].port == 8081
- name: remove new service port
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: apply-svc
namespace: "{{ apply_namespace }}"
spec:
selector:
app: whatever
ports:
- name: http
port: 8081
targetPort: 8081
type: NodePort
apply: yes
register: k8s_service_5
- name: check ports are correct
assert:
that:
- k8s_service_5 is changed
- k8s_service_5.result.spec.ports | length == 1
- k8s_service_5.result.spec.ports[0].port == 8081
always:
- name: remove namespace
k8s:
kind: Namespace
name: "{{ apply_namespace }}"
state: absent

@ -0,0 +1,71 @@
# Tests for k8s module CRD handling: install a CustomResourceDefinition,
# create and patch a custom resource, and exercise merge_type options.
# TODO: This is the only way I could get the kubeconfig, I don't know why. Running the lookup outside of debug seems to return an empty string
#- debug: msg={{ lookup('env', 'K8S_AUTH_KUBECONFIG') }}
#  register: kubeconfig
# Kubernetes resources
- block:
    - name: Create a namespace
      k8s:
        name: crd
        kind: Namespace
    - name: install custom resource definitions
      k8s:
        definition: "{{ lookup('file', role_path + '/files/setup-crd.yml') }}"
    # give the apiserver time to register the new resource type
    - name: pause 5 seconds to avoid race condition
      pause:
        seconds: 5
    - name: create custom resource definition
      k8s:
        definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
        namespace: crd
        apply: "{{ create_crd_with_apply | default(omit) }}"
      register: create_crd
    - name: patch custom resource definition
      k8s:
        definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
        namespace: crd
      register: recreate_crd
      ignore_errors: yes
    # expectation expression defined in defaults/main.yml; callers may override
    - name: assert that recreating crd is as expected
      assert:
        that:
          - recreate_crd_default_merge_expectation
    - block:
        - name: recreate custom resource definition with merge_type
          k8s:
            definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
            merge_type: merge
            namespace: crd
          register: recreate_crd_with_merge
        - name: recreate custom resource definition with merge_type list
          k8s:
            definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
            merge_type:
              - strategic-merge
              - merge
            namespace: crd
          register: recreate_crd_with_merge_list
      when: recreate_crd is successful
    - name: remove crd
      k8s:
        definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
        namespace: crd
        state: absent
  always:
    - name: remove crd namespace
      k8s:
        kind: Namespace
        name: crd
        state: absent
      ignore_errors: yes

@ -0,0 +1,101 @@
# Tests for k8s deletion with wait: create a DaemonSet, verify its pods
# exist, delete it with wait, and verify the pods are eventually removed.
# The whole block is skipped when the cluster has no nodes (see the trailing
# 'when').
- name: ensure that there are actually some nodes
  k8s_info:
    kind: Node
  register: nodes
- block:
    - set_fact:
        delete_namespace: delete
    - name: ensure namespace exists
      k8s:
        definition:
          apiVersion: v1
          kind: Namespace
          metadata:
            name: "{{ delete_namespace }}"
    - name: add a daemonset
      k8s:
        definition:
          apiVersion: extensions/v1beta1
          kind: DaemonSet
          metadata:
            name: delete-daemonset
            namespace: "{{ delete_namespace }}"
          spec:
            selector:
              matchLabels:
                app: "{{ k8s_pod_name }}"
            # pod template assembled in defaults/main.yml from k8s_pod_*
            template: "{{ k8s_pod_template }}"
        wait: yes
        wait_timeout: 180
      vars:
        k8s_pod_name: delete-ds
        k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
      register: ds
    - name: check that daemonset wait worked
      assert:
        that:
          - ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
    - name: check if pods exist
      k8s_info:
        namespace: "{{ delete_namespace }}"
        kind: Pod
        label_selectors:
          - "app={{ k8s_pod_name }}"
      vars:
        k8s_pod_name: delete-ds
      register: pods_create
    - name: assert that there are pods
      assert:
        that:
          - pods_create.resources
    - name: remove the daemonset
      k8s:
        kind: DaemonSet
        name: delete-daemonset
        namespace: "{{ delete_namespace }}"
        state: absent
        wait: yes
    - name: show status of pods
      k8s_info:
        namespace: "{{ delete_namespace }}"
        kind: Pod
        label_selectors:
          - "app={{ k8s_pod_name }}"
      vars:
        k8s_pod_name: delete-ds
    # pod removal happens asynchronously after the daemonset is deleted
    - name: wait for background deletion
      pause:
        seconds: 30
    - name: check if pods still exist
      k8s_info:
        namespace: "{{ delete_namespace }}"
        kind: Pod
        label_selectors:
          - "app={{ k8s_pod_name }}"
      vars:
        k8s_pod_name: delete-ds
      register: pods_delete
    - name: assert that deleting the daemonset deleted the pods
      assert:
        that:
          - not pods_delete.resources
  always:
    - name: remove namespace
      k8s:
        kind: Namespace
        name: "{{ delete_namespace }}"
        state: absent
  when: (nodes.resources | length) > 0

@ -0,0 +1,375 @@
# TODO: This is the only way I could get the kubeconfig, I don't know why. Running the lookup outside of debug seems to return an empty string
#- debug: msg={{ lookup('env', 'K8S_AUTH_KUBECONFIG') }}
# register: kubeconfig
# Kubernetes resources
- include_tasks: delete.yml
- include_tasks: apply.yml
- include_tasks: waiter.yml
- block:
- name: Create a namespace
k8s:
name: testing
kind: Namespace
register: output
- name: show output
debug:
var: output
- name: Setting validate_certs to true causes a failure
k8s:
name: testing
kind: Namespace
validate_certs: yes
ignore_errors: yes
register: output
- name: assert that validate_certs caused a failure (and therefore was correctly translated to verify_ssl)
assert:
that:
- output is failed
- name: k8s_info works with empty resources
k8s_info:
kind: Deployment
namespace: testing
api_version: extensions/v1beta1
register: k8s_info
- name: assert that k8s_info is in correct format
assert:
that:
- "'resources' in k8s_info"
- not k8s_info.resources
- name: Create a service
k8s:
state: present
resource_definition: &svc
apiVersion: v1
kind: Service
metadata:
name: web
namespace: testing
labels:
app: galaxy
service: web
spec:
selector:
app: galaxy
service: web
ports:
- protocol: TCP
targetPort: 8000
name: port-8000-tcp
port: 8000
register: output
- name: show output
debug:
var: output
- name: Create the service again
k8s:
state: present
resource_definition: *svc
register: output
- name: Service creation should be idempotent
assert:
that: not output.changed
- name: Create a ConfigMap
k8s:
kind: ConfigMap
name: test-force-update
namespace: testing
definition:
data:
key: value
- name: Force update ConfigMap
k8s:
kind: ConfigMap
name: test-force-update
namespace: testing
definition:
data:
key: newvalue
force: yes
- name: Create PVC
k8s:
state: present
inline: &pvc
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: elastic-volume
namespace: testing
spec:
resources:
requests:
storage: 5Gi
accessModes:
- ReadWriteOnce
- name: Show output
debug:
var: output
- name: Create the PVC again
k8s:
state: present
inline: *pvc
- name: PVC creation should be idempotent
assert:
that: not output.changed
- name: Create deployment
k8s:
state: present
inline: &deployment
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: elastic
labels:
app: galaxy
service: elastic
namespace: testing
spec:
template:
metadata:
labels:
app: galaxy
service: elastic
spec:
containers:
- name: elastic
volumeMounts:
- mountPath: /usr/share/elasticsearch/data
name: elastic-volume
command: ['elasticsearch']
image: 'ansible/galaxy-elasticsearch:2.4.6'
volumes:
- name: elastic-volume
persistentVolumeClaim:
claimName: elastic-volume
replicas: 1
strategy:
type: RollingUpdate
register: output
- name: Show output
debug:
var: output
- name: Create deployment again
k8s:
state: present
inline: *deployment
register: output
- name: Deployment creation should be idempotent
assert:
that: not output.changed
- debug:
var: k8s_openshift
- include: openshift.yml
when: k8s_openshift | bool
### Type tests
- name: Create a namespace from a string
k8s:
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing1
- name: Namespace should exist
k8s_info:
kind: Namespace
api_version: v1
name: testing1
register: k8s_info_testing1
failed_when: not k8s_info_testing1.resources or k8s_info_testing1.resources[0].status.phase != "Active"
- name: Create resources from a multidocument yaml string
k8s:
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing2
---
kind: Namespace
apiVersion: v1
metadata:
name: testing3
- name: Lookup namespaces
k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing2
- testing3
register: k8s_namespaces
- name: Resources should exist
assert:
that: item.resources[0].status.phase == 'Active'
loop: "{{ k8s_namespaces.results }}"
- name: Delete resources from a multidocument yaml string
k8s:
state: absent
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing2
---
kind: Namespace
apiVersion: v1
metadata:
name: testing3
- name: Lookup namespaces
k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing2
- testing3
register: k8s_namespaces
- name: Resources should not exist
assert:
that:
- not item.resources or item.resources[0].status.phase == "Terminating"
loop: "{{ k8s_namespaces.results }}"
- name: Create resources from a list
k8s:
definition:
- kind: Namespace
apiVersion: v1
metadata:
name: testing4
- kind: Namespace
apiVersion: v1
metadata:
name: testing5
- name: Lookup namespaces
k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing4
- testing5
register: k8s_namespaces
- name: Resources should exist
assert:
that: item.resources[0].status.phase == 'Active'
loop: "{{ k8s_namespaces.results }}"
- name: Delete resources from a list
k8s:
state: absent
definition:
- kind: Namespace
apiVersion: v1
metadata:
name: testing4
- kind: Namespace
apiVersion: v1
metadata:
name: testing5
- k8s_info:
api_version: v1
kind: Namespace
name: "{{ item }}"
loop:
- testing4
- testing5
register: k8s_info
- name: Resources are terminating if still in results
assert:
that: not item.resources or item.resources[0].status.phase == "Terminating"
loop: "{{ k8s_info.results }}"
- name: Create resources from a yaml string ending with ---
k8s:
definition: |+
---
kind: Namespace
apiVersion: v1
metadata:
name: testing6
---
- name: Namespace should exist
k8s_info:
kind: Namespace
api_version: v1
name: testing6
register: k8s_info_testing6
failed_when: not k8s_info_testing6.resources or k8s_info_testing6.resources[0].status.phase != "Active"
- include_tasks: crd.yml
- include_tasks: lists.yml
- include_tasks: append_hash.yml
always:
- name: Delete all namespaces
k8s:
state: absent
definition:
- kind: Namespace
apiVersion: v1
metadata:
name: testing
- kind: Namespace
apiVersion: v1
metadata:
name: testing1
- kind: Namespace
apiVersion: v1
metadata:
name: testing2
- kind: Namespace
apiVersion: v1
metadata:
name: testing3
- kind: Namespace
apiVersion: v1
metadata:
name: testing4
- kind: Namespace
apiVersion: v1
metadata:
name: testing5
- kind: Namespace
apiVersion: v1
metadata:
name: testing6
ignore_errors: yes

@ -0,0 +1,140 @@
---
# Exercise the k8s module's handling of *List kinds: a typed ConfigMapList
# and the generic List, for both creation and deletion.

- name: Ensure testing1 namespace exists
  k8s:
    api_version: v1
    kind: Namespace
    name: testing1

- block:
    - name: Create configmaps
      k8s:
        namespace: testing1
        definition:
          apiVersion: v1
          kind: ConfigMapList
          items: '{{ configmaps }}'

    - name: Get ConfigMaps
      k8s_info:
        api_version: v1
        kind: ConfigMap
        namespace: testing1
        label_selectors:
          - app=test
      register: cms

    - name: All three configmaps should exist
      assert:
        that: item.data.a is defined
      loop: '{{ cms.resources }}'

    - name: Delete configmaps
      k8s:
        state: absent
        namespace: testing1
        definition:
          apiVersion: v1
          kind: ConfigMapList
          items: '{{ configmaps }}'

    - name: Get ConfigMaps
      k8s_info:
        api_version: v1
        kind: ConfigMap
        namespace: testing1
        label_selectors:
          - app=test
      register: cms

    - name: All three configmaps should not exist
      assert:
        that: not cms.resources
  vars:
    configmaps:
      - metadata:
          name: list-example-1
          labels:
            app: test
        data:
          a: first
      - metadata:
          name: list-example-2
          labels:
            app: test
        data:
          a: second
      - metadata:
          name: list-example-3
          labels:
            app: test
        data:
          a: third

- block:
    - name: Create list of arbitrary resources
      k8s:
        namespace: testing1
        definition:
          apiVersion: v1
          kind: List
          # NOTE(review): "namespace" is not a standard List field — the
          # task-level namespace above already scopes the items; kept as-is
          # to preserve the original request body. TODO confirm intent.
          namespace: testing1
          items: '{{ resources }}'

    - name: Get the created resources
      k8s_info:
        api_version: '{{ item.apiVersion }}'
        kind: '{{ item.kind }}'
        namespace: testing1
        name: '{{ item.metadata.name }}'
      register: list_resources
      loop: '{{ resources }}'

    # Each lookup returns a resources list; summing them should yield one
    # match per requested resource.
    - name: All resources should exist
      assert:
        that: ((list_resources.results | sum(attribute="resources", start=[])) | length) == (resources | length)

    - name: Delete list of arbitrary resources
      k8s:
        state: absent
        namespace: testing1
        definition:
          apiVersion: v1
          kind: List
          namespace: testing1
          items: '{{ resources }}'

    - name: Get the resources
      k8s_info:
        api_version: '{{ item.apiVersion }}'
        kind: '{{ item.kind }}'
        namespace: testing1
        name: '{{ item.metadata.name }}'
      register: list_resources
      loop: '{{ resources }}'

    - name: The resources should not exist
      assert:
        that: not ((list_resources.results | sum(attribute="resources", start=[])) | length)
  vars:
    resources:
      - apiVersion: v1
        kind: ConfigMap
        metadata:
          name: list-example-4
        data:
          key: value
      - apiVersion: v1
        kind: Service
        metadata:
          name: list-example-svc
          labels:
            app: test
        spec:
          selector:
            app: test
          ports:
            - protocol: TCP
              targetPort: 8000
              name: port-8000-tcp
              port: 8000

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save