Add mode, owner_id and group_id options to one_vm (#40217)

Add `mode` option which sets permission mode of a VM in octet format
Add `owner_id` and `group_id` which set the ownership of a VM
Move waiting for the VM state to the end of the module so it can fail faster if an error occurs
tagged_instances is only returned if count_attributes and/or count_labels are used, as specified in the documentation
Update relevant tests
Add tests for mode, owner_id, group_id
Milan Ilic 7 years ago committed by René Moser
parent dec392793b
commit 14f0fd9ab3
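
For reference, here is a minimal, self-contained sketch of the octal-string-to-bit conversion that the new `mode` option relies on (mirroring `set_vm_permissions` in the diff below). OpenNebula's `vm.chmod` XML-RPC call expects nine separate use/manage/admin bits for owner, group and other. The helper name `mode_to_chmod_bits` is illustrative and not part of the module; the zero-padding is an extra safeguard for modes whose leading digit is small.

    # Illustrative sketch (not part of the module): convert an octal permission
    # string such as '640' into the nine use/manage/admin bits that OpenNebula's
    # vm.chmod call expects, mirroring the logic in set_vm_permissions below.
    def mode_to_chmod_bits(mode):
        if len(mode) != 3 or any(digit not in '01234567' for digit in mode):
            raise ValueError("mode must be exactly 3 octal digits, e.g. '600'")
        bits = bin(int(mode, base=8))[2:].zfill(9)  # '640' -> '110100000'
        return [int(bit) for bit in bits]

    print(mode_to_chmod_bits('640'))  # [1, 1, 0, 1, 0, 0, 0, 0, 0]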

@@ -127,6 +127,15 @@ options:
             - C(count_labels) parameters should be deployed. Instances are either
             - created or terminated based on this value.
             - NOTE':' Instances with the least IDs will be terminated first.
+    mode:
+        description:
+            - Set permission mode of the instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others.
+    owner_id:
+        description:
+            - ID of the user which will be set as the owner of the instance
+    group_id:
+        description:
+            - ID of the group which will be set as the group of the instance
     memory:
         description:
             - The size of the memory for new instances (in MB, GB, ...)
@@ -175,6 +184,17 @@ EXAMPLES = '''
     attributes:
       name: foo
 
+# Deploy a new VM and set its group_id and mode
+- one_vm:
+    template_id: 90
+    group_id: 16
+    mode: 660
+
+# Change VM's permissions to 640
+- one_vm:
+    instance_ids: 5
+    mode: 640
+
 # Deploy 2 new instances and set memory, vcpu, disk_size and 3 networks
 - one_vm:
     template_id: 15
@@ -356,14 +376,19 @@ instances:
             description: vm's group name
             type: string
             sample: one-users
-        user_id:
-            description: vm's user id
+        owner_id:
+            description: vm's owner id
             type: integer
             sample: 143
-        user_name:
-            description: vm's user name
+        owner_name:
+            description: vm's owner name
             type: string
             sample: app-user
+        mode:
+            description: vm's mode
+            type: string
+            returned: success
+            sample: 660
         state:
             description: state of an instance
             type: string
@@ -453,14 +478,19 @@ tagged_instances:
             description: vm's group name
             type: string
             sample: one-users
-        user_id:
+        owner_id:
             description: vm's user id
             type: integer
             sample: 143
-        user_name:
+        owner_name:
             description: vm's user name
             type: string
             sample: app-user
+        mode:
+            description: vm's mode
+            type: string
+            returned: success
+            sample: 660
         state:
             description: state of an instance
             type: string
@@ -620,6 +650,8 @@ def get_vm_info(client, vm):
     vm_uptime = time.mktime(current_time) - time.mktime(vm_start_time)
     vm_uptime /= (60 * 60)
 
+    permissions_str = parse_vm_permissions(client, vm)
+
     # LCM_STATE is VM's sub-state that is relevant only when STATE is ACTIVE
     vm_lcm_state = None
     if vm.state == VM_STATES.index('ACTIVE'):
@@ -632,8 +664,8 @@ def get_vm_info(client, vm):
         'vm_name': vm.name,
         'state': VM_STATES[vm.state],
         'lcm_state': vm_lcm_state,
-        'user_name': vm.uname,
-        'user_id': vm.uid,
+        'owner_name': vm.uname,
+        'owner_id': vm.uid,
         'networks': networks_info,
         'disk_size': disk_size,
         'memory': vm.template.memory + ' MB',
@@ -643,12 +675,93 @@ def get_vm_info(client, vm):
         'group_id': vm.gid,
         'uptime_h': int(vm_uptime),
         'attributes': vm_attributes,
+        'mode': permissions_str,
         'labels': vm_labels
     }
 
     return info
 
 
+def parse_vm_permissions(client, vm):
+    import xml.etree.ElementTree as ET
+    vm_XML = client.call('vm.info', vm.id)
+    root = ET.fromstring(vm_XML)
+
+    perm_dict = {}
+
+    root = root.find('PERMISSIONS')
+
+    for child in root:
+        perm_dict[child.tag] = child.text
+
+    '''
+    This is the structure of the 'PERMISSIONS' dictionary:
+
+    "PERMISSIONS": {
+        "OWNER_U": "1",
+        "OWNER_M": "1",
+        "OWNER_A": "0",
+        "GROUP_U": "0",
+        "GROUP_M": "0",
+        "GROUP_A": "0",
+        "OTHER_U": "0",
+        "OTHER_M": "0",
+        "OTHER_A": "0"
+    }
+    '''
+
+    owner_octal = int(perm_dict["OWNER_U"]) * 4 + int(perm_dict["OWNER_M"]) * 2 + int(perm_dict["OWNER_A"])
+    group_octal = int(perm_dict["GROUP_U"]) * 4 + int(perm_dict["GROUP_M"]) * 2 + int(perm_dict["GROUP_A"])
+    other_octal = int(perm_dict["OTHER_U"]) * 4 + int(perm_dict["OTHER_M"]) * 2 + int(perm_dict["OTHER_A"])
+
+    permissions = str(owner_octal) + str(group_octal) + str(other_octal)
+
+    return permissions
+
+
+def set_vm_permissions(module, client, vms, permissions):
+    changed = False
+
+    for vm in vms:
+        vm.info()
+        print(vm.id)
+        old_permissions = parse_vm_permissions(client, vm)
+        changed = changed or old_permissions != permissions
+
+        if not module.check_mode and old_permissions != permissions:
+            permissions_str = bin(int(permissions, base=8))[2:]  # 600 -> 110000000
+            mode_bits = [int(d) for d in permissions_str]
+            try:
+                client.call('vm.chmod', vm.id, mode_bits[0], mode_bits[1], mode_bits[2], mode_bits[3],
+                            mode_bits[4], mode_bits[5], mode_bits[6], mode_bits[7], mode_bits[8])
+            except oca.OpenNebulaException:
+                module.fail_json(msg="Permissions changing is unsuccessful, but instances are present if you deployed them.")
+
+    return changed
+
+
+def set_vm_ownership(module, client, vms, owner_id, group_id):
+    changed = False
+
+    for vm in vms:
+        vm.info()
+        if owner_id is None:
+            owner_id = vm.uid
+        if group_id is None:
+            group_id = vm.gid
+
+        changed = changed or owner_id != vm.uid or group_id != vm.gid
+
+        if not module.check_mode and (owner_id != vm.uid or group_id != vm.gid):
+            try:
+                client.call('vm.chown', vm.id, owner_id, group_id)
+            except oca.OpenNebulaException:
+                module.fail_json(msg="Ownership changing is unsuccessful, but instances are present if you deployed them.")
+
+    return changed
+
+
 def get_size_in_MB(module, size_str):
     SYMBOLS = ['B', 'KB', 'MB', 'GB', 'TB']
@@ -831,15 +944,13 @@ def get_all_vms_by_attributes(client, attributes_dict, labels_list):
 
 def create_count_of_vms(module, client, template_id, count, attributes_dict, labels_list, disk_size, network_attrs_list, wait, wait_timeout):
     new_vms_list = []
-    instances_ids = []
-    instances = []
 
     vm_name = ''
     if attributes_dict:
         vm_name = attributes_dict.get('NAME', '')
 
     if module.check_mode:
-        return {'changed': True}
+        return True, [], []
 
     # Create list of used indexes
     vm_filled_indexes_list = None
@@ -870,12 +981,7 @@ def create_count_of_vms(module, client, template_id, count, attributes_dict, lab
         for vm in new_vms_list:
             wait_for_running(module, vm, wait_timeout)
 
-    for vm in new_vms_list:
-        vm_info = get_vm_info(client, vm)
-        instances.append(vm_info)
-        instances_ids.append(vm.id)
-
-    return {'changed': True, 'instances_ids': instances_ids, 'instances': instances, 'tagged_instances': instances}
+    return True, new_vms_list, []
 
 
 def create_exact_count_of_vms(module, client, template_id, exact_count, attributes_dict, count_attributes_dict,
@@ -886,23 +992,19 @@ def create_exact_count_of_vms(module, client, template_id, exact_count, attribut
     vm_count_diff = exact_count - len(vm_list)
     changed = vm_count_diff != 0
 
-    result = {}
     new_vms_list = []
-    instances_ids = []
-    instances = []
-    tagged_instances = list(get_vm_info(client, vm) for vm in vm_list)
+    instances_list = []
+    tagged_instances_list = vm_list
 
     if module.check_mode:
-        return {'changed': changed, 'instances_ids': instances_ids, 'instances': instances, 'tagged_instances': tagged_instances}
+        return changed, instances_list, tagged_instances_list
 
     if vm_count_diff > 0:
         # Add more VMs
-        result = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict,
+        changed, instances_list, tagged_instances = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict,
                                      labels_list, disk_size, network_attrs_list, wait, wait_timeout)
 
-        result['tagged_instances'] += tagged_instances
-        return result
+        tagged_instances_list += instances_list
     elif vm_count_diff < 0:
         # Delete surplus VMs
         old_vms_list = []
@@ -917,13 +1019,12 @@ def create_exact_count_of_vms(module, client, template_id, exact_count, attribut
             for vm in old_vms_list:
                 wait_for_done(module, vm, wait_timeout)
 
-        for vm in old_vms_list:
-            vm_info = get_vm_info(client, vm)
-            instances.append(vm_info)
-            instances_ids.append(vm.id)
-            tagged_instances[:] = [dct for dct in tagged_instances if dct.get('vm_id') != vm.id]
+        instances_list = old_vms_list
+        # store only the remaining instances
+        old_vms_set = set(old_vms_list)
+        tagged_instances_list = [vm for vm in vm_list if vm not in old_vms_set]
 
-    return {'changed': changed, 'instances_ids': instances_ids, 'instances': instances, 'tagged_instances': tagged_instances}
+    return changed, instances_list, tagged_instances_list
 
 
 VM_STATES = ['INIT', 'PENDING', 'HOLD', 'ACTIVE', 'STOPPED', 'SUSPENDED', 'DONE', '', 'POWEROFF', 'UNDEPLOYED', 'CLONING', 'CLONING_FAILURE']
 LCM_STATES = ['LCM_INIT', 'PROLOG', 'BOOT', 'RUNNING', 'MIGRATE', 'SAVE_STOP',
@@ -983,28 +1084,13 @@ def terminate_vm(module, client, vm, hard=False):
     return changed
 
 
-def terminate_vms(module, client, vms, wait, wait_timeout, hard, tagged):
+def terminate_vms(module, client, vms, hard):
     changed = False
-    instances_ids = []
-    instances = []
-
-    if tagged:
-        module.fail_json(msg='Option `instance_ids` is required when state is `absent`.')
 
     for vm in vms:
         changed = terminate_vm(module, client, vm, hard) or changed
 
-    if wait and not module.check_mode:
-        for vm in vms:
-            if vm is not None:
-                wait_for_done(module, vm, wait_timeout)
-
-    for vm in vms:
-        if vm is not None:
-            instances_ids.append(vm.id)
-            instances.append(get_vm_info(client, vm))
-
-    return {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': []}
+    return changed
 
 
 def poweroff_vm(module, vm, hard):
@@ -1026,32 +1112,16 @@ def poweroff_vm(module, vm, hard):
     return changed
 
 
-def poweroff_vms(module, client, vms, wait, wait_timeout, hard, tagged):
-    instances_ids = []
-    instances = []
-    tagged_instances = []
-
+def poweroff_vms(module, client, vms, hard):
     changed = False
 
     for vm in vms:
         changed = poweroff_vm(module, vm, hard) or changed
 
-    if wait and not module.check_mode:
-        for vm in vms:
-            wait_for_poweroff(module, vm, wait_timeout)
-
-    for vm in vms:
-        instances_ids.append(vm.id)
-        instances.append(get_vm_info(client, vm))
-        if tagged:
-            tagged_instances.append(get_vm_info(client, vm))
-
-    return {'changed': changed, 'instances_ids': instances_ids, 'instances': instances, 'tagged_instances': tagged_instances}
+    return changed
 
 
-def reboot_vms(module, client, vms, wait, wait_timeout, hard, tagged):
-    instances_ids = []
-    instances = []
-    tagged_instances = []
-
+def reboot_vms(module, client, vms, wait_timeout, hard):
     if not module.check_mode:
         # Firstly, power-off all instances
@@ -1069,17 +1139,7 @@ def reboot_vms(module, client, vms, wait, wait_timeout, hard, tagged):
         for vm in vms:
             resume_vm(module, vm)
 
-    if wait:
-        for vm in vms:
-            wait_for_running(module, vm, wait_timeout)
-
-    for vm in vms:
-        instances_ids.append(vm.id)
-        instances.append(get_vm_info(client, vm))
-        if tagged:
-            tagged_instances.append(get_vm_info(client, vm))
-
-    return {'changed': True, 'instances_ids': instances_ids, 'instances': instances, 'tagged_instances': tagged_instances}
+    return True
 
 
 def resume_vm(module, vm):
@@ -1099,27 +1159,13 @@ def resume_vm(module, vm):
     return changed
 
 
-def resume_vms(module, client, vms, wait, wait_timeout, tagged):
-    instances_ids = []
-    instances = []
-    tagged_instances = []
-
+def resume_vms(module, client, vms):
    changed = False
 
     for vm in vms:
         changed = resume_vm(module, vm) or changed
 
-    if wait and changed and not module.check_mode:
-        for vm in vms:
-            wait_for_running(module, vm, wait_timeout)
-
-    for vm in vms:
-        instances_ids.append(vm.id)
-        instances.append(get_vm_info(client, vm))
-        if tagged:
-            tagged_instances.append(get_vm_info(client, vm))
-
-    return {'changed': changed, 'instances_ids': instances_ids, 'instances': instances, 'tagged_instances': tagged_instances}
+    return changed
 
 
 def check_name_attribute(module, attributes):
@@ -1153,7 +1199,7 @@ def disk_save_as(module, client, vm, disk_saveas, wait_timeout):
     if vm.state != VM_STATES.index('POWEROFF'):
         module.fail_json(msg="'disksaveas' option can be used only when the VM is in 'POWEROFF' state")
     client.call('vm.disksaveas', vm.id, disk_id, image_name, 'OS', -1)
-    wait_for_poweroff(module, vm, wait_timeout)
+    wait_for_poweroff(module, vm, wait_timeout)  # wait for VM to leave the hotplug_saveas_poweroff state
 
 
 def get_connection_info(module):
@@ -1193,6 +1239,9 @@ def main():
             "choices": ['present', 'absent', 'rebooted', 'poweredoff', 'running'],
             "type": "str"
         },
+        "mode": {"required": False, "type": "str"},
+        "owner_id": {"required": False, "type": "int"},
+        "group_id": {"required": False, "type": "int"},
         "wait": {"default": True, "type": "bool"},
         "wait_timeout": {"default": 300, "type": "int"},
         "hard": {"default": False, "type": "bool"},
@@ -1213,6 +1262,7 @@ def main():
     module = AnsibleModule(argument_spec=fields,
                            mutually_exclusive=[
                                ['template_id', 'template_name', 'instance_ids'],
+                               ['template_id', 'template_name', 'disk_saveas'],
                                ['instance_ids', 'count_attributes', 'count'],
                                ['instance_ids', 'count_labels', 'count'],
                                ['instance_ids', 'exact_count'],
@@ -1237,6 +1287,9 @@ def main():
     requested_template_name = params.get('template_name')
     requested_template_id = params.get('template_id')
     state = params.get('state')
+    permissions = params.get('mode')
+    owner_id = params.get('owner_id')
+    group_id = params.get('group_id')
     wait = params.get('wait')
     wait_timeout = params.get('wait_timeout')
     hard = params.get('hard')
@@ -1305,13 +1358,24 @@ def main():
         if count <= 0:
             module.fail_json(msg='`count` has to be grater than 0')
 
+    if permissions is not None:
+        import re
+        if re.match("^[0-7]{3}$", permissions) is None:
+            module.fail_json(msg="Option `mode` has to have exactly 3 digits and be in the octet format e.g. 600")
+
     if exact_count is not None:
         # Deploy an exact count of VMs
-        result = create_exact_count_of_vms(module, client, template_id, exact_count, attributes, count_attributes,
-                                           labels, count_labels, disk_size, networks, hard, wait, wait_timeout)
+        changed, instances_list, tagged_instances_list = create_exact_count_of_vms(module, client, template_id, exact_count, attributes,
+                                                                                   count_attributes, labels, count_labels, disk_size,
+                                                                                   networks, hard, wait, wait_timeout)
+        vms = tagged_instances_list
     elif template_id and state == 'present':
         # Deploy count VMs
-        result = create_count_of_vms(module, client, template_id, count, attributes, labels, disk_size, networks, wait, wait_timeout)
+        changed, instances_list, tagged_instances_list = create_count_of_vms(module, client, template_id, count,
+                                                                             attributes, labels, disk_size, networks, wait, wait_timeout)
+        # instances_list - new instances
+        # tagged_instances_list - all instances with specified `count_attributes` and `count_labels`
+        vms = instances_list
     else:
         # Fetch data of instances, or change their state
         if not (instance_ids or attributes or labels):
@@ -1323,8 +1387,9 @@ def main():
         if hard and state not in ['rebooted', 'poweredoff', 'absent', 'present']:
             module.fail_json(msg="The 'hard' option can be used only for one of these states: 'rebooted', 'poweredoff', 'absent' and 'present'")
 
-        vms = None
+        vms = []
         tagged = False
+        changed = False
 
         if instance_ids:
             vms = get_vms_by_ids(module, client, state, instance_ids)
@@ -1332,34 +1397,57 @@ def main():
             tagged = True
             vms = get_all_vms_by_attributes(client, attributes, labels)
 
-        instances = list(get_vm_info(client, vm) for vm in vms if vm is not None)
-        instances_ids = list(vm.id for vm in vms if vm is not None)
-
-        if tagged:
-            result = {'changed': False, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': instances}
-        else:
-            result = {'changed': False, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': []}
-
         if len(vms) == 0 and state != 'absent' and state != 'present':
             module.fail_json(msg='There are no instances with specified `instance_ids`, `attributes` and/or `labels`')
 
         if len(vms) == 0 and state == 'present' and not tagged:
             module.fail_json(msg='There are no instances with specified `instance_ids`.')
 
+        if tagged and state == 'absent':
+            module.fail_json(msg='Option `instance_ids` is required when state is `absent`.')
+
         if state == 'absent':
-            result = terminate_vms(module, client, vms, wait, wait_timeout, hard, tagged)
+            changed = terminate_vms(module, client, vms, hard)
         elif state == 'rebooted':
-            result = reboot_vms(module, client, vms, wait, wait_timeout, hard, tagged)
+            changed = reboot_vms(module, client, vms, wait_timeout, hard)
         elif state == 'poweredoff':
-            result = poweroff_vms(module, client, vms, wait, wait_timeout, hard, tagged)
+            changed = poweroff_vms(module, client, vms, hard)
        elif state == 'running':
-            result = resume_vms(module, client, vms, wait, wait_timeout, tagged)
+            changed = resume_vms(module, client, vms)
+
+        instances_list = vms
+        tagged_instances_list = []
+
+    if permissions is not None:
+        changed = set_vm_permissions(module, client, vms, permissions) or changed
+
+    if owner_id is not None or group_id is not None:
+        changed = set_vm_ownership(module, client, vms, owner_id, group_id) or changed
+
+    if wait and not module.check_mode and state != 'present':
+        wait_for = {
+            'absent': wait_for_done,
+            'rebooted': wait_for_running,
+            'poweredoff': wait_for_poweroff,
+            'running': wait_for_running
+        }
+        for vm in vms:
+            if vm is not None:
+                wait_for[state](module, vm, wait_timeout)
 
     if disk_saveas is not None:
         if len(vms) == 0:
             module.fail_json(msg="There is no VM whose disk will be saved.")
 
         disk_save_as(module, client, vms[0], disk_saveas, wait_timeout)
-        result['changed'] = True
+        changed = True
+
+    # instances - a list of instances info whose state is changed or which are fetched with C(instance_ids) option
+    instances = list(get_vm_info(client, vm) for vm in instances_list if vm is not None)
+    instances_ids = list(vm.id for vm in instances_list if vm is not None)
+    # tagged_instances - A list of instances info based on a specific attributes and/or labels that are specified with C(count_attributes) and C(count_labels)
+    tagged_instances = list(get_vm_info(client, vm) for vm in tagged_instances_list if vm is not None)
+
+    result = {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': tagged_instances}
 
     module.exit_json(**result)

@@ -382,9 +382,9 @@
           - deploy_vms_with_count is changed
           - deploy_vms_with_count.instances_ids|length == 2
           - deploy_vms_with_count.instances|length == 2
-          - deploy_vms_with_count.tagged_instances|length == 2
-          - deploy_vms_with_count.tagged_instances[0].vm_name == "aero"
-          - deploy_vms_with_count.tagged_instances[1].vm_name == "aero"
+          - deploy_vms_with_count.tagged_instances|length == 0
+          - deploy_vms_with_count.instances[0].vm_name == "aero"
+          - deploy_vms_with_count.instances[1].vm_name == "aero"
 
     - name: Deploy 2 VMs with attributes to check it is not idempotent
       one_vm:
@@ -484,7 +484,7 @@
       assert:
         that:
           - not all_aero_vms_with_hash is changed
-          - all_aero_vms_with_hash.tagged_instances|length == 3
+          - all_aero_vms_with_hash.instances|length == 3
 
     - name: Decrement count of 'aero-#' instances
       one_vm:
@@ -514,7 +514,7 @@
           - new_vm_with_hash is changed
           - new_vm_with_hash.instances_ids|length == 1
           - new_vm_with_hash.instances|length == 1
-          - new_vm_with_hash.tagged_instances|length == 1
+          - new_vm_with_hash.tagged_instances|length == 0
          - new_vm_with_hash.instances[0].vm_name|regex_replace('(\d+)$','\1')|int == 0
 
   always:
@@ -626,7 +626,6 @@
       assert:
         that:
           - not vm_with_special_label is changed
-          - vm_with_special_label.tagged_instances|length == 1
           - vm_with_special_label.instances_ids|length == 1
           - vm_with_special_label.instances_ids[0] == new_vm_with_label.instances_ids[0]
@@ -691,7 +690,7 @@
     - name: Check there are 4 VMs with 'foo_app' key
       assert:
         that:
-          - all_foo_app_vms.tagged_instances|length == 4
+          - all_foo_app_vms.instances|length == 4
 
     - name: Decrement count of VMs with 'foo_app' key
       one_vm:
@@ -809,6 +808,222 @@
         that:
           - restricted_attributes.msg == "Restricted attribute `DISK` cannot be used when filtering VMs."
 
+- block:
+    - name: Deploy VM and set its mode
+      one_vm:
+        template_id: '{{ one_template_id }}'
+        mode: 640
+      register: deployed_vm
+
+    - name: Check if mode is set correctly
+      assert:
+        that:
+          - deployed_vm is changed
+          - deployed_vm.instances|length == 1
+          - deployed_vm.instances[0].mode == "640"
+
+    - name: Set VM permissions to 660
+      one_vm:
+        instance_ids: '{{ deployed_vm.instances_ids }}'
+        mode: 660
+      register: deployed_vm
+
+    - name: Check if mode is set correctly
+      assert:
+        that:
+          - deployed_vm is changed
+          - deployed_vm.instances|length == 1
+          - deployed_vm.instances[0].mode == "660"
+
+    - name: Set 660 permissions againt to check idempotence
+      one_vm:
+        instance_ids: '{{ deployed_vm.instances_ids[0] }}'
+        mode: 660
+      register: chmod_idempotent
+
+    - name: Check if chmod is idempotent
+      assert:
+        that:
+          - chmod_idempotent is not changed
+        msg: 'Permissions changing is not idempotent'
+
+    - name: Try to set permissions incorectly
+      one_vm:
+        instance_ids: '{{ deployed_vm.instances_ids[0] }}'
+        mode: 8983
+      register: chmod_failed
+      failed_when: not chmod_failed is failed
+
+    - name: Try to set permissions incorectly
+      one_vm:
+        instance_ids: '{{ deployed_vm.instances_ids[0] }}'
+        mode: 64a
+      register: chmod_failed
+      failed_when: not chmod_failed is failed
+
+    - name: Set 664 permissions
+      one_vm:
+        instance_ids: '{{ deployed_vm.instances_ids[0] }}'
+        mode: 664
+      register: vm_chmod
+
+    - name: Verify permissions changing
+      assert:
+        that:
+          - vm_chmod is changed
+          - vm_chmod.instances|length == 1
+          - vm_chmod.instances[0].mode == "664"
+        msg: 'Permissions changing is failed'
+
+    - name: Deploy 2 VMs with label 'test-mode' and mode 640
+      one_vm:
+        template_id: '{{ one_template_id }}'
+        count_labels:
+          - test-mode
+        exact_count: 2
+        mode: 640
+      register: deployed_vm2
+
+    - name: Verify VMs permissions
+      assert:
+        that:
+          - deployed_vm2 is changed
+          - deployed_vm2.instances|length == 2
+          - deployed_vm2.instances[0].mode == "640"
+          - deployed_vm2.instances[1].mode == "640"
+
+    - name: Change permissions of first VM
+      one_vm:
+        instance_ids: '{{ deployed_vm2.instances_ids[0] }}'
+        mode: 644
+      register: chmod_vm1
+
+    - name: Verify VM permissions
+      assert:
+        that:
+          - chmod_vm1 is changed
+          - chmod_vm1.instances|length == 1
+          - chmod_vm1.instances[0].mode == "644"
+
+    - name: Change permissions on both VMs
+      one_vm:
+        instance_ids: '{{ deployed_vm2.instances_ids }}'
+        mode: 644
+      register: deployed_vm2
+
+    - name: Verify VMs permissions
+      assert:
+        that:
+          - deployed_vm2 is changed
+          - deployed_vm2.instances|length == 2
+          - deployed_vm2.instances[0].mode == "644"
+          - deployed_vm2.instances[1].mode == "644"
+
+    - name: Change VMs permissions using the label
+      one_vm:
+        labels:
+          - test-mode
+        mode: 664
+      register: label_chmod
+
+    - name: Verify VMs permissions
+      assert:
+        that:
+          - label_chmod is changed
+          - label_chmod.instances|length == 2
+          - label_chmod.instances[0].mode == "664"
+          - label_chmod.instances[1].mode == "664"
+
+    - name: Deploy 2 more VMs with label 'test-mode' and mode 640
+      one_vm:
+        template_id: '{{ one_template_id }}'
+        count_labels:
+          - test-mode
+        exact_count: 4
+        mode: 640
+      register: deployed_vm4
+
+    - name: Verify VMs permissions
+      assert:
+        that:
+          - deployed_vm4 is changed
+          - deployed_vm4.tagged_instances|length == 4
+          - deployed_vm4.tagged_instances[0].mode == "640"
+          - deployed_vm4.tagged_instances[1].mode == "640"
+          - deployed_vm4.tagged_instances[2].mode == "640"
+          - deployed_vm4.tagged_instances[3].mode == "640"
+
+    - name: Terminate 2 VMs with label 'test-mode' and set mode 660 on remaining VMs
+      one_vm:
+        template_id: '{{ one_template_id }}'
+        count_labels:
+          - test-mode
+        exact_count: 2
+        mode: 660
+      register: terminate_vm4
+
+    - name: Verify VMs permissions
+      assert:
+        that:
+          - terminate_vm4 is changed
+          - terminate_vm4.instances|length == 2  # 2 removed
+          - terminate_vm4.tagged_instances|length == 2  # 2 remaining with label test-mode
+          - terminate_vm4.instances[0].mode == "640"
+          - terminate_vm4.instances[1].mode == "640"
+          - terminate_vm4.tagged_instances[0].mode == "660"
+          - terminate_vm4.tagged_instances[1].mode == "660"
+
+  always:
+    - name: Delete VM
+      one_vm:
+        instance_ids: '{{ deployed_vm.instances_ids }}'
+        state: absent
+        hard: yes
+
+    - name: Delete VMs
+      one_vm:
+        instance_ids: '{{ deployed_vm4.instances_ids }}'
+        state: absent
+        hard: yes
+
+  tags: test-chmod
+
+- block:
+    - name: Deploy VM
+      one_vm:
+        template_id: '{{ one_template_id }}'
+      register: deployed_vm
+
+    - name: Change VM's group
+      one_vm:
+        instance_ids: '{{ deployed_vm.instances_ids }}'
+        group_id: 1
+      register: changed_group
+
+    - name: Verify group changing
+      assert:
+        that:
+          - deployed_vm is changed
+          - changed_group is changed
+          - deployed_vm.instances|length == 1
+          - changed_group.instances|length == 1
+          - changed_group.instances[0].owner_id == deployed_vm.instances[0].owner_id
+          - changed_group.instances[0].group_id != deployed_vm.instances[0].group_id
+
+    - name: Try to set non-existent group
+      one_vm:
+        instance_ids: '{{ deployed_vm.instances_ids }}'
+        group_id: -999
+      register: changed_group
+      failed_when: changed_group is not failed
+
+  always:
+    - name: Delete VM
+      one_vm:
+        instance_ids: '{{ deployed_vm.instances_ids }}'
+        state: absent
+        hard: yes
+
+  tags: test-chown
+
 - name: Test images creation
   block:
     - name: Set fact image name
