one_vm: moved to pyone added Persistent Deployment (#57393)

* moved to pyone added Persistent Deployment

Moved from python-oca to pyone.
Added Persistent deployment of VMs.

* Cleanup fixed missing whitespace

* cleanup whitespace and indent

* corrected Versions and fixed disk count in error msg

* inc version

* wrong version for vm_start_on_hold

* added datastore for new instances

* added multiple Disks

* fixed missing info
pull/57393/merge
Jan Meerkamp 5 years ago committed by ansibot
parent 7448084858
commit 94c23136be

@ -7,6 +7,7 @@ __metaclass__ = type
""" """
(c) 2017, Milan Ilic <milani@nordeus.com> (c) 2017, Milan Ilic <milani@nordeus.com>
(c) 2019, Jan Meerkamp <meerkamp@dvv.de>
This file is part of Ansible This file is part of Ansible
@ -36,7 +37,7 @@ description:
- Manages OpenNebula instances - Manages OpenNebula instances
version_added: "2.6" version_added: "2.6"
requirements: requirements:
- python-oca - pyone
options: options:
api_url: api_url:
description: description:
@ -151,8 +152,8 @@ options:
disk_size: disk_size:
description: description:
- The size of the disk created for new instances (in MB, GB, TB,...). - The size of the disk created for new instances (in MB, GB, TB,...).
- NOTE':' This option can be used only if the VM template specified with - NOTE':' If the template has multiple disks, the order of the sizes is
- C(template_id)/C(template_name) has exactly one disk. - matched against the order specified in C(template_id)/C(template_name).
cpu: cpu:
description: description:
- Percentage of CPU divided by 100 required for the new instance. Half a - Percentage of CPU divided by 100 required for the new instance. Half a
@ -172,8 +173,23 @@ options:
- I(NOTE)':' This operation will only be performed on the first VM (if more than one VM ID is passed) - I(NOTE)':' This operation will only be performed on the first VM (if more than one VM ID is passed)
- and the VM has to be in the C(poweredoff) state. - and the VM has to be in the C(poweredoff) state.
- Also this operation will fail if an image with specified C(name) already exists. - Also this operation will fail if an image with specified C(name) already exists.
persistent:
description:
- Create a private persistent copy of the template plus any image defined in DISK, and instantiate that copy.
default: NO
type: bool
version_added: '2.10'
datastore_id:
description:
- Name of Datastore to use to create a new instance
version_added: '2.10'
datastore_name:
description:
- Name of Datastore to use to create a new instance
version_added: '2.10'
author: author:
- "Milan Ilic (@ilicmilan)" - "Milan Ilic (@ilicmilan)"
- "Jan Meerkamp (@meerkampdvv)"
''' '''
@ -204,6 +220,11 @@ EXAMPLES = '''
group_id: 16 group_id: 16
mode: 660 mode: 660
# Deploy a new VM as persistent
- one_vm:
template_id: 90
persistent: yes
# Change VM's permissions to 640 # Change VM's permissions to 640
- one_vm: - one_vm:
instance_ids: 5 instance_ids: 5
@ -224,6 +245,18 @@ EXAMPLES = '''
- NETWORK_ID: 27 - NETWORK_ID: 27
SECURITY_GROUPS: "10" SECURITY_GROUPS: "10"
# Deploy a new instance which uses a Template with two Disks
- one_vm:
template_id: 42
disk_size:
- 35.2 GB
- 50 GB
memory: 4 GB
vcpu: 4
count: 1
networks:
- NETWORK_ID: 27
# Deploy an new instance with attribute 'bar: bar1' and set its name to 'foo' # Deploy an new instance with attribute 'bar: bar1' and set its name to 'foo'
- one_vm: - one_vm:
template_id: 53 template_id: 53
@ -347,7 +380,7 @@ EXAMPLES = '''
# Power-off the VM and save VM's disk with id=0 to the image with name 'foo-image' # Power-off the VM and save VM's disk with id=0 to the image with name 'foo-image'
- one_vm: - one_vm:
instance_ids: 351 instance_ids: 351
state: powered-off state: poweredoff
disk_saveas: disk_saveas:
name: foo-image name: foo-image
@ -527,8 +560,11 @@ tagged_instances:
sample: 4096 MB sample: 4096 MB
disk_size: disk_size:
description: The size of the disk in MB description: The size of the disk in MB
type: str type: list
sample: 20480 MB sample: [
"20480 MB",
"10240 MB"
]
networks: networks:
description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC
type: list type: list
@ -568,30 +604,29 @@ tagged_instances:
} }
''' '''
try: try:
import oca import pyone
HAS_OCA = True HAS_PYONE = True
except ImportError: except ImportError:
HAS_OCA = False HAS_PYONE = False
from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.basic import AnsibleModule
import os import os
def get_template(module, client, predicate): def get_template(module, client, predicate):
pool = oca.VmTemplatePool(client)
pool = client.templatepool.info(-2, -1, -1, -1)
# Filter -2 means fetch all templates user can Use # Filter -2 means fetch all templates user can Use
pool.info(filter=-2)
found = 0 found = 0
found_template = None found_template = None
template_name = '' template_name = ''
for template in pool: for template in pool.VMTEMPLATE:
if predicate(template): if predicate(template):
found = found + 1 found = found + 1
found_template = template found_template = template
template_name = template.name template_name = template.NAME
if found == 0: if found == 0:
return None return None
@ -601,37 +636,64 @@ def get_template(module, client, predicate):
def get_template_by_name(module, client, template_name): def get_template_by_name(module, client, template_name):
return get_template(module, client, lambda template: (template.name == template_name)) return get_template(module, client, lambda template: (template.NAME == template_name))
def get_template_by_id(module, client, template_id): def get_template_by_id(module, client, template_id):
return get_template(module, client, lambda template: (template.id == template_id)) return get_template(module, client, lambda template: (template.ID == template_id))
def get_template_id(module, client, requested_id, requested_name): def get_template_id(module, client, requested_id, requested_name):
template = get_template_by_id(module, client, requested_id) if requested_id else get_template_by_name(module, client, requested_name) template = get_template_by_id(module, client, requested_id) if requested_id else get_template_by_name(module, client, requested_name)
if template: if template:
return template.id return template.ID
else: else:
return None return None
def get_vm_by_id(client, vm_id): def get_datastore(module, client, predicate):
pool = oca.VirtualMachinePool(client) pool = client.datastorepool.info()
# Retrieves information for all or part of the vms pool found = 0
# -4: Vms belonging to the user's primary group found_datastore = None
# -3: Vms belonging to the user datastore_name = ''
# -2: All vms user can Use
# -1: Vms belonging to the user and any of his groups - default for datastore in pool.DATASTORE:
# >= 0: UID User's vms if predicate(datastore):
pool.info(filter=-2, range_start=int(vm_id), range_end=int(vm_id)) found = found + 1
found_datastore = datastore
if len(pool) == 1: datastore_name = datastore.NAME
return pool[0]
if found == 0:
return None
elif found > 1:
module.fail_json(msg='There are more datastores with name: ' + datastore_name)
return found_datastore
def get_datastore_by_name(module, client, datastore_name):
return get_datastore(module, client, lambda datastore: (datastore.NAME == datastore_name))
def get_datastore_by_id(module, client, datastore_id):
return get_datastore(module, client, lambda datastore: (datastore.ID == datastore_id))
def get_datastore_id(module, client, requested_id, requested_name):
datastore = get_datastore_by_id(module, client, requested_id) if requested_id else get_datastore_by_name(module, client, requested_name)
if datastore:
return datastore.ID
else: else:
return None return None
def get_vm_by_id(client, vm_id):
try:
vm = client.vm.info(int(vm_id))
except BaseException:
return None
return vm
def get_vms_by_ids(module, client, state, ids): def get_vms_by_ids(module, client, state, ids):
vms = [] vms = []
@ -645,21 +707,31 @@ def get_vms_by_ids(module, client, state, ids):
def get_vm_info(client, vm): def get_vm_info(client, vm):
vm.info()
vm = client.vm.info(vm.ID)
networks_info = [] networks_info = []
disk_size = '' disk_size = []
if hasattr(vm.template, 'disks'): if 'DISK' in vm.TEMPLATE:
disk_size = vm.template.disks[0].size + ' MB' if isinstance(vm.TEMPLATE['DISK'], list):
for disk in vm.TEMPLATE['DISK']:
disk_size.append(disk['SIZE'] + ' MB')
else:
disk_size.append(vm.TEMPLATE['DISK']['SIZE'] + ' MB')
if hasattr(vm.template, 'nics'): if 'NIC' in vm.TEMPLATE:
for nic in vm.template.nics: if isinstance(vm.TEMPLATE['NIC'], list):
networks_info.append({'ip': nic.ip, 'mac': nic.mac, 'name': nic.network, 'security_groups': nic.security_groups}) for nic in vm.TEMPLATE['NIC']:
networks_info.append({'ip': nic['IP'], 'mac': nic['MAC'], 'name': nic['NETWORK'], 'security_groups': nic['SECURITY_GROUPS']})
else:
networks_info.append(
{'ip': vm.TEMPLATE['NIC']['IP'], 'mac': vm.TEMPLATE['NIC']['MAC'],
'name': vm.TEMPLATE['NIC']['NETWORK'], 'security_groups': vm.TEMPLATE['NIC']['SECURITY_GROUPS']})
import time import time
current_time = time.localtime() current_time = time.localtime()
vm_start_time = time.localtime(vm.stime) vm_start_time = time.localtime(vm.STIME)
vm_uptime = time.mktime(current_time) - time.mktime(vm_start_time) vm_uptime = time.mktime(current_time) - time.mktime(vm_start_time)
vm_uptime /= (60 * 60) vm_uptime /= (60 * 60)
@ -668,25 +740,26 @@ def get_vm_info(client, vm):
# LCM_STATE is VM's sub-state that is relevant only when STATE is ACTIVE # LCM_STATE is VM's sub-state that is relevant only when STATE is ACTIVE
vm_lcm_state = None vm_lcm_state = None
if vm.state == VM_STATES.index('ACTIVE'): if vm.STATE == VM_STATES.index('ACTIVE'):
vm_lcm_state = LCM_STATES[vm.lcm_state] vm_lcm_state = LCM_STATES[vm.LCM_STATE]
vm_labels, vm_attributes = get_vm_labels_and_attributes_dict(client, vm.ID)
vm_labels, vm_attributes = get_vm_labels_and_attributes_dict(client, vm.id)
info = { info = {
'template_id': int(vm.template.template_id), 'template_id': int(vm.TEMPLATE['TEMPLATE_ID']),
'vm_id': vm.id, 'vm_id': vm.ID,
'vm_name': vm.name, 'vm_name': vm.NAME,
'state': VM_STATES[vm.state], 'state': VM_STATES[vm.STATE],
'lcm_state': vm_lcm_state, 'lcm_state': vm_lcm_state,
'owner_name': vm.uname, 'owner_name': vm.UNAME,
'owner_id': vm.uid, 'owner_id': vm.UID,
'networks': networks_info, 'networks': networks_info,
'disk_size': disk_size, 'disk_size': disk_size,
'memory': vm.template.memory + ' MB', 'memory': vm.TEMPLATE['MEMORY'] + ' MB',
'vcpu': vm.template.vcpu, 'vcpu': vm.TEMPLATE['VCPU'],
'cpu': vm.template.cpu, 'cpu': vm.TEMPLATE['CPU'],
'group_name': vm.gname, 'group_name': vm.GNAME,
'group_id': vm.gid, 'group_id': vm.GID,
'uptime_h': int(vm_uptime), 'uptime_h': int(vm_uptime),
'attributes': vm_attributes, 'attributes': vm_attributes,
'mode': permissions_str, 'mode': permissions_str,
@ -697,37 +770,11 @@ def get_vm_info(client, vm):
def parse_vm_permissions(client, vm): def parse_vm_permissions(client, vm):
vm_PERMISSIONS = client.vm.info(vm.ID).PERMISSIONS
import xml.etree.ElementTree as ET owner_octal = int(vm_PERMISSIONS.OWNER_U) * 4 + int(vm_PERMISSIONS.OWNER_M) * 2 + int(vm_PERMISSIONS.OWNER_A)
vm_XML = client.call('vm.info', vm.id) group_octal = int(vm_PERMISSIONS.GROUP_U) * 4 + int(vm_PERMISSIONS.GROUP_M) * 2 + int(vm_PERMISSIONS.GROUP_A)
root = ET.fromstring(vm_XML) other_octal = int(vm_PERMISSIONS.OTHER_U) * 4 + int(vm_PERMISSIONS.OTHER_M) * 2 + int(vm_PERMISSIONS.OTHER_A)
perm_dict = {}
root = root.find('PERMISSIONS')
for child in root:
perm_dict[child.tag] = child.text
'''
This is the structure of the 'PERMISSIONS' dictionary:
"PERMISSIONS": {
"OWNER_U": "1",
"OWNER_M": "1",
"OWNER_A": "0",
"GROUP_U": "0",
"GROUP_M": "0",
"GROUP_A": "0",
"OTHER_U": "0",
"OTHER_M": "0",
"OTHER_A": "0"
}
'''
owner_octal = int(perm_dict["OWNER_U"]) * 4 + int(perm_dict["OWNER_M"]) * 2 + int(perm_dict["OWNER_A"])
group_octal = int(perm_dict["GROUP_U"]) * 4 + int(perm_dict["GROUP_M"]) * 2 + int(perm_dict["GROUP_A"])
other_octal = int(perm_dict["OTHER_U"]) * 4 + int(perm_dict["OTHER_M"]) * 2 + int(perm_dict["OTHER_A"])
permissions = str(owner_octal) + str(group_octal) + str(other_octal) permissions = str(owner_octal) + str(group_octal) + str(other_octal)
@ -738,8 +785,7 @@ def set_vm_permissions(module, client, vms, permissions):
changed = False changed = False
for vm in vms: for vm in vms:
vm.info() vm = client.vm.info(vm.ID)
print(vm.id)
old_permissions = parse_vm_permissions(client, vm) old_permissions = parse_vm_permissions(client, vm)
changed = changed or old_permissions != permissions changed = changed or old_permissions != permissions
@ -747,9 +793,9 @@ def set_vm_permissions(module, client, vms, permissions):
permissions_str = bin(int(permissions, base=8))[2:] # 600 -> 110000000 permissions_str = bin(int(permissions, base=8))[2:] # 600 -> 110000000
mode_bits = [int(d) for d in permissions_str] mode_bits = [int(d) for d in permissions_str]
try: try:
client.call('vm.chmod', vm.id, mode_bits[0], mode_bits[1], mode_bits[2], mode_bits[3], client.vm.chmod(
mode_bits[4], mode_bits[5], mode_bits[6], mode_bits[7], mode_bits[8]) vm.ID, mode_bits[0], mode_bits[1], mode_bits[2], mode_bits[3], mode_bits[4], mode_bits[5], mode_bits[6], mode_bits[7], mode_bits[8])
except oca.OpenNebulaException: except pyone.OneAuthorizationException:
module.fail_json(msg="Permissions changing is unsuccessful, but instances are present if you deployed them.") module.fail_json(msg="Permissions changing is unsuccessful, but instances are present if you deployed them.")
return changed return changed
@ -759,18 +805,18 @@ def set_vm_ownership(module, client, vms, owner_id, group_id):
changed = False changed = False
for vm in vms: for vm in vms:
vm.info() vm = client.vm.info(vm.ID)
if owner_id is None: if owner_id is None:
owner_id = vm.uid owner_id = vm.UID
if group_id is None: if group_id is None:
group_id = vm.gid group_id = vm.GID
changed = changed or owner_id != vm.uid or group_id != vm.gid changed = changed or owner_id != vm.UID or group_id != vm.GID
if not module.check_mode and (owner_id != vm.uid or group_id != vm.gid): if not module.check_mode and (owner_id != vm.UID or group_id != vm.GID):
try: try:
client.call('vm.chown', vm.id, owner_id, group_id) client.vm.chown(vm.ID, owner_id, group_id)
except oca.OpenNebulaException: except pyone.OneAuthorizationException:
module.fail_json(msg="Ownership changing is unsuccessful, but instances are present if you deployed them.") module.fail_json(msg="Ownership changing is unsuccessful, but instances are present if you deployed them.")
return changed return changed
@ -803,33 +849,41 @@ def get_size_in_MB(module, size_str):
return size_in_MB return size_in_MB
def create_disk_str(module, client, template_id, disk_size_str): def create_disk_str(module, client, template_id, disk_size_list):
if not disk_size_str: if not disk_size_list:
return '' return ''
import xml.etree.ElementTree as ET template = client.template.info(template_id)
if isinstance(template.TEMPLATE['DISK'], list):
template_XML = client.call('template.info', template_id) # check if the number of disks is correct
root = ET.fromstring(template_XML) if len(template.TEMPLATE['DISK']) != len(disk_size_list):
module.fail_json(msg='This template has ' + str(len(template.TEMPLATE['DISK'])) + ' disks but you defined ' + str(len(disk_size_list)))
disks_num = 0 result = ''
disk = None index = 0
for DISKS in template.TEMPLATE['DISK']:
for child in root.find('TEMPLATE').findall('DISK'): disk = {}
disks_num += 1 diskresult = ''
root = child # Get all info about existed disk e.g. IMAGE_ID,...
for key, value in DISKS.items():
if disks_num != 1: disk[key] = value
module.fail_json(msg='You can pass disk_size only if template has exact one disk. This template has ' + str(disks_num) + ' disks.') # copy disk attributes if it is not the size attribute
diskresult += 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE')
disk = {} # Set the Disk Size
# Get all info about existed disk e.g. IMAGE_ID,... diskresult += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[index]))) + ']\n'
for child in root: result += diskresult
disk[child.tag] = child.text index += 1
else:
result = 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE') if len(disk_size_list) > 1:
result += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_str))) + ']\n' module.fail_json(msg='This template has one disk but you defined ' + str(len(disk_size_list)))
disk = {}
# Get all info about existed disk e.g. IMAGE_ID,...
for key, value in template.TEMPLATE['DISK'].items():
disk[key] = value
# copy disk attributes if it is not the size attribute
result = 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE')
# Set the Disk Size
result += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[0]))) + ']\n'
return result return result
@ -857,14 +911,17 @@ def create_nics_str(network_attrs_list):
return nics_str return nics_str
def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold): def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent):
if attributes_dict: if attributes_dict:
vm_name = attributes_dict.get('NAME', '') vm_name = attributes_dict.get('NAME', '')
disk_str = create_disk_str(module, client, template_id, disk_size) disk_str = create_disk_str(module, client, template_id, disk_size)
vm_extra_template_str = create_attributes_str(attributes_dict, labels_list) + create_nics_str(network_attrs_list) + disk_str vm_extra_template_str = create_attributes_str(attributes_dict, labels_list) + create_nics_str(network_attrs_list) + disk_str
vm_id = client.call('template.instantiate', template_id, vm_name, vm_start_on_hold, vm_extra_template_str) try:
vm_id = client.template.instantiate(template_id, vm_name, vm_start_on_hold, vm_extra_template_str, vm_persistent)
except pyone.OneException as e:
module.fail_json(msg=str(e))
vm = get_vm_by_id(client, vm_id) vm = get_vm_by_id(client, vm_id)
return get_vm_info(client, vm) return get_vm_info(client, vm)
@ -882,34 +939,23 @@ def generate_next_index(vm_filled_indexes_list, num_sign_cnt):
def get_vm_labels_and_attributes_dict(client, vm_id): def get_vm_labels_and_attributes_dict(client, vm_id):
import xml.etree.ElementTree as ET vm_USER_TEMPLATE = client.vm.info(vm_id).USER_TEMPLATE
vm_XML = client.call('vm.info', vm_id)
root = ET.fromstring(vm_XML)
attrs_dict = {} attrs_dict = {}
labels_list = [] labels_list = []
root = root.find('USER_TEMPLATE') for key, value in vm_USER_TEMPLATE.items():
if key != 'LABELS':
for child in root: attrs_dict[key] = value
if child.tag != 'LABELS':
attrs_dict[child.tag] = child.text
else: else:
if child.text is not None: if key is not None:
labels_list = child.text.split(',') labels_list = value.split(',')
return labels_list, attrs_dict return labels_list, attrs_dict
def get_all_vms_by_attributes(client, attributes_dict, labels_list): def get_all_vms_by_attributes(client, attributes_dict, labels_list):
pool = oca.VirtualMachinePool(client) pool = client.vmpool.info(-2, -1, -1, -1).VM
# Retrieves information for all or part of the vms pool
# -4: Vms belonging to the user's primary group
# -3: Vms belonging to the user
# -2: All vms user can Use
# -1: Vms belonging to the user and any of his groups - default
# >= 0: UID User's vms
pool.info(filter=-2)
vm_list = [] vm_list = []
name = '' name = ''
if attributes_dict: if attributes_dict:
@ -921,11 +967,11 @@ def get_all_vms_by_attributes(client, attributes_dict, labels_list):
with_hash = name.endswith('#') with_hash = name.endswith('#')
for vm in pool: for vm in pool:
if vm.name.startswith(base_name): if vm.NAME.startswith(base_name):
if with_hash and vm.name[len(base_name):].isdigit(): if with_hash and vm.NAME[len(base_name):].isdigit():
# If the name has indexed format and after base_name it has only digits it'll be matched # If the name has indexed format and after base_name it has only digits it'll be matched
vm_list.append(vm) vm_list.append(vm)
elif not with_hash and vm.name == name: elif not with_hash and vm.NAME == name:
# If the name is not indexed it has to be same # If the name is not indexed it has to be same
vm_list.append(vm) vm_list.append(vm)
pool = vm_list pool = vm_list
@ -935,28 +981,33 @@ def get_all_vms_by_attributes(client, attributes_dict, labels_list):
vm_list = copy.copy(pool) vm_list = copy.copy(pool)
for vm in pool: for vm in pool:
vm_labels_list, vm_attributes_dict = get_vm_labels_and_attributes_dict(client, vm.id) remove_list = []
vm_labels_list, vm_attributes_dict = get_vm_labels_and_attributes_dict(client, vm.ID)
if attributes_dict and len(attributes_dict) > 0: if attributes_dict and len(attributes_dict) > 0:
for key, val in attributes_dict.items(): for key, val in attributes_dict.items():
if key in vm_attributes_dict: if key in vm_attributes_dict:
if val and vm_attributes_dict[key] != val and vm in vm_list: if val and vm_attributes_dict[key] != val:
vm_list.remove(vm) remove_list.append(vm)
break break
else: else:
if vm in vm_list: remove_list.append(vm)
vm_list.remove(vm)
break break
vm_list = list(set(vm_list).difference(set(remove_list)))
remove_list = []
if labels_list and len(labels_list) > 0: if labels_list and len(labels_list) > 0:
for label in labels_list: for label in labels_list:
if label not in vm_labels_list and vm in vm_list: if label not in vm_labels_list:
vm_list.remove(vm) remove_list.append(vm)
break break
vm_list = list(set(vm_list).difference(set(remove_list)))
return vm_list return vm_list
def create_count_of_vms(module, client, template_id, count, attributes_dict, labels_list, disk_size, network_attrs_list, wait, wait_timeout, vm_start_on_hold): def create_count_of_vms(
module, client, template_id, count, attributes_dict, labels_list, disk_size, network_attrs_list, wait, wait_timeout, vm_start_on_hold, vm_persistent):
new_vms_list = [] new_vms_list = []
vm_name = '' vm_name = ''
@ -974,7 +1025,7 @@ def create_count_of_vms(module, client, template_id, count, attributes_dict, lab
base_name = vm_name[:len(vm_name) - num_sign_cnt] base_name = vm_name[:len(vm_name) - num_sign_cnt]
vm_name = base_name vm_name = base_name
# Make list which contains used indexes in format ['000', '001',...] # Make list which contains used indexes in format ['000', '001',...]
vm_filled_indexes_list = list((vm.name[len(base_name):].zfill(num_sign_cnt)) for vm in vm_list) vm_filled_indexes_list = list((vm.NAME[len(base_name):].zfill(num_sign_cnt)) for vm in vm_list)
while count > 0: while count > 0:
new_vm_name = vm_name new_vm_name = vm_name
@ -985,7 +1036,7 @@ def create_count_of_vms(module, client, template_id, count, attributes_dict, lab
new_vm_name += next_index new_vm_name += next_index
# Update NAME value in the attributes in case there is index # Update NAME value in the attributes in case there is index
attributes_dict['NAME'] = new_vm_name attributes_dict['NAME'] = new_vm_name
new_vm_dict = create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold) new_vm_dict = create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent)
new_vm_id = new_vm_dict.get('vm_id') new_vm_id = new_vm_dict.get('vm_id')
new_vm = get_vm_by_id(client, new_vm_id) new_vm = get_vm_by_id(client, new_vm_id)
new_vms_list.append(new_vm) new_vms_list.append(new_vm)
@ -994,17 +1045,17 @@ def create_count_of_vms(module, client, template_id, count, attributes_dict, lab
if vm_start_on_hold: if vm_start_on_hold:
if wait: if wait:
for vm in new_vms_list: for vm in new_vms_list:
wait_for_hold(module, vm, wait_timeout) wait_for_hold(module, client, vm, wait_timeout)
else: else:
if wait: if wait:
for vm in new_vms_list: for vm in new_vms_list:
wait_for_running(module, vm, wait_timeout) wait_for_running(module, client, vm, wait_timeout)
return True, new_vms_list, [] return True, new_vms_list, []
def create_exact_count_of_vms(module, client, template_id, exact_count, attributes_dict, count_attributes_dict, def create_exact_count_of_vms(module, client, template_id, exact_count, attributes_dict, count_attributes_dict,
labels_list, count_labels_list, disk_size, network_attrs_list, hard, wait, wait_timeout, vm_start_on_hold): labels_list, count_labels_list, disk_size, network_attrs_list, hard, wait, wait_timeout, vm_start_on_hold, vm_persistent):
vm_list = get_all_vms_by_attributes(client, count_attributes_dict, count_labels_list) vm_list = get_all_vms_by_attributes(client, count_attributes_dict, count_labels_list)
@ -1021,7 +1072,8 @@ def create_exact_count_of_vms(module, client, template_id, exact_count, attribut
if vm_count_diff > 0: if vm_count_diff > 0:
# Add more VMs # Add more VMs
changed, instances_list, tagged_instances = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict, changed, instances_list, tagged_instances = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict,
labels_list, disk_size, network_attrs_list, wait, wait_timeout, vm_start_on_hold) labels_list, disk_size, network_attrs_list, wait, wait_timeout,
vm_start_on_hold, vm_persistent)
tagged_instances_list += instances_list tagged_instances_list += instances_list
elif vm_count_diff < 0: elif vm_count_diff < 0:
@ -1036,7 +1088,7 @@ def create_exact_count_of_vms(module, client, template_id, exact_count, attribut
if wait: if wait:
for vm in old_vms_list: for vm in old_vms_list:
wait_for_done(module, vm, wait_timeout) wait_for_done(module, client, vm, wait_timeout)
instances_list = old_vms_list instances_list = old_vms_list
# store only the remaining instances # store only the remaining instances
@ -1054,19 +1106,19 @@ LCM_STATES = ['LCM_INIT', 'PROLOG', 'BOOT', 'RUNNING', 'MIGRATE', 'SAVE_STOP',
'HOTPLUG_SAVEAS', 'HOTPLUG_SAVEAS_POWEROFF', 'HOTPULG_SAVEAS_SUSPENDED', 'SHUTDOWN_UNDEPLOY'] 'HOTPLUG_SAVEAS', 'HOTPLUG_SAVEAS_POWEROFF', 'HOTPULG_SAVEAS_SUSPENDED', 'SHUTDOWN_UNDEPLOY']
def wait_for_state(module, vm, wait_timeout, state_predicate): def wait_for_state(module, client, vm, wait_timeout, state_predicate):
import time import time
start_time = time.time() start_time = time.time()
while (time.time() - start_time) < wait_timeout: while (time.time() - start_time) < wait_timeout:
vm.info() vm = client.vm.info(vm.ID)
state = vm.state state = vm.STATE
lcm_state = vm.lcm_state lcm_state = vm.LCM_STATE
if state_predicate(state, lcm_state): if state_predicate(state, lcm_state):
return vm return vm
elif state not in [VM_STATES.index('INIT'), VM_STATES.index('PENDING'), VM_STATES.index('HOLD'), elif state not in [VM_STATES.index('INIT'), VM_STATES.index('PENDING'), VM_STATES.index('HOLD'),
VM_STATES.index('ACTIVE'), VM_STATES.index('POWEROFF')]: VM_STATES.index('ACTIVE'), VM_STATES.index('CLONING'), VM_STATES.index('POWEROFF')]:
module.fail_json(msg='Action is unsuccessful. VM state: ' + VM_STATES[state]) module.fail_json(msg='Action is unsuccessful. VM state: ' + VM_STATES[state])
time.sleep(1) time.sleep(1)
@ -1074,21 +1126,21 @@ def wait_for_state(module, vm, wait_timeout, state_predicate):
module.fail_json(msg="Wait timeout has expired!") module.fail_json(msg="Wait timeout has expired!")
def wait_for_running(module, vm, wait_timeout): def wait_for_running(module, client, vm, wait_timeout):
return wait_for_state(module, vm, wait_timeout, lambda state, return wait_for_state(module, client, vm, wait_timeout, lambda state,
lcm_state: (state in [VM_STATES.index('ACTIVE')] and lcm_state in [LCM_STATES.index('RUNNING')])) lcm_state: (state in [VM_STATES.index('ACTIVE')] and lcm_state in [LCM_STATES.index('RUNNING')]))
def wait_for_done(module, vm, wait_timeout): def wait_for_done(module, client, vm, wait_timeout):
return wait_for_state(module, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('DONE')])) return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('DONE')]))
def wait_for_hold(module, vm, wait_timeout): def wait_for_hold(module, client, vm, wait_timeout):
return wait_for_state(module, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('HOLD')])) return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('HOLD')]))
def wait_for_poweroff(module, vm, wait_timeout): def wait_for_poweroff(module, client, vm, wait_timeout):
return wait_for_state(module, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('POWEROFF')])) return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('POWEROFF')]))
def terminate_vm(module, client, vm, hard=False): def terminate_vm(module, client, vm, hard=False):
@ -1101,9 +1153,9 @@ def terminate_vm(module, client, vm, hard=False):
if not module.check_mode: if not module.check_mode:
if hard: if hard:
client.call('vm.action', 'terminate-hard', vm.id) client.vm.action('terminate-hard', vm.ID)
else: else:
client.call('vm.action', 'terminate', vm.id) client.vm.action('terminate', vm.ID)
return changed return changed
@ -1117,21 +1169,21 @@ def terminate_vms(module, client, vms, hard):
return changed return changed
def poweroff_vm(module, vm, hard): def poweroff_vm(module, client, vm, hard):
vm.info() vm = client.vm.info(vm.ID)
changed = False changed = False
lcm_state = vm.lcm_state lcm_state = vm.LCM_STATE
state = vm.state state = vm.STATE
if lcm_state not in [LCM_STATES.index('SHUTDOWN'), LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]: if lcm_state not in [LCM_STATES.index('SHUTDOWN'), LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]:
changed = True changed = True
if changed and not module.check_mode: if changed and not module.check_mode:
if not hard: if not hard:
vm.poweroff() client.vm.action('poweroff', vm.ID)
else: else:
vm.poweroff_hard() client.vm.action('poweroff-hard', vm.ID)
return changed return changed
@ -1140,7 +1192,7 @@ def poweroff_vms(module, client, vms, hard):
changed = False changed = False
for vm in vms: for vm in vms:
changed = poweroff_vm(module, vm, hard) or changed changed = poweroff_vm(module, client, vm, hard) or changed
return changed return changed
@ -1150,27 +1202,27 @@ def reboot_vms(module, client, vms, wait_timeout, hard):
if not module.check_mode: if not module.check_mode:
# Firstly, power-off all instances # Firstly, power-off all instances
for vm in vms: for vm in vms:
vm.info() vm = client.vm.info(vm.ID)
lcm_state = vm.lcm_state lcm_state = vm.LCM_STATE
state = vm.state state = vm.STATE
if lcm_state not in [LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]: if lcm_state not in [LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]:
poweroff_vm(module, vm, hard) poweroff_vm(module, client, vm, hard)
# Wait for all to be power-off # Wait for all to be power-off
for vm in vms: for vm in vms:
wait_for_poweroff(module, vm, wait_timeout) wait_for_poweroff(module, client, vm, wait_timeout)
for vm in vms: for vm in vms:
resume_vm(module, vm) resume_vm(module, client, vm)
return True return True
def resume_vm(module, vm): def resume_vm(module, client, vm):
vm.info() vm = client.vm.info(vm.ID)
changed = False changed = False
lcm_state = vm.lcm_state lcm_state = vm.LCM_STATE
if lcm_state == LCM_STATES.index('SHUTDOWN_POWEROFF'): if lcm_state == LCM_STATES.index('SHUTDOWN_POWEROFF'):
module.fail_json(msg="Cannot perform action 'resume' because this action is not available " + module.fail_json(msg="Cannot perform action 'resume' because this action is not available " +
"for LCM_STATE: 'SHUTDOWN_POWEROFF'. Wait for the VM to shutdown properly") "for LCM_STATE: 'SHUTDOWN_POWEROFF'. Wait for the VM to shutdown properly")
@ -1178,7 +1230,7 @@ def resume_vm(module, vm):
changed = True changed = True
if changed and not module.check_mode: if changed and not module.check_mode:
vm.resume() client.vm.action('resume', vm.ID)
return changed return changed
@ -1187,7 +1239,7 @@ def resume_vms(module, client, vms):
changed = False changed = False
for vm in vms: for vm in vms:
changed = resume_vm(module, vm) or changed changed = resume_vm(module, client, vm) or changed
return changed return changed
@ -1221,10 +1273,13 @@ def disk_save_as(module, client, vm, disk_saveas, wait_timeout):
disk_id = disk_saveas.get('disk_id', 0) disk_id = disk_saveas.get('disk_id', 0)
if not module.check_mode: if not module.check_mode:
if vm.state != VM_STATES.index('POWEROFF'): if vm.STATE != VM_STATES.index('POWEROFF'):
module.fail_json(msg="'disksaveas' option can be used only when the VM is in 'POWEROFF' state") module.fail_json(msg="'disksaveas' option can be used only when the VM is in 'POWEROFF' state")
client.call('vm.disksaveas', vm.id, disk_id, image_name, 'OS', -1) try:
wait_for_poweroff(module, vm, wait_timeout) # wait for VM to leave the hotplug_saveas_poweroff state client.vm.disksaveas(vm.ID, disk_id, image_name, 'OS', -1)
except pyone.OneException as e:
module.fail_json(msg=str(e))
wait_for_poweroff(module, client, vm, wait_timeout) # wait for VM to leave the hotplug_saveas_poweroff state
def get_connection_info(module): def get_connection_info(module):
@ -1242,6 +1297,18 @@ def get_connection_info(module):
if not password: if not password:
password = os.environ.get('ONE_PASSWORD') password = os.environ.get('ONE_PASSWORD')
if not username:
if not password:
authfile = os.environ.get('ONE_AUTH')
if authfile is not None:
try:
authstring = open(authfile, "r").read().rstrip()
username = authstring.split(":")[0]
password = authstring.split(":")[1]
except BaseException:
module.fail_json(msg="Could not read ONE_AUTH file")
else:
module.fail_json(msg="No Credentials are set")
if not url: if not url:
module.fail_json(msg="Opennebula API url (api_url) is not specified") module.fail_json(msg="Opennebula API url (api_url) is not specified")
from collections import namedtuple from collections import namedtuple
@ -1274,7 +1341,9 @@ def main():
"memory": {"required": False, "type": "str"}, "memory": {"required": False, "type": "str"},
"cpu": {"required": False, "type": "float"}, "cpu": {"required": False, "type": "float"},
"vcpu": {"required": False, "type": "int"}, "vcpu": {"required": False, "type": "int"},
"disk_size": {"required": False, "type": "str"}, "disk_size": {"required": False, "type": "list"},
"datastore_name": {"required": False, "type": "str"},
"datastore_id": {"required": False, "type": "int"},
"networks": {"default": [], "type": "list"}, "networks": {"default": [], "type": "list"},
"count": {"default": 1, "type": "int"}, "count": {"default": 1, "type": "int"},
"exact_count": {"required": False, "type": "int"}, "exact_count": {"required": False, "type": "int"},
@ -1282,7 +1351,8 @@ def main():
"count_attributes": {"required": False, "type": "dict"}, "count_attributes": {"required": False, "type": "dict"},
"labels": {"default": [], "type": "list"}, "labels": {"default": [], "type": "list"},
"count_labels": {"required": False, "type": "list"}, "count_labels": {"required": False, "type": "list"},
"disk_saveas": {"type": "dict"} "disk_saveas": {"type": "dict"},
"persistent": {"default": False, "type": "bool"}
} }
module = AnsibleModule(argument_spec=fields, module = AnsibleModule(argument_spec=fields,
@ -1300,12 +1370,13 @@ def main():
['count', 'hard'], ['count', 'hard'],
['instance_ids', 'cpu'], ['instance_ids', 'vcpu'], ['instance_ids', 'cpu'], ['instance_ids', 'vcpu'],
['instance_ids', 'memory'], ['instance_ids', 'disk_size'], ['instance_ids', 'memory'], ['instance_ids', 'disk_size'],
['instance_ids', 'networks'] ['instance_ids', 'networks'],
['persistent', 'disk_size']
], ],
supports_check_mode=True) supports_check_mode=True)
if not HAS_OCA: if not HAS_PYONE:
module.fail_json(msg='This module requires python-oca to work!') module.fail_json(msg='This module requires pyone to work!')
auth = get_connection_info(module) auth = get_connection_info(module)
params = module.params params = module.params
@ -1324,6 +1395,8 @@ def main():
cpu = params.get('cpu') cpu = params.get('cpu')
vcpu = params.get('vcpu') vcpu = params.get('vcpu')
disk_size = params.get('disk_size') disk_size = params.get('disk_size')
requested_datastore_id = params.get('datastore_id')
requested_datastore_name = params.get('datastore_name')
networks = params.get('networks') networks = params.get('networks')
count = params.get('count') count = params.get('count')
exact_count = params.get('exact_count') exact_count = params.get('exact_count')
@ -1332,11 +1405,12 @@ def main():
labels = params.get('labels') labels = params.get('labels')
count_labels = params.get('count_labels') count_labels = params.get('count_labels')
disk_saveas = params.get('disk_saveas') disk_saveas = params.get('disk_saveas')
persistent = params.get('persistent')
if not (auth.username and auth.password): if not (auth.username and auth.password):
client = oca.Client(None, auth.url) module.warn("Credentials missing")
else: else:
client = oca.Client(auth.username + ':' + auth.password, auth.url) one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
if attributes: if attributes:
attributes = dict((key.upper(), value) for key, value in attributes.items()) attributes = dict((key.upper(), value) for key, value in attributes.items())
@ -1357,13 +1431,25 @@ def main():
# Fetch template # Fetch template
template_id = None template_id = None
if requested_template_id or requested_template_name: if requested_template_id or requested_template_name:
template_id = get_template_id(module, client, requested_template_id, requested_template_name) template_id = get_template_id(module, one_client, requested_template_id, requested_template_name)
if template_id is None: if template_id is None:
if requested_template_id: if requested_template_id:
module.fail_json(msg='There is no template with template_id: ' + str(requested_template_id)) module.fail_json(msg='There is no template with template_id: ' + str(requested_template_id))
elif requested_template_name: elif requested_template_name:
module.fail_json(msg="There is no template with name: " + requested_template_name) module.fail_json(msg="There is no template with name: " + requested_template_name)
# Fetch datastore
datastore_id = None
if requested_datastore_id or requested_datastore_name:
datastore_id = get_datastore_id(module, one_client, requested_datastore_id, requested_datastore_name)
if datastore_id is None:
if requested_datastore_id:
module.fail_json(msg='There is no datastore with template_id: ' + str(requested_datastore_id))
elif requested_datastore_name:
module.fail_json(msg="There is no datastore with name: " + requested_datastore_name)
else:
attributes['SCHED_DS_REQUIREMENTS'] = 'ID=' + str(datastore_id)
if exact_count and template_id is None: if exact_count and template_id is None:
module.fail_json(msg='Option `exact_count` needs template_id or template_name') module.fail_json(msg='Option `exact_count` needs template_id or template_name')
@ -1395,14 +1481,15 @@ def main():
if exact_count is not None: if exact_count is not None:
# Deploy an exact count of VMs # Deploy an exact count of VMs
changed, instances_list, tagged_instances_list = create_exact_count_of_vms(module, client, template_id, exact_count, attributes, changed, instances_list, tagged_instances_list = create_exact_count_of_vms(module, one_client, template_id, exact_count, attributes,
count_attributes, labels, count_labels, disk_size, count_attributes, labels, count_labels, disk_size,
networks, hard, wait, wait_timeout, put_vm_on_hold) networks, hard, wait, wait_timeout, put_vm_on_hold, persistent)
vms = tagged_instances_list vms = tagged_instances_list
elif template_id is not None and state == 'present': elif template_id is not None and state == 'present':
# Deploy count VMs # Deploy count VMs
changed, instances_list, tagged_instances_list = create_count_of_vms(module, client, template_id, count, changed, instances_list, tagged_instances_list = create_count_of_vms(module, one_client, template_id, count,
attributes, labels, disk_size, networks, wait, wait_timeout, put_vm_on_hold) attributes, labels, disk_size, networks, wait, wait_timeout,
put_vm_on_hold, persistent)
# instances_list - new instances # instances_list - new instances
# tagged_instances_list - all instances with specified `count_attributes` and `count_labels` # tagged_instances_list - all instances with specified `count_attributes` and `count_labels`
vms = instances_list vms = instances_list
@ -1422,10 +1509,10 @@ def main():
changed = False changed = False
if instance_ids: if instance_ids:
vms = get_vms_by_ids(module, client, state, instance_ids) vms = get_vms_by_ids(module, one_client, state, instance_ids)
else: else:
tagged = True tagged = True
vms = get_all_vms_by_attributes(client, attributes, labels) vms = get_all_vms_by_attributes(one_client, attributes, labels)
if len(vms) == 0 and state != 'absent' and state != 'present': if len(vms) == 0 and state != 'absent' and state != 'present':
module.fail_json(msg='There are no instances with specified `instance_ids`, `attributes` and/or `labels`') module.fail_json(msg='There are no instances with specified `instance_ids`, `attributes` and/or `labels`')
@ -1437,22 +1524,22 @@ def main():
module.fail_json(msg='Option `instance_ids` is required when state is `absent`.') module.fail_json(msg='Option `instance_ids` is required when state is `absent`.')
if state == 'absent': if state == 'absent':
changed = terminate_vms(module, client, vms, hard) changed = terminate_vms(module, one_client, vms, hard)
elif state == 'rebooted': elif state == 'rebooted':
changed = reboot_vms(module, client, vms, wait_timeout, hard) changed = reboot_vms(module, one_client, vms, wait_timeout, hard)
elif state == 'poweredoff': elif state == 'poweredoff':
changed = poweroff_vms(module, client, vms, hard) changed = poweroff_vms(module, one_client, vms, hard)
elif state == 'running': elif state == 'running':
changed = resume_vms(module, client, vms) changed = resume_vms(module, one_client, vms)
instances_list = vms instances_list = vms
tagged_instances_list = [] tagged_instances_list = []
if permissions is not None: if permissions is not None:
changed = set_vm_permissions(module, client, vms, permissions) or changed changed = set_vm_permissions(module, one_client, vms, permissions) or changed
if owner_id is not None or group_id is not None: if owner_id is not None or group_id is not None:
changed = set_vm_ownership(module, client, vms, owner_id, group_id) or changed changed = set_vm_ownership(module, one_client, vms, owner_id, group_id) or changed
if wait and not module.check_mode and state != 'present': if wait and not module.check_mode and state != 'present':
wait_for = { wait_for = {
@ -1463,19 +1550,19 @@ def main():
} }
for vm in vms: for vm in vms:
if vm is not None: if vm is not None:
wait_for[state](module, vm, wait_timeout) wait_for[state](module, one_client, vm, wait_timeout)
if disk_saveas is not None: if disk_saveas is not None:
if len(vms) == 0: if len(vms) == 0:
module.fail_json(msg="There is no VM whose disk will be saved.") module.fail_json(msg="There is no VM whose disk will be saved.")
disk_save_as(module, client, vms[0], disk_saveas, wait_timeout) disk_save_as(module, one_client, vms[0], disk_saveas, wait_timeout)
changed = True changed = True
# instances - a list of instances info whose state is changed or which are fetched with C(instance_ids) option # instances - a list of instances info whose state is changed or which are fetched with C(instance_ids) option
instances = list(get_vm_info(client, vm) for vm in instances_list if vm is not None) instances = list(get_vm_info(one_client, vm) for vm in instances_list if vm is not None)
instances_ids = list(vm.id for vm in instances_list if vm is not None) instances_ids = list(vm.ID for vm in instances_list if vm is not None)
# tagged_instances - A list of instances info based on a specific attributes and/or labels that are specified with C(count_attributes) and C(count_labels) # tagged_instances - A list of instances info based on a specific attributes and/or labels that are specified with C(count_attributes) and C(count_labels)
tagged_instances = list(get_vm_info(client, vm) for vm in tagged_instances_list if vm is not None) tagged_instances = list(get_vm_info(one_client, vm) for vm in tagged_instances_list if vm is not None)
result = {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': tagged_instances} result = {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': tagged_instances}

Loading…
Cancel
Save