mirror of https://github.com/ansible/ansible.git
Migrated to community.vmware
parent
f8c4936c10
commit
40218535ee
@ -1,50 +0,0 @@
|
||||
# Ansible VMware external inventory script settings
|
||||
|
||||
[defaults]
|
||||
|
||||
# If true (the default), return only guest VMs. If false, also return host
|
||||
# systems in the results.
|
||||
guests_only = True
|
||||
|
||||
# Specify an alternate group name for guest VMs. If not defined, defaults to
|
||||
# the basename of the inventory script + "_vm", e.g. "vmware_vm".
|
||||
#vm_group = vm_group_name
|
||||
|
||||
# Specify an alternate group name for host systems when guests_only=false.
|
||||
# If not defined, defaults to the basename of the inventory script + "_hw",
|
||||
# e.g. "vmware_hw".
|
||||
#hw_group = hw_group_name
|
||||
|
||||
# Specify the number of seconds to use the inventory cache before it is
|
||||
# considered stale. If not defined, defaults to 0 seconds.
|
||||
#cache_max_age = 3600
|
||||
|
||||
# Specify the directory used for storing the inventory cache. If not defined,
|
||||
# caching will be disabled.
|
||||
#cache_dir = ~/.cache/ansible
|
||||
|
||||
# Specify a prefix filter. Any VMs with names beginning with this string will
|
||||
# not be returned.
|
||||
# prefix_filter = test_
|
||||
|
||||
# Specify a cluster filter list (comma delimited). Only clusters matching by
|
||||
# name will be scanned for virtual machines
|
||||
#clusters = cluster1,cluster2
|
||||
|
||||
[auth]
|
||||
|
||||
# Specify hostname or IP address of vCenter/ESXi server. A port may be
|
||||
# included with the hostname, e.g.: vcenter.example.com:8443. This setting
|
||||
# may also be defined via the VMWARE_HOST environment variable.
|
||||
host = vcenter.example.com
|
||||
|
||||
# Specify a username to access the vCenter host. This setting may also be
|
||||
# defined with the VMWARE_USER environment variable.
|
||||
user = ihasaccess
|
||||
|
||||
# Specify a password to access the vCenter host. This setting may also be
|
||||
# defined with the VMWARE_PASSWORD environment variable.
|
||||
password = ssshverysecret
|
||||
|
||||
# Force SSL certificate checking by default or ignore self-signed certs.
|
||||
#sslcheck=True
|
@ -1,472 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
VMware Inventory Script
|
||||
=======================
|
||||
|
||||
Retrieve information about virtual machines from a vCenter server or
|
||||
standalone ESX host. When `guests_only=false` (in the INI file), host systems
|
||||
are also returned in addition to VMs.
|
||||
|
||||
This script will attempt to read configuration from an INI file with the same
|
||||
base filename if present, or `vmware.ini` if not. It is possible to create
|
||||
symlinks to the inventory script to support multiple configurations, e.g.:
|
||||
|
||||
* `vmware.py` (this script)
|
||||
* `vmware.ini` (default configuration, will be read by `vmware.py`)
|
||||
* `vmware_test.py` (symlink to `vmware.py`)
|
||||
* `vmware_test.ini` (test configuration, will be read by `vmware_test.py`)
|
||||
* `vmware_other.py` (symlink to `vmware.py`, will read `vmware.ini` since no
|
||||
`vmware_other.ini` exists)
|
||||
|
||||
The path to an INI file may also be specified via the `VMWARE_INI` environment
|
||||
variable, in which case the filename matching rules above will not apply.
|
||||
|
||||
Host and authentication parameters may be specified via the `VMWARE_HOST`,
|
||||
`VMWARE_USER` and `VMWARE_PASSWORD` environment variables; these options will
|
||||
take precedence over options present in the INI file. An INI file is not
|
||||
required if these options are specified using environment variables.
|
||||
'''
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import json
|
||||
import logging
|
||||
import optparse
|
||||
import os
|
||||
import ssl
|
||||
import sys
|
||||
import time
|
||||
|
||||
from ansible.module_utils.common._collections_compat import MutableMapping
|
||||
from ansible.module_utils.six import integer_types, text_type, string_types
|
||||
from ansible.module_utils.six.moves import configparser
|
||||
|
||||
# Silence logging messages triggered by pSphere/suds: attach a no-op
# handler so library loggers never fall back to "No handlers could be
# found" warnings.
try:
    from logging import NullHandler
except ImportError:
    # Python < 2.7 has no logging.NullHandler; provide an equivalent.
    from logging import Handler

    class NullHandler(Handler):
        def emit(self, record):
            pass

for _quiet_logger in ('psphere', 'suds'):
    logging.getLogger(_quiet_logger).addHandler(NullHandler())
|
||||
|
||||
from psphere.client import Client
|
||||
from psphere.errors import ObjectNotFoundError
|
||||
from psphere.managedobjects import HostSystem, VirtualMachine, ManagedObject, Network, ClusterComputeResource
|
||||
from suds.sudsobject import Object as SudsObject
|
||||
|
||||
|
||||
class VMwareInventory(object):
    '''
    Ansible dynamic-inventory source backed by a vCenter/ESXi server.

    Configuration comes from an INI file (see module docstring) and/or the
    VMWARE_* environment variables; results may be cached on disk between
    runs via the [defaults] cache_dir / cache_max_age options.
    '''

    @staticmethod
    def _strip_py_suffix(path):
        '''Remove a literal trailing '.py' from *path* (if present).'''
        # BUGFIX: the original used path.rstrip('.py'), which strips ANY
        # trailing run of '.', 'p' and 'y' characters (e.g. '/x/happy.py'
        # -> '/x/ha'), not the '.py' suffix.
        if path.endswith('.py'):
            return path[:-len('.py')]
        return path

    def __init__(self, guests_only=None):
        '''
        Load configuration, apply the SSL-verification policy and connect.

        guests_only: when not None, overrides the INI `guests_only` option
        (True = return only guest VMs, False = also return host systems).
        '''
        self.config = configparser.SafeConfigParser()
        if os.environ.get('VMWARE_INI', ''):
            config_files = [os.environ['VMWARE_INI']]
        else:
            # Look for an INI named after this script first, then vmware.ini.
            script_base = self._strip_py_suffix(os.path.abspath(sys.argv[0]))
            config_files = [script_base + '.ini', 'vmware.ini']
        for config_file in config_files:
            if os.path.exists(config_file):
                self.config.read(config_file)
                break

        # Retrieve only guest VMs, or include host systems?
        if guests_only is not None:
            self.guests_only = guests_only
        elif self.config.has_option('defaults', 'guests_only'):
            self.guests_only = self.config.getboolean('defaults', 'guests_only')
        else:
            self.guests_only = True

        # Read authentication information from VMware environment variables
        # (if set), otherwise from the INI file.
        auth_host = os.environ.get('VMWARE_HOST')
        if not auth_host and self.config.has_option('auth', 'host'):
            auth_host = self.config.get('auth', 'host')
        auth_user = os.environ.get('VMWARE_USER')
        if not auth_user and self.config.has_option('auth', 'user'):
            auth_user = self.config.get('auth', 'user')
        auth_password = os.environ.get('VMWARE_PASSWORD')
        if not auth_password and self.config.has_option('auth', 'password'):
            auth_password = self.config.get('auth', 'password')

        # SSL verification defaults to on; only an explicit no/false
        # (case-insensitive) disables it.
        sslcheck = os.environ.get('VMWARE_SSLCHECK')
        if not sslcheck and self.config.has_option('auth', 'sslcheck'):
            sslcheck = self.config.get('auth', 'sslcheck')
        if not sslcheck:
            sslcheck = True
        else:
            sslcheck = sslcheck.lower() not in ('no', 'false')

        # Limit the clusters being scanned (comma-separated names).
        self.filter_clusters = os.environ.get('VMWARE_CLUSTERS')
        if not self.filter_clusters and self.config.has_option('defaults', 'clusters'):
            self.filter_clusters = self.config.get('defaults', 'clusters')
        if self.filter_clusters:
            self.filter_clusters = [x.strip() for x in self.filter_clusters.split(',') if x.strip()]

        # Override certificate checks process-wide for urllib-based
        # transports (PEP 476 opt-out).
        if not sslcheck:
            if hasattr(ssl, '_create_unverified_context'):
                ssl._create_default_https_context = ssl._create_unverified_context

        # Create the VMware client connection.
        self.client = Client(auth_host, auth_user, auth_password)

    def _put_cache(self, name, value):
        '''
        Save *value* as JSON under *name* in the configured cache directory.
        No-op when [defaults] cache_dir is not set.
        '''
        if self.config.has_option('defaults', 'cache_dir'):
            cache_dir = os.path.expanduser(self.config.get('defaults', 'cache_dir'))
            if not os.path.exists(cache_dir):
                os.makedirs(cache_dir)
            cache_file = os.path.join(cache_dir, name)
            with open(cache_file, 'w') as cache:
                json.dump(value, cache)

    def _get_cache(self, name, default=None):
        '''
        Return the cached value for *name*, or *default* when caching is
        disabled, the entry is missing, or it is older than cache_max_age.
        '''
        if self.config.has_option('defaults', 'cache_dir'):
            # BUGFIX: _put_cache expanduser()s the cache_dir; do the same
            # here so '~'-style paths read the directory they were written
            # to (the original read the unexpanded path and always missed).
            cache_dir = os.path.expanduser(self.config.get('defaults', 'cache_dir'))
            cache_file = os.path.join(cache_dir, name)
            if os.path.exists(cache_file):
                if self.config.has_option('defaults', 'cache_max_age'):
                    cache_max_age = self.config.getint('defaults', 'cache_max_age')
                else:
                    cache_max_age = 0
                cache_stat = os.stat(cache_file)
                if (cache_stat.st_mtime + cache_max_age) >= time.time():
                    with open(cache_file) as cache:
                        return json.load(cache)
        return default

    def _flatten_dict(self, d, parent_key='', sep='_'):
        '''
        Flatten nested dicts by combining keys with a separator. Lists with
        only string items are included as is; any other lists are discarded.
        Keys starting with '_' are skipped.
        '''
        items = []
        for k, v in d.items():
            if k.startswith('_'):
                continue
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, MutableMapping):
                items.extend(self._flatten_dict(v, new_key, sep).items())
            elif isinstance(v, (list, tuple)):
                # Keep only homogeneous string lists (e.g. datastore names);
                # lists of richer objects are dropped per the docstring.
                if all(isinstance(x, string_types) for x in v):
                    items.append((new_key, v))
            else:
                items.append((new_key, v))
        return dict(items)

    def _get_obj_info(self, obj, depth=99, seen=None):
        '''
        Recursively build a JSON-serializable structure for the given pSphere
        object. *depth* limits recursion into ManagedObject instances only;
        *seen* breaks reference cycles. Unserializable values collapse to the
        sentinel () which callers filter out.
        '''
        seen = seen or set()
        if isinstance(obj, ManagedObject):
            try:
                obj_unicode = text_type(getattr(obj, 'name'))
            except AttributeError:
                obj_unicode = ()
            if obj in seen:
                return obj_unicode
            seen.add(obj)
            if depth <= 0:
                return obj_unicode
            d = {}
            for attr in dir(obj):
                if attr.startswith('_'):
                    continue
                try:
                    val = getattr(obj, attr)
                    obj_info = self._get_obj_info(val, depth - 1, seen)
                    if obj_info != ():
                        d[attr] = obj_info
                except Exception:
                    # Attribute access on pSphere proxies can raise for
                    # unpopulated properties; skip such attributes.
                    pass
            return d
        elif isinstance(obj, SudsObject):
            d = {}
            for key, val in iter(obj):
                obj_info = self._get_obj_info(val, depth, seen)
                if obj_info != ():
                    d[key] = obj_info
            return d
        elif isinstance(obj, (list, tuple)):
            obj_list = []
            for val in iter(obj):
                obj_info = self._get_obj_info(val, depth, seen)
                if obj_info != ():
                    obj_list.append(obj_info)
            return obj_list
        elif isinstance(obj, (type(None), bool, float) + string_types + integer_types):
            return obj
        else:
            # Unknown/unserializable type: sentinel filtered by callers.
            return ()

    def _get_host_info(self, host, prefix='vmware'):
        '''
        Return a flattened dict with info about the given host system.
        Sets ansible_ssh_host when an IP address could be determined.
        '''
        host_info = {
            'name': host.name,
        }
        for attr in ('datastore', 'network', 'vm'):
            try:
                value = getattr(host, attr)
                host_info['%ss' % attr] = self._get_obj_info(value, depth=0)
            except AttributeError:
                host_info['%ss' % attr] = []
        # Hoist summary fields (and nested summary sub-dicts) to the top
        # level, skipping the back-reference to the host itself.
        for k, v in self._get_obj_info(host.summary, depth=0).items():
            if isinstance(v, MutableMapping):
                for k2, v2 in v.items():
                    host_info[k2] = v2
            elif k != 'host':
                host_info[k] = v
        try:
            host_info['ipAddress'] = host.config.network.vnic[0].spec.ip.ipAddress
        except Exception as e:
            # Best-effort: some hosts expose no vnic config; report and go on.
            print(e, file=sys.stderr)
        host_info = self._flatten_dict(host_info, prefix)
        if ('%s_ipAddress' % prefix) in host_info:
            host_info['ansible_ssh_host'] = host_info['%s_ipAddress' % prefix]
        return host_info

    def _get_vm_info(self, vm, prefix='vmware'):
        '''
        Return a flattened dict with info about the given virtual machine.
        Sets ansible_ssh_host when an IP address could be determined.
        '''
        vm_info = {
            'name': vm.name,
        }
        for attr in ('datastore', 'network'):
            try:
                value = getattr(vm, attr)
                vm_info['%ss' % attr] = self._get_obj_info(value, depth=0)
            except AttributeError:
                vm_info['%ss' % attr] = []
        try:
            vm_info['resourcePool'] = self._get_obj_info(vm.resourcePool, depth=0)
        except AttributeError:
            vm_info['resourcePool'] = ''
        try:
            vm_info['guestState'] = vm.guest.guestState
        except AttributeError:
            vm_info['guestState'] = ''
        # Hoist summary fields, renaming the 'host' back-reference to
        # 'hostSystem' so it does not collide with inventory host names.
        for k, v in self._get_obj_info(vm.summary, depth=0).items():
            if isinstance(v, MutableMapping):
                for k2, v2 in v.items():
                    if k2 == 'host':
                        k2 = 'hostSystem'
                    vm_info[k2] = v2
            elif k != 'vm':
                vm_info[k] = v
        vm_info = self._flatten_dict(vm_info, prefix)
        if ('%s_ipAddress' % prefix) in vm_info:
            vm_info['ansible_ssh_host'] = vm_info['%s_ipAddress' % prefix]
        return vm_info

    def _add_host(self, inv, parent_group, host_name):
        '''
        Add the host to the parent group in the given inventory. Supports
        both plain-list groups and {'hosts': [...], 'children': [...]} dicts.
        '''
        p_group = inv.setdefault(parent_group, [])
        if isinstance(p_group, dict):
            group_hosts = p_group.setdefault('hosts', [])
        else:
            group_hosts = p_group
        if host_name not in group_hosts:
            group_hosts.append(host_name)

    def _add_child(self, inv, parent_group, child_group):
        '''
        Add a child group to a parent group in the given inventory,
        upgrading a plain-list parent to dict form when necessary.
        The implicit 'all' group never records children.
        '''
        if parent_group != 'all':
            p_group = inv.setdefault(parent_group, {})
            if not isinstance(p_group, dict):
                inv[parent_group] = {'hosts': p_group}
                p_group = inv[parent_group]
            group_children = p_group.setdefault('children', [])
            if child_group not in group_children:
                group_children.append(child_group)
        inv.setdefault(child_group, [])

    def get_inventory(self, meta_hostvars=True):
        '''
        Read the inventory from cache or the VMware API via pSphere.

        meta_hostvars: when True, embed per-host variables under
        ['_meta']['hostvars'] (Ansible's --list contract).
        '''
        # Use different cache names for guests only vs. all hosts.
        if self.guests_only:
            cache_name = '__inventory_guests__'
        else:
            cache_name = '__inventory_all__'

        inv = self._get_cache(cache_name, None)
        if inv is not None:
            return inv

        inv = {'all': {'hosts': []}}
        if meta_hostvars:
            inv['_meta'] = {'hostvars': {}}

        default_group = self._strip_py_suffix(os.path.basename(sys.argv[0]))

        if not self.guests_only:
            if self.config.has_option('defaults', 'hw_group'):
                hw_group = self.config.get('defaults', 'hw_group')
            else:
                hw_group = default_group + '_hw'

        if self.config.has_option('defaults', 'vm_group'):
            vm_group = self.config.get('defaults', 'vm_group')
        else:
            vm_group = default_group + '_vm'

        if self.config.has_option('defaults', 'prefix_filter'):
            prefix_filter = self.config.get('defaults', 'prefix_filter')
        else:
            prefix_filter = None

        if self.filter_clusters:
            # Loop through clusters and find hosts:
            hosts = []
            for cluster in ClusterComputeResource.all(self.client):
                if cluster.name in self.filter_clusters:
                    for host in cluster.host:
                        hosts.append(host)
        else:
            # Get list of all physical hosts
            hosts = HostSystem.all(self.client)

        # Loop through physical hosts:
        for host in hosts:

            if not self.guests_only:
                self._add_host(inv, 'all', host.name)
                self._add_host(inv, hw_group, host.name)
                host_info = self._get_host_info(host)
                if meta_hostvars:
                    inv['_meta']['hostvars'][host.name] = host_info
                self._put_cache(host.name, host_info)

            # Loop through all VMs on physical host.
            for vm in host.vm:
                if prefix_filter:
                    if vm.name.startswith(prefix_filter):
                        continue
                self._add_host(inv, 'all', vm.name)
                self._add_host(inv, vm_group, vm.name)
                vm_info = self._get_vm_info(vm)
                if meta_hostvars:
                    inv['_meta']['hostvars'][vm.name] = vm_info
                self._put_cache(vm.name, vm_info)

                # Group by resource pool.
                vm_resourcePool = vm_info.get('vmware_resourcePool', None)
                if vm_resourcePool:
                    self._add_child(inv, vm_group, 'resource_pools')
                    self._add_child(inv, 'resource_pools', vm_resourcePool)
                    self._add_host(inv, vm_resourcePool, vm.name)

                # Group by datastore.
                for vm_datastore in vm_info.get('vmware_datastores', []):
                    self._add_child(inv, vm_group, 'datastores')
                    self._add_child(inv, 'datastores', vm_datastore)
                    self._add_host(inv, vm_datastore, vm.name)

                # Group by network.
                for vm_network in vm_info.get('vmware_networks', []):
                    self._add_child(inv, vm_group, 'networks')
                    self._add_child(inv, 'networks', vm_network)
                    self._add_host(inv, vm_network, vm.name)

                # Group by guest OS.
                vm_guestId = vm_info.get('vmware_guestId', None)
                if vm_guestId:
                    self._add_child(inv, vm_group, 'guests')
                    self._add_child(inv, 'guests', vm_guestId)
                    self._add_host(inv, vm_guestId, vm.name)

                # Group all VM templates.
                vm_template = vm_info.get('vmware_template', False)
                if vm_template:
                    self._add_child(inv, vm_group, 'templates')
                    self._add_host(inv, 'templates', vm.name)

        self._put_cache(cache_name, inv)
        return inv

    def get_host(self, hostname):
        '''
        Read info about a specific host or VM from cache or the VMware API.
        Returns an empty dict when the name matches nothing.
        '''
        inv = self._get_cache(hostname, None)
        if inv is not None:
            return inv

        if not self.guests_only:
            try:
                host = HostSystem.get(self.client, name=hostname)
                inv = self._get_host_info(host)
            except ObjectNotFoundError:
                pass

        if inv is None:
            try:
                vm = VirtualMachine.get(self.client, name=hostname)
                inv = self._get_vm_info(vm)
            except ObjectNotFoundError:
                pass

        if inv is not None:
            self._put_cache(hostname, inv)
        return inv or {}
|
||||
|
||||
|
||||
def main():
    '''Parse CLI options, query the VMware inventory and print JSON on stdout.'''
    parser = optparse.OptionParser()
    option_specs = [
        (('--list',), dict(action='store_true', dest='list', default=False,
                           help='Output inventory groups and hosts')),
        (('--host',), dict(dest='host', default=None, metavar='HOST',
                           help='Output variables only for the given hostname')),
        # The remaining options are for standalone use only; Ansible itself
        # never passes them.
        (('--pretty',), dict(action='store_true', dest='pretty', default=False,
                             help='Output nicely-formatted JSON')),
        (('--include-host-systems',), dict(action='store_true',
                                           dest='include_host_systems',
                                           default=False,
                                           help='Include host systems in addition to VMs')),
        (('--no-meta-hostvars',), dict(action='store_false',
                                       dest='meta_hostvars', default=True,
                                       help="Exclude ['_meta']['hostvars'] with --list")),
    ]
    for flags, kwargs in option_specs:
        parser.add_option(*flags, **kwargs)
    options, args = parser.parse_args()

    if options.include_host_systems:
        vmware_inventory = VMwareInventory(guests_only=False)
    else:
        vmware_inventory = VMwareInventory()

    if options.host is None:
        inventory = vmware_inventory.get_inventory(options.meta_hostvars)
    else:
        inventory = vmware_inventory.get_host(options.host)

    json_kwargs = {'indent': 4, 'sort_keys': True} if options.pretty else {}
    json.dump(inventory, sys.stdout, **json_kwargs)


if __name__ == '__main__':
    main()
|
@ -1,127 +0,0 @@
|
||||
# Ansible VMware external inventory script settings
|
||||
|
||||
[vmware]
|
||||
|
||||
# The resolvable hostname or ip address of the vsphere
|
||||
server=vcenter
|
||||
|
||||
# The port for the vsphere API
|
||||
#port=443
|
||||
|
||||
# The username with access to the vsphere API. This setting
|
||||
# may also be defined via the VMWARE_USERNAME environment variable.
|
||||
username=administrator@vsphere.local
|
||||
|
||||
# The password for the vsphere API. This setting
|
||||
# may also be defined via the VMWARE_PASSWORD environment variable.
|
||||
password=vmware
|
||||
|
||||
# Verify the server's SSL certificate
|
||||
#validate_certs = True
|
||||
|
||||
# Specify the number of seconds to use the inventory cache before it is
|
||||
# considered stale. If not defined, defaults to 0 seconds.
|
||||
#cache_max_age = 3600
|
||||
|
||||
|
||||
# Specify the directory used for storing the inventory cache. If not defined,
|
||||
# caching will be disabled.
|
||||
#cache_path = ~/.cache/ansible
|
||||
|
||||
|
||||
# Max object level refers to the level of recursion the script will delve into
|
||||
# the objects returned from pyVmomi to find serializable facts. The default
|
||||
# level of 1 is sufficient for most tasks and will be the most performant.
|
||||
# Beware that the recursion can exceed python's limit (causing traceback),
|
||||
# cause sluggish script performance and return huge blobs of facts.
|
||||
# If you do not know what you are doing, leave this set to 1.
|
||||
#max_object_level=1
|
||||
|
||||
|
||||
# Lower the keynames for facts to make addressing them easier.
|
||||
#lower_var_keys=True
|
||||
|
||||
|
||||
# Don't retrieve and process some VMware attribute keys
|
||||
# Default values permit to sanitize inventory meta and to improve a little bit
|
||||
# performance by removing non-common group attributes.
|
||||
#skip_keys = declaredalarmstate,disabledmethod,dynamicproperty,dynamictype,environmentbrowser,managedby,parent,childtype,resourceconfig
|
||||
|
||||
|
||||
# Host alias for objects in the inventory. VMware allows duplicate VM names
|
||||
# so they can not be considered unique. Use this setting to alter the alias
|
||||
# returned for the hosts. Any attributes for the guest can be used to build
|
||||
# this alias. The default combines the config name and the config uuid and
|
||||
# expects that the ansible_host will be set by the host_pattern.
|
||||
#alias_pattern={{ config.name + '_' + config.uuid }}
|
||||
|
||||
|
||||
# Host pattern is the value set for ansible_host and ansible_ssh_host, which
|
||||
# needs to be a hostname or ipaddress the ansible controlhost can reach.
|
||||
#host_pattern={{ guest.ipaddress }}
|
||||
|
||||
|
||||
# Host filters are a comma separated list of jinja patterns to remove
|
||||
# non-matching hosts from the final result.
|
||||
# EXAMPLES:
|
||||
# host_filters={{ config.guestid == 'rhel7_64Guest' }}
|
||||
# host_filters={{ config.cpuhotremoveenabled != False }},{{ runtime.maxmemoryusage >= 512 }}
|
||||
# host_filters={{ config.cpuhotremoveenabled != False }},{{ runtime.maxmemoryusage >= 512 }}
|
||||
# host_filters={{ runtime.powerstate == "poweredOn" }}
|
||||
# host_filters={{ guest.gueststate == "notRunning" }}
|
||||
# The default value is powerstate of virtual machine equal to "poweredOn". (Changed in version 2.5)
|
||||
# Runtime state does not require to have vmware tools installed as compared to "guest.gueststate"
|
||||
#host_filters={{ runtime.powerstate == "poweredOn" }}
|
||||
|
||||
|
||||
|
||||
# Groupby patterns enable the user to create groups via any possible jinja
|
||||
# expression. The resulting value will be the group name and the host will be added
|
||||
# to that group. Be careful to not make expressions that simply return True/False
|
||||
# because those values will become the literal group name. The patterns can be
|
||||
# comma delimited to create as many groups as necessary
|
||||
#groupby_patterns={{ guest.guestid }},{{ 'templates' if config.template else 'guests'}}
|
||||
|
||||
# Group by custom fields will use VMware custom fields to generate hostgroups
|
||||
# based on {{ custom_field_group_prefix }} + field_name + _ + field_value
|
||||
# Set groupby_custom_field to True will enable this feature
|
||||
# If custom field value is comma separated, multiple groups are created.
|
||||
# Warning: This requires max_object_level to be set to 2 or greater.
|
||||
#groupby_custom_field = False
|
||||
|
||||
# You can customize prefix used by custom field hostgroups generation here.
|
||||
# vmware_tag_ prefix is the default and consistent with ec2_tag_
|
||||
#custom_field_group_prefix = vmware_tag_
|
||||
|
||||
# You can blacklist custom fields so that they are not included in the
|
||||
# groupby_custom_field option. This is useful when you have custom fields that
|
||||
# have values that are unique to individual hosts. Timestamps for example.
|
||||
# The groupby_custom_field_excludes option should be a comma separated list of custom
|
||||
# field keys to be blacklisted.
|
||||
#groupby_custom_field_excludes=<custom_field_1>,<custom_field_2>,<custom_field_3>
|
||||
|
||||
# The script attempts to recurse into virtualmachine objects and serialize
|
||||
# all available data. The serialization is comprehensive but slow. If the
|
||||
# vcenter environment is large and the desired properties are known, create
|
||||
# a 'properties' section in this config and make an arbitrary list of
|
||||
# key=value settings where the value is a path to a specific property.
|
||||
# If this feature is enabled, be sure to fetch every property that is used
|
||||
# in the jinja expressions defined above. For performance tuning, reduce
|
||||
# the number of properties to the smallest amount possible and limit the
|
||||
# use of properties that are not direct attributes of vim.VirtualMachine
|
||||
#[properties]
|
||||
#prop01=name
|
||||
#prop02=config.cpuHotAddEnabled
|
||||
#prop03=config.cpuHotRemoveEnabled
|
||||
#prop04=config.instanceUuid
|
||||
#prop05=config.hardware.numCPU
|
||||
#prop06=config.template
|
||||
#prop07=config.name
|
||||
#prop08=guest.hostName
|
||||
#prop09=guest.ipAddress
|
||||
#prop10=guest.guestId
|
||||
#prop11=guest.guestState
|
||||
#prop12=runtime.maxMemoryUsage
|
||||
# In order to populate `customValue` (virtual machine's custom attributes) inside hostvars,
|
||||
# uncomment following property. Please see - https://github.com/ansible/ansible/issues/41395
|
||||
#prop13=customValue
|
@ -1,793 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C): 2017, Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
# Requirements
|
||||
# - pyvmomi >= 6.0.0.2016.4
|
||||
|
||||
# TODO:
|
||||
# * more jq examples
|
||||
# * optional folder hierarchy
|
||||
|
||||
"""
|
||||
$ jq '._meta.hostvars[].config' data.json | head
|
||||
{
|
||||
"alternateguestname": "",
|
||||
"instanceuuid": "5035a5cd-b8e8-d717-e133-2d383eb0d675",
|
||||
"memoryhotaddenabled": false,
|
||||
"guestfullname": "Red Hat Enterprise Linux 7 (64-bit)",
|
||||
"changeversion": "2016-05-16T18:43:14.977925Z",
|
||||
"uuid": "4235fc97-5ddb-7a17-193b-9a3ac97dc7b4",
|
||||
"cpuhotremoveenabled": false,
|
||||
"vpmcenabled": false,
|
||||
"firmware": "bios",
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import atexit
|
||||
import datetime
|
||||
import itertools
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import ssl
|
||||
import sys
|
||||
import uuid
|
||||
from time import time
|
||||
|
||||
from jinja2 import Environment
|
||||
|
||||
from ansible.module_utils.six import integer_types, PY3
|
||||
from ansible.module_utils.six.moves import configparser
|
||||
|
||||
try:
|
||||
import argparse
|
||||
except ImportError:
|
||||
sys.exit('Error: This inventory script required "argparse" python module. Please install it or upgrade to python-2.7')
|
||||
|
||||
try:
|
||||
from pyVmomi import vim, vmodl
|
||||
from pyVim.connect import SmartConnect, Disconnect
|
||||
except ImportError:
|
||||
sys.exit("ERROR: This inventory script required 'pyVmomi' Python module, it was not able to load it")
|
||||
|
||||
|
||||
def regex_match(s, pattern):
    '''
    Custom Jinja2 filter: True when *s* matches *pattern* at its start
    (re.match semantics), else False.
    '''
    # Idiom fix: return the boolean directly instead of an if/else that
    # returns True/False. re.match() uses the module's compiled-pattern
    # cache, so the explicit re.compile() was not buying anything.
    return re.match(pattern, s) is not None
|
||||
|
||||
|
||||
def select_chain_match(inlist, key, pattern):
    '''Get a key from a list of dicts, squash values to a single list, then filter'''
    chained = itertools.chain.from_iterable(entry[key] for entry in inlist)
    return [item for item in chained if regex_match(item, pattern)]
|
||||
|
||||
|
||||
class VMwareMissingHostException(Exception):
    '''Custom exception type for missing hosts (no extra behavior).'''
    pass
|
||||
|
||||
|
||||
class VMWareInventory(object):
|
||||
__name__ = 'VMWareInventory'
|
||||
|
||||
guest_props = False
|
||||
instances = []
|
||||
debug = False
|
||||
load_dumpfile = None
|
||||
write_dumpfile = None
|
||||
maxlevel = 1
|
||||
lowerkeys = True
|
||||
config = None
|
||||
cache_max_age = None
|
||||
cache_path_cache = None
|
||||
cache_path_index = None
|
||||
cache_dir = None
|
||||
server = None
|
||||
port = None
|
||||
username = None
|
||||
password = None
|
||||
validate_certs = True
|
||||
host_filters = []
|
||||
skip_keys = []
|
||||
groupby_patterns = []
|
||||
groupby_custom_field_excludes = []
|
||||
|
||||
safe_types = [bool, str, float, None] + list(integer_types)
|
||||
iter_types = [dict, list]
|
||||
|
||||
bad_types = ['Array', 'disabledMethod', 'declaredAlarmState']
|
||||
|
||||
vimTableMaxDepth = {
|
||||
"vim.HostSystem": 2,
|
||||
"vim.VirtualMachine": 2,
|
||||
}
|
||||
|
||||
custom_fields = {}
|
||||
|
||||
# use jinja environments to allow for custom filters
|
||||
env = Environment()
|
||||
env.filters['regex_match'] = regex_match
|
||||
env.filters['select_chain_match'] = select_chain_match
|
||||
|
||||
# translation table for attributes to fetch for known vim types
|
||||
|
||||
vimTable = {
|
||||
vim.Datastore: ['_moId', 'name'],
|
||||
vim.ResourcePool: ['_moId', 'name'],
|
||||
vim.HostSystem: ['_moId', 'name'],
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def _empty_inventory():
|
||||
return {"_meta": {"hostvars": {}}}
|
||||
|
||||
def __init__(self, load=True):
|
||||
self.inventory = VMWareInventory._empty_inventory()
|
||||
|
||||
if load:
|
||||
# Read settings and parse CLI arguments
|
||||
self.parse_cli_args()
|
||||
self.read_settings()
|
||||
|
||||
# Check the cache
|
||||
cache_valid = self.is_cache_valid()
|
||||
|
||||
# Handle Cache
|
||||
if self.args.refresh_cache or not cache_valid:
|
||||
self.do_api_calls_update_cache()
|
||||
else:
|
||||
self.debugl('loading inventory from cache')
|
||||
self.inventory = self.get_inventory_from_cache()
|
||||
|
||||
def debugl(self, text):
|
||||
if self.args.debug:
|
||||
try:
|
||||
text = str(text)
|
||||
except UnicodeEncodeError:
|
||||
text = text.encode('utf-8')
|
||||
print('%s %s' % (datetime.datetime.now(), text))
|
||||
|
||||
def show(self):
|
||||
# Data to print
|
||||
self.debugl('dumping results')
|
||||
data_to_print = None
|
||||
if self.args.host:
|
||||
data_to_print = self.get_host_info(self.args.host)
|
||||
elif self.args.list:
|
||||
# Display list of instances for inventory
|
||||
data_to_print = self.inventory
|
||||
return json.dumps(data_to_print, indent=2)
|
||||
|
||||
def is_cache_valid(self):
|
||||
''' Determines if the cache files have expired, or if it is still valid '''
|
||||
|
||||
valid = False
|
||||
|
||||
if os.path.isfile(self.cache_path_cache):
|
||||
mod_time = os.path.getmtime(self.cache_path_cache)
|
||||
current_time = time()
|
||||
if (mod_time + self.cache_max_age) > current_time:
|
||||
valid = True
|
||||
|
||||
return valid
|
||||
|
||||
def do_api_calls_update_cache(self):
|
||||
''' Get instances and cache the data '''
|
||||
self.inventory = self.instances_to_inventory(self.get_instances())
|
||||
self.write_to_cache(self.inventory)
|
||||
|
||||
def write_to_cache(self, data):
    ''' Dump inventory to json file '''
    serialized = json.dumps(data, indent=2)
    with open(self.cache_path_cache, 'w') as cache_file:
        cache_file.write(serialized)
def get_inventory_from_cache(self):
    ''' Read in jsonified inventory '''
    with open(self.cache_path_cache, 'r') as cache_file:
        return json.loads(cache_file.read())
def read_settings(self):
    ''' Reads the settings from the vmware_inventory.ini file '''

    # Derive the ini filename from this script's basename, e.g.
    # vmware_inventory.py -> vmware_inventory.ini next to the script.
    scriptbasename = __file__
    scriptbasename = os.path.basename(scriptbasename)
    scriptbasename = scriptbasename.replace('.py', '')

    # Hard-coded fallbacks for every supported [vmware] option; applied
    # below only when the ini file does not set the key itself.
    defaults = {'vmware': {
        'server': '',
        'port': 443,
        'username': '',
        'password': '',
        'validate_certs': True,
        'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename),
        'cache_name': 'ansible-vmware',
        'cache_path': '~/.ansible/tmp',
        'cache_max_age': 3600,
        'max_object_level': 1,
        # Adjacent string literals concatenate into one comma-separated list.
        'skip_keys': 'declaredalarmstate,'
                     'disabledmethod,'
                     'dynamicproperty,'
                     'dynamictype,'
                     'environmentbrowser,'
                     'managedby,'
                     'parent,'
                     'childtype,'
                     'resourceconfig',
        'alias_pattern': '{{ config.name + "_" + config.uuid }}',
        'host_pattern': '{{ guest.ipaddress }}',
        'host_filters': '{{ runtime.powerstate == "poweredOn" }}',
        'groupby_patterns': '{{ guest.guestid }},{{ "templates" if config.template else "guests"}}',
        'lower_var_keys': True,
        'custom_field_group_prefix': 'vmware_tag_',
        'groupby_custom_field_excludes': '',
        'groupby_custom_field': False}
    }

    # SafeConfigParser was removed in py3; ConfigParser is its replacement.
    if PY3:
        config = configparser.ConfigParser()
    else:
        config = configparser.SafeConfigParser()

    # where is the config?  Env var wins over the default path next to the script.
    vmware_ini_path = os.environ.get('VMWARE_INI_PATH', defaults['vmware']['ini_path'])
    vmware_ini_path = os.path.expanduser(os.path.expandvars(vmware_ini_path))
    config.read(vmware_ini_path)

    if 'vmware' not in config.sections():
        config.add_section('vmware')

    # apply defaults for any option the ini file left unset
    for k, v in defaults['vmware'].items():
        if not config.has_option('vmware', k):
            config.set('vmware', k, str(v))

    # where is the cache?  Create the directory on first use.
    self.cache_dir = os.path.expanduser(config.get('vmware', 'cache_path'))
    if self.cache_dir and not os.path.exists(self.cache_dir):
        os.makedirs(self.cache_dir)

    # set the cache filename and max age
    cache_name = config.get('vmware', 'cache_name')
    self.cache_path_cache = self.cache_dir + "/%s.cache" % cache_name
    self.debugl('cache path is %s' % self.cache_path_cache)
    self.cache_max_age = int(config.getint('vmware', 'cache_max_age'))

    # mark the connection info; environment variables override the ini values
    self.server = os.environ.get('VMWARE_SERVER', config.get('vmware', 'server'))
    self.debugl('server is %s' % self.server)
    self.port = int(os.environ.get('VMWARE_PORT', config.get('vmware', 'port')))
    self.username = os.environ.get('VMWARE_USERNAME', config.get('vmware', 'username'))
    self.debugl('username is %s' % self.username)
    # raw=True stops configparser interpolation from choking on '%' in passwords.
    self.password = os.environ.get('VMWARE_PASSWORD', config.get('vmware', 'password', raw=True))
    self.validate_certs = os.environ.get('VMWARE_VALIDATE_CERTS', config.get('vmware', 'validate_certs'))
    if self.validate_certs in ['no', 'false', 'False', False]:
        self.validate_certs = False

    self.debugl('cert validation is %s' % self.validate_certs)

    # behavior control
    self.maxlevel = int(config.get('vmware', 'max_object_level'))
    self.debugl('max object level is %s' % self.maxlevel)
    # config.get returns a string, so coerce truthy spellings to a real bool.
    self.lowerkeys = config.get('vmware', 'lower_var_keys')
    if type(self.lowerkeys) != bool:
        if str(self.lowerkeys).lower() in ['yes', 'true', '1']:
            self.lowerkeys = True
        else:
            self.lowerkeys = False
    self.debugl('lower keys is %s' % self.lowerkeys)
    self.skip_keys = list(config.get('vmware', 'skip_keys').split(','))
    self.debugl('skip keys is %s' % self.skip_keys)
    # Filters are jinja expressions; split on '}},' and re-append the closing
    # braces that the split consumed.
    # NOTE(review): self.host_filters / self.groupby_patterns are appended to
    # but never initialized here -- presumably class attributes defined
    # elsewhere in this file; confirm before reuse.
    temp_host_filters = list(config.get('vmware', 'host_filters').split('}},'))
    for host_filter in temp_host_filters:
        host_filter = host_filter.rstrip()
        if host_filter != "":
            if not host_filter.endswith("}}"):
                host_filter += "}}"
            self.host_filters.append(host_filter)
    self.debugl('host filters are %s' % self.host_filters)

    temp_groupby_patterns = list(config.get('vmware', 'groupby_patterns').split('}},'))
    for groupby_pattern in temp_groupby_patterns:
        groupby_pattern = groupby_pattern.rstrip()
        if groupby_pattern != "":
            if not groupby_pattern.endswith("}}"):
                groupby_pattern += "}}"
            self.groupby_patterns.append(groupby_pattern)
    self.debugl('groupby patterns are %s' % self.groupby_patterns)
    # Strip both single and double quotes users may have wrapped values in.
    temp_groupby_custom_field_excludes = config.get('vmware', 'groupby_custom_field_excludes')
    self.groupby_custom_field_excludes = [x.strip('"') for x in [y.strip("'") for y in temp_groupby_custom_field_excludes.split(",")]]
    self.debugl('groupby exclude strings are %s' % self.groupby_custom_field_excludes)

    # Special feature to disable the brute force serialization of the
    # virtual machine objects. The key name for these properties does not
    # matter because the values are just items for a larger list.
    if config.has_section('properties'):
        self.guest_props = []
        for prop in config.items('properties'):
            self.guest_props.append(prop[1])

    # save the config
    self.config = config
def parse_cli_args(self):
    ''' Command line argument processing '''
    parser = argparse.ArgumentParser(
        description='Produce an Ansible Inventory file based on PyVmomi')
    # Table-driven registration keeps flag definitions in one place.
    flags = [
        ('--debug', dict(action='store_true', default=False,
                         help='show debug info')),
        ('--list', dict(action='store_true', default=True,
                        help='List instances (default: True)')),
        ('--host', dict(action='store',
                        help='Get all the variables about a specific instance')),
        ('--refresh-cache', dict(action='store_true', default=False,
                                 help='Force refresh of cache by making API requests to VSphere (default: False - use cache files)')),
        ('--max-instances', dict(default=None, type=int,
                                 help='maximum number of instances to retrieve')),
    ]
    for name, options in flags:
        parser.add_argument(name, **options)
    self.args = parser.parse_args()
def get_instances(self):
    ''' Get a list of vm instances with pyvmomi '''
    connect_kwargs = {
        'host': self.server,
        'user': self.username,
        'pwd': self.password,
        'port': int(self.port),
    }

    has_sslcontext = hasattr(ssl, 'SSLContext')
    if self.validate_certs:
        if not has_sslcontext:
            # Cannot verify certs on ancient python; refuse rather than
            # silently downgrade security.
            sys.exit('pyVim does not support changing verification mode with python < 2.7.9. Either update '
                     'python or use validate_certs=false.')
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_REQUIRED
        context.check_hostname = True
        connect_kwargs['sslContext'] = context
    elif has_sslcontext:
        # Verification explicitly disabled by configuration.
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_NONE
        context.check_hostname = False
        connect_kwargs['sslContext'] = context
    # else: Python 2.7.9 < or RHEL/CentOS 7.4 < -- no SSLContext to configure.

    return self._get_instances(connect_kwargs)
def _get_instances(self, inkwargs):
    ''' Make API calls '''
    instances = []
    si = None
    try:
        si = SmartConnect(**inkwargs)
    except ssl.SSLError as connection_error:
        # Only abort on verification failures when the user asked for
        # verification; other SSL errors fall through to the 'not si' check.
        if '[SSL: CERTIFICATE_VERIFY_FAILED]' in str(connection_error) and self.validate_certs:
            sys.exit("Unable to connect to ESXi server due to %s, "
                     "please specify validate_certs=False and try again" % connection_error)
    except Exception as exc:
        self.debugl("Unable to connect to ESXi server due to %s" % exc)
        sys.exit("Unable to connect to ESXi server due to %s" % exc)

    self.debugl('retrieving all instances')
    if not si:
        sys.exit("Could not connect to the specified host using specified "
                 "username and password")
    # Make sure the session is torn down when the interpreter exits.
    atexit.register(Disconnect, si)
    content = si.RetrieveContent()

    # Create a search container for virtualmachines
    self.debugl('creating containerview for virtualmachines')
    container = content.rootFolder
    viewType = [vim.VirtualMachine]
    recursive = True
    containerView = content.viewManager.CreateContainerView(container, viewType, recursive)
    children = containerView.view
    for child in children:
        # If requested, limit the total number of instances
        if self.args.max_instances:
            if len(instances) >= self.args.max_instances:
                break
        instances.append(child)
    self.debugl("%s total instances in container view" % len(instances))

    # --host narrows to the single named VM before fact collection.
    if self.args.host:
        instances = [x for x in instances if x.name == self.args.host]

    instance_tuples = []
    for instance in instances:
        # guest_props (from the [properties] ini section) selects targeted
        # property fetching; otherwise fall back to full object serialization.
        if self.guest_props:
            ifacts = self.facts_from_proplist(instance)
        else:
            ifacts = self.facts_from_vobj(instance)
        instance_tuples.append((instance, ifacts))
    self.debugl('facts collected for all instances')

    # Collect VM-applicable custom field definitions (key -> name) so tags
    # can be resolved later; failures here are non-fatal.
    try:
        cfm = content.customFieldsManager
        if cfm is not None and cfm.field:
            for f in cfm.field:
                if not f.managedObjectType or f.managedObjectType == vim.VirtualMachine:
                    self.custom_fields[f.key] = f.name
            self.debugl('%d custom fields collected' % len(self.custom_fields))
    except vmodl.RuntimeFault as exc:
        self.debugl("Unable to gather custom fields due to %s" % exc.msg)
    except IndexError as exc:
        self.debugl("Unable to gather custom fields due to %s" % exc)

    return instance_tuples
def instances_to_inventory(self, instances):
    ''' Convert a list of vm objects into a json compliant inventory '''
    self.debugl('re-indexing instances based on ini settings')
    inventory = VMWareInventory._empty_inventory()
    inventory['all'] = {}
    inventory['all']['hosts'] = []
    for idx, instance in enumerate(instances):
        # make a unique id for this object to avoid vmware's
        # numerous uuid's which aren't all unique.
        thisid = str(uuid.uuid4())
        idata = instance[1]

        # Put it in the inventory
        inventory['all']['hosts'].append(thisid)
        inventory['_meta']['hostvars'][thisid] = idata.copy()
        inventory['_meta']['hostvars'][thisid]['ansible_uuid'] = thisid

    # Make a map of the uuid to the alias the user wants
    name_mapping = self.create_template_mapping(
        inventory,
        self.config.get('vmware', 'alias_pattern')
    )

    # Make a map of the uuid to the ssh hostname the user wants
    host_mapping = self.create_template_mapping(
        inventory,
        self.config.get('vmware', 'host_pattern')
    )

    # Reset the inventory keys: replace the synthetic uuid key with the
    # user's alias and record the connection address.
    for k, v in name_mapping.items():

        # Hosts whose host_pattern did not render are dropped from re-keying.
        if not host_mapping or k not in host_mapping:
            continue

        # set ansible_host (2.x)
        try:
            inventory['_meta']['hostvars'][k]['ansible_host'] = host_mapping[k]
            # 1.9.x backwards compliance
            inventory['_meta']['hostvars'][k]['ansible_ssh_host'] = host_mapping[k]
        except Exception:
            continue

        if k == v:
            continue

        # add new key
        inventory['all']['hosts'].append(v)
        inventory['_meta']['hostvars'][v] = inventory['_meta']['hostvars'][k]

        # cleanup old key
        inventory['all']['hosts'].remove(k)
        inventory['_meta']['hostvars'].pop(k, None)

    self.debugl('pre-filtered hosts:')
    for i in inventory['all']['hosts']:
        self.debugl(' * %s' % i)
    # Apply host filters: any filter rendering falsy removes the host.
    for hf in self.host_filters:
        if not hf:
            continue
        self.debugl('filter: %s' % hf)
        filter_map = self.create_template_mapping(inventory, hf, dtype='boolean')
        for k, v in filter_map.items():
            if not v:
                # delete this host
                inventory['all']['hosts'].remove(k)
                inventory['_meta']['hostvars'].pop(k, None)

    self.debugl('post-filter hosts:')
    for i in inventory['all']['hosts']:
        self.debugl(' * %s' % i)

    # Create groups: each groupby pattern renders to a group name per host.
    for gbp in self.groupby_patterns:
        groupby_map = self.create_template_mapping(inventory, gbp)
        for k, v in groupby_map.items():
            if v not in inventory:
                inventory[v] = {}
                inventory[v]['hosts'] = []
            if k not in inventory[v]['hosts']:
                inventory[v]['hosts'].append(k)

    # Optionally also group by vSphere custom field values ("tags").
    if self.config.get('vmware', 'groupby_custom_field'):
        for k, v in inventory['_meta']['hostvars'].items():
            if 'customvalue' in v:
                for tv in v['customvalue']:
                    newkey = None
                    # Resolve the field key to its display name when known.
                    field_name = self.custom_fields[tv['key']] if tv['key'] in self.custom_fields else tv['key']
                    if field_name in self.groupby_custom_field_excludes:
                        continue
                    values = []
                    # A single custom value may hold several comma-separated tags.
                    keylist = map(lambda x: x.strip(), tv['value'].split(','))
                    for kl in keylist:
                        try:
                            newkey = "%s%s_%s" % (self.config.get('vmware', 'custom_field_group_prefix'), str(field_name), kl)
                            newkey = newkey.strip()
                        except Exception as e:
                            self.debugl(e)
                        values.append(newkey)
                    for tag in values:
                        if not tag:
                            continue
                        if tag not in inventory:
                            inventory[tag] = {}
                            inventory[tag]['hosts'] = []
                        if k not in inventory[tag]['hosts']:
                            inventory[tag]['hosts'].append(k)

    return inventory
def create_template_mapping(self, inventory, pattern, dtype='string'):
|
||||
''' Return a hash of uuid to templated string from pattern '''
|
||||
mapping = {}
|
||||
for k, v in inventory['_meta']['hostvars'].items():
|
||||
t = self.env.from_string(pattern)
|
||||
newkey = None
|
||||
try:
|
||||
newkey = t.render(v)
|
||||
newkey = newkey.strip()
|
||||
except Exception as e:
|
||||
self.debugl(e)
|
||||
if not newkey:
|
||||
continue
|
||||
elif dtype == 'integer':
|
||||
newkey = int(newkey)
|
||||
elif dtype == 'boolean':
|
||||
if newkey.lower() == 'false':
|
||||
newkey = False
|
||||
elif newkey.lower() == 'true':
|
||||
newkey = True
|
||||
elif dtype == 'string':
|
||||
pass
|
||||
mapping[k] = newkey
|
||||
return mapping
|
||||
|
||||
def facts_from_proplist(self, vm):
    '''Get specific properties instead of serializing everything'''

    rdata = {}
    for prop in self.guest_props:
        self.debugl('getting %s property for %s' % (prop, vm.name))
        key = prop
        if self.lowerkeys:
            key = key.lower()

        if '.' not in prop:
            # props without periods are direct attributes of the parent
            vm_property = getattr(vm, prop)
            if isinstance(vm_property, vim.CustomFieldsManager.Value.Array):
                # Flatten custom field arrays into [{'key':..,'value':..}, ..].
                temp_vm_property = []
                for vm_prop in vm_property:
                    temp_vm_property.append({'key': vm_prop.key,
                                             'value': vm_prop.value})
                rdata[key] = temp_vm_property
            else:
                rdata[key] = vm_property
        else:
            # props with periods are subkeys of parent attributes
            parts = prop.split('.')
            total = len(parts) - 1

            # pointer to the current object
            val = None
            # pointer to the current result key
            lastref = rdata

            for idx, x in enumerate(parts):

                # A previous step may already have serialized into a dict;
                # then descend by key (case-sensitive first, lowercase fallback).
                if isinstance(val, dict):
                    if x in val:
                        val = val.get(x)
                    elif x.lower() in val:
                        val = val.get(x.lower())
                else:
                    # if the val wasn't set yet, get it from the parent
                    if not val:
                        try:
                            val = getattr(vm, x)
                        except AttributeError as e:
                            self.debugl(e)
                    else:
                        # in a subkey, get the subprop from the previous attrib
                        try:
                            val = getattr(val, x)
                        except AttributeError as e:
                            self.debugl(e)

                # make sure it serializes
                val = self._process_object_types(val)

                # lowercase keys if requested
                if self.lowerkeys:
                    x = x.lower()

                # change the pointer or set the final value
                if idx != total:
                    if x not in lastref:
                        lastref[x] = {}
                    lastref = lastref[x]
                else:
                    lastref[x] = val
    if self.args.debug:
        self.debugl("For %s" % vm.name)
        for key in list(rdata.keys()):
            if isinstance(rdata[key], dict):
                for ikey in list(rdata[key].keys()):
                    self.debugl("Property '%s.%s' has value '%s'" % (key, ikey, rdata[key][ikey]))
            else:
                self.debugl("Property '%s' has value '%s'" % (key, rdata[key]))
    return rdata
def facts_from_vobj(self, vobj, level=0):
    ''' Traverse a VM object and return a json compliant data structure '''

    # pyvmomi objects are not yet serializable, but may be one day ...
    # https://github.com/vmware/pyvmomi/issues/21

    # WARNING:
    # Accessing an object attribute will trigger a SOAP call to the remote.
    # Increasing the attributes collected or the depth of recursion greatly
    # increases runtime duration and potentially memory+network utilization.

    if level == 0:
        try:
            self.debugl("get facts for %s" % vobj.name)
        except Exception as e:
            self.debugl(e)

    rdata = {}

    # Candidate attribute names: public, not a known-bad type, not skipped.
    methods = dir(vobj)
    methods = [str(x) for x in methods if not x.startswith('_')]
    methods = [x for x in methods if x not in self.bad_types]
    methods = [x for x in methods if not x.lower() in self.skip_keys]
    methods = sorted(methods)

    for method in methods:
        # Attempt to get the method, skip on fail
        try:
            methodToCall = getattr(vobj, method)
        except Exception as e:
            continue

        # Skip callable methods
        if callable(methodToCall):
            continue

        if self.lowerkeys:
            method = method.lower()

        # Recursively serialize the attribute value.
        rdata[method] = self._process_object_types(
            methodToCall,
            thisvm=vobj,
            inkey=method,
        )

    return rdata
def _process_object_types(self, vobj, thisvm=None, inkey='', level=0):
    ''' Serialize an object '''
    rdata = {}

    # Per-type recursion caps (vimTableMaxDepth) stop runaway traversal of
    # known-deep vim object types.
    if type(vobj).__name__ in self.vimTableMaxDepth and level >= self.vimTableMaxDepth[type(vobj).__name__]:
        return rdata

    if vobj is None:
        rdata = None
    elif type(vobj) in self.vimTable:
        # Known vim types serialize as a whitelist of simple attributes.
        rdata = {}
        for key in self.vimTable[type(vobj)]:
            try:
                rdata[key] = getattr(vobj, key)
            except Exception as e:
                self.debugl(e)

    elif issubclass(type(vobj), str) or isinstance(vobj, str):
        if vobj.isalnum():
            rdata = vobj
        else:
            # Round-trip through utf-8 to normalize non-alphanumeric strings.
            rdata = vobj.encode('utf-8').decode('utf-8')
    elif issubclass(type(vobj), bool) or isinstance(vobj, bool):
        rdata = vobj
    elif issubclass(type(vobj), integer_types) or isinstance(vobj, integer_types):
        rdata = vobj
    elif issubclass(type(vobj), float) or isinstance(vobj, float):
        rdata = vobj
    elif issubclass(type(vobj), list) or issubclass(type(vobj), tuple):
        rdata = []
        # Sort for stable output when the elements are comparable.
        try:
            vobj = sorted(vobj)
        except Exception:
            pass

        for idv, vii in enumerate(vobj):
            if level + 1 <= self.maxlevel:
                vid = self._process_object_types(
                    vii,
                    thisvm=thisvm,
                    inkey=inkey + '[' + str(idv) + ']',
                    level=(level + 1)
                )

                # Falsy serializations are dropped from the list.
                if vid:
                    rdata.append(vid)

    elif issubclass(type(vobj), dict):
        # Dicts are intentionally skipped (returned as the empty default).
        pass

    elif issubclass(type(vobj), object):
        # Arbitrary objects: walk their public, non-callable attributes,
        # honoring the dotted skip_keys list.
        methods = dir(vobj)
        methods = [str(x) for x in methods if not x.startswith('_')]
        methods = [x for x in methods if x not in self.bad_types]
        methods = [x for x in methods if not inkey + '.' + x.lower() in self.skip_keys]
        methods = sorted(methods)

        for method in methods:
            # Attempt to get the method, skip on fail
            try:
                methodToCall = getattr(vobj, method)
            except Exception as e:
                continue

            if callable(methodToCall):
                continue

            if self.lowerkeys:
                method = method.lower()
            if level + 1 <= self.maxlevel:
                try:
                    rdata[method] = self._process_object_types(
                        methodToCall,
                        thisvm=thisvm,
                        inkey=inkey + '.' + method,
                        level=(level + 1)
                    )
                except vim.fault.NoPermission:
                    self.debugl("Skipping method %s (NoPermission)" % method)
    else:
        pass

    return rdata
def get_host_info(self, host):
    ''' Return hostvars for a single host '''
    hostvars = self.inventory['_meta']['hostvars']

    # Direct hit: the caller passed the inventory key (synthetic uuid/alias).
    if host in hostvars:
        return hostvars[host]

    # Fallback for --host: inventory keys are generated ids, so match the
    # VM's real 'name' fact against the requested hostname instead.
    if self.args.host and hostvars:
        for hostid, data in hostvars.items():
            if data['name'] == self.args.host:
                return hostvars[hostid]

    raise VMwareMissingHostException('%s not found' % host)
if __name__ == "__main__":
    # Run the script: construct the inventory (from API or cache, per CLI
    # flags) and print the JSON result to stdout for Ansible to consume.
    print(VMWareInventory().show())
@ -1,337 +0,0 @@
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import os
|
||||
import traceback
|
||||
|
||||
PYVCLOUD_IMP_ERR = None
|
||||
try:
|
||||
from pyvcloud.vcloudair import VCA
|
||||
HAS_PYVCLOUD = True
|
||||
except ImportError:
|
||||
PYVCLOUD_IMP_ERR = traceback.format_exc()
|
||||
HAS_PYVCLOUD = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
|
||||
# Map the module's service_type choice onto the identifier pyvcloud's VCA
# client expects.
SERVICE_MAP = {'vca': 'ondemand', 'vchs': 'subscription', 'vcd': 'vcd'}
# Fixed login endpoints for the hosted services; 'vcd' uses the caller's
# 'host' parameter instead (see create_instance / vca_login below).
LOGIN_HOST = {'vca': 'vca.vmware.com', 'vchs': 'vchs.vmware.com'}

DEFAULT_SERVICE_TYPE = 'vca'
DEFAULT_VERSION = '5.7'
|
||||
|
||||
class VcaError(Exception):
    """Base error for vca operations.

    Arbitrary keyword context (e.g. ``error=...``, ``valid_instances=...``)
    is retained on ``self.kwargs`` so callers can forward it to fail_json.
    """

    def __init__(self, msg, **kwargs):
        super(VcaError, self).__init__(msg)
        self.kwargs = kwargs
def vca_argument_spec():
    """Return the base argument spec shared by every vca_* module."""
    return {
        'username': dict(type='str', aliases=['user'], required=True),
        'password': dict(type='str', aliases=['pass', 'passwd'], required=True, no_log=True),
        'org': dict(),
        'service_id': dict(),
        'instance_id': dict(),
        'host': dict(),
        'api_version': dict(default=DEFAULT_VERSION),
        'service_type': dict(default=DEFAULT_SERVICE_TYPE, choices=SERVICE_MAP.keys()),
        'vdc_name': dict(),
        'gateway_name': dict(default='gateway'),
        'validate_certs': dict(type='bool', default=True, aliases=['verify_certs']),
    }
class VcaAnsibleModule(AnsibleModule):
    """AnsibleModule specialization that owns a logged-in pyvcloud VCA client.

    Construction parses arguments, creates the VCA client and logs in; the
    gateway and vdc objects are fetched lazily and cached on first access.
    """

    def __init__(self, *args, **kwargs):
        # Merge the caller's argument_spec on top of the shared vca spec.
        argument_spec = vca_argument_spec()
        argument_spec.update(kwargs.get('argument_spec', dict()))
        kwargs['argument_spec'] = argument_spec

        super(VcaAnsibleModule, self).__init__(*args, **kwargs)

        if not HAS_PYVCLOUD:
            self.fail(missing_required_lib('pyvcloud'),
                      exception=PYVCLOUD_IMP_ERR)

        self._vca = self.create_instance()
        self.login()

        # Lazily-populated caches for the gateway/vdc properties below.
        self._gateway = None
        self._vdc = None

    @property
    def vca(self):
        """The underlying pyvcloud VCA client."""
        return self._vca

    @property
    def gateway(self):
        """Gateway for params['vdc_name']/params['gateway_name'], cached.

        Raises VcaError when no such gateway exists.
        """
        if self._gateway is not None:
            return self._gateway
        vdc_name = self.params['vdc_name']
        gateway_name = self.params['gateway_name']
        _gateway = self.vca.get_gateway(vdc_name, gateway_name)
        if not _gateway:
            raise VcaError('vca instance has no gateway named %s' % gateway_name)
        self._gateway = _gateway
        return _gateway

    @property
    def vdc(self):
        """VDC object for params['vdc_name'], cached; raises VcaError if absent."""
        if self._vdc is not None:
            return self._vdc
        vdc_name = self.params['vdc_name']
        _vdc = self.vca.get_vdc(vdc_name)
        if not _vdc:
            raise VcaError('vca instance has no vdc named %s' % vdc_name)
        self._vdc = _vdc
        return _vdc

    def get_vapp(self, vapp_name):
        """Return the named vApp from the configured vdc or raise VcaError."""
        vapp = self.vca.get_vapp(self.vdc, vapp_name)
        if not vapp:
            raise VcaError('vca instance has no vapp named %s' % vapp_name)
        return vapp

    def get_vm(self, vapp_name, vm_name):
        """Return the named VM from the named vApp or raise VcaError."""
        vapp = self.get_vapp(vapp_name)
        children = vapp.me.get_Children()
        vms = [vm for vm in children.get_Vm() if vm.name == vm_name]
        try:
            return vms[0]
        except IndexError:
            raise VcaError('vapp has no vm named %s' % vm_name)

    def create_instance(self):
        """Build (but do not log in) a VCA client from the module params."""
        service_type = self.params.get('service_type', DEFAULT_SERVICE_TYPE)
        # vcd talks to a user-supplied endpoint; vca/vchs use fixed hosts.
        if service_type == 'vcd':
            host = self.params['host']
        else:
            host = LOGIN_HOST[service_type]
        username = self.params['username']

        version = self.params.get('api_version')
        # vchs only supports API version 5.6.
        if service_type == 'vchs':
            version = '5.6'

        verify = self.params.get('validate_certs')

        return VCA(host=host, username=username,
                   service_type=SERVICE_MAP[service_type],
                   version=version, verify=verify)

    def login(self):
        """Password-login, then dispatch to the per-service login_* method."""
        service_type = self.params['service_type']
        password = self.params['password']

        login_org = None
        if service_type == 'vcd':
            login_org = self.params['org']

        if not self.vca.login(password=password, org=login_org):
            self.fail('Login to VCA failed', response=self.vca.response.content)

        try:
            # Dispatch by name: login_vca / login_vchs / login_vcd below.
            method_name = 'login_%s' % service_type
            meth = getattr(self, method_name)
            meth()
        except AttributeError:
            self.fail('no login method exists for service_type %s' % service_type)
        except VcaError as e:
            # NOTE(review): e.message is a py2-ism (gone in py3) -- presumably
            # this code targeted py2; confirm before running on py3 paths
            # where this handler can fire.
            self.fail(e.message, response=self.vca.response.content, **e.kwargs)

    def login_vca(self):
        """Second-stage login for the 'vca' (ondemand) service."""
        instance_id = self.params['instance_id']
        if not instance_id:
            raise VcaError('missing required instance_id for service_type vca')
        self.vca.login_to_instance_sso(instance=instance_id)

    def login_vchs(self):
        """Second-stage login for the 'vchs' (subscription) service."""
        service_id = self.params['service_id']
        if not service_id:
            raise VcaError('missing required service_id for service_type vchs')

        org = self.params['org']
        if not org:
            raise VcaError('missing required org for service_type vchs')

        self.vca.login_to_org(service_id, org)

    def login_vcd(self):
        """Second-stage login for a standalone 'vcd' endpoint (token-based)."""
        org = self.params['org']
        if not org:
            raise VcaError('missing required org for service_type vcd')

        if not self.vca.token:
            raise VcaError('unable to get token for service_type vcd')

        if not self.vca.vcloud_session.org_url:
            raise VcaError('unable to get org_url for service_type vcd')

        self.vca.login(token=self.vca.token, org=org,
                       org_url=self.vca.vcloud_session.org_url)

    def save_services_config(self, blocking=True):
        """Persist gateway service configuration, optionally waiting on the task."""
        task = self.gateway.save_services_configuration()
        if not task:
            self.fail(msg='unable to save gateway services configuration')
        if blocking:
            self.vca.block_until_completed(task)

    def fail(self, msg, **kwargs):
        # Thin convenience wrapper around AnsibleModule.fail_json.
        self.fail_json(msg=msg, **kwargs)

    def exit(self, **kwargs):
        # Thin convenience wrapper around AnsibleModule.exit_json.
        self.exit_json(**kwargs)
# -------------------------------------------------------------
|
||||
# 9/18/2015 @privateip
|
||||
# All of the functions below here were migrated from the original
|
||||
# vca_* modules. All functions below should be considered deprecated
|
||||
# and will be removed once all of the vca_* modules have been updated
|
||||
# to use the new instance module above
|
||||
# -------------------------------------------------------------
|
||||
|
||||
# Arguments that become mandatory for each service_type; enforced by
# _validate_module below.
VCA_REQ_ARGS = ['instance_id', 'vdc_name']
VCHS_REQ_ARGS = ['service_id']
VCD_REQ_ARGS = []
|
||||
|
||||
def _validate_module(module):
    """fail_json unless the params required by the chosen service_type are set."""
    if not HAS_PYVCLOUD:
        module.fail_json(msg=missing_required_lib("pyvcloud"),
                         exception=PYVCLOUD_IMP_ERR)

    service_type = module.params.get('service_type', DEFAULT_SERVICE_TYPE)

    # Per-service mandatory arguments and the matching failure message.
    requirements = {
        'vca': (VCA_REQ_ARGS,
                "argument %s is mandatory when service type "
                "is vca"),
        'vchs': (VCHS_REQ_ARGS,
                 "argument %s is mandatory when service type "
                 "is vchs"),
        'vcd': (VCD_REQ_ARGS,
                "argument %s is mandatory when service type "
                "is vcd"),
    }

    required_args, message = requirements.get(service_type, ([], ''))
    for arg in required_args:
        if module.params.get(arg) is None:
            module.fail_json(msg=message % arg)
|
||||
def serialize_instances(instance_list):
    """Reduce raw vca instance records to {apiUrl, instance_id} dicts."""
    return [dict(apiUrl=item['apiUrl'], instance_id=item['id'])
            for item in instance_list]
||||
|
||||
def _vca_login(vca, password, instance):
|
||||
if not vca.login(password=password):
|
||||
raise VcaError("Login Failed: Please check username or password",
|
||||
error=vca.response.content)
|
||||
|
||||
if not vca.login_to_instance_sso(instance=instance):
|
||||
s_json = serialize_instances(vca.instances)
|
||||
raise VcaError("Login to Instance failed: Seems like instance_id provided "
|
||||
"is wrong .. Please check", valid_instances=s_json)
|
||||
|
||||
return vca
|
||||
|
||||
|
||||
def _vchs_login(vca, password, service, org):
|
||||
if not vca.login(password=password):
|
||||
raise VcaError("Login Failed: Please check username or password",
|
||||
error=vca.response.content)
|
||||
|
||||
if not vca.login_to_org(service, org):
|
||||
raise VcaError("Failed to login to org, Please check the orgname",
|
||||
error=vca.response.content)
|
||||
|
||||
|
||||
def _vcd_login(vca, password, org):
    # TODO: this function needs to be refactored
    # NOTE(review): login() is invoked twice with identical arguments; the
    # second call presumably obtains/refreshes the session token read below
    # -- confirm before collapsing the two calls into one.
    if not vca.login(password=password, org=org):
        raise VcaError("Login Failed: Please check username or password "
                       "or host parameters")

    if not vca.login(password=password, org=org):
        raise VcaError("Failed to get the token",
                       error=vca.response.content)

    # Final login re-authenticates with the token against the org URL.
    if not vca.login(token=vca.token, org=org, org_url=vca.vcloud_session.org_url):
        raise VcaError("Failed to login to org", error=vca.response.content)
|
||||
def vca_login(module):
    """Build and authenticate a VCA client from the module parameters.

    Resolves credentials (with VCA_USER / VCA_PASS environment fallbacks),
    applies the per-service-type defaults, constructs the VCA client and
    dispatches to the matching login helper.

    Returns:
        the authenticated VCA object, or fails the module on any error.
    """
    params = module.params
    service_type = params.get('service_type')
    username = params.get('username')
    password = params.get('password')
    instance = params.get('instance_id')
    org = params.get('org')
    vdc_name = params.get('vdc_name')
    service = params.get('service_id')
    version = params.get('api_version')
    verify = params.get('validate_certs')

    _validate_module(module)

    # vchs fallbacks: the service id doubles as the vdc name, and the org
    # defaults to whichever of vdc_name / service id is set.
    if service_type == 'vchs':
        if not vdc_name:
            vdc_name = params.get('service_id')
        if not org:
            org = vdc_name or service

    # Only vcd targets a user-supplied host; vca/vchs use fixed endpoints.
    host = params.get('host') if service_type == 'vcd' else LOGIN_HOST[service_type]

    # Environment variables take precedence over module parameters.
    username = os.environ.get('VCA_USER', username)
    password = os.environ.get('VCA_PASS', password)

    if not (username and password):
        module.fail_json(msg="Either the username or password is not set, please check args")

    # vchs is pinned to API 5.6; vcd merely defaults to it when unset.
    if service_type == 'vchs' or (service_type == 'vcd' and not version):
        version = '5.6'

    vca = VCA(host=host, username=username,
              service_type=SERVICE_MAP[service_type],
              version=version, verify=verify)

    try:
        if service_type == 'vca':
            _vca_login(vca, password, instance)
        elif service_type == 'vchs':
            _vchs_login(vca, password, service, org)
        elif service_type == 'vcd':
            _vcd_login(vca, password, org)
    except VcaError as e:
        module.fail_json(msg=e.message, **e.kwargs)

    return vca
File diff suppressed because it is too large
Load Diff
@ -1,432 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
import traceback
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
import requests
|
||||
HAS_REQUESTS = True
|
||||
except ImportError:
|
||||
REQUESTS_IMP_ERR = traceback.format_exc()
|
||||
HAS_REQUESTS = False
|
||||
|
||||
PYVMOMI_IMP_ERR = None
|
||||
try:
|
||||
from pyVim import connect
|
||||
from pyVmomi import vim, vmodl
|
||||
HAS_PYVMOMI = True
|
||||
except ImportError:
|
||||
PYVMOMI_IMP_ERR = traceback.format_exc()
|
||||
HAS_PYVMOMI = False
|
||||
|
||||
VSPHERE_IMP_ERR = None
|
||||
try:
|
||||
from com.vmware.vapi.std_client import DynamicID
|
||||
from vmware.vapi.vsphere.client import create_vsphere_client
|
||||
from com.vmware.vapi.std.errors_client import Unauthorized
|
||||
from com.vmware.content.library_client import Item
|
||||
from com.vmware.vcenter_client import (Folder,
|
||||
Datacenter,
|
||||
ResourcePool,
|
||||
Datastore,
|
||||
Cluster,
|
||||
Host)
|
||||
HAS_VSPHERE = True
|
||||
except ImportError:
|
||||
VSPHERE_IMP_ERR = traceback.format_exc()
|
||||
HAS_VSPHERE = False
|
||||
|
||||
from ansible.module_utils.basic import env_fallback, missing_required_lib
|
||||
|
||||
|
||||
class VmwareRestClient(object):
    """Base class for modules that use the vSphere Automation (REST) SDK.

    On construction it verifies the required client libraries are
    importable and logs in, exposing the authenticated client as
    ``self.api_client``.  Helpers cover tag/category lookups, content
    library items and vCenter inventory objects.
    """

    def __init__(self, module):
        """Validate libraries and establish the vSphere API session.

        Args:
            module: AnsibleModule instance providing connection parameters.
        """
        self.module = module
        self.params = module.params
        self.check_required_library()
        self.api_client = self.connect_to_vsphere_client()

    # Helper function
    def get_error_message(self, error):
        """
        Helper function to show human readable error messages.
        """
        err_msg = []
        if not error.messages:
            if isinstance(error, Unauthorized):
                return "Authorization required."
            return "Generic error occurred."

        for err in error.messages:
            err_msg.append(err.default_message % err.args)

        # NOTE: the separator is " ," (space before comma); kept as-is for
        # backward compatibility of emitted messages.
        return " ,".join(err_msg)

    def check_required_library(self):
        """
        Check required libraries; fail the module with the captured import
        traceback when requests, PyVmomi or the vSphere Automation SDK is
        missing.
        """
        if not HAS_REQUESTS:
            self.module.fail_json(msg=missing_required_lib('requests'),
                                  exception=REQUESTS_IMP_ERR)
        if not HAS_PYVMOMI:
            self.module.fail_json(msg=missing_required_lib('PyVmomi'),
                                  exception=PYVMOMI_IMP_ERR)
        if not HAS_VSPHERE:
            self.module.fail_json(
                msg=missing_required_lib('vSphere Automation SDK',
                                         url='https://code.vmware.com/web/sdk/65/vsphere-automation-python'),
                exception=VSPHERE_IMP_ERR)

    @staticmethod
    def vmware_client_argument_spec():
        """Return the common argument spec shared by REST client modules."""
        return dict(
            hostname=dict(type='str',
                          fallback=(env_fallback, ['VMWARE_HOST'])),
            username=dict(type='str',
                          fallback=(env_fallback, ['VMWARE_USER']),
                          aliases=['user', 'admin']),
            password=dict(type='str',
                          fallback=(env_fallback, ['VMWARE_PASSWORD']),
                          aliases=['pass', 'pwd'],
                          no_log=True),
            port=dict(type='int',
                      default=443,
                      fallback=(env_fallback, ['VMWARE_PORT'])),
            protocol=dict(type='str',
                          default='https',
                          choices=['https', 'http']),
            validate_certs=dict(type='bool',
                                fallback=(env_fallback, ['VMWARE_VALIDATE_CERTS']),
                                default=True),
        )

    def connect_to_vsphere_client(self):
        """
        Connect to vSphere API Client with Username and Password

        Fails the module when any credential is missing or login fails;
        otherwise returns the authenticated client object.
        """
        username = self.params.get('username')
        password = self.params.get('password')
        hostname = self.params.get('hostname')
        port = self.params.get('port')
        session = requests.Session()
        session.verify = self.params.get('validate_certs')

        if not all([hostname, username, password]):
            self.module.fail_json(msg="Missing one of the following : hostname, username, password."
                                      " Please read the documentation for more information.")

        client = create_vsphere_client(
            server="%s:%s" % (hostname, port),
            username=username,
            password=password,
            session=session)
        if client is None:
            self.module.fail_json(msg="Failed to login to %s" % hostname)

        return client

    def get_tags_for_object(self, tag_service=None, tag_assoc_svc=None, dobj=None):
        """
        Return list of tag objects associated with an object
        Args:
            dobj: Dynamic object
            tag_service: Tag service object
            tag_assoc_svc: Tag Association object
        Returns: List of tag objects associated with the given object
        """
        # This method returns list of tag objects only,
        # Please use get_tags_for_dynamic_obj for more object details
        tags = []
        if not dobj:
            return tags

        if not tag_service:
            tag_service = self.api_client.tagging.Tag

        if not tag_assoc_svc:
            tag_assoc_svc = self.api_client.tagging.TagAssociation

        tag_ids = tag_assoc_svc.list_attached_tags(dobj)

        for tag_id in tag_ids:
            tags.append(tag_service.get(tag_id))

        return tags

    def get_tags_for_dynamic_obj(self, dobj=None):
        """
        Return list of tag object details associated with object
        Args:
            dobj: Dynamic object for the specified inventory object

        Returns: List of tag object details associated with the given object

        """
        tags = []
        if dobj is None:
            return tags

        # Pass dobj by keyword: the first positional parameter of
        # get_tags_for_object is tag_service, so the previous positional
        # call bound the object there (leaving dobj=None) and silently
        # returned an empty tag list.
        temp_tags_model = self.get_tags_for_object(dobj=dobj)

        category_service = self.api_client.tagging.Category

        for tag_obj in temp_tags_model:
            tags.append({
                'id': tag_obj.id,
                'category_name': category_service.get(tag_obj.category_id).name,
                'name': tag_obj.name,
                'description': tag_obj.description,
                'category_id': tag_obj.category_id,
            })

        return tags

    def get_tags_for_cluster(self, cluster_mid=None):
        """
        Return list of tag object associated with cluster
        Args:
            cluster_mid: Managed object id for cluster

        Returns: List of tag object associated with the given cluster

        """
        dobj = DynamicID(type='cluster', id=cluster_mid)
        return self.get_tags_for_dynamic_obj(dobj)

    def get_tags_for_hostsystem(self, hostsystem_mid=None):
        """
        Return list of tag object associated with host system
        Args:
            hostsystem_mid: Managed object id for host system

        Returns: List of tag object associated with the given host system

        """
        dobj = DynamicID(type='HostSystem', id=hostsystem_mid)
        return self.get_tags_for_dynamic_obj(dobj)

    def get_tags_for_vm(self, vm_mid=None):
        """
        Return list of tag object associated with virtual machine
        Args:
            vm_mid: Managed object id for virtual machine

        Returns: List of tag object associated with the given virtual machine

        """
        dobj = DynamicID(type='VirtualMachine', id=vm_mid)
        return self.get_tags_for_dynamic_obj(dobj)

    def get_vm_tags(self, tag_service=None, tag_association_svc=None, vm_mid=None):
        """
        Return list of tag name associated with virtual machine
        Args:
            tag_service: Tag service object
            tag_association_svc: Tag association object
            vm_mid: Dynamic object for virtual machine

        Returns: List of tag names associated with the given virtual machine

        """
        # This API returns just names of tags
        # Please use get_tags_for_vm for more tag object details
        tags = []
        if vm_mid is None:
            return tags

        temp_tags_model = self.get_tags_for_object(
            tag_service=tag_service,
            tag_assoc_svc=tag_association_svc,
            dobj=vm_mid
        )

        for tag_obj in temp_tags_model:
            tags.append(tag_obj.name)

        return tags

    def get_library_item_by_name(self, name):
        """
        Returns the identifier of the library item with the given name.

        Args:
            name (str): The name of item to look for

        Returns:
            str: The item ID or None if the item is not found
        """
        find_spec = Item.FindSpec(name=name)
        item_ids = self.api_client.content.library.Item.find(find_spec)
        # Multiple items can share a name; the first match wins.
        item_id = item_ids[0] if item_ids else None
        return item_id

    def get_datacenter_by_name(self, datacenter_name):
        """
        Returns the identifier of a datacenter
        Note: The method assumes only one datacenter with the mentioned name.
        """
        filter_spec = Datacenter.FilterSpec(names=set([datacenter_name]))
        datacenter_summaries = self.api_client.vcenter.Datacenter.list(filter_spec)
        datacenter = datacenter_summaries[0].datacenter if len(datacenter_summaries) > 0 else None
        return datacenter

    def get_folder_by_name(self, datacenter_name, folder_name):
        """
        Returns the identifier of a VM folder inside the named datacenter,
        or None when the datacenter or folder cannot be found.
        """
        datacenter = self.get_datacenter_by_name(datacenter_name)
        if not datacenter:
            return None
        filter_spec = Folder.FilterSpec(type=Folder.Type.VIRTUAL_MACHINE,
                                        names=set([folder_name]),
                                        datacenters=set([datacenter]))
        folder_summaries = self.api_client.vcenter.Folder.list(filter_spec)
        folder = folder_summaries[0].folder if len(folder_summaries) > 0 else None
        return folder

    def get_resource_pool_by_name(self, datacenter_name, resourcepool_name):
        """
        Returns the identifier of a resource pool inside the named
        datacenter.  When resourcepool_name is falsy the first pool in the
        datacenter is returned; None when nothing matches.
        """
        datacenter = self.get_datacenter_by_name(datacenter_name)
        if not datacenter:
            return None
        names = set([resourcepool_name]) if resourcepool_name else None
        filter_spec = ResourcePool.FilterSpec(datacenters=set([datacenter]),
                                              names=names)
        resource_pool_summaries = self.api_client.vcenter.ResourcePool.list(filter_spec)
        resource_pool = resource_pool_summaries[0].resource_pool if len(resource_pool_summaries) > 0 else None
        return resource_pool

    def get_datastore_by_name(self, datacenter_name, datastore_name):
        """
        Returns the identifier of a datastore inside the named datacenter.
        When datastore_name is falsy the first datastore is returned; None
        when nothing matches.
        """
        datacenter = self.get_datacenter_by_name(datacenter_name)
        if not datacenter:
            return None
        names = set([datastore_name]) if datastore_name else None
        filter_spec = Datastore.FilterSpec(datacenters=set([datacenter]),
                                           names=names)
        datastore_summaries = self.api_client.vcenter.Datastore.list(filter_spec)
        datastore = datastore_summaries[0].datastore if len(datastore_summaries) > 0 else None
        return datastore

    def get_cluster_by_name(self, datacenter_name, cluster_name):
        """
        Returns the identifier of a cluster inside the named datacenter.
        When cluster_name is falsy the first cluster is returned; None when
        nothing matches.
        """
        datacenter = self.get_datacenter_by_name(datacenter_name)
        if not datacenter:
            return None
        names = set([cluster_name]) if cluster_name else None
        filter_spec = Cluster.FilterSpec(datacenters=set([datacenter]),
                                         names=names)
        cluster_summaries = self.api_client.vcenter.Cluster.list(filter_spec)
        cluster = cluster_summaries[0].cluster if len(cluster_summaries) > 0 else None
        return cluster

    def get_host_by_name(self, datacenter_name, host_name):
        """
        Returns the identifier of a Host inside the named datacenter.
        When host_name is falsy the first host is returned; None when
        nothing matches.
        """
        datacenter = self.get_datacenter_by_name(datacenter_name)
        if not datacenter:
            return None
        names = set([host_name]) if host_name else None
        filter_spec = Host.FilterSpec(datacenters=set([datacenter]),
                                      names=names)
        host_summaries = self.api_client.vcenter.Host.list(filter_spec)
        host = host_summaries[0].host if len(host_summaries) > 0 else None
        return host

    @staticmethod
    def search_svc_object_by_name(service, svc_obj_name=None):
        """
        Return service object by name
        Args:
            service: Service object supporting list() and get()
            svc_obj_name: Name of service object to find

        Returns: Service object if found else None

        """
        if not svc_obj_name:
            return None

        # Linear scan: the tagging services expose no name filter.
        for svc_object in service.list():
            svc_obj = service.get(svc_object)
            if svc_obj.name == svc_obj_name:
                return svc_obj
        return None

    def get_tag_by_name(self, tag_name=None):
        """
        Return tag object by name
        Args:
            tag_name: Name of tag

        Returns: Tag object if found else None
        """
        if not tag_name:
            return None

        return self.search_svc_object_by_name(service=self.api_client.tagging.Tag, svc_obj_name=tag_name)

    def get_category_by_name(self, category_name=None):
        """
        Return category object by name
        Args:
            category_name: Name of category

        Returns: Category object if found else None
        """
        if not category_name:
            return None

        return self.search_svc_object_by_name(service=self.api_client.tagging.Category, svc_obj_name=category_name)

    def get_tag_by_category(self, tag_name=None, category_name=None):
        """
        Return tag object by name, optionally scoped to a category
        Args:
            tag_name: Name of tag
            category_name: Name of category; when omitted the lookup falls
                back to a plain name search across all categories

        Returns: Tag object if found else None
        """

        if not tag_name:
            return None

        if category_name:
            category_obj = self.get_category_by_name(category_name=category_name)

            if not category_obj:
                return None

            for tag_object in self.api_client.tagging.Tag.list():
                tag_obj = self.api_client.tagging.Tag.get(tag_object)

                if tag_obj.name == tag_name and tag_obj.category_id == category_obj.id:
                    return tag_obj
            # Implicitly returns None when no tag in the category matches.
        else:
            return self.search_svc_object_by_name(service=self.api_client.tagging.Tag, svc_obj_name=tag_name)
@ -1,43 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2019, Ansible Project
|
||||
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
try:
|
||||
from pyVmomi import vim, pbm
|
||||
from pyVim.connect import SoapStubAdapter
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.vmware import PyVmomi
|
||||
|
||||
|
||||
class SPBM(PyVmomi):
    """Helper for VMware Storage Policy Based Management (SPBM) connections."""

    def __init__(self, module):
        super(SPBM, self).__init__(module)
        # SPBM service content and service instance; populated by
        # get_spbm_connection().
        self.spbm_content = None
        self.spbm_si = None
        # SPBM SOAP endpoint API version identifier.
        self.version = "pbm.version.version2"

    def get_spbm_connection(self):
        """
        Creates a Service instance for VMware SPBM

        Reuses the already-authenticated vCenter session (self.si): the
        session cookie and SSL context are extracted from its SOAP stub so
        the SPBM endpoint shares the existing login.
        """
        client_stub = self.si._GetStub()
        try:
            # Assumes the cookie value is wrapped in double quotes
            # (vmware_soap_session="<value>"); IndexError otherwise.
            session_cookie = client_stub.cookie.split('"')[1]
        except IndexError:
            self.module.fail_json(msg="Failed to get session cookie")
        ssl_context = client_stub.schemeArgs.get('context')
        # SPBM authenticates by forwarding the vCenter session cookie.
        additional_headers = {'vcSessionCookie': session_cookie}
        hostname = self.module.params['hostname']
        if not hostname:
            self.module.fail_json(msg="Please specify required parameter - hostname")
        stub = SoapStubAdapter(host=hostname, path="/pbm/sdk", version=self.version,
                               sslContext=ssl_context, requestContext=additional_headers)

        self.spbm_si = pbm.ServiceInstance("ServiceInstance", stub)
        self.spbm_content = self.spbm_si.PbmRetrieveServiceContent()
@ -1,119 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vcenter_extension_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vcenter_extension_info) instead.
|
||||
short_description: Gather facts about vCenter extensions
|
||||
description:
|
||||
- This module can be used to gather facts about vCenter extension.
|
||||
version_added: 2.8
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather facts about vCenter Extensions
|
||||
vcenter_extension_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
register: ext_facts
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
extension_facts:
|
||||
description: List of extensions
|
||||
returned: success
|
||||
type: list
|
||||
sample: [
|
||||
{
|
||||
"extension_company": "VMware, Inc.",
|
||||
"extension_key": "com.vmware.vim.ls",
|
||||
"extension_label": "License Services",
|
||||
"extension_last_heartbeat_time": "2018-09-03T09:36:18.003768+00:00",
|
||||
"extension_subject_name": "",
|
||||
"extension_summary": "Provides various license services",
|
||||
"extension_type": "",
|
||||
"extension_version": "5.0"
|
||||
},
|
||||
{
|
||||
"extension_company": "VMware Inc.",
|
||||
"extension_key": "com.vmware.vim.sms",
|
||||
"extension_label": "VMware vCenter Storage Monitoring Service",
|
||||
"extension_last_heartbeat_time": "2018-09-03T09:36:18.005730+00:00",
|
||||
"extension_subject_name": "",
|
||||
"extension_summary": "Storage Monitoring and Reporting",
|
||||
"extension_type": "",
|
||||
"extension_version": "5.5"
|
||||
}
|
||||
]
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
|
||||
|
||||
|
||||
class VmwareExtManager(PyVmomi):
    """Collects facts about extensions registered with vCenter."""

    def __init__(self, module):
        super(VmwareExtManager, self).__init__(module)

    def gather_plugin_facts(self):
        """Exit the module with one fact dict per registered extension."""
        ext_manager = self.content.extensionManager
        if not ext_manager:
            # No extension manager available: report an empty list.
            self.module.exit_json(changed=False, extension_facts=[])

        extension_facts = [
            dict(
                extension_label=ext.description.label,
                extension_summary=ext.description.summary,
                extension_key=ext.key,
                extension_company=ext.company,
                extension_version=ext.version,
                extension_type=ext.type or '',
                extension_subject_name=ext.subjectName or '',
                extension_last_heartbeat_time=ext.lastHeartbeatTime,
            )
            for ext in ext_manager.extensionList
        ]

        self.module.exit_json(changed=False, extension_facts=extension_facts)
def main():
    """Module entry point: parse arguments and report extension facts."""
    module = AnsibleModule(
        argument_spec=vmware_argument_spec(),
        supports_check_mode=True,
    )

    VmwareExtManager(module).gather_plugin_facts()


if __name__ == "__main__":
    main()
@ -1,130 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_about_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_about_info) instead.
|
||||
short_description: Provides information about VMware server to which user is connecting to
|
||||
description:
|
||||
- This module can be used to gather information about VMware server to which user is trying to connect.
|
||||
version_added: 2.7
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Provide information about vCenter
|
||||
vmware_about_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
delegate_to: localhost
|
||||
register: vcenter_about_info
|
||||
|
||||
- name: Provide information about a standalone ESXi server
|
||||
vmware_about_facts:
|
||||
hostname: '{{ esxi_hostname }}'
|
||||
username: '{{ esxi_username }}'
|
||||
password: '{{ esxi_password }}'
|
||||
delegate_to: localhost
|
||||
register: esxi_about_info
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
about_facts:
|
||||
description:
|
||||
- dict about VMware server
|
||||
returned: success
|
||||
type: str
|
||||
sample:
|
||||
{
|
||||
"api_type": "VirtualCenter",
|
||||
"api_version": "6.5",
|
||||
"build": "5973321",
|
||||
"instance_uuid": "dbed6e0c-bd88-4ef6-b594-21283e1c677f",
|
||||
"license_product_name": "VMware VirtualCenter Server",
|
||||
"license_product_version": "6.0",
|
||||
"locale_build": "000",
|
||||
"locale_version": "INTL",
|
||||
"os_type": "darwin-amd64",
|
||||
"product_full_name": "VMware vCenter Server 6.5.0 build-5973321",
|
||||
"product_line_id": "vpx",
|
||||
"product_name": "VMware vCenter Server (govmomi simulator)",
|
||||
"vendor": "VMware, Inc.",
|
||||
"version": "6.5.0"
|
||||
}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
|
||||
|
||||
|
||||
class VmwareAboutManager(PyVmomi):
    """Reports the `about` information of the connected VMware server."""

    # Maps the emitted fact key to the AboutInfo attribute it comes from.
    _FACT_MAP = (
        ('product_name', 'name'),
        ('product_full_name', 'fullName'),
        ('vendor', 'vendor'),
        ('version', 'version'),
        ('build', 'build'),
        ('locale_version', 'localeVersion'),
        ('locale_build', 'localeBuild'),
        ('os_type', 'osType'),
        ('product_line_id', 'productLineId'),
        ('api_type', 'apiType'),
        ('api_version', 'apiVersion'),
        ('instance_uuid', 'instanceUuid'),
        ('license_product_name', 'licenseProductName'),
        ('license_product_version', 'licenseProductVersion'),
    )

    def __init__(self, module):
        super(VmwareAboutManager, self).__init__(module)

    def gather_about_facts(self):
        """Exit the module with a dict built from ServiceInstance.content.about."""
        if not self.content:
            self.module.exit_json(changed=False, about_facts=dict())

        about = self.content.about
        self.module.exit_json(
            changed=False,
            about_facts={key: getattr(about, attr) for key, attr in self._FACT_MAP},
        )
def main():
    """Module entry point: parse arguments and report server 'about' facts."""
    module = AnsibleModule(
        argument_spec=vmware_argument_spec(),
        supports_check_mode=True,
    )

    VmwareAboutManager(module).gather_about_facts()


if __name__ == "__main__":
    main()
@ -1,132 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_category_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_category_info) instead.
|
||||
short_description: Gather facts about VMware tag categories
|
||||
description:
|
||||
- This module can be used to gather facts about VMware tag categories.
|
||||
- Tag feature is introduced in vSphere 6 version, so this module is not supported in earlier versions of vSphere.
|
||||
- All variables and VMware object names are case sensitive.
|
||||
version_added: '2.7'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
- vSphere Automation SDK
|
||||
extends_documentation_fragment: vmware_rest_client.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather facts about tag categories
|
||||
vmware_category_facts:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
delegate_to: localhost
|
||||
register: all_tag_category_facts
|
||||
|
||||
- name: Gather category id from given tag category
|
||||
vmware_category_facts:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
delegate_to: localhost
|
||||
register: tag_category_results
|
||||
|
||||
- set_fact:
|
||||
category_id: "{{ item.category_id }}"
|
||||
loop: "{{ tag_category_results.tag_category_facts|json_query(query) }}"
|
||||
vars:
|
||||
query: "[?category_name==`Category0001`]"
|
||||
- debug: var=category_id
|
||||
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
tag_category_facts:
|
||||
description: metadata of tag categories
|
||||
returned: always
|
||||
type: list
|
||||
sample: [
|
||||
{
|
||||
"category_associable_types": [],
|
||||
"category_cardinality": "MULTIPLE",
|
||||
"category_description": "awesome description",
|
||||
"category_id": "urn:vmomi:InventoryServiceCategory:e785088d-6981-4b1c-9fb8-1100c3e1f742:GLOBAL",
|
||||
"category_name": "Category0001",
|
||||
"category_used_by": []
|
||||
},
|
||||
{
|
||||
"category_associable_types": [
|
||||
"VirtualMachine"
|
||||
],
|
||||
"category_cardinality": "SINGLE",
|
||||
"category_description": "another awesome description",
|
||||
"category_id": "urn:vmomi:InventoryServiceCategory:ae5b7c6c-e622-4671-9b96-76e93adb70f2:GLOBAL",
|
||||
"category_name": "template_tag",
|
||||
"category_used_by": []
|
||||
}
|
||||
]
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware_rest_client import VmwareRestClient
|
||||
|
||||
|
||||
class VmwareCategoryFactsManager(VmwareRestClient):
    """Gathers facts about every tag category known to vCenter."""

    def __init__(self, module):
        super(VmwareCategoryFactsManager, self).__init__(module)
        self.category_service = self.api_client.tagging.Category

    def get_all_tag_categories(self):
        """Retrieve all tag category information."""
        category_objs = (
            self.category_service.get(category_id)
            for category_id in self.category_service.list()
        )
        global_tag_categories = [
            dict(
                category_description=cat.description,
                category_used_by=cat.used_by,
                category_cardinality=str(cat.cardinality),
                category_associable_types=cat.associable_types,
                category_id=cat.id,
                category_name=cat.name,
            )
            for cat in category_objs
        ]

        self.module.exit_json(changed=False, tag_category_facts=global_tag_categories)
def main():
    """Module entry point: parse arguments and report tag category facts."""
    module = AnsibleModule(
        argument_spec=VmwareRestClient.vmware_client_argument_spec(),
        supports_check_mode=True,
    )

    VmwareCategoryFactsManager(module).get_all_tag_categories()


if __name__ == '__main__':
    main()
@ -1 +0,0 @@
|
||||
vmware_cluster_info.py
|
@ -1 +0,0 @@
|
||||
vmware_datastore_info.py
|
@ -1,130 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_dns_config
|
||||
short_description: Manage VMware ESXi DNS Configuration
|
||||
description:
|
||||
- Manage VMware ESXi DNS Configuration
|
||||
version_added: 2.0
|
||||
author:
|
||||
- Joseph Callen (@jcpowermac)
|
||||
notes:
|
||||
- Tested on vSphere 5.5
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
deprecated:
|
||||
removed_in: '2.14'
|
||||
why: Will be replaced with new module vmware_host_dns.
|
||||
alternative: Use M(vmware_host_dns) instead.
|
||||
options:
|
||||
change_hostname_to:
|
||||
description:
|
||||
- The hostname that an ESXi host should be changed to.
|
||||
required: True
|
||||
type: str
|
||||
domainname:
|
||||
description:
|
||||
- The domain the ESXi host should be a part of.
|
||||
required: True
|
||||
type: str
|
||||
dns_servers:
|
||||
description:
|
||||
- The DNS servers that the host should be configured to use.
|
||||
required: True
|
||||
type: list
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Configure ESXi hostname and DNS servers
|
||||
vmware_dns_config:
|
||||
hostname: '{{ esxi_hostname }}'
|
||||
username: '{{ esxi_username }}'
|
||||
password: '{{ esxi_password }}'
|
||||
change_hostname_to: esx01
|
||||
domainname: foo.org
|
||||
dns_servers:
|
||||
- 8.8.8.8
|
||||
- 8.8.4.4
|
||||
delegate_to: localhost
|
||||
'''
|
||||
try:
|
||||
from pyVmomi import vim, vmodl
|
||||
HAS_PYVMOMI = True
|
||||
except ImportError:
|
||||
HAS_PYVMOMI = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import HAS_PYVMOMI, connect_to_api, get_all_objs, vmware_argument_spec
|
||||
|
||||
|
||||
def configure_dns(host_system, hostname, domainname, dns_servers):
    """Apply a static DNS configuration to an ESXi host.

    Compares the desired hostname, domain name and DNS server list against
    the host's current DNS settings, and pushes an update only when at
    least one of them differs. DHCP-based DNS is always switched off,
    since a static configuration is being applied.

    Args:
        host_system: HostSystem managed object to reconfigure.
        hostname: Desired short hostname for the host.
        domainname: Desired DNS domain of the host.
        dns_servers: Desired list of DNS server addresses.

    Returns:
        bool: True when the host configuration was updated, else False.
    """
    network_system = host_system.configManager.networkSystem
    dns_config = network_system.dnsConfig

    # Static settings are being applied, so DHCP-sourced DNS is disabled.
    # NOTE(review): this flip alone does not mark the config "changed" (same
    # as the original behaviour) — it is only persisted when another field
    # differs and UpdateDnsConfig() fires.
    dns_config.dhcp = False

    desired = (
        ('address', dns_servers),
        ('domainName', domainname),
        ('hostName', hostname),
    )

    changed = False
    for attribute, value in desired:
        if getattr(dns_config, attribute) != value:
            setattr(dns_config, attribute, value)
            changed = True

    if changed:
        network_system.UpdateDnsConfig(dns_config)

    return changed
|
||||
|
||||
|
||||
def main():
    """Module entry point: validate arguments and apply DNS settings.

    Locates the first HostSystem reachable through the connection and
    delegates the actual reconfiguration to configure_dns(). Exits via
    AnsibleModule with changed status, or fails with the fault message.
    """
    spec = vmware_argument_spec()
    spec.update(
        dict(
            change_hostname_to=dict(required=True, type='str'),
            domainname=dict(required=True, type='str'),
            dns_servers=dict(required=True, type='list'),
        )
    )

    module = AnsibleModule(argument_spec=spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    params = module.params
    try:
        content = connect_to_api(module)
        hosts = get_all_objs(content, [vim.HostSystem])
        if not hosts:
            module.fail_json(msg="Unable to locate Physical Host.")
        # Only a single (the first discovered) host is reconfigured.
        target_host = list(hosts)[0]
        changed = configure_dns(
            target_host,
            params['change_hostname_to'],
            params['domainname'],
            params['dns_servers'],
        )
        module.exit_json(changed=changed)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
    except Exception as e:
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
|
@ -1,285 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Karsten Kaj Jakobsen <kj@patientsky.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_drs_group_info) instead.
|
||||
author:
|
||||
- "Karsten Kaj Jakobsen (@karstenjakobsen)"
|
||||
description:
|
||||
- "This module can be used to gather facts about DRS VM/HOST groups from the given cluster."
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
module: vmware_drs_group_facts
|
||||
notes:
|
||||
- "Tested on vSphere 6.5 and 6.7"
|
||||
options:
|
||||
cluster_name:
|
||||
description:
|
||||
- "Cluster to search for VM/Host groups."
|
||||
- "If set, facts of DRS groups belonging this cluster will be returned."
|
||||
- "Not needed if C(datacenter) is set."
|
||||
required: false
|
||||
type: str
|
||||
datacenter:
|
||||
aliases:
|
||||
- datacenter_name
|
||||
description:
|
||||
- "Datacenter to search for DRS VM/Host groups."
|
||||
required: true
|
||||
type: str
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
short_description: "Gathers facts about DRS VM/Host groups on the given cluster"
|
||||
version_added: "2.8"
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
---
|
||||
- name: "Gather DRS facts about given Cluster"
|
||||
register: cluster_drs_group_facts
|
||||
vmware_drs_group_facts:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
cluster_name: "{{ cluster_name }}"
|
||||
datacenter: "{{ datacenter }}"
|
||||
delegate_to: localhost
|
||||
|
||||
- name: "Gather DRS group facts about all clusters in given datacenter"
|
||||
register: cluster_drs_group_facts
|
||||
vmware_drs_group_facts:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
datacenter: "{{ datacenter }}"
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
drs_group_facts:
|
||||
description: Metadata about DRS group from given cluster / datacenter
|
||||
returned: always
|
||||
type: dict
|
||||
sample:
|
||||
"drs_group_facts": {
|
||||
"DC0_C0": [
|
||||
{
|
||||
"group_name": "GROUP_HOST_S01",
|
||||
"hosts": [
|
||||
"vm-01.zone",
|
||||
"vm-02.zone"
|
||||
],
|
||||
"type": "host"
|
||||
},
|
||||
{
|
||||
"group_name": "GROUP_HOST_S02",
|
||||
"hosts": [
|
||||
"vm-03.zone",
|
||||
"vm-04.zone"
|
||||
],
|
||||
"type": "host"
|
||||
},
|
||||
{
|
||||
"group_name": "GROUP_VM_S01",
|
||||
"type": "vm",
|
||||
"vms": [
|
||||
"test-node01"
|
||||
]
|
||||
},
|
||||
{
|
||||
"group_name": "GROUP_VM_S02",
|
||||
"type": "vm",
|
||||
"vms": [
|
||||
"test-node02"
|
||||
]
|
||||
}
|
||||
],
|
||||
"DC0_C1": []
|
||||
}
|
||||
'''
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datacenter_by_name, get_all_objs
|
||||
|
||||
|
||||
class VmwareDrsGroupFactManager(PyVmomi):
    """Collect DRS VM/Host group facts from one cluster or a whole datacenter."""

    def __init__(self, module, datacenter_name, cluster_name=None):
        """
        Resolve the list of clusters to inspect.

        Args:
            module: AnsibleModule instance (connection parameters).
            datacenter_name: Datacenter whose clusters should be scanned.
            cluster_name: Optional single cluster to restrict the scan to.

        Raises:
            Exception: If the named datacenter or cluster cannot be found.
        """

        super(VmwareDrsGroupFactManager, self).__init__(module)

        self.__datacenter_name = datacenter_name
        self.__datacenter_obj = None
        self.__cluster_name = cluster_name
        self.__cluster_obj = None
        # NOTE(review): __msg and __changed are never read in this class.
        self.__msg = 'Nothing to see here...'
        self.__result = dict()
        self.__changed = False

        if datacenter_name:

            datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter_name)
            self.cluster_obj_list = []

            if datacenter_obj:
                # Scan every cluster under the datacenter's host folder.
                folder = datacenter_obj.hostFolder
                self.cluster_obj_list = get_all_objs(self.content, [vim.ClusterComputeResource], folder)
            else:
                raise Exception("Datacenter '%s' not found" % self.__datacenter_name)

        if cluster_name:

            cluster_obj = self.find_cluster_by_name(cluster_name=self.__cluster_name)

            if cluster_obj is None:
                raise Exception("Cluster '%s' not found" % self.__cluster_name)
            else:
                # A named cluster narrows the scan to exactly that cluster.
                self.cluster_obj_list = [cluster_obj]

    def get_result(self):
        """
        Return the facts previously stored by gather_facts().
        """
        return self.__result

    def __set_result(self, result):
        """
        Store the gathered facts for later retrieval via get_result().

        Args:
            result: dict mapping cluster names to lists of DRS group dicts.

        Returns: None

        """
        self.__result = result

    def __get_all_from_group(self, group_obj, host_group=False):
        """
        Return all VM / Host names belonging to the given group.

        Args:
            group_obj: DRS group object (vim.cluster.VmGroup or HostGroup).
            host_group: True to extract host names instead of VM names.

        Returns: List of VM / Host names belonging to given group object
            (empty when group_obj is falsy or its type does not match).

        """
        obj_name_list = []

        if not all([group_obj]):
            return obj_name_list

        if not host_group and isinstance(group_obj, vim.cluster.VmGroup):
            obj_name_list = [vm.name for vm in group_obj.vm]
        elif host_group and isinstance(group_obj, vim.cluster.HostGroup):
            obj_name_list = [host.name for host in group_obj.host]

        return obj_name_list

    def __normalize_group_data(self, group_obj):
        """
        Return a human readable spec for a single DRS group.

        Args:
            group_obj: DRS group object.

        Returns: Dict with keys group_name, type ("host" or "vm"), and
            either hosts or vms; empty dict when group_obj is falsy.

        """
        if not all([group_obj]):
            return {}

        # A 'host' attribute distinguishes host groups from VM groups.
        if hasattr(group_obj, 'host'):
            return dict(
                group_name=group_obj.name,
                hosts=self.__get_all_from_group(group_obj=group_obj, host_group=True),
                type="host"
            )
        else:
            return dict(
                group_name=group_obj.name,
                vms=self.__get_all_from_group(group_obj=group_obj),
                type="vm"
            )

    def gather_facts(self):
        """
        Gather DRS group facts for every cluster resolved in __init__.

        Returns: None. The per-cluster result dict is stored internally
            and must be read back through get_result().

        """
        cluster_group_facts = dict()

        for cluster_obj in self.cluster_obj_list:

            cluster_group_facts[cluster_obj.name] = []

            for drs_group in cluster_obj.configurationEx.group:
                cluster_group_facts[cluster_obj.name].append(self.__normalize_group_data(drs_group))

        self.__set_result(cluster_group_facts)
|
||||
|
||||
|
||||
def main():
    """Module entry point: gather DRS group facts and exit via module API."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datacenter=dict(type='str', required=False, aliases=['datacenter_name']),
        cluster_name=dict(type='str', required=False),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[['cluster_name', 'datacenter']],
        mutually_exclusive=[['cluster_name', 'datacenter']],
    )

    try:
        # Resolve the target clusters and collect their DRS group data.
        fact_manager = VmwareDrsGroupFactManager(
            module=module,
            datacenter_name=module.params.get('datacenter'),
            cluster_name=module.params.get('cluster_name', None),
        )
        fact_manager.gather_facts()
        results = dict(failed=False, drs_group_facts=fact_manager.get_result())
    except Exception as error:
        # Any lookup/connection failure is reported through fail_json below.
        results = dict(failed=True, msg="Error: %s" % error)

    if results['failed']:
        module.fail_json(**results)
    else:
        module.exit_json(**results)


if __name__ == "__main__":
    main()
|
@ -1,265 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_drs_rule_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_drs_rule_info) instead.
|
||||
short_description: Gathers facts about DRS rule on the given cluster
|
||||
description:
|
||||
- 'This module can be used to gather facts about DRS VM-VM and VM-HOST rules from the given cluster.'
|
||||
version_added: '2.5'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
cluster_name:
|
||||
description:
|
||||
- Name of the cluster.
|
||||
- DRS facts for the given cluster will be returned.
|
||||
- This is required parameter if C(datacenter) parameter is not provided.
|
||||
type: str
|
||||
datacenter:
|
||||
description:
|
||||
- Name of the datacenter.
|
||||
- DRS facts for all the clusters from the given datacenter will be returned.
|
||||
- This is required parameter if C(cluster_name) parameter is not provided.
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather DRS facts about given Cluster
|
||||
vmware_drs_rule_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
cluster_name: '{{ cluster_name }}'
|
||||
delegate_to: localhost
|
||||
register: cluster_drs_facts
|
||||
|
||||
- name: Gather DRS facts about all Clusters in given datacenter
|
||||
vmware_drs_rule_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter: '{{ datacenter_name }}'
|
||||
delegate_to: localhost
|
||||
register: datacenter_drs_facts
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
drs_rule_facts:
|
||||
description: metadata about DRS rule from given cluster / datacenter
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"DC0_C0": [
|
||||
{
|
||||
"rule_affinity": false,
|
||||
"rule_enabled": true,
|
||||
"rule_key": 1,
|
||||
"rule_mandatory": true,
|
||||
"rule_name": "drs_rule_0001",
|
||||
"rule_type": "vm_vm_rule",
|
||||
"rule_uuid": "52be5061-665a-68dc-3d25-85cd2d37e114",
|
||||
"rule_vms": [
|
||||
"VM_65",
|
||||
"VM_146"
|
||||
]
|
||||
},
|
||||
],
|
||||
"DC1_C1": [
|
||||
{
|
||||
"rule_affine_host_group_name": "host_group_1",
|
||||
"rule_affine_hosts": [
|
||||
"10.76.33.204"
|
||||
],
|
||||
"rule_anti_affine_host_group_name": null,
|
||||
"rule_anti_affine_hosts": [],
|
||||
"rule_enabled": true,
|
||||
"rule_key": 1,
|
||||
"rule_mandatory": false,
|
||||
"rule_name": "vm_host_rule_0001",
|
||||
"rule_type": "vm_host_rule",
|
||||
"rule_uuid": "52687108-4d3a-76f2-d29c-b708c40dbe40",
|
||||
"rule_vm_group_name": "test_vm_group_1",
|
||||
"rule_vms": [
|
||||
"VM_8916",
|
||||
"VM_4010"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
'''
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datacenter_by_name, get_all_objs
|
||||
|
||||
|
||||
class VmwareDrsFactManager(PyVmomi):
    """Collect DRS VM-VM and VM-Host rule facts from clusters."""

    def __init__(self, module):
        """Resolve the clusters to inspect from the 'datacenter' or
        'cluster_name' module parameters; fails the module when the named
        object cannot be found."""
        super(VmwareDrsFactManager, self).__init__(module)

        datacenter_name = self.params.get('datacenter', None)
        if datacenter_name:
            datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter_name)
            self.cluster_obj_list = []
            if datacenter_obj:
                # Scan every cluster under the datacenter's host folder.
                folder = datacenter_obj.hostFolder
                self.cluster_obj_list = get_all_objs(self.content, [vim.ClusterComputeResource], folder)
            else:
                self.module.fail_json(changed=False, msg="Datacenter '%s' not found" % datacenter_name)

        cluster_name = self.params.get('cluster_name', None)
        if cluster_name:
            cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name)
            if cluster_obj is None:
                self.module.fail_json(changed=False, msg="Cluster '%s' not found" % cluster_name)
            else:
                # A named cluster narrows the scan to exactly that cluster.
                self.cluster_obj_list = [cluster_obj]

    def get_all_from_group(self, group_name=None, cluster_obj=None, hostgroup=False):
        """
        Return all VM / Host names using given group name

        Args:
            group_name: DRS group name to look up in the cluster
            cluster_obj: Cluster managed object
            hostgroup: True if we want only host names from the group

        Returns: List of VM / Host names belonging to given group object
            (empty when either argument is missing or no group matches)

        """
        obj_name_list = []
        if not all([group_name, cluster_obj]):
            return obj_name_list

        for group in cluster_obj.configurationEx.group:
            if group.name == group_name:
                # Only the first group with a matching name is considered.
                if not hostgroup and isinstance(group, vim.cluster.VmGroup):
                    obj_name_list = [vm.name for vm in group.vm]
                    break
                elif hostgroup and isinstance(group, vim.cluster.HostGroup):
                    obj_name_list = [host.name for host in group.host]
                    break

        return obj_name_list

    @staticmethod
    def normalize_vm_vm_rule_spec(rule_obj=None):
        """
        Return human readable rule spec

        Args:
            rule_obj: VM-VM rule managed object

        Returns: Dictionary with DRS VM VM Rule info
            (empty dict when rule_obj is None)

        """
        if rule_obj is None:
            return {}
        return dict(rule_key=rule_obj.key,
                    rule_enabled=rule_obj.enabled,
                    rule_name=rule_obj.name,
                    rule_mandatory=rule_obj.mandatory,
                    rule_uuid=rule_obj.ruleUuid,
                    rule_vms=[vm.name for vm in rule_obj.vm],
                    rule_type="vm_vm_rule",
                    # AffinityRuleSpec marks keep-together; else anti-affinity.
                    rule_affinity=True if isinstance(rule_obj, vim.cluster.AffinityRuleSpec) else False,
                    )

    def normalize_vm_host_rule_spec(self, rule_obj=None, cluster_obj=None):
        """
        Return human readable rule spec

        Args:
            rule_obj: VM-Host rule managed object
            cluster_obj: Cluster managed object (needed to resolve the
                VM/host groups the rule refers to by name)

        Returns: Dictionary with DRS VM HOST Rule info
            (empty dict when either argument is missing)

        """
        if not all([rule_obj, cluster_obj]):
            return {}
        return dict(rule_key=rule_obj.key,
                    rule_enabled=rule_obj.enabled,
                    rule_name=rule_obj.name,
                    rule_mandatory=rule_obj.mandatory,
                    rule_uuid=rule_obj.ruleUuid,
                    rule_vm_group_name=rule_obj.vmGroupName,
                    rule_affine_host_group_name=rule_obj.affineHostGroupName,
                    rule_anti_affine_host_group_name=rule_obj.antiAffineHostGroupName,
                    rule_vms=self.get_all_from_group(group_name=rule_obj.vmGroupName,
                                                     cluster_obj=cluster_obj),
                    rule_affine_hosts=self.get_all_from_group(group_name=rule_obj.affineHostGroupName,
                                                              cluster_obj=cluster_obj,
                                                              hostgroup=True),
                    rule_anti_affine_hosts=self.get_all_from_group(group_name=rule_obj.antiAffineHostGroupName,
                                                                   cluster_obj=cluster_obj,
                                                                   hostgroup=True),
                    rule_type="vm_host_rule",
                    )

    def gather_drs_rule_facts(self):
        """
        Gather DRS rule facts for every cluster resolved in __init__.

        Returns: Dictionary mapping cluster names to lists of normalized
            rule dicts (VM-Host rules and VM-VM rules)

        """
        cluster_rule_facts = dict()
        for cluster_obj in self.cluster_obj_list:
            cluster_rule_facts[cluster_obj.name] = []
            for drs_rule in cluster_obj.configuration.rule:
                # VmHostRuleInfo needs the cluster to resolve group names;
                # every other rule type is treated as a VM-VM rule.
                if isinstance(drs_rule, vim.cluster.VmHostRuleInfo):
                    cluster_rule_facts[cluster_obj.name].append(self.normalize_vm_host_rule_spec(rule_obj=drs_rule,
                                                                                                 cluster_obj=cluster_obj))
                else:
                    cluster_rule_facts[cluster_obj.name].append(self.normalize_vm_vm_rule_spec(rule_obj=drs_rule))

        return cluster_rule_facts
|
||||
|
||||
|
||||
def main():
    """Module entry point: gather DRS rule facts and exit via module API."""
    spec = vmware_argument_spec()
    spec.update(
        datacenter=dict(type='str', required=False),
        cluster_name=dict(type='str', required=False),
    )

    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[
            ['cluster_name', 'datacenter'],
        ],
        supports_check_mode=True,
    )

    # Fact gathering never changes anything on the vCenter side.
    fact_manager = VmwareDrsFactManager(module)
    module.exit_json(changed=False, drs_rule_facts=fact_manager.gather_drs_rule_facts())


if __name__ == "__main__":
    main()
|
@ -1,278 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_dvs_portgroup_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_dvs_portgroup_info) instead.
|
||||
short_description: Gathers facts about DVS portgroup configurations
|
||||
description:
|
||||
- This module can be used to gather facts about DVS portgroup configurations.
|
||||
version_added: 2.8
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
datacenter:
|
||||
description:
|
||||
- Name of the datacenter.
|
||||
required: true
|
||||
type: str
|
||||
dvswitch:
|
||||
description:
|
||||
- Name of a dvswitch to look for.
|
||||
required: false
|
||||
type: str
|
||||
version_added: "2.9"
|
||||
show_network_policy:
|
||||
description:
|
||||
- Show or hide network policies of DVS portgroup.
|
||||
type: bool
|
||||
default: True
|
||||
show_port_policy:
|
||||
description:
|
||||
- Show or hide port policies of DVS portgroup.
|
||||
type: bool
|
||||
default: True
|
||||
show_teaming_policy:
|
||||
description:
|
||||
- Show or hide teaming policies of DVS portgroup.
|
||||
type: bool
|
||||
default: True
|
||||
show_vlan_info:
|
||||
description:
|
||||
- Show or hide vlan information of the DVS portgroup.
|
||||
type: bool
|
||||
default: False
|
||||
version_added: "2.9"
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Get facts about DVPG
|
||||
vmware_dvs_portgroup_facts:
|
||||
hostname: "{{ vcenter_server }}"
|
||||
username: "{{ vcenter_user }}"
|
||||
password: "{{ vcenter_pass }}"
|
||||
validate_certs: no
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
register: dvpg_facts
|
||||
- name: Get number of ports for portgroup 'dvpg_001' in 'dvs_001'
|
||||
debug:
|
||||
msg: "{{ item.num_ports }}"
|
||||
with_items:
|
||||
- "{{ dvpg_facts.dvs_portgroup_facts['dvs_001'] | json_query(query) }}"
|
||||
vars:
|
||||
query: "[?portgroup_name=='dvpg_001']"
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
dvs_portgroup_facts:
|
||||
description: metadata about DVS portgroup configuration
|
||||
returned: on success
|
||||
type: dict
|
||||
sample: {
|
||||
"dvs_0":[
|
||||
{
|
||||
"description": null,
|
||||
"dvswitch_name": "dvs_001",
|
||||
"network_policy": {
|
||||
"forged_transmits": false,
|
||||
"mac_changes": false,
|
||||
"promiscuous": false
|
||||
},
|
||||
"num_ports": 8,
|
||||
"port_policy": {
|
||||
"block_override": true,
|
||||
"ipfix_override": false,
|
||||
"live_port_move": false,
|
||||
"network_rp_override": false,
|
||||
"port_config_reset_at_disconnect": true,
|
||||
"security_override": false,
|
||||
"shaping_override": false,
|
||||
"traffic_filter_override": false,
|
||||
"uplink_teaming_override": false,
|
||||
"vendor_config_override": false,
|
||||
"vlan_override": false
|
||||
},
|
||||
"portgroup_name": "dvpg_001",
|
||||
"teaming_policy": {
|
||||
"inbound_policy": true,
|
||||
"notify_switches": true,
|
||||
"policy": "loadbalance_srcid",
|
||||
"rolling_order": false
|
||||
},
|
||||
"vlan_info": {
|
||||
"trunk": false,
|
||||
"pvlan": false,
|
||||
"vlan_id": 0
|
||||
},
|
||||
"type": "earlyBinding"
|
||||
},
|
||||
]
|
||||
}
|
||||
'''
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError as e:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, get_all_objs, find_dvs_by_name
|
||||
|
||||
|
||||
class DVSPortgroupFactsManager(PyVmomi):
    """Collect configuration facts for portgroups of distributed vSwitches."""

    def __init__(self, module):
        """Resolve the dvswitches to inspect: the one named by 'dvswitch',
        or every dvswitch in the datacenter's network folder. Fails the
        module when the datacenter or the named dvswitch is not found."""
        super(DVSPortgroupFactsManager, self).__init__(module)
        self.dc_name = self.params['datacenter']
        self.dvs_name = self.params['dvswitch']

        datacenter = self.find_datacenter_by_name(self.dc_name)
        if datacenter is None:
            self.module.fail_json(msg="Failed to find the datacenter %s" % self.dc_name)
        if self.dvs_name:
            # User specified specific dvswitch name to gather information
            dvsn = find_dvs_by_name(self.content, self.dvs_name)
            if dvsn is None:
                self.module.fail_json(msg="Failed to find the dvswitch %s" % self.dvs_name)

            self.dvsls = [dvsn]
        else:
            # default behaviour, gather information about all dvswitches
            self.dvsls = get_all_objs(self.content, [vim.DistributedVirtualSwitch], folder=datacenter.networkFolder)

    def get_vlan_info(self, vlan_obj=None):
        """
        Return vlan information from given object
        Args:
            vlan_obj: vlan managed object (TrunkVlanSpec, PvlanSpec, or a
                plain VlanIdSpec-style object carrying vlanId)
        Returns: Dict of vlan details of the specific object
            (keys: trunk, pvlan, vlan_id; empty dict for a falsy input)
        """

        vdret = dict()
        if not vlan_obj:
            return vdret

        if isinstance(vlan_obj, vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec):
            # Trunk ranges are rendered as "start-end", single IDs as "start".
            vlan_id_list = []
            for vli in vlan_obj.vlanId:
                if vli.start == vli.end:
                    vlan_id_list.append(str(vli.start))
                else:
                    vlan_id_list.append(str(vli.start) + "-" + str(vli.end))
            vdret = dict(trunk=True, pvlan=False, vlan_id=vlan_id_list)
        elif isinstance(vlan_obj, vim.dvs.VmwareDistributedVirtualSwitch.PvlanSpec):
            vdret = dict(trunk=False, pvlan=True, vlan_id=str(vlan_obj.pvlanId))
        else:
            vdret = dict(trunk=False, pvlan=False, vlan_id=str(vlan_obj.vlanId))

        return vdret

    def gather_dvs_portgroup_facts(self):
        """Build the per-dvswitch portgroup facts dict.

        Returns: Dict mapping dvswitch name to a list of portgroup detail
            dicts; the policy/vlan sub-dicts stay empty unless the matching
            show_* module parameter is enabled.
        """
        dvs_lists = self.dvsls
        result = dict()
        for dvs in dvs_lists:
            result[dvs.name] = list()
            for dvs_pg in dvs.portgroup:
                network_policy = dict()
                teaming_policy = dict()
                port_policy = dict()
                vlan_info = dict()

                if self.module.params['show_network_policy'] and dvs_pg.config.defaultPortConfig.securityPolicy:
                    network_policy = dict(
                        forged_transmits=dvs_pg.config.defaultPortConfig.securityPolicy.forgedTransmits.value,
                        promiscuous=dvs_pg.config.defaultPortConfig.securityPolicy.allowPromiscuous.value,
                        mac_changes=dvs_pg.config.defaultPortConfig.securityPolicy.macChanges.value
                    )
                if self.module.params['show_teaming_policy']:
                    # govcsim does not have uplinkTeamingPolicy, remove this check once
                    # PR https://github.com/vmware/govmomi/pull/1524 merged.
                    if dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy:
                        teaming_policy = dict(
                            policy=dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.policy.value,
                            inbound_policy=dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.reversePolicy.value,
                            notify_switches=dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.notifySwitches.value,
                            rolling_order=dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.rollingOrder.value,
                        )

                if self.params['show_port_policy']:
                    # govcsim does not have port policy
                    if dvs_pg.config.policy:
                        port_policy = dict(
                            block_override=dvs_pg.config.policy.blockOverrideAllowed,
                            ipfix_override=dvs_pg.config.policy.ipfixOverrideAllowed,
                            live_port_move=dvs_pg.config.policy.livePortMovingAllowed,
                            network_rp_override=dvs_pg.config.policy.networkResourcePoolOverrideAllowed,
                            port_config_reset_at_disconnect=dvs_pg.config.policy.portConfigResetAtDisconnect,
                            security_override=dvs_pg.config.policy.securityPolicyOverrideAllowed,
                            shaping_override=dvs_pg.config.policy.shapingOverrideAllowed,
                            traffic_filter_override=dvs_pg.config.policy.trafficFilterOverrideAllowed,
                            uplink_teaming_override=dvs_pg.config.policy.uplinkTeamingOverrideAllowed,
                            vendor_config_override=dvs_pg.config.policy.vendorConfigOverrideAllowed,
                            vlan_override=dvs_pg.config.policy.vlanOverrideAllowed
                        )

                if self.params['show_vlan_info']:
                    vlan_info = self.get_vlan_info(dvs_pg.config.defaultPortConfig.vlan)

                dvpg_details = dict(
                    portgroup_name=dvs_pg.name,
                    num_ports=dvs_pg.config.numPorts,
                    dvswitch_name=dvs_pg.config.distributedVirtualSwitch.name,
                    description=dvs_pg.config.description,
                    type=dvs_pg.config.type,
                    teaming_policy=teaming_policy,
                    port_policy=port_policy,
                    network_policy=network_policy,
                    vlan_info=vlan_info,
                )
                result[dvs.name].append(dvpg_details)

        return result
|
||||
|
||||
|
||||
def main():
    """Module entry point: gather DVS portgroup facts and exit."""
    spec = vmware_argument_spec()
    spec.update(
        datacenter=dict(type='str', required=True),
        dvswitch=dict(),
        show_network_policy=dict(type='bool', default=True),
        show_port_policy=dict(type='bool', default=True),
        show_teaming_policy=dict(type='bool', default=True),
        show_vlan_info=dict(type='bool', default=False),
    )
    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=True,
    )

    # Fact gathering never changes anything on the vCenter side.
    facts_manager = DVSPortgroupFactsManager(module)
    module.exit_json(
        changed=False,
        dvs_portgroup_facts=facts_manager.gather_dvs_portgroup_facts(),
    )


if __name__ == "__main__":
    main()
|
@ -1,220 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_guest_boot_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_guest_boot_info) instead.
|
||||
short_description: Gather facts about boot options for the given virtual machine
|
||||
description:
|
||||
- Gather facts about boot options for the given virtual machine.
|
||||
version_added: 2.7
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the VM to work with.
|
||||
- This is required if C(uuid) or C(moid) parameter is not supplied.
|
||||
type: str
|
||||
uuid:
|
||||
description:
|
||||
- UUID of the instance to manage if known, this is VMware's BIOS UUID by default.
|
||||
- This is required if C(name) or C(moid) parameter is not supplied.
|
||||
type: str
|
||||
moid:
|
||||
description:
|
||||
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
|
||||
- This is required if C(name) or C(uuid) is not supplied.
|
||||
version_added: '2.9'
|
||||
type: str
|
||||
use_instance_uuid:
|
||||
description:
|
||||
- Whether to use the VMware instance UUID rather than the BIOS UUID.
|
||||
default: no
|
||||
type: bool
|
||||
version_added: '2.8'
|
||||
name_match:
|
||||
description:
|
||||
- If multiple virtual machines match the name, use the first or last found.
|
||||
default: 'first'
|
||||
choices: ['first', 'last']
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather facts about virtual machine's boot order and related parameters
|
||||
vmware_guest_boot_facts:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
name: "{{ vm_name }}"
|
||||
register: vm_boot_order_facts
|
||||
|
||||
- name: Gather facts about virtual machine's boot order using MoID
|
||||
vmware_guest_boot_facts:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
moid: "vm-42"
|
||||
register: vm_moid_boot_order_facts
|
||||
'''
|
||||
|
||||
RETURN = r"""
|
||||
vm_boot_facts:
|
||||
description: metadata about boot order of virtual machine
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"current_boot_order": [
|
||||
"floppy",
|
||||
"disk",
|
||||
"ethernet",
|
||||
"cdrom"
|
||||
],
|
||||
"current_boot_delay": 2000,
|
||||
"current_boot_retry_delay": 22300,
|
||||
"current_boot_retry_enabled": true,
|
||||
"current_enter_bios_setup": true,
|
||||
"current_boot_firmware": "bios",
|
||||
"current_secure_boot_enabled": false,
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, find_vm_by_id
|
||||
|
||||
try:
|
||||
from pyVmomi import vim, VmomiSupport
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
class VmBootFactsManager(PyVmomi):
    """Gather boot-order and boot-option facts for a single virtual machine."""

    def __init__(self, module):
        super(VmBootFactsManager, self).__init__(module)
        self.name = self.params['name']
        self.uuid = self.params['uuid']
        self.moid = self.params['moid']
        self.use_instance_uuid = self.params['use_instance_uuid']
        # Resolved lazily by _get_vm(); stays None until a VM is found.
        self.vm = None

    def _get_vm(self):
        """Locate the virtual machine by UUID, name, or MoID and store it in self.vm.

        Fails the module when no matching virtual machine can be found.
        """
        vms = []

        if self.uuid:
            if self.use_instance_uuid:
                # BUGFIX: find_vm_by_id() recognizes vm_id_type="instance_uuid";
                # the previous value "use_instance_uuid" matched no lookup branch,
                # so instance-UUID searches always returned None.
                vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="instance_uuid")
            else:
                vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="uuid")
            if vm_obj is None:
                self.module.fail_json(msg="Failed to find the virtual machine with UUID : %s" % self.uuid)
            vms = [vm_obj]

        elif self.name:
            # Property collector scan: gather every VM name and keep exact matches.
            objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
            for temp_vm_object in objects:
                if temp_vm_object.obj.name == self.name:
                    vms.append(temp_vm_object.obj)

        elif self.moid:
            # Build a stub managed object directly from the MoID; no search needed.
            vm_obj = VmomiSupport.templateOf('VirtualMachine')(self.module.params['moid'], self.si._stub)
            if vm_obj:
                vms.append(vm_obj)

        if vms:
            # name_match only matters for name lookups, which may return duplicates.
            if self.params.get('name_match') == 'first':
                self.vm = vms[0]
            elif self.params.get('name_match') == 'last':
                self.vm = vms[-1]
        else:
            self.module.fail_json(msg="Failed to find virtual machine using %s" % (self.name or self.uuid or self.moid))

    @staticmethod
    def humanize_boot_order(boot_order):
        """Translate pyVmomi bootable-device objects into short device-name strings.

        Devices of unrecognized types are silently skipped; order is preserved.
        """
        results = []
        for device in boot_order:
            if isinstance(device, vim.vm.BootOptions.BootableCdromDevice):
                results.append('cdrom')
            elif isinstance(device, vim.vm.BootOptions.BootableDiskDevice):
                results.append('disk')
            elif isinstance(device, vim.vm.BootOptions.BootableEthernetDevice):
                results.append('ethernet')
            elif isinstance(device, vim.vm.BootOptions.BootableFloppyDevice):
                results.append('floppy')
        return results

    def ensure(self):
        """Collect the boot configuration facts and exit the module.

        Exits with an empty dict when the VM has no config (e.g. inaccessible VM).
        """
        self._get_vm()

        results = dict()
        if self.vm and self.vm.config:
            results = dict(
                current_boot_order=self.humanize_boot_order(self.vm.config.bootOptions.bootOrder),
                current_boot_delay=self.vm.config.bootOptions.bootDelay,
                current_enter_bios_setup=self.vm.config.bootOptions.enterBIOSSetup,
                current_boot_retry_enabled=self.vm.config.bootOptions.bootRetryEnabled,
                current_boot_retry_delay=self.vm.config.bootOptions.bootRetryDelay,
                current_boot_firmware=self.vm.config.firmware,
                current_secure_boot_enabled=self.vm.config.bootOptions.efiSecureBootEnabled
            )

        self.module.exit_json(changed=False, vm_boot_facts=results)
|
||||
|
||||
|
||||
def main():
    """Entry point: build the argument spec and gather VM boot facts."""
    spec = vmware_argument_spec()
    spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        name_match=dict(choices=['first', 'last'], default='first'),
    )

    # name, uuid and moid are alternative identifiers: exactly one must be given.
    vm_identifiers = ['name', 'uuid', 'moid']
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[vm_identifiers],
        mutually_exclusive=[vm_identifiers],
        supports_check_mode=True,
    )

    VmBootFactsManager(module).ensure()


if __name__ == '__main__':
    main()
|
@ -1,197 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_guest_customization_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_guest_customization_info) instead.
|
||||
short_description: Gather facts about VM customization specifications
|
||||
description:
|
||||
- This module can be used to gather facts about customization specifications.
|
||||
- All parameters and VMware object names are case sensitive.
|
||||
version_added: 2.8
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.0 and 6.5
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
spec_name:
|
||||
description:
|
||||
- Name of customization specification to find.
|
||||
required: False
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather facts about all customization specification
|
||||
vmware_guest_customization_facts:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
delegate_to: localhost
|
||||
register: all_custom_spec_facts
|
||||
|
||||
- name: Gather facts about customization specification with the given name
|
||||
vmware_guest_customization_facts:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
spec_name: custom_linux_spec
|
||||
delegate_to: localhost
|
||||
register: custom_spec_facts
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
custom_spec_facts:
|
||||
description: metadata about the customization specification
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"assignip-eee0d684-44b7-457c-8c55-2585590b0d99": {
|
||||
"change_version": "1523438001",
|
||||
"description": "sample description",
|
||||
"dns_server_list": [],
|
||||
"dns_suffix_list": [],
|
||||
"domain": "None",
|
||||
"hostname": "sample1",
|
||||
"hw_clock_utc": null,
|
||||
"last_updated_time": "2018-04-11T09:13:21+00:00",
|
||||
"name": "sample",
|
||||
"nic_setting_map": [
|
||||
{
|
||||
"dns_domain": null,
|
||||
"gateway": [],
|
||||
"ip_address": "192.168.10.10",
|
||||
"net_bios": null,
|
||||
"nic_dns_server_list": [],
|
||||
"primary_wins": null,
|
||||
"secondry_wins": null,
|
||||
"subnet_mask": "255.255.255.0"
|
||||
}
|
||||
],
|
||||
"time_zone": null,
|
||||
"type": "Linux"
|
||||
},
|
||||
}
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_text
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
|
||||
|
||||
|
||||
class VmwareCustomSpecManger(PyVmomi):
    """Collect facts about vCenter guest customization specifications."""

    def __init__(self, module):
        super(VmwareCustomSpecManger, self).__init__(module)
        self.cc_mgr = self.content.customizationSpecManager
        if self.cc_mgr is None:
            self.module.fail_json(msg="Failed to get customization spec manager.")

    def gather_custom_spec_facts(self):
        """Return a dict keyed by spec name describing each customization spec.

        When ``spec_name`` is supplied, only that spec is reported; the module
        fails if it does not exist. Otherwise every known spec is reported.
        """
        requested_name = self.params.get('spec_name', None)

        if requested_name:
            if not self.cc_mgr.DoesCustomizationSpecExist(name=requested_name):
                self.module.fail_json(msg="Unable to find customization specification named '%s'" % requested_name)
            names = [requested_name]
        else:
            names = [info.name for info in self.cc_mgr.info]

        facts = dict()
        for name in names:
            spec_obj = self.cc_mgr.GetCustomizationSpec(name=name)

            # One entry per NIC in the spec's adapter mapping.
            nic_maps = []
            for nic in spec_obj.spec.nicSettingMap:
                nic_maps.append(dict(
                    mac_address=nic.macAddress,
                    ip_address=nic.adapter.ip.ipAddress,
                    subnet_mask=nic.adapter.subnetMask,
                    gateway=list(nic.adapter.gateway),
                    nic_dns_server_list=list(nic.adapter.dnsServerList),
                    dns_domain=nic.adapter.dnsDomain,
                    primary_wins=nic.adapter.primaryWINS,
                    secondry_wins=nic.adapter.secondaryWINS,
                    net_bios=nic.adapter.netBIOS,
                ))

            identity = spec_obj.spec.identity
            # hostName may be a prefix generator or a fixed name; anything else
            # (e.g. a virtual-machine-name generator) yields None.
            hostname = None
            if isinstance(identity.hostName, vim.vm.customization.PrefixNameGenerator):
                hostname = identity.hostName.base
            elif isinstance(identity.hostName, vim.vm.customization.FixedName):
                hostname = identity.hostName.name

            facts[name] = dict(
                # Spec metadata
                name=spec_obj.info.name,
                description=spec_obj.info.description,
                type=spec_obj.info.type,
                last_updated_time=spec_obj.info.lastUpdateTime,
                change_version=spec_obj.info.changeVersion,
                # Identity
                hostname=hostname,
                domain=identity.domain,
                time_zone=identity.timeZone,
                hw_clock_utc=identity.hwClockUTC,
                # Global IP settings
                dns_suffix_list=list(spec_obj.spec.globalIPSettings.dnsSuffixList),
                dns_server_list=list(spec_obj.spec.globalIPSettings.dnsServerList),
                # NIC setting map
                nic_setting_map=nic_maps,
            )
        return facts
|
||||
|
||||
|
||||
def main():
    """Entry point: gather customization specification facts and exit."""
    spec = vmware_argument_spec()
    spec.update(spec_name=dict(type='str'))

    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=True
    )

    manager = VmwareCustomSpecManger(module)
    try:
        facts = manager.gather_custom_spec_facts()
    except Exception as exc:
        module.fail_json(msg="Failed to gather facts with exception : %s" % to_text(exc))
    module.exit_json(custom_spec_facts=facts)


if __name__ == '__main__':
    main()
|
@ -1,332 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2019, NAER William Leemans (@bushvin) <willie@elaba.net>
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_guest_disk_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_guest_disk_info) instead.
|
||||
short_description: Gather facts about disks of given virtual machine
|
||||
description:
|
||||
- This module can be used to gather facts about disks belonging to given virtual machine.
|
||||
- All parameters and VMware object names are case sensitive.
|
||||
version_added: 2.6
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
|
||||
notes:
|
||||
- Tested on vSphere 6.0 and 6.5.
|
||||
- Disk UUID information is added in version 2.8.
|
||||
- Additional information about guest disk backings added in version 2.8.
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the virtual machine.
|
||||
- This is required parameter, if parameter C(uuid) or C(moid) is not supplied.
|
||||
type: str
|
||||
uuid:
|
||||
description:
|
||||
- UUID of the instance to gather facts if known, this is VMware's unique identifier.
|
||||
- This is required parameter, if parameter C(name) or C(moid) is not supplied.
|
||||
type: str
|
||||
moid:
|
||||
description:
|
||||
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
|
||||
- This is required if C(name) or C(uuid) is not supplied.
|
||||
version_added: '2.9'
|
||||
type: str
|
||||
use_instance_uuid:
|
||||
description:
|
||||
- Whether to use the VMware instance UUID rather than the BIOS UUID.
|
||||
default: no
|
||||
type: bool
|
||||
version_added: '2.8'
|
||||
folder:
|
||||
description:
|
||||
- Destination folder, absolute or relative path to find an existing guest.
|
||||
- This is required parameter, only if multiple VMs are found with same name.
|
||||
- The folder should include the datacenter. ESX's datacenter is ha-datacenter
|
||||
- 'Examples:'
|
||||
- ' folder: /ha-datacenter/vm'
|
||||
- ' folder: ha-datacenter/vm'
|
||||
- ' folder: /datacenter1/vm'
|
||||
- ' folder: datacenter1/vm'
|
||||
- ' folder: /datacenter1/vm/folder1'
|
||||
- ' folder: datacenter1/vm/folder1'
|
||||
- ' folder: /folder1/datacenter1/vm'
|
||||
- ' folder: folder1/datacenter1/vm'
|
||||
- ' folder: /folder1/datacenter1/vm/folder2'
|
||||
type: str
|
||||
datacenter:
|
||||
description:
|
||||
- The datacenter name to which virtual machine belongs to.
|
||||
required: True
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather disk facts from virtual machine using UUID
|
||||
vmware_guest_disk_facts:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: ha-datacenter
|
||||
validate_certs: no
|
||||
uuid: 421e4592-c069-924d-ce20-7e7533fab926
|
||||
delegate_to: localhost
|
||||
register: disk_facts
|
||||
|
||||
- name: Gather disk facts from virtual machine using name
|
||||
vmware_guest_disk_facts:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: ha-datacenter
|
||||
validate_certs: no
|
||||
name: VM_225
|
||||
delegate_to: localhost
|
||||
register: disk_facts
|
||||
|
||||
- name: Gather disk facts from virtual machine using moid
|
||||
vmware_guest_disk_facts:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: ha-datacenter
|
||||
validate_certs: no
|
||||
moid: vm-42
|
||||
delegate_to: localhost
|
||||
register: disk_facts
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
guest_disk_facts:
|
||||
description: metadata about the virtual machine's disks
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"0": {
|
||||
"backing_datastore": "datastore2",
|
||||
"backing_disk_mode": "persistent",
|
||||
"backing_eagerlyscrub": false,
|
||||
"backing_filename": "[datastore2] VM_225/VM_225.vmdk",
|
||||
"backing_thinprovisioned": false,
|
||||
"backing_type": "FlatVer2",
|
||||
"backing_writethrough": false,
|
||||
"backing_uuid": "200C3A00-f82a-97af-02ff-62a595f0020a",
|
||||
"capacity_in_bytes": 10485760,
|
||||
"capacity_in_kb": 10240,
|
||||
"controller_bus_number": 0,
|
||||
"controller_key": 1000,
|
||||
"controller_type": "paravirtual",
|
||||
"key": 2000,
|
||||
"label": "Hard disk 1",
|
||||
"summary": "10,240 KB",
|
||||
"unit_number": 0
|
||||
},
|
||||
"1": {
|
||||
"backing_datastore": "datastore3",
|
||||
"backing_devicename": "vml.012345678901234567890123456789012345678901234567890123",
|
||||
"backing_disk_mode": "independent_persistent",
|
||||
"backing_filename": "[datastore3] VM_226/VM_226.vmdk",
|
||||
"backing_lunuuid": "012345678901234567890123456789012345678901234567890123",
|
||||
"backing_type": "RawDiskMappingVer1",
|
||||
"backing_uuid": null,
|
||||
"capacity_in_bytes": 15728640,
|
||||
"capacity_in_kb": 15360,
|
||||
"controller_bus_number": 0,
|
||||
"controller_key": 1000,
|
||||
"controller_type": "paravirtual",
|
||||
"key": 2001,
|
||||
"label": "Hard disk 3",
|
||||
"summary": "15,360 KB",
|
||||
"unit_number": 1
|
||||
},
|
||||
}
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_text
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
|
||||
|
||||
|
||||
class PyVmomiHelper(PyVmomi):
    """Helper around PyVmomi for collecting per-disk facts of a virtual machine."""

    def __init__(self, module):
        # No extra state; all work happens in gather_disk_facts().
        super(PyVmomiHelper, self).__init__(module)

    def gather_disk_facts(self, vm_obj):
        """
        Gather facts about VM's disks

        Args:
            vm_obj: Managed object of virtual machine (or None)

        Returns: A dict keyed by sequential disk index, each value a dict of
            disk information; empty dict when vm_obj is None.
        """
        controller_facts = dict()
        disks_facts = dict()
        if vm_obj is None:
            # Nothing to inspect; caller gets an empty result.
            return disks_facts

        # Map of recognized controller classes to the type names reported to the user.
        controller_types = {
            vim.vm.device.VirtualLsiLogicController: 'lsilogic',
            vim.vm.device.ParaVirtualSCSIController: 'paravirtual',
            vim.vm.device.VirtualBusLogicController: 'buslogic',
            vim.vm.device.VirtualLsiLogicSASController: 'lsilogicsas',
            vim.vm.device.VirtualIDEController: 'ide'
        }

        # First pass: record every recognized disk controller so disks can be
        # matched to their controller's bus number/type below.
        controller_index = 0
        for controller in vm_obj.config.hardware.device:
            if isinstance(controller, tuple(controller_types.keys())):
                controller_facts[controller_index] = dict(
                    key=controller.key,
                    controller_type=controller_types[type(controller)],
                    bus_number=controller.busNumber,
                    devices=controller.device
                )
                controller_index += 1

        # Second pass: collect facts for each virtual disk.
        disk_index = 0
        for disk in vm_obj.config.hardware.device:
            if isinstance(disk, vim.vm.device.VirtualDisk):
                disks_facts[disk_index] = dict(
                    key=disk.key,
                    label=disk.deviceInfo.label,
                    summary=disk.deviceInfo.summary,
                    backing_filename=disk.backing.fileName,
                    backing_datastore=disk.backing.datastore.name,
                    controller_key=disk.controllerKey,
                    unit_number=disk.unitNumber,
                    capacity_in_kb=disk.capacityInKB,
                    capacity_in_bytes=disk.capacityInBytes,
                )
                # Backing-specific attributes: each pyVmomi backing class
                # exposes a different set of fields, so branch on type.
                if isinstance(disk.backing, vim.vm.device.VirtualDisk.FlatVer1BackingInfo):
                    disks_facts[disk_index]['backing_type'] = 'FlatVer1'
                    disks_facts[disk_index]['backing_writethrough'] = disk.backing.writeThrough

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.FlatVer2BackingInfo):
                    disks_facts[disk_index]['backing_type'] = 'FlatVer2'
                    disks_facts[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough)
                    disks_facts[disk_index]['backing_thinprovisioned'] = bool(disk.backing.thinProvisioned)
                    disks_facts[disk_index]['backing_eagerlyscrub'] = bool(disk.backing.eagerlyScrub)
                    disks_facts[disk_index]['backing_uuid'] = disk.backing.uuid

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.LocalPMemBackingInfo):
                    disks_facts[disk_index]['backing_type'] = 'LocalPMem'
                    disks_facts[disk_index]['backing_volumeuuid'] = disk.backing.volumeUUID
                    disks_facts[disk_index]['backing_uuid'] = disk.backing.uuid

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.PartitionedRawDiskVer2BackingInfo):
                    disks_facts[disk_index]['backing_type'] = 'PartitionedRawDiskVer2'
                    disks_facts[disk_index]['backing_descriptorfilename'] = disk.backing.descriptorFileName
                    disks_facts[disk_index]['backing_uuid'] = disk.backing.uuid

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.RawDiskMappingVer1BackingInfo):
                    disks_facts[disk_index]['backing_type'] = 'RawDiskMappingVer1'
                    disks_facts[disk_index]['backing_devicename'] = disk.backing.deviceName
                    disks_facts[disk_index]['backing_diskmode'] = disk.backing.diskMode
                    disks_facts[disk_index]['backing_lunuuid'] = disk.backing.lunUuid
                    disks_facts[disk_index]['backing_uuid'] = disk.backing.uuid

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.RawDiskVer2BackingInfo):
                    disks_facts[disk_index]['backing_type'] = 'RawDiskVer2'
                    disks_facts[disk_index]['backing_descriptorfilename'] = disk.backing.descriptorFileName
                    disks_facts[disk_index]['backing_uuid'] = disk.backing.uuid

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SeSparseBackingInfo):
                    disks_facts[disk_index]['backing_type'] = 'SeSparse'
                    disks_facts[disk_index]['backing_diskmode'] = disk.backing.diskMode
                    disks_facts[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough)
                    disks_facts[disk_index]['backing_uuid'] = disk.backing.uuid

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SparseVer1BackingInfo):
                    disks_facts[disk_index]['backing_type'] = 'SparseVer1'
                    disks_facts[disk_index]['backing_diskmode'] = disk.backing.diskMode
                    disks_facts[disk_index]['backing_spaceusedinkb'] = disk.backing.spaceUsedInKB
                    disks_facts[disk_index]['backing_split'] = bool(disk.backing.split)
                    disks_facts[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough)

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SparseVer2BackingInfo):
                    disks_facts[disk_index]['backing_type'] = 'SparseVer2'
                    disks_facts[disk_index]['backing_diskmode'] = disk.backing.diskMode
                    disks_facts[disk_index]['backing_spaceusedinkb'] = disk.backing.spaceUsedInKB
                    disks_facts[disk_index]['backing_split'] = bool(disk.backing.split)
                    disks_facts[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough)
                    disks_facts[disk_index]['backing_uuid'] = disk.backing.uuid

                # Attach the owning controller's bus number and type, matched by key.
                # NOTE(review): this rebinds controller_index from the first pass;
                # harmless here since the first loop has finished, but worth confirming.
                for controller_index in range(len(controller_facts)):
                    if controller_facts[controller_index]['key'] == disks_facts[disk_index]['controller_key']:
                        disks_facts[disk_index]['controller_bus_number'] = controller_facts[controller_index]['bus_number']
                        disks_facts[disk_index]['controller_type'] = controller_facts[controller_index]['controller_type']

                disk_index += 1
        return disks_facts
|
||||
|
||||
|
||||
def main():
    """Entry point: locate the requested VM and report its disk facts."""
    spec = vmware_argument_spec()
    spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        folder=dict(type='str'),
        datacenter=dict(type='str', required=True),
    )
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[
            ['name', 'uuid', 'moid']
        ],
        supports_check_mode=True,
    )

    folder = module.params['folder']
    if folder:
        # FindByInventoryPath() does not require an absolute path,
        # so only strip a trailing slash and leave the rest unmodified.
        module.params['folder'] = folder.rstrip('/')

    helper = PyVmomiHelper(module)
    # Check if the VM exists before continuing.
    vm = helper.get_vm()

    if not vm:
        # We could not find the virtual machine the user specified -- bail out.
        identifier = module.params.get('uuid') or module.params.get('moid') or module.params.get('name')
        module.fail_json(msg="Unable to gather disk facts for non-existing VM %s" % identifier)

    try:
        facts = helper.gather_disk_facts(vm)
    except Exception as exc:
        module.fail_json(msg="Failed to gather facts with exception : %s" % to_text(exc))
    module.exit_json(guest_disk_facts=facts)


if __name__ == '__main__':
    main()
|
@ -1 +0,0 @@
|
||||
vmware_guest_info.py
|
@ -1 +0,0 @@
|
||||
vmware_guest_snapshot_info.py
|
@ -1,226 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_host_capability_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_host_capability_info) instead.
|
||||
short_description: Gathers facts about an ESXi host's capability information
|
||||
description:
|
||||
- This module can be used to gather facts about an ESXi host's capability information when ESXi hostname or Cluster name is given.
|
||||
version_added: 2.6
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
cluster_name:
|
||||
description:
|
||||
- Name of the cluster from all host systems to be used for facts gathering.
|
||||
- If C(esxi_hostname) is not given, this parameter is required.
|
||||
type: str
|
||||
esxi_hostname:
|
||||
description:
|
||||
- ESXi hostname to gather facts from.
|
||||
- If C(cluster_name) is not given, this parameter is required.
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather capability facts about all ESXi Host in given Cluster
|
||||
vmware_host_capability_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
cluster_name: cluster_name
|
||||
delegate_to: localhost
|
||||
register: all_cluster_hosts_facts
|
||||
|
||||
- name: Gather capability facts about ESXi Host
|
||||
vmware_host_capability_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
esxi_hostname: '{{ esxi_hostname }}'
|
||||
delegate_to: localhost
|
||||
register: hosts_facts
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
hosts_capability_facts:
|
||||
description: metadata about host's capability information
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"esxi_hostname_0001": {
|
||||
"accel3dSupported": false,
|
||||
"backgroundSnapshotsSupported": false,
|
||||
"checkpointFtCompatibilityIssues": [],
|
||||
"checkpointFtSupported": false,
|
||||
"cloneFromSnapshotSupported": true,
|
||||
"cpuHwMmuSupported": true,
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
|
||||
|
||||
|
||||
class CapabilityFactsManager(PyVmomi):
    """Collect the capability object of one or more ESXi hosts as plain dicts."""

    def __init__(self, module):
        super(CapabilityFactsManager, self).__init__(module)
        # Either a cluster (all its hosts) or a single ESXi host may be given.
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        # List of HostSystem managed objects to report on.
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)

    def gather_host_capability_facts(self):
        """Return a dict keyed by host name, mapping each host to a flat dict
        of its vim.host.Capability attributes (copied verbatim)."""
        hosts_capability_facts = dict()
        for host in self.hosts:
            hc = host.capability
            hosts_capability_facts[host.name] = dict(
                recursiveResourcePoolsSupported=hc.recursiveResourcePoolsSupported,
                cpuMemoryResourceConfigurationSupported=hc.cpuMemoryResourceConfigurationSupported,
                rebootSupported=hc.rebootSupported,
                shutdownSupported=hc.shutdownSupported,
                vmotionSupported=hc.vmotionSupported,
                standbySupported=hc.standbySupported,
                ipmiSupported=hc.ipmiSupported,
                maxSupportedVMs=hc.maxSupportedVMs,
                maxRunningVMs=hc.maxRunningVMs,
                maxSupportedVcpus=hc.maxSupportedVcpus,
                maxRegisteredVMs=hc.maxRegisteredVMs,
                datastorePrincipalSupported=hc.datastorePrincipalSupported,
                sanSupported=hc.sanSupported,
                nfsSupported=hc.nfsSupported,
                iscsiSupported=hc.iscsiSupported,
                vlanTaggingSupported=hc.vlanTaggingSupported,
                nicTeamingSupported=hc.nicTeamingSupported,
                highGuestMemSupported=hc.highGuestMemSupported,
                maintenanceModeSupported=hc.maintenanceModeSupported,
                suspendedRelocateSupported=hc.suspendedRelocateSupported,
                restrictedSnapshotRelocateSupported=hc.restrictedSnapshotRelocateSupported,
                perVmSwapFiles=hc.perVmSwapFiles,
                localSwapDatastoreSupported=hc.localSwapDatastoreSupported,
                unsharedSwapVMotionSupported=hc.unsharedSwapVMotionSupported,
                backgroundSnapshotsSupported=hc.backgroundSnapshotsSupported,
                preAssignedPCIUnitNumbersSupported=hc.preAssignedPCIUnitNumbersSupported,
                screenshotSupported=hc.screenshotSupported,
                scaledScreenshotSupported=hc.scaledScreenshotSupported,
                storageVMotionSupported=hc.storageVMotionSupported,
                vmotionWithStorageVMotionSupported=hc.vmotionWithStorageVMotionSupported,
                vmotionAcrossNetworkSupported=hc.vmotionAcrossNetworkSupported,
                maxNumDisksSVMotion=hc.maxNumDisksSVMotion,
                hbrNicSelectionSupported=hc.hbrNicSelectionSupported,
                vrNfcNicSelectionSupported=hc.vrNfcNicSelectionSupported,
                recordReplaySupported=hc.recordReplaySupported,
                ftSupported=hc.ftSupported,
                replayUnsupportedReason=hc.replayUnsupportedReason,
                checkpointFtSupported=hc.checkpointFtSupported,
                smpFtSupported=hc.smpFtSupported,
                maxVcpusPerFtVm=hc.maxVcpusPerFtVm,
                loginBySSLThumbprintSupported=hc.loginBySSLThumbprintSupported,
                cloneFromSnapshotSupported=hc.cloneFromSnapshotSupported,
                deltaDiskBackingsSupported=hc.deltaDiskBackingsSupported,
                perVMNetworkTrafficShapingSupported=hc.perVMNetworkTrafficShapingSupported,
                tpmSupported=hc.tpmSupported,
                virtualExecUsageSupported=hc.virtualExecUsageSupported,
                storageIORMSupported=hc.storageIORMSupported,
                vmDirectPathGen2Supported=hc.vmDirectPathGen2Supported,
                vmDirectPathGen2UnsupportedReasonExtended=hc.vmDirectPathGen2UnsupportedReasonExtended,
                vStorageCapable=hc.vStorageCapable,
                snapshotRelayoutSupported=hc.snapshotRelayoutSupported,
                firewallIpRulesSupported=hc.firewallIpRulesSupported,
                servicePackageInfoSupported=hc.servicePackageInfoSupported,
                maxHostRunningVms=hc.maxHostRunningVms,
                maxHostSupportedVcpus=hc.maxHostSupportedVcpus,
                vmfsDatastoreMountCapable=hc.vmfsDatastoreMountCapable,
                eightPlusHostVmfsSharedAccessSupported=hc.eightPlusHostVmfsSharedAccessSupported,
                nestedHVSupported=hc.nestedHVSupported,
                vPMCSupported=hc.vPMCSupported,
                interVMCommunicationThroughVMCISupported=hc.interVMCommunicationThroughVMCISupported,
                scheduledHardwareUpgradeSupported=hc.scheduledHardwareUpgradeSupported,
                featureCapabilitiesSupported=hc.featureCapabilitiesSupported,
                latencySensitivitySupported=hc.latencySensitivitySupported,
                storagePolicySupported=hc.storagePolicySupported,
                accel3dSupported=hc.accel3dSupported,
                reliableMemoryAware=hc.reliableMemoryAware,
                multipleNetworkStackInstanceSupported=hc.multipleNetworkStackInstanceSupported,
                messageBusProxySupported=hc.messageBusProxySupported,
                vsanSupported=hc.vsanSupported,
                vFlashSupported=hc.vFlashSupported,
                hostAccessManagerSupported=hc.hostAccessManagerSupported,
                provisioningNicSelectionSupported=hc.provisioningNicSelectionSupported,
                nfs41Supported=hc.nfs41Supported,
                nfs41Krb5iSupported=hc.nfs41Krb5iSupported,
                turnDiskLocatorLedSupported=hc.turnDiskLocatorLedSupported,
                virtualVolumeDatastoreSupported=hc.virtualVolumeDatastoreSupported,
                markAsSsdSupported=hc.markAsSsdSupported,
                markAsLocalSupported=hc.markAsLocalSupported,
                smartCardAuthenticationSupported=hc.smartCardAuthenticationSupported,
                cryptoSupported=hc.cryptoSupported,
                oneKVolumeAPIsSupported=hc.oneKVolumeAPIsSupported,
                gatewayOnNicSupported=hc.gatewayOnNicSupported,
                upitSupported=hc.upitSupported,
                cpuHwMmuSupported=hc.cpuHwMmuSupported,
                encryptedVMotionSupported=hc.encryptedVMotionSupported,
                encryptionChangeOnAddRemoveSupported=hc.encryptionChangeOnAddRemoveSupported,
                encryptionHotOperationSupported=hc.encryptionHotOperationSupported,
                encryptionWithSnapshotsSupported=hc.encryptionWithSnapshotsSupported,
                encryptionFaultToleranceSupported=hc.encryptionFaultToleranceSupported,
                encryptionMemorySaveSupported=hc.encryptionMemorySaveSupported,
                encryptionRDMSupported=hc.encryptionRDMSupported,
                encryptionVFlashSupported=hc.encryptionVFlashSupported,
                encryptionCBRCSupported=hc.encryptionCBRCSupported,
                encryptionHBRSupported=hc.encryptionHBRSupported,
                # List-valued attributes are copied into plain Python lists so
                # they serialize cleanly to JSON.
                supportedVmfsMajorVersion=[version for version in hc.supportedVmfsMajorVersion],
                vmDirectPathGen2UnsupportedReason=[reason for reason in hc.vmDirectPathGen2UnsupportedReason],
                ftCompatibilityIssues=[issue for issue in hc.ftCompatibilityIssues],
                checkpointFtCompatibilityIssues=[issue for issue in hc.checkpointFtCompatibilityIssues],
                smpFtCompatibilityIssues=[issue for issue in hc.smpFtCompatibilityIssues],
                replayCompatibilityIssues=[issue for issue in hc.replayCompatibilityIssues],
            )
        return hosts_capability_facts
|
||||
|
||||
|
||||
def main():
    """Entry point: gather capability facts and exit the module."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )

    # Read-only module: safe in check mode. Caller must supply at least
    # one of cluster_name / esxi_hostname.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True,
    )

    manager = CapabilityFactsManager(module)
    facts = manager.gather_host_capability_facts()
    module.exit_json(changed=False, hosts_capability_facts=facts)


if __name__ == "__main__":
    main()
|
@ -1,124 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_host_config_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_host_config_info) instead.
|
||||
short_description: Gathers facts about an ESXi host's advance configuration information
|
||||
description:
|
||||
- This module can be used to gather facts about an ESXi host's advance configuration information when ESXi hostname or Cluster name is given.
|
||||
version_added: '2.5'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
cluster_name:
|
||||
description:
|
||||
- Name of the cluster from which the ESXi host belong to.
|
||||
- If C(esxi_hostname) is not given, this parameter is required.
|
||||
type: str
|
||||
esxi_hostname:
|
||||
description:
|
||||
- ESXi hostname to gather facts from.
|
||||
- If C(cluster_name) is not given, this parameter is required.
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather facts about all ESXi Host in given Cluster
|
||||
vmware_host_config_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
cluster_name: cluster_name
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Gather facts about ESXi Host
|
||||
vmware_host_config_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
esxi_hostname: '{{ esxi_hostname }}'
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
hosts_facts:
|
||||
description:
|
||||
- dict with hostname as key and dict with host config facts
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"10.76.33.226": {
|
||||
"Annotations.WelcomeMessage": "",
|
||||
"BufferCache.FlushInterval": 30000,
|
||||
"BufferCache.HardMaxDirty": 95,
|
||||
"BufferCache.PerFileHardMaxDirty": 50,
|
||||
"BufferCache.SoftMaxDirty": 15,
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
|
||||
|
||||
|
||||
class VmwareConfigFactsManager(PyVmomi):
    """Collects advanced-configuration option facts for a set of ESXi hosts."""

    def __init__(self, module):
        super(VmwareConfigFactsManager, self).__init__(module)
        # Resolve targets from either a cluster name or a single ESXi host.
        self.hosts = self.get_all_host_objs(
            cluster_name=self.params.get('cluster_name', None),
            esxi_host_name=self.params.get('esxi_hostname', None),
        )

    def gather_host_facts(self):
        """Return ``{hostname: {advanced option key: value}}`` for every host."""
        return {
            host.name: {
                opt.key: opt.value
                for opt in host.configManager.advancedOption.QueryOptions()
            }
            for host in self.hosts
        }
|
||||
|
||||
|
||||
def main():
    """Entry point: gather advanced-config facts and exit the module."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )

    # Read-only module: safe in check mode. Caller must supply at least
    # one of cluster_name / esxi_hostname.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True
    )

    manager = VmwareConfigFactsManager(module)
    module.exit_json(changed=False, hosts_facts=manager.gather_host_facts())


if __name__ == "__main__":
    main()
|
@ -1,135 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_host_dns_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_host_dns_info) instead.
|
||||
short_description: Gathers facts about an ESXi host's DNS configuration information
|
||||
description:
|
||||
- This module can be used to gather facts about an ESXi host's DNS configuration information when ESXi hostname or Cluster name is given.
|
||||
- All parameters and VMware object names are case sensitive.
|
||||
version_added: '2.5'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
cluster_name:
|
||||
description:
|
||||
- Name of the cluster from which the ESXi host belong to.
|
||||
- If C(esxi_hostname) is not given, this parameter is required.
|
||||
type: str
|
||||
esxi_hostname:
|
||||
description:
|
||||
- ESXi hostname to gather facts from.
|
||||
- If C(cluster_name) is not given, this parameter is required.
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather DNS facts about all ESXi Hosts in given Cluster
|
||||
vmware_host_dns_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
cluster_name: cluster_name
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Gather DNS facts about ESXi Host
|
||||
vmware_host_dns_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
esxi_hostname: '{{ esxi_hostname }}'
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
hosts_dns_facts:
|
||||
description: metadata about DNS config from given cluster / host system
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"DC0_C0_H0": {
|
||||
"dhcp": true,
|
||||
"domain_name": "localdomain",
|
||||
"host_name": "localhost",
|
||||
"ip_address": [
|
||||
"8.8.8.8"
|
||||
],
|
||||
"search_domain": [
|
||||
"localdomain"
|
||||
],
|
||||
"virtual_nic_device": "vmk0"
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
|
||||
|
||||
|
||||
class VmwareDnsFactsManager(PyVmomi):
    """Collects DNS configuration facts for a set of ESXi hosts."""

    def __init__(self, module):
        super(VmwareDnsFactsManager, self).__init__(module)
        # Resolve targets from either a cluster name or a single ESXi host.
        self.hosts = self.get_all_host_objs(
            cluster_name=self.params.get('cluster_name', None),
            esxi_host_name=self.params.get('esxi_hostname', None),
        )

    def gather_dns_facts(self):
        """Return ``{hostname: DNS configuration dict}`` for every host."""
        facts = {}
        for host in self.hosts:
            cfg = host.config.network.dnsConfig
            facts[host.name] = dict(
                dhcp=cfg.dhcp,
                virtual_nic_device=cfg.virtualNicDevice,
                host_name=cfg.hostName,
                domain_name=cfg.domainName,
                ip_address=list(cfg.address),
                search_domain=list(cfg.searchDomain),
            )
        return facts
|
||||
|
||||
|
||||
def main():
    """Entry point: gather DNS facts and exit the module."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )

    # Read-only module: safe in check mode. Caller must supply at least
    # one of cluster_name / esxi_hostname.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True
    )

    manager = VmwareDnsFactsManager(module)
    module.exit_json(changed=False, hosts_dns_facts=manager.gather_dns_facts())


if __name__ == "__main__":
    main()
|
@ -1,147 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_host_feature_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_host_feature_info) instead.
|
||||
short_description: Gathers facts about an ESXi host's feature capability information
|
||||
description:
|
||||
- This module can be used to gather facts about an ESXi host's feature capability information when ESXi hostname or Cluster name is given.
|
||||
version_added: 2.8
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
cluster_name:
|
||||
description:
|
||||
- Name of the cluster from all host systems to be used for facts gathering.
|
||||
- If C(esxi_hostname) is not given, this parameter is required.
|
||||
type: str
|
||||
esxi_hostname:
|
||||
description:
|
||||
- ESXi hostname to gather facts from.
|
||||
- If C(cluster_name) is not given, this parameter is required.
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather feature capability facts about all ESXi Hosts in given Cluster
|
||||
vmware_host_feature_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
cluster_name: cluster_name
|
||||
delegate_to: localhost
|
||||
register: all_cluster_hosts_facts
|
||||
|
||||
- name: Check if ESXi is vulnerable for Speculative Store Bypass Disable (SSBD) vulnerability
|
||||
vmware_host_feature_facts:
|
||||
hostname: "{{ vcenter_server }}"
|
||||
username: "{{ vcenter_user }}"
|
||||
password: "{{ vcenter_pass }}"
|
||||
validate_certs: no
|
||||
esxi_hostname: "{{ esxi_hostname }}"
|
||||
register: features_set
|
||||
- set_fact:
|
||||
ssbd : "{{ item.value }}"
|
||||
loop: "{{ features_set.host_feature_facts[esxi_hostname] |json_query(name) }}"
|
||||
vars:
|
||||
name: "[?key=='cpuid.SSBD']"
|
||||
- assert:
|
||||
that:
|
||||
- ssbd|int == 1
|
||||
when: ssbd is defined
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
hosts_feature_facts:
|
||||
description: metadata about host's feature capability information
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"10.76.33.226": [
|
||||
{
|
||||
"feature_name": "cpuid.3DNOW",
|
||||
"key": "cpuid.3DNOW",
|
||||
"value": "0"
|
||||
},
|
||||
{
|
||||
"feature_name": "cpuid.3DNOWPLUS",
|
||||
"key": "cpuid.3DNOWPLUS",
|
||||
"value": "0"
|
||||
},
|
||||
]
|
||||
}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
|
||||
|
||||
|
||||
class FeatureCapabilityFactsManager(PyVmomi):
    """Collects feature-capability facts for a set of ESXi hosts."""

    def __init__(self, module):
        super(FeatureCapabilityFactsManager, self).__init__(module)
        # Resolve targets from either a cluster name or a single ESXi host.
        self.hosts = self.get_all_host_objs(
            cluster_name=self.params.get('cluster_name', None),
            esxi_host_name=self.params.get('esxi_hostname', None),
        )

    def gather_host_feature_facts(self):
        """Return ``{hostname: [feature capability dicts]}`` for every host."""
        facts = dict()
        for host in self.hosts:
            facts[host.name] = [
                {
                    'key': fc.key,
                    'feature_name': fc.featureName,
                    'value': fc.value,
                }
                for fc in host.config.featureCapability
            ]
        return facts
|
||||
|
||||
|
||||
def main():
    """Entry point: gather feature-capability facts and exit the module."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )

    # Read-only module: safe in check mode. Caller must supply at least
    # one of cluster_name / esxi_hostname.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True,
    )

    manager = FeatureCapabilityFactsManager(module)
    facts = manager.gather_host_feature_facts()
    module.exit_json(changed=False, hosts_feature_facts=facts)


if __name__ == "__main__":
    main()
|
@ -1,168 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_host_firewall_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_host_firewall_info) instead.
|
||||
short_description: Gathers facts about an ESXi host's firewall configuration information
|
||||
description:
|
||||
- This module can be used to gather facts about an ESXi host's firewall configuration information when ESXi hostname or Cluster name is given.
|
||||
version_added: '2.5'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
cluster_name:
|
||||
description:
|
||||
- Name of the cluster from which the ESXi host belong to.
|
||||
- If C(esxi_hostname) is not given, this parameter is required.
|
||||
type: str
|
||||
esxi_hostname:
|
||||
description:
|
||||
- ESXi hostname to gather facts from.
|
||||
- If C(cluster_name) is not given, this parameter is required.
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather firewall facts about all ESXi Host in given Cluster
|
||||
vmware_host_firewall_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
cluster_name: cluster_name
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Gather firewall facts about ESXi Host
|
||||
vmware_host_firewall_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
esxi_hostname: '{{ esxi_hostname }}'
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
hosts_firewall_facts:
|
||||
description: metadata about host's firewall configuration
|
||||
returned: on success
|
||||
type: dict
|
||||
sample: {
|
||||
"esxi_hostname_0001": [
|
||||
{
|
||||
"allowed_hosts": {
|
||||
"all_ip": true,
|
||||
"ip_address": [
|
||||
"10.10.10.1",
|
||||
],
|
||||
"ip_network": [
|
||||
"11.111.112.0/22",
|
||||
"192.168.10.1/24"
|
||||
],
|
||||
},
|
||||
"enabled": true,
|
||||
"key": "CIMHttpServer",
|
||||
"rule": [
|
||||
{
|
||||
"direction": "inbound",
|
||||
"end_port": null,
|
||||
"port": 5988,
|
||||
"port_type": "dst",
|
||||
"protocol": "tcp"
|
||||
}
|
||||
],
|
||||
"service": "sfcbd-watchdog"
|
||||
},
|
||||
]
|
||||
}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
|
||||
|
||||
|
||||
class FirewallFactsManager(PyVmomi):
    """Collects firewall ruleset facts for a set of ESXi hosts."""

    def __init__(self, module):
        super(FirewallFactsManager, self).__init__(module)
        # Resolve targets from either a cluster name or a single ESXi host.
        self.hosts = self.get_all_host_objs(
            cluster_name=self.params.get('cluster_name', None),
            esxi_host_name=self.params.get('esxi_hostname', None),
        )

    @staticmethod
    def normalize_rule_set(rule_obj):
        """Flatten one firewall ruleset object into a plain serializable dict."""
        allowed = rule_obj.allowedHosts
        return dict(
            key=rule_obj.key,
            service=rule_obj.service,
            enabled=rule_obj.enabled,
            rule=[
                dict(
                    port=r.port,
                    end_port=r.endPort,
                    direction=r.direction,
                    port_type=r.portType,
                    protocol=r.protocol,
                )
                for r in rule_obj.rule
            ],
            allowed_hosts=dict(
                ip_address=list(allowed.ipAddress),
                # Render each allowed network in CIDR form.
                ip_network=[n.network + "/" + str(n.prefixLength) for n in allowed.ipNetwork],
                all_ip=allowed.allIp,
            ),
        )

    def gather_host_firewall_facts(self):
        """Return ``{hostname: [normalized ruleset dicts]}``.

        Hosts without a firewall system are omitted from the result.
        """
        facts = dict()
        for host in self.hosts:
            fw_system = host.configManager.firewallSystem
            if fw_system:
                facts[host.name] = [
                    self.normalize_rule_set(rule_obj=ruleset)
                    for ruleset in fw_system.firewallInfo.ruleset
                ]
        return facts
|
||||
|
||||
|
||||
def main():
    """Entry point: gather firewall facts and exit the module."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )

    # Read-only module: safe in check mode. Caller must supply at least
    # one of cluster_name / esxi_hostname.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True
    )

    manager = FirewallFactsManager(module)
    facts = manager.gather_host_firewall_facts()
    module.exit_json(changed=False, hosts_firewall_facts=facts)


if __name__ == "__main__":
    main()
|
@ -1,139 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_host_ntp_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_host_ntp_info) instead.
|
||||
short_description: Gathers facts about NTP configuration on an ESXi host
|
||||
description:
|
||||
- This module can be used to gather facts about NTP configurations on an ESXi host.
|
||||
version_added: 2.7
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
cluster_name:
|
||||
description:
|
||||
- Name of the cluster.
|
||||
- NTP config facts about each ESXi server will be returned for the given cluster.
|
||||
- If C(esxi_hostname) is not given, this parameter is required.
|
||||
type: str
|
||||
esxi_hostname:
|
||||
description:
|
||||
- ESXi hostname.
|
||||
- NTP config facts about this ESXi server will be returned.
|
||||
- If C(cluster_name) is not given, this parameter is required.
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather NTP facts about all ESXi Host in the given Cluster
|
||||
vmware_host_ntp_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
cluster_name: cluster_name
|
||||
delegate_to: localhost
|
||||
register: cluster_host_ntp
|
||||
|
||||
- name: Gather NTP facts about ESXi Host
|
||||
vmware_host_ntp_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
esxi_hostname: '{{ esxi_hostname }}'
|
||||
delegate_to: localhost
|
||||
register: host_ntp
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
hosts_ntp_facts:
|
||||
description:
|
||||
- dict with hostname as key and dict with NTP facts as value
|
||||
returned: hosts_ntp_facts
|
||||
type: dict
|
||||
sample: {
|
||||
"10.76.33.226": [
|
||||
{
|
||||
"ntp_servers": [],
|
||||
"time_zone_description": "UTC",
|
||||
"time_zone_gmt_offset": 0,
|
||||
"time_zone_identifier": "UTC",
|
||||
"time_zone_name": "UTC"
|
||||
}
|
||||
]
|
||||
}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
|
||||
|
||||
|
||||
class VmwareNtpFactManager(PyVmomi):
    """Collects NTP and timezone facts for a set of ESXi hosts."""

    def __init__(self, module):
        super(VmwareNtpFactManager, self).__init__(module)
        # Resolve targets from either a cluster name or a single ESXi host.
        self.hosts = self.get_all_host_objs(
            cluster_name=self.params.get('cluster_name', None),
            esxi_host_name=self.params.get('esxi_hostname', None),
        )

    def gather_ntp_facts(self):
        """Return ``{hostname: [NTP/timezone fact dict]}``.

        A host without a dateTimeSystem manager maps to an empty list.
        """
        facts = {}
        for host in self.hosts:
            entries = []
            dt_system = host.configManager.dateTimeSystem
            if dt_system:
                tz = dt_system.dateTimeInfo.timeZone
                entries.append(
                    dict(
                        time_zone_identifier=tz.key,
                        time_zone_name=tz.name,
                        time_zone_description=tz.description,
                        time_zone_gmt_offset=tz.gmtOffset,
                        ntp_servers=list(dt_system.dateTimeInfo.ntpConfig.server)
                    )
                )
            facts[host.name] = entries
        return facts
|
||||
|
||||
|
||||
def main():
    """Entry point: gather NTP facts and exit the module."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )

    # Read-only module: safe in check mode. Caller must supply at least
    # one of cluster_name / esxi_hostname.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True,
    )

    manager = VmwareNtpFactManager(module)
    module.exit_json(changed=False, hosts_ntp_facts=manager.gather_ntp_facts())


if __name__ == "__main__":
    main()
|
@ -1,132 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_host_package_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_host_package_info) instead.
|
||||
short_description: Gathers facts about available packages on an ESXi host
|
||||
description:
|
||||
- This module can be used to gather facts about available packages and their status on an ESXi host.
|
||||
version_added: '2.5'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
cluster_name:
|
||||
description:
|
||||
- Name of the cluster.
|
||||
- Package facts about each ESXi server will be returned for given cluster.
|
||||
- If C(esxi_hostname) is not given, this parameter is required.
|
||||
type: str
|
||||
esxi_hostname:
|
||||
description:
|
||||
- ESXi hostname.
|
||||
- Package facts about this ESXi server will be returned.
|
||||
- If C(cluster_name) is not given, this parameter is required.
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather facts about all ESXi Host in given Cluster
|
||||
vmware_host_package_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
cluster_name: cluster_name
|
||||
delegate_to: localhost
|
||||
register: cluster_host_packages
|
||||
|
||||
- name: Gather facts about ESXi Host
|
||||
vmware_host_package_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
esxi_hostname: '{{ esxi_hostname }}'
|
||||
delegate_to: localhost
|
||||
register: host_packages
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
hosts_package_facts:
|
||||
description:
|
||||
- dict with hostname as key and dict with package facts as value
|
||||
returned: hosts_package_facts
|
||||
type: dict
|
||||
sample: { "hosts_package_facts": { "localhost.localdomain": []}}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
|
||||
|
||||
|
||||
class VmwarePackageManager(PyVmomi):
    """Collects installed software-package facts for a set of ESXi hosts."""

    def __init__(self, module):
        super(VmwarePackageManager, self).__init__(module)
        # Resolve targets from either a cluster name or a single ESXi host.
        self.hosts = self.get_all_host_objs(
            cluster_name=self.params.get('cluster_name', None),
            esxi_host_name=self.params.get('esxi_hostname', None),
        )

    def gather_package_facts(self):
        """Return ``{hostname: [software package dicts]}``.

        A host without an imageConfigManager maps to an empty list.
        """
        facts = {}
        for host in self.hosts:
            packages = []
            image_mgr = host.configManager.imageConfigManager
            if image_mgr:
                packages = [
                    dict(
                        name=pkg.name,
                        version=pkg.version,
                        vendor=pkg.vendor,
                        summary=pkg.summary,
                        description=pkg.description,
                        acceptance_level=pkg.acceptanceLevel,
                        maintenance_mode_required=pkg.maintenanceModeRequired,
                        creation_date=pkg.creationDate,
                    )
                    for pkg in image_mgr.FetchSoftwarePackages()
                ]
            facts[host.name] = packages
        return facts
|
||||
|
||||
|
||||
def main():
    """Entry point: gather software-package facts and exit the module."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )

    # Read-only module: safe in check mode. Caller must supply at least
    # one of cluster_name / esxi_hostname.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True,
    )

    manager = VmwarePackageManager(module)
    facts = manager.gather_package_facts()
    module.exit_json(changed=False, hosts_package_facts=facts)


if __name__ == "__main__":
    main()
|
@ -1,158 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_host_service_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_host_service_info) instead.
|
||||
short_description: Gathers facts about an ESXi host's services
|
||||
description:
|
||||
- This module can be used to gather facts about an ESXi host's services.
|
||||
version_added: '2.5'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
- If source package name is not available then fact is populated as null.
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
cluster_name:
|
||||
description:
|
||||
- Name of the cluster.
|
||||
- Service facts about each ESXi server will be returned for given cluster.
|
||||
- If C(esxi_hostname) is not given, this parameter is required.
|
||||
type: str
|
||||
esxi_hostname:
|
||||
description:
|
||||
- ESXi hostname.
|
||||
- Service facts about this ESXi server will be returned.
|
||||
- If C(cluster_name) is not given, this parameter is required.
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather facts about all ESXi Host in given Cluster
|
||||
vmware_host_service_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
cluster_name: cluster_name
|
||||
delegate_to: localhost
|
||||
register: cluster_host_services
|
||||
|
||||
- name: Gather facts about ESXi Host
|
||||
vmware_host_service_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
esxi_hostname: '{{ esxi_hostname }}'
|
||||
delegate_to: localhost
|
||||
register: host_services
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
host_service_facts:
|
||||
description:
|
||||
- dict with hostname as key and dict with host service config facts
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"10.76.33.226": [
|
||||
{
|
||||
"key": "DCUI",
|
||||
"label": "Direct Console UI",
|
||||
"policy": "on",
|
||||
"required": false,
|
||||
"running": true,
|
||||
"uninstallable": false,
|
||||
"source_package_name": "esx-base",
|
||||
"source_package_desc": "This VIB contains all of the base functionality of vSphere ESXi."
|
||||
},
|
||||
{
|
||||
"key": "TSM",
|
||||
"label": "ESXi Shell",
|
||||
"policy": "off",
|
||||
"required": false,
|
||||
"running": false,
|
||||
"uninstallable": false,
|
||||
"source_package_name": "esx-base",
|
||||
"source_package_desc": "This VIB contains all of the base functionality of vSphere ESXi."
|
||||
},
|
||||
]
|
||||
}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
|
||||
|
||||
|
||||
class VmwareServiceManager(PyVmomi):
    """Collect ESXi service configuration facts for a set of host systems."""

    def __init__(self, module):
        """Resolve the target hosts from ``cluster_name`` and/or ``esxi_hostname``."""
        super(VmwareServiceManager, self).__init__(module)
        cluster = self.params.get('cluster_name', None)
        esxi_hostname = self.params.get('esxi_hostname', None)
        self.hosts = self.get_all_host_objs(cluster_name=cluster, esxi_host_name=esxi_hostname)

    @staticmethod
    def _service_as_dict(service):
        """Map one host service object to a plain, JSON-friendly fact dict."""
        package = service.sourcePackage
        return dict(
            key=service.key,
            label=service.label,
            required=service.required,
            uninstallable=service.uninstallable,
            running=service.running,
            policy=service.policy,
            # sourcePackage may be absent; report None rather than failing.
            source_package_name=package.sourcePackageName if package else None,
            source_package_desc=package.description if package else None,
        )

    def gather_host_facts(self):
        """Return a dict keyed by host name; each value is a list of service facts."""
        hosts_facts = {}
        for host in self.hosts:
            service_system = host.configManager.serviceSystem
            if service_system:
                facts = [self._service_as_dict(svc) for svc in service_system.serviceInfo.service]
            else:
                # Host exposes no service system: report an empty service list.
                facts = []
            hosts_facts[host.name] = facts
        return hosts_facts
|
||||
|
||||
|
||||
def main():
    """Module entry point: gather ESXi host service facts and exit.

    Requires at least one of ``cluster_name`` / ``esxi_hostname``. This is a
    read-only facts module, so it always reports ``changed=False`` and
    supports check mode unconditionally.
    """
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )

    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        supports_check_mode=True,
    )

    manager = VmwareServiceManager(module)
    module.exit_json(changed=False, host_service_facts=manager.gather_host_facts())


if __name__ == "__main__":
    main()
|
@ -1,152 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_host_ssl_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_host_ssl_info) instead.
|
||||
short_description: Gather facts of ESXi host system about SSL
|
||||
description:
|
||||
- This module can be used to gather facts of the SSL thumbprint information for a host.
|
||||
version_added: 2.7
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
cluster_name:
|
||||
description:
|
||||
- Name of the cluster.
|
||||
- SSL thumbprint information about all ESXi host system in the given cluster will be reported.
|
||||
- If C(esxi_hostname) is not given, this parameter is required.
|
||||
type: str
|
||||
esxi_hostname:
|
||||
description:
|
||||
- ESXi hostname.
|
||||
- SSL thumbprint information of this ESXi host system will be reported.
|
||||
- If C(cluster_name) is not given, this parameter is required.
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather SSL thumbprint information about all ESXi Hosts in given Cluster
|
||||
vmware_host_ssl_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
cluster_name: '{{ cluster_name }}'
|
||||
delegate_to: localhost
|
||||
register: all_host_ssl_facts
|
||||
|
||||
- name: Get SSL Thumbprint info about "{{ esxi_hostname }}"
|
||||
vmware_host_ssl_facts:
|
||||
hostname: "{{ vcenter_server }}"
|
||||
username: "{{ vcenter_user }}"
|
||||
password: "{{ vcenter_pass }}"
|
||||
esxi_hostname: '{{ esxi_hostname }}'
|
||||
register: ssl_facts
|
||||
- set_fact:
|
||||
ssl_thumbprint: "{{ ssl_facts['host_ssl_facts'][esxi_hostname]['ssl_thumbprints'][0] }}"
|
||||
- debug:
|
||||
msg: "{{ ssl_thumbprint }}"
|
||||
- name: Add ESXi Host to vCenter
|
||||
vmware_host:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter_name: '{{ datacenter_name }}'
|
||||
cluster_name: '{{ cluster_name }}'
|
||||
esxi_hostname: '{{ esxi_hostname }}'
|
||||
esxi_username: '{{ esxi_username }}'
|
||||
esxi_password: '{{ esxi_password }}'
|
||||
esxi_ssl_thumbprint: '{{ ssl_thumbprint }}'
|
||||
state: present
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
host_ssl_facts:
|
||||
description:
|
||||
- dict with hostname as key and dict with SSL thumbprint related facts
|
||||
returned: facts
|
||||
type: dict
|
||||
sample:
|
||||
{
|
||||
"10.76.33.215": {
|
||||
"owner_tag": "",
|
||||
"principal": "vpxuser",
|
||||
"ssl_thumbprints": [
|
||||
"E3:E8:A9:20:8D:32:AE:59:C6:8D:A5:91:B0:20:EF:00:A2:7C:27:EE",
|
||||
"F1:AC:DA:6E:D8:1E:37:36:4A:5C:07:E5:04:0B:87:C8:75:FB:42:01"
|
||||
]
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
|
||||
|
||||
|
||||
class VMwareHostSslManager(PyVmomi):
    """Gather SSL thumbprint facts for one or more ESXi host systems."""

    def __init__(self, module):
        """Resolve the target hosts from ``cluster_name`` and/or ``esxi_hostname``."""
        super(VMwareHostSslManager, self).__init__(module)
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        self.hosts_facts = {}

    def gather_ssl_facts(self):
        """Collect SSL thumbprint facts per host and exit the module with them.

        Always exits via ``module.exit_json`` with ``changed=False``; the facts
        are keyed by host name with ``principal``, ``owner_tag`` and
        ``ssl_thumbprints`` entries.
        """
        for host in self.hosts:
            # Defaults cover hosts that expose no sslThumbprintInfo.
            self.hosts_facts[host.name] = dict(principal='',
                                               owner_tag='',
                                               ssl_thumbprints=[])

            host_ssl_info_mgr = host.config.sslThumbprintInfo
            if host_ssl_info_mgr:
                self.hosts_facts[host.name]['principal'] = host_ssl_info_mgr.principal
                self.hosts_facts[host.name]['owner_tag'] = host_ssl_info_mgr.ownerTag
                # list() copies the pyVmomi array into a plain list
                # (idiomatic replacement for the manual copy comprehension).
                self.hosts_facts[host.name]['ssl_thumbprints'] = list(host_ssl_info_mgr.sslThumbprints)

        self.module.exit_json(changed=False, host_ssl_facts=self.hosts_facts)
|
||||
|
||||
|
||||
def main():
    """Module entry point: build the argument spec and report SSL thumbprint facts."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str'),
        esxi_hostname=dict(type='str'),
    )

    # Read-only facts module, so check mode is always safe to support.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True,
    )

    ssl_manager = VMwareHostSslManager(module)
    ssl_manager.gather_ssl_facts()


if __name__ == "__main__":
    main()
|
@ -1,222 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_host_vmhba_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_host_vmhba_info) instead.
|
||||
short_description: Gathers facts about vmhbas available on the given ESXi host
|
||||
description:
|
||||
- This module can be used to gather facts about vmhbas available on the given ESXi host.
|
||||
- If C(cluster_name) is provided, then vmhba facts about all hosts from given cluster will be returned.
|
||||
- If C(esxi_hostname) is provided, then vmhba facts about given host system will be returned.
|
||||
version_added: '2.8'
|
||||
author:
|
||||
- Christian Kotte (@ckotte)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
esxi_hostname:
|
||||
description:
|
||||
- Name of the host system to work with.
|
||||
- Vmhba facts about this ESXi server will be returned.
|
||||
- This parameter is required if C(cluster_name) is not specified.
|
||||
type: str
|
||||
cluster_name:
|
||||
description:
|
||||
- Name of the cluster from which all host systems will be used.
|
||||
- Vmhba facts about each ESXi server will be returned for the given cluster.
|
||||
- This parameter is required if C(esxi_hostname) is not specified.
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather facts about vmhbas of all ESXi Host in the given Cluster
|
||||
vmware_host_vmhba_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
cluster_name: '{{ cluster_name }}'
|
||||
delegate_to: localhost
|
||||
register: cluster_host_vmhbas
|
||||
|
||||
- name: Gather facts about vmhbas of an ESXi Host
|
||||
vmware_host_vmhba_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
esxi_hostname: '{{ esxi_hostname }}'
|
||||
delegate_to: localhost
|
||||
register: host_vmhbas
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
hosts_vmhbas_facts:
|
||||
description:
|
||||
- dict with hostname as key and dict with vmhbas facts as value.
|
||||
returned: hosts_vmhbas_facts
|
||||
type: dict
|
||||
sample:
|
||||
{
|
||||
"10.76.33.204": {
|
||||
"vmhba_details": [
|
||||
{
|
||||
"adapter": "HPE Smart Array P440ar",
|
||||
"bus": 3,
|
||||
"device": "vmhba0",
|
||||
"driver": "nhpsa",
|
||||
"location": "0000:03:00.0",
|
||||
"model": "Smart Array P440ar",
|
||||
"node_wwn": "50:01:43:80:37:18:9e:a0",
|
||||
"status": "unknown",
|
||||
"type": "SAS"
|
||||
},
|
||||
{
|
||||
"adapter": "QLogic Corp ISP2532-based 8Gb Fibre Channel to PCI Express HBA",
|
||||
"bus": 5,
|
||||
"device": "vmhba1",
|
||||
"driver": "qlnativefc",
|
||||
"location": "0000:05:00.0",
|
||||
"model": "ISP2532-based 8Gb Fibre Channel to PCI Express HBA",
|
||||
"node_wwn": "57:64:96:32:15:90:23:95:82",
|
||||
"port_type": "unknown",
|
||||
"port_wwn": "57:64:96:32:15:90:23:95:82",
|
||||
"speed": 8,
|
||||
"status": "online",
|
||||
"type": "Fibre Channel"
|
||||
},
|
||||
{
|
||||
"adapter": "QLogic Corp ISP2532-based 8Gb Fibre Channel to PCI Express HBA",
|
||||
"bus": 8,
|
||||
"device": "vmhba2",
|
||||
"driver": "qlnativefc",
|
||||
"location": "0000:08:00.0",
|
||||
"model": "ISP2532-based 8Gb Fibre Channel to PCI Express HBA",
|
||||
"node_wwn": "57:64:96:32:15:90:23:95:21",
|
||||
"port_type": "unknown",
|
||||
"port_wwn": "57:64:96:32:15:90:23:95:21",
|
||||
"speed": 8,
|
||||
"status": "online",
|
||||
"type": "Fibre Channel"
|
||||
}
|
||||
],
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
|
||||
|
||||
|
||||
class HostVmhbaMgr(PyVmomi):
    """Gather facts about host bus adapters (vmhbas) on ESXi host systems."""
    def __init__(self, module):
        """Resolve target hosts from ``cluster_name``/``esxi_hostname``; fail if none found."""
        super(HostVmhbaMgr, self).__init__(module)
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        if not self.hosts:
            self.module.fail_json(msg="Failed to find host system.")

    def gather_host_vmhba_facts(self):
        """Return a dict keyed by host name with per-HBA detail dicts.

        Each host entry holds a ``vmhba_details`` list; optional attributes
        (WWNs, port type, speed) are probed individually and skipped when the
        HBA type does not expose them.
        """
        hosts_vmhba_facts = {}
        for host in self.hosts:
            host_vmhba_facts = dict()
            host_st_system = host.configManager.storageSystem
            # Hosts without a storage system get an empty facts dict.
            if host_st_system:
                device_info = host_st_system.storageDeviceInfo
                host_vmhba_facts['vmhba_details'] = []
                for hba in device_info.hostBusAdapter:
                    hba_facts = dict()
                    if hba.pci:
                        hba_facts['location'] = hba.pci
                        # Resolve the PCI id to a human-readable vendor/device name.
                        for pci_device in host.hardware.pciDevice:
                            if pci_device.id == hba.pci:
                                hba_facts['adapter'] = pci_device.vendorName + ' ' + pci_device.deviceName
                                break
                    else:
                        # No PCI id reported; fall back to a generic location label.
                        hba_facts['location'] = 'PCI'
                    hba_facts['device'] = hba.device
                    # contains type as string in format of 'key-vim.host.FibreChannelHba-vmhba1'
                    hba_type = hba.key.split(".")[-1].split("-")[0]
                    if hba_type == 'SerialAttachedHba':
                        hba_facts['type'] = 'SAS'
                    elif hba_type == 'FibreChannelHba':
                        hba_facts['type'] = 'Fibre Channel'
                    else:
                        # Unknown type names are passed through unchanged.
                        hba_facts['type'] = hba_type
                    hba_facts['bus'] = hba.bus
                    hba_facts['status'] = hba.status
                    hba_facts['model'] = hba.model
                    hba_facts['driver'] = hba.driver
                    # The attributes below only exist on some HBA subtypes
                    # (e.g. Fibre Channel); probe each and skip when absent.
                    try:
                        hba_facts['node_wwn'] = self.format_number(hba.nodeWorldWideName)
                    except AttributeError:
                        pass
                    try:
                        hba_facts['port_wwn'] = self.format_number(hba.portWorldWideName)
                    except AttributeError:
                        pass
                    try:
                        hba_facts['port_type'] = hba.portType
                    except AttributeError:
                        pass
                    try:
                        hba_facts['speed'] = hba.speed
                    except AttributeError:
                        pass
                    host_vmhba_facts['vmhba_details'].append(hba_facts)

            hosts_vmhba_facts[host.name] = host_vmhba_facts
        return hosts_vmhba_facts

    @staticmethod
    def format_number(number):
        """Render an integer WWN as colon-separated hex-style byte pairs.

        E.g. the decimal digits of *number* are grouped two at a time and
        joined with ':' (pairs beyond the shorter zip operand are dropped).
        """
        string = str(number)
        return ':'.join(a + b for a, b in zip(string[::2], string[1::2]))
|
||||
|
||||
|
||||
def main():
    """Module entry point: gather vmhba facts and exit without changes."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )

    # Facts gathering never mutates state, so check mode is always supported.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True,
    )

    vmhba_manager = HostVmhbaMgr(module)
    facts = vmhba_manager.gather_host_vmhba_facts()
    module.exit_json(changed=False, hosts_vmhbas_facts=facts)


if __name__ == "__main__":
    main()
|
@ -1,324 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_host_vmnic_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_host_vmnic_info) instead.
|
||||
short_description: Gathers facts about vmnics available on the given ESXi host
|
||||
description:
|
||||
- This module can be used to gather facts about vmnics available on the given ESXi host.
|
||||
- If C(cluster_name) is provided, then vmnic facts about all hosts from given cluster will be returned.
|
||||
- If C(esxi_hostname) is provided, then vmnic facts about given host system will be returned.
|
||||
- Additional details about vswitch and dvswitch with respective vmnic is also provided which is added in 2.7 version.
|
||||
version_added: '2.5'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
- Christian Kotte (@ckotte)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
capabilities:
|
||||
description:
|
||||
- Gather facts about general capabilities (Auto negotiation, Wake On LAN, and Network I/O Control).
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 2.8
|
||||
directpath_io:
|
||||
description:
|
||||
- Gather facts about DirectPath I/O capabilities and configuration.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 2.8
|
||||
sriov:
|
||||
description:
|
||||
- Gather facts about SR-IOV capabilities and configuration.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 2.8
|
||||
esxi_hostname:
|
||||
description:
|
||||
- Name of the host system to work with.
|
||||
- Vmnic facts about this ESXi server will be returned.
|
||||
- This parameter is required if C(cluster_name) is not specified.
|
||||
type: str
|
||||
cluster_name:
|
||||
description:
|
||||
- Name of the cluster from which all host systems will be used.
|
||||
- Vmnic facts about each ESXi server will be returned for the given cluster.
|
||||
- This parameter is required if C(esxi_hostname) is not specified.
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather facts about vmnics of all ESXi Host in the given Cluster
|
||||
vmware_host_vmnic_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
cluster_name: '{{ cluster_name }}'
|
||||
delegate_to: localhost
|
||||
register: cluster_host_vmnics
|
||||
|
||||
- name: Gather facts about vmnics of an ESXi Host
|
||||
vmware_host_vmnic_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
esxi_hostname: '{{ esxi_hostname }}'
|
||||
delegate_to: localhost
|
||||
register: host_vmnics
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
hosts_vmnics_facts:
|
||||
description:
|
||||
- dict with hostname as key and dict with vmnics facts as value.
|
||||
- for C(num_vmnics), only NICs starting with vmnic are counted. NICs like vusb* are not counted.
|
||||
- details about vswitch and dvswitch was added in version 2.7.
|
||||
- details about vmnics was added in version 2.8.
|
||||
returned: hosts_vmnics_facts
|
||||
type: dict
|
||||
sample:
|
||||
{
|
||||
"10.76.33.204": {
|
||||
"all": [
|
||||
"vmnic0",
|
||||
"vmnic1"
|
||||
],
|
||||
"available": [],
|
||||
"dvswitch": {
|
||||
"dvs_0002": [
|
||||
"vmnic1"
|
||||
]
|
||||
},
|
||||
"num_vmnics": 2,
|
||||
"used": [
|
||||
"vmnic1",
|
||||
"vmnic0"
|
||||
],
|
||||
"vmnic_details": [
|
||||
{
|
||||
"actual_duplex": "Full Duplex",
|
||||
"actual_speed": 10000,
|
||||
"adapter": "Intel(R) 82599 10 Gigabit Dual Port Network Connection",
|
||||
"configured_duplex": "Auto negotiate",
|
||||
"configured_speed": "Auto negotiate",
|
||||
"device": "vmnic0",
|
||||
"driver": "ixgbe",
|
||||
"location": "0000:01:00.0",
|
||||
"mac": "aa:bb:cc:dd:ee:ff",
|
||||
"status": "Connected",
|
||||
},
|
||||
{
|
||||
"actual_duplex": "Full Duplex",
|
||||
"actual_speed": 10000,
|
||||
"adapter": "Intel(R) 82599 10 Gigabit Dual Port Network Connection",
|
||||
"configured_duplex": "Auto negotiate",
|
||||
"configured_speed": "Auto negotiate",
|
||||
"device": "vmnic1",
|
||||
"driver": "ixgbe",
|
||||
"location": "0000:01:00.1",
|
||||
"mac": "ab:ba:cc:dd:ee:ff",
|
||||
"status": "Connected",
|
||||
},
|
||||
],
|
||||
"vswitch": {
|
||||
"vSwitch0": [
|
||||
"vmnic0"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, get_all_objs
|
||||
|
||||
|
||||
class HostVmnicMgr(PyVmomi):
    """Gather facts about physical NICs (vmnics) on ESXi host systems."""
    def __init__(self, module):
        """Read option flags, resolve target hosts, fail if no host matched."""
        super(HostVmnicMgr, self).__init__(module)
        # Optional fact groups, toggled by module parameters.
        self.capabilities = self.params.get('capabilities')
        self.directpath_io = self.params.get('directpath_io')
        self.sriov = self.params.get('sriov')
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        if not self.hosts:
            self.module.fail_json(msg="Failed to find host system.")

    def find_dvs_by_uuid(self, uuid=None):
        """Return the distributed virtual switch with the given UUID, or None."""
        dvs_obj = None
        if uuid is None:
            return dvs_obj

        dvswitches = get_all_objs(self.content, [vim.DistributedVirtualSwitch])
        for dvs in dvswitches:
            if dvs.uuid == uuid:
                dvs_obj = dvs
                break

        return dvs_obj

    def gather_host_vmnic_facts(self):
        """Return a dict keyed by host name with vmnic facts per host.

        Each host entry contains ``all``/``used``/``available`` device lists,
        ``vswitch``/``dvswitch`` membership maps, a ``num_vmnics`` count
        (NICs whose device name starts with 'vmnic' only), and a
        ``vmnic_details`` list of per-NIC dicts.
        """
        hosts_vmnic_facts = {}
        for host in self.hosts:
            host_vmnic_facts = dict(all=[], available=[], used=[], vswitch=dict(), dvswitch=dict())
            host_nw_system = host.configManager.networkSystem
            # Hosts without a network system keep the empty defaults above.
            if host_nw_system:
                nw_config = host_nw_system.networkConfig
                # Only 'vmnic*' devices are counted; others (e.g. vusb*) are
                # still listed under 'all' but excluded from num_vmnics.
                vmnics = [pnic.device for pnic in nw_config.pnic if pnic.device.startswith('vmnic')]
                host_vmnic_facts['all'] = [pnic.device for pnic in nw_config.pnic]
                host_vmnic_facts['num_vmnics'] = len(vmnics)
                host_vmnic_facts['vmnic_details'] = []
                for pnic in host.config.network.pnic:
                    pnic_facts = dict()
                    if pnic.device.startswith('vmnic'):
                        if pnic.pci:
                            pnic_facts['location'] = pnic.pci
                            # Resolve PCI id to a human-readable adapter name.
                            for pci_device in host.hardware.pciDevice:
                                if pci_device.id == pnic.pci:
                                    pnic_facts['adapter'] = pci_device.vendorName + ' ' + pci_device.deviceName
                                    break
                        else:
                            # No PCI id reported; use a generic placeholder.
                            pnic_facts['location'] = 'PCI'
                        pnic_facts['device'] = pnic.device
                        pnic_facts['driver'] = pnic.driver
                        # linkSpeed is set only when the link is up.
                        if pnic.linkSpeed:
                            pnic_facts['status'] = 'Connected'
                            pnic_facts['actual_speed'] = pnic.linkSpeed.speedMb
                            pnic_facts['actual_duplex'] = 'Full Duplex' if pnic.linkSpeed.duplex else 'Half Duplex'
                        else:
                            pnic_facts['status'] = 'Disconnected'
                            pnic_facts['actual_speed'] = 'N/A'
                            pnic_facts['actual_duplex'] = 'N/A'
                        # spec.linkSpeed unset means the NIC auto-negotiates.
                        if pnic.spec.linkSpeed:
                            pnic_facts['configured_speed'] = pnic.spec.linkSpeed.speedMb
                            pnic_facts['configured_duplex'] = 'Full Duplex' if pnic.spec.linkSpeed.duplex else 'Half Duplex'
                        else:
                            pnic_facts['configured_speed'] = 'Auto negotiate'
                            pnic_facts['configured_duplex'] = 'Auto negotiate'
                        pnic_facts['mac'] = pnic.mac
                        # General NIC capabilities
                        if self.capabilities:
                            pnic_facts['nioc_status'] = 'Allowed' if pnic.resourcePoolSchedulerAllowed else 'Not allowed'
                            pnic_facts['auto_negotiation_supported'] = pnic.autoNegotiateSupported
                            pnic_facts['wake_on_lan_supported'] = pnic.wakeOnLanSupported
                        # DirectPath I/O and SR-IOV capabilities and configuration
                        if self.directpath_io:
                            pnic_facts['directpath_io_supported'] = pnic.vmDirectPathGen2Supported
                        if self.directpath_io or self.sriov:
                            if pnic.pci:
                                # Match this NIC's PCI id against passthrough info.
                                for pci_device in host.configManager.pciPassthruSystem.pciPassthruInfo:
                                    if pci_device.id == pnic.pci:
                                        if self.directpath_io:
                                            pnic_facts['passthru_enabled'] = pci_device.passthruEnabled
                                            pnic_facts['passthru_capable'] = pci_device.passthruCapable
                                            pnic_facts['passthru_active'] = pci_device.passthruActive
                                        if self.sriov:
                                            # sriovCapable may be missing on older
                                            # devices; treat that as unsupported.
                                            try:
                                                if pci_device.sriovCapable:
                                                    pnic_facts['sriov_status'] = (
                                                        'Enabled' if pci_device.sriovEnabled else 'Disabled'
                                                    )
                                                    pnic_facts['sriov_active'] = \
                                                        pci_device.sriovActive
                                                    pnic_facts['sriov_virt_functions'] = \
                                                        pci_device.numVirtualFunction
                                                    pnic_facts['sriov_virt_functions_requested'] = \
                                                        pci_device.numVirtualFunctionRequested
                                                    pnic_facts['sriov_virt_functions_supported'] = \
                                                        pci_device.maxVirtualFunctionSupported
                                                else:
                                                    pnic_facts['sriov_status'] = 'Not supported'
                                            except AttributeError:
                                                pnic_facts['sriov_status'] = 'Not supported'
                        host_vmnic_facts['vmnic_details'].append(pnic_facts)

                vswitch_vmnics = []
                proxy_switch_vmnics = []
                if nw_config.vswitch:
                    for vswitch in nw_config.vswitch:
                        host_vmnic_facts['vswitch'][vswitch.name] = []
                        # Workaround for "AttributeError: 'NoneType' object has no attribute 'nicDevice'"
                        # this issue doesn't happen every time; vswitch.spec.bridge.nicDevice exists!
                        try:
                            for vnic in vswitch.spec.bridge.nicDevice:
                                vswitch_vmnics.append(vnic)
                                host_vmnic_facts['vswitch'][vswitch.name].append(vnic)
                        except AttributeError:
                            pass

                if nw_config.proxySwitch:
                    for proxy_config in nw_config.proxySwitch:
                        # A proxy switch may belong to a DVS we cannot resolve;
                        # uplinks are still counted as 'used' in that case.
                        dvs_obj = self.find_dvs_by_uuid(uuid=proxy_config.uuid)
                        if dvs_obj:
                            host_vmnic_facts['dvswitch'][dvs_obj.name] = []
                        for proxy_nic in proxy_config.spec.backing.pnicSpec:
                            proxy_switch_vmnics.append(proxy_nic.pnicDevice)
                            if dvs_obj:
                                host_vmnic_facts['dvswitch'][dvs_obj.name].append(proxy_nic.pnicDevice)

                # A NIC is 'used' when attached to any vswitch or proxy switch.
                used_vmics = proxy_switch_vmnics + vswitch_vmnics
                host_vmnic_facts['used'] = used_vmics
                host_vmnic_facts['available'] = [pnic.device for pnic in nw_config.pnic if pnic.device not in used_vmics]

            hosts_vmnic_facts[host.name] = host_vmnic_facts
        return hosts_vmnic_facts
|
||||
|
||||
|
||||
def main():
    """Module entry point: gather vmnic facts and exit without changes."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
        capabilities=dict(type='bool', required=False, default=False),
        directpath_io=dict(type='bool', required=False, default=False),
        sriov=dict(type='bool', required=False, default=False),
    )

    # Read-only facts module, so check mode is always supported.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True,
    )

    vmnic_manager = HostVmnicMgr(module)
    facts = vmnic_manager.gather_host_vmnic_facts()
    module.exit_json(changed=False, hosts_vmnics_facts=facts)


if __name__ == "__main__":
    main()
|
@ -1,143 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_local_role_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_local_role_info) instead.
|
||||
short_description: Gather facts about local roles on an ESXi host
|
||||
description:
|
||||
- This module can be used to gather facts about local role facts on an ESXi host
|
||||
version_added: 2.7
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on ESXi 6.5
|
||||
- Be sure that the ESXi user used for login, has the appropriate rights to view roles
|
||||
- The module returns a list of dict in version 2.8 and above.
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather facts about local role from an ESXi
|
||||
vmware_local_role_facts:
|
||||
hostname: '{{ esxi_hostname }}'
|
||||
username: '{{ esxi_username }}'
|
||||
password: '{{ esxi_password }}'
|
||||
register: fact_details
|
||||
delegate_to: localhost
|
||||
- name: Get Admin privileges
|
||||
set_fact:
|
||||
admin_priv: "{{ fact_details.local_role_facts['Admin']['privileges'] }}"
|
||||
- debug:
|
||||
msg: "{{ admin_priv }}"
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
local_role_facts:
|
||||
description: Facts about role present on ESXi host
|
||||
returned: always
|
||||
type: dict
|
||||
sample: [
|
||||
{
|
||||
"privileges": [
|
||||
"Alarm.Acknowledge",
|
||||
"Alarm.Create",
|
||||
"Alarm.Delete",
|
||||
"Alarm.DisableActions",
|
||||
],
|
||||
"role_id": -12,
|
||||
"role_info_label": "Ansible User",
|
||||
"role_info_summary": "Ansible Automation user",
|
||||
"role_name": "AnsiUser1",
|
||||
"role_system": true
|
||||
},
|
||||
{
|
||||
"privileges": [],
|
||||
"role_id": -5,
|
||||
"role_info_label": "No access",
|
||||
"role_info_summary": "Used for restricting granted access",
|
||||
"role_name": "NoAccess",
|
||||
"role_system": true
|
||||
},
|
||||
{
|
||||
"privileges": [
|
||||
"System.Anonymous",
|
||||
"System.View"
|
||||
],
|
||||
"role_id": -3,
|
||||
"role_info_label": "View",
|
||||
"role_info_summary": "Visibility access (cannot be granted)",
|
||||
"role_name": "View",
|
||||
"role_system": true
|
||||
}
|
||||
]
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
|
||||
|
||||
|
||||
class VMwareLocalRoleFacts(PyVmomi):
    """Gather facts about local roles defined on an ESXi host."""

    def __init__(self, module):
        """Validate that the target exposes an authorization manager.

        vCenter servers do not expose a local authorization manager the same
        way; fail early with a helpful hint in that case.
        """
        super(VMwareLocalRoleFacts, self).__init__(module)
        self.module = module
        self.params = module.params

        if self.content.authorizationManager is None:
            self.module.fail_json(
                msg="Failed to get local authorization manager settings.",
                details="It seems that '%s' is a vCenter server instead of an ESXi server" % self.params['hostname']
            )

    def gather_local_role_facts(self):
        """Collect all local roles and exit the module with the fact list.

        Always exits via ``module.exit_json`` with ``changed=False``; the
        result is a list of dicts (one per role) under ``local_role_facts``.
        """
        results = [
            dict(
                role_name=role.name,
                role_id=role.roleId,
                # list() copies the pyVmomi string array into a plain list
                # (idiomatic replacement for the manual copy comprehension).
                privileges=list(role.privilege),
                role_system=role.system,
                role_info_label=role.info.label,
                role_info_summary=role.info.summary,
            )
            for role in self.content.authorizationManager.roleList
        ]

        self.module.exit_json(changed=False, local_role_facts=results)
|
||||
|
||||
|
||||
def main():
    """Module entry point: gather local role facts from the ESXi host."""
    # No extra options beyond the shared VMware connection parameters.
    module = AnsibleModule(
        argument_spec=vmware_argument_spec(),
        supports_check_mode=True,
    )

    role_facts = VMwareLocalRoleFacts(module)
    role_facts.gather_local_role_facts()


if __name__ == '__main__':
    main()
|
@ -1,177 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_local_user_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_local_user_info) instead.
|
||||
short_description: Gather facts about users on the given ESXi host
|
||||
description:
|
||||
- This module can be used to gather facts about users present on the given ESXi host system in VMware infrastructure.
|
||||
- All variables and VMware object names are case sensitive.
|
||||
- User must hold the 'Authorization.ModifyPermissions' privilege to invoke this module.
|
||||
version_added: "2.6"
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
- Christian Kotte (@ckotte)
|
||||
notes:
|
||||
- Tested on ESXi 6.5
|
||||
- The C(full_name), C(principal), and C(user_group) properties are deprecated starting from Ansible v2.12
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather facts about all Users on given ESXi host system
|
||||
vmware_local_user_facts:
|
||||
hostname: '{{ esxi_hostname }}'
|
||||
username: '{{ esxi_username }}'
|
||||
password: '{{ esxi_password }}'
|
||||
delegate_to: localhost
|
||||
register: all_user_facts
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
local_user_facts:
|
||||
description: metadata about all local users
|
||||
returned: always
|
||||
type: dict
|
||||
sample: [
|
||||
{
|
||||
"role": "admin",
|
||||
"description": "Administrator",
|
||||
"full_name": "Administrator",
|
||||
"group": false,
|
||||
"user_group": false,
|
||||
"user_id": 0,
|
||||
"user_name": "root",
|
||||
"principal": "root",
|
||||
"shell_access": true
|
||||
},
|
||||
{
|
||||
"role": "admin",
|
||||
"description": "DCUI User",
|
||||
"full_name": "DCUI User",
|
||||
"group": false,
|
||||
"user_group": false,
|
||||
"user_id": 100,
|
||||
"user_name": "dcui",
|
||||
"principal": "dcui",
|
||||
"shell_access": false
|
||||
},
|
||||
]
|
||||
'''
|
||||
|
||||
try:
|
||||
from pyVmomi import vmodl
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class VMwareUserFactsManager(PyVmomi):
    """Class to manage local user facts on an ESXi host."""

    def __init__(self, module):
        """Connect and verify the target is an ESXi host.

        Local accounts only exist on ESXi; vCenter has no local account
        manager, so fail early with a descriptive message.
        """
        super(VMwareUserFactsManager, self).__init__(module)

        if self.is_vcenter():
            self.module.fail_json(
                msg="Failed to get local account manager settings.",
                details="It seems that '%s' is a vCenter server instead of an ESXi server" % self.module.params['hostname']
            )

    def gather_user_facts(self):
        """Gather facts about local users and exit the module with them."""
        results = dict(changed=False, local_user_facts=[])
        search_string = ''
        exact_match = False
        find_users = True
        find_groups = False
        user_accounts = self.content.userDirectory.RetrieveUserGroups(
            None, search_string, None, None, exact_match, find_users, find_groups
        )
        if user_accounts:
            # The root-folder permissions do not depend on the user being
            # inspected, so retrieve them once instead of once per account.
            try:
                permissions = self.content.authorizationManager.RetrieveEntityPermissions(
                    entity=self.content.rootFolder,
                    inherited=False
                )
            except vmodl.fault.ManagedObjectNotFound as not_found:
                # BUGFIX: the original message had no %s placeholder, so the
                # '%' application raised TypeError inside this handler.
                self.module.fail_json(
                    msg="The entity doesn't exist: %s" % to_native(not_found)
                )
            for user in user_accounts:
                temp_user = dict()
                # NOTE: the properties full_name, principal, and user_group are deprecated starting from Ansible v2.12
                temp_user['full_name'] = user.fullName
                temp_user['principal'] = user.principal
                temp_user['user_group'] = user.group
                temp_user['user_name'] = user.principal
                temp_user['description'] = user.fullName
                temp_user['group'] = user.group
                temp_user['user_id'] = user.id
                temp_user['shell_access'] = user.shellAccess
                temp_user['role'] = None
                for permission in permissions:
                    if permission.principal == user.principal:
                        temp_user['role'] = self.get_role_name(permission.roleId, self.content.authorizationManager.roleList)
                        break

                results['local_user_facts'].append(temp_user)
        self.module.exit_json(**results)

    @staticmethod
    def get_role_name(role_id, role_list):
        """Get role name from role ID.

        The negative IDs are vSphere's built-in roles; anything else is
        looked up in the supplied custom role list.

        Args:
            role_id: Numeric role identifier from a permission entry.
            role_list: Iterable of role objects exposing roleId and name.

        Returns:
            The role name, or None when the ID is unknown.
        """
        # Built-in system roles use fixed negative IDs.
        builtin_roles = {-5: 'no-access', -2: 'read-only', -1: 'admin'}
        if role_id in builtin_roles:
            return builtin_roles[role_id]
        # Custom roles: linear scan of the host's role list.
        for role in role_list:
            if role.roleId == role_id:
                return role.name
        return None
|
||||
|
||||
|
||||
def main():
    """Entry point: build the module and report local user facts."""
    module = AnsibleModule(
        argument_spec=vmware_argument_spec(),
        supports_check_mode=True,
    )
    VMwareUserFactsManager(module).gather_user_facts()


if __name__ == '__main__':
    main()
|
@ -1,234 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_portgroup_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_portgroup_info) instead.
|
||||
short_description: Gathers facts about an ESXi host's Port Group configuration
|
||||
description:
|
||||
- This module can be used to gather facts about an ESXi host's Port Group configuration when ESXi hostname or Cluster name is given.
|
||||
version_added: '2.6'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
- Christian Kotte (@ckotte)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
- The C(vswitch_name) property is deprecated starting from Ansible v2.12
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
policies:
|
||||
description:
|
||||
- Gather facts about Security, Traffic Shaping, as well as Teaming and failover.
|
||||
- The property C(ts) stands for Traffic Shaping and C(lb) for Load Balancing.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 2.8
|
||||
cluster_name:
|
||||
description:
|
||||
- Name of the cluster.
|
||||
- Facts will be returned for all hostsystem belonging to this cluster name.
|
||||
- If C(esxi_hostname) is not given, this parameter is required.
|
||||
type: str
|
||||
esxi_hostname:
|
||||
description:
|
||||
- ESXi hostname to gather facts from.
|
||||
- If C(cluster_name) is not given, this parameter is required.
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather portgroup facts about all ESXi Host in given Cluster
|
||||
vmware_portgroup_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
cluster_name: '{{ cluster_name }}'
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Gather portgroup facts about ESXi Host system
|
||||
vmware_portgroup_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
esxi_hostname: '{{ esxi_hostname }}'
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
hosts_portgroup_facts:
|
||||
description: metadata about host's portgroup configuration
|
||||
returned: on success
|
||||
type: dict
|
||||
sample: {
|
||||
"esx01": [
|
||||
{
|
||||
"failback": true,
|
||||
"failover_active": ["vmnic0", "vmnic1"],
|
||||
"failover_standby": [],
|
||||
"failure_detection": "link_status_only",
|
||||
"lb": "loadbalance_srcid",
|
||||
"notify": true,
|
||||
"portgroup": "Management Network",
|
||||
"security": [false, false, false],
|
||||
"ts": "No override",
|
||||
"vlan_id": 0,
|
||||
"vswitch": "vSwitch0",
|
||||
"vswitch_name": "vSwitch0"
|
||||
},
|
||||
{
|
||||
"failback": true,
|
||||
"failover_active": ["vmnic2"],
|
||||
"failover_standby": ["vmnic3"],
|
||||
"failure_detection": "No override",
|
||||
"lb": "No override",
|
||||
"notify": true,
|
||||
"portgroup": "vMotion",
|
||||
"security": [false, false, false],
|
||||
"ts": "No override",
|
||||
"vlan_id": 33,
|
||||
"vswitch": "vSwitch1",
|
||||
"vswitch_name": "vSwitch1"
|
||||
}
|
||||
]
|
||||
}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
|
||||
|
||||
|
||||
class PortgroupFactsManager(PyVmomi):
    """Class to manage Port Group facts on ESXi hosts."""

    def __init__(self, module):
        """Resolve the list of target hosts from cluster/host parameters."""
        super(PortgroupFactsManager, self).__init__(module)
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        if not self.hosts:
            self.module.fail_json(msg="Failed to find host system.")
        # Whether to also gather security/traffic-shaping/teaming policy facts.
        self.policies = self.params.get('policies')

    @staticmethod
    def normalize_pg_info(portgroup_obj, policy_facts):
        """Create Port Group information.

        Args:
            portgroup_obj: HostPortGroup managed object to normalize.
            policy_facts: When True, also include security, traffic shaping,
                and teaming/failover policy details.

        Returns:
            dict with the normalized port group attributes.
        """
        pg_info_dict = dict()
        spec = portgroup_obj.spec
        pg_info_dict['portgroup'] = spec.name
        pg_info_dict['vlan_id'] = spec.vlanId
        # NOTE: the property vswitch_name is deprecated starting from Ansible v2.12
        pg_info_dict['vswitch_name'] = spec.vswitchName
        pg_info_dict['vswitch'] = spec.vswitchName

        if policy_facts:
            # Security facts. A None value means the setting is inherited
            # from the vSwitch ("No override").
            security = spec.policy.security
            if security:
                pg_info_dict['security'] = (
                    ["No override" if security.allowPromiscuous is None else security.allowPromiscuous,
                     "No override" if security.macChanges is None else security.macChanges,
                     "No override" if security.forgedTransmits is None else security.forgedTransmits]
                )
            else:
                pg_info_dict['security'] = ["No override", "No override", "No override"]

            # Traffic Shaping facts
            if spec.policy.shapingPolicy and spec.policy.shapingPolicy.enabled is not None:
                pg_info_dict['ts'] = spec.policy.shapingPolicy.enabled
            else:
                pg_info_dict['ts'] = "No override"

            # Teaming and failover facts
            nic_teaming = spec.policy.nicTeaming
            if nic_teaming:
                pg_info_dict['lb'] = "No override" if nic_teaming.policy is None else nic_teaming.policy
                pg_info_dict['notify'] = "No override" if nic_teaming.notifySwitches is None else nic_teaming.notifySwitches
                if nic_teaming.rollingOrder is None:
                    pg_info_dict['failback'] = "No override"
                else:
                    # rollingOrder=True means "do not fail back", hence the negation
                    pg_info_dict['failback'] = not nic_teaming.rollingOrder
                if nic_teaming.nicOrder is None:
                    pg_info_dict['failover_active'] = "No override"
                    pg_info_dict['failover_standby'] = "No override"
                else:
                    pg_info_dict['failover_active'] = nic_teaming.nicOrder.activeNic
                    pg_info_dict['failover_standby'] = nic_teaming.nicOrder.standbyNic
                # BUGFIX: the original dereferenced failureCriteria.checkBeacon in
                # its else-branch even when failureCriteria itself was None, which
                # raised AttributeError. A missing failureCriteria now maps to
                # "No override" like any other unset policy value.
                failure_criteria = nic_teaming.failureCriteria
                if failure_criteria is None or failure_criteria.checkBeacon is None:
                    pg_info_dict['failure_detection'] = "No override"
                elif failure_criteria.checkBeacon:
                    pg_info_dict['failure_detection'] = "beacon_probing"
                else:
                    pg_info_dict['failure_detection'] = "link_status_only"
            else:
                pg_info_dict['lb'] = "No override"
                pg_info_dict['notify'] = "No override"
                pg_info_dict['failback'] = "No override"
                pg_info_dict['failover_active'] = "No override"
                pg_info_dict['failover_standby'] = "No override"
                pg_info_dict['failure_detection'] = "No override"

        return pg_info_dict

    def gather_host_portgroup_facts(self):
        """Gather Port Group facts per ESXi host.

        Returns:
            dict mapping host name to a list of port group facts dicts.
        """
        hosts_pg_facts = dict()
        for host in self.hosts:
            hosts_pg_facts[host.name] = [
                self.normalize_pg_info(portgroup_obj=portgroup, policy_facts=self.policies)
                for portgroup in host.config.network.portgroup
            ]
        return hosts_pg_facts
|
||||
|
||||
|
||||
def main():
    """Entry point: parse arguments and report port group facts."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
        policies=dict(type='bool', required=False, default=False),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        supports_check_mode=True
    )

    host_pg_mgr = PortgroupFactsManager(module)
    facts = host_pg_mgr.gather_host_portgroup_facts()
    module.exit_json(changed=False, hosts_portgroup_facts=facts)


if __name__ == "__main__":
    main()
|
@ -1,144 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_resource_pool_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_resource_pool_info) instead.
|
||||
short_description: Gathers facts about resource pool information
|
||||
description:
|
||||
- This module can be used to gather facts about all resource configuration information.
|
||||
version_added: '2.6'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather resource pool facts about all resource pools available
|
||||
vmware_resource_pool_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
register: rp_facts
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
resource_pool_facts:
|
||||
description: metadata about resource pool configuration
|
||||
returned: on success
|
||||
type: list
|
||||
sample: [
|
||||
{
|
||||
"cpu_allocation_expandable_reservation": false,
|
||||
"cpu_allocation_limit": 4121,
|
||||
"cpu_allocation_overhead_limit": null,
|
||||
"cpu_allocation_reservation": 4121,
|
||||
"cpu_allocation_shares": 9000,
|
||||
"cpu_allocation_shares_level": "custom",
|
||||
"mem_allocation_expandable_reservation": false,
|
||||
"mem_allocation_limit": 961,
|
||||
"mem_allocation_overhead_limit": null,
|
||||
"mem_allocation_reservation": 961,
|
||||
"mem_allocation_shares": 9000,
|
||||
"mem_allocation_shares_level": "custom",
|
||||
"name": "Resources",
|
||||
"overall_status": "green",
|
||||
"owner": "DC0_H0",
|
||||
"runtime_cpu_max_usage": 4121,
|
||||
"runtime_cpu_overall_usage": 0,
|
||||
"runtime_cpu_reservation_used": 0,
|
||||
"runtime_cpu_reservation_used_vm": 0,
|
||||
"runtime_cpu_unreserved_for_pool": 4121,
|
||||
"runtime_cpu_unreserved_for_vm": 4121,
|
||||
"runtime_memory_max_usage": 1007681536,
|
||||
"runtime_memory_overall_usage": 0,
|
||||
"runtime_memory_reservation_used": 0,
|
||||
"runtime_memory_reservation_used_vm": 0,
|
||||
"runtime_memory_unreserved_for_pool": 1007681536,
|
||||
"runtime_memory_unreserved_for_vm": 1007681536
|
||||
},
|
||||
]
|
||||
'''
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, get_all_objs
|
||||
|
||||
|
||||
class ResourcePoolFactsManager(PyVmomi):
    """Gather configuration and runtime facts for all resource pools."""

    # NOTE: no __init__ override is needed — the original one only called
    # super(); PyVmomi's constructor already connects using module params.

    def gather_rp_facts(self):
        """Return a list of facts dicts, one per resource pool found.

        Returns:
            list of dicts with CPU/memory allocation and runtime usage data.
        """
        resource_pool_facts = []
        for rp in get_all_objs(self.content, [vim.ResourcePool]):
            # Hoist the deeply nested config/runtime objects once per pool.
            cpu = rp.config.cpuAllocation
            mem = rp.config.memoryAllocation
            runtime = rp.summary.runtime
            resource_pool_facts.append(dict(
                name=rp.name,
                cpu_allocation_reservation=cpu.reservation,
                cpu_allocation_expandable_reservation=cpu.expandableReservation,
                cpu_allocation_limit=cpu.limit,
                cpu_allocation_shares=cpu.shares.shares,
                cpu_allocation_shares_level=cpu.shares.level,
                cpu_allocation_overhead_limit=cpu.overheadLimit,
                mem_allocation_reservation=mem.reservation,
                mem_allocation_expandable_reservation=mem.expandableReservation,
                mem_allocation_limit=mem.limit,
                mem_allocation_shares=mem.shares.shares,
                mem_allocation_shares_level=mem.shares.level,
                mem_allocation_overhead_limit=mem.overheadLimit,
                owner=rp.owner.name,
                overall_status=runtime.overallStatus,
                runtime_cpu_reservation_used=runtime.cpu.reservationUsed,
                runtime_cpu_reservation_used_vm=runtime.cpu.reservationUsedForVm,
                runtime_cpu_unreserved_for_pool=runtime.cpu.unreservedForPool,
                runtime_cpu_unreserved_for_vm=runtime.cpu.unreservedForVm,
                runtime_cpu_overall_usage=runtime.cpu.overallUsage,
                runtime_cpu_max_usage=runtime.cpu.maxUsage,
                runtime_memory_reservation_used=runtime.memory.reservationUsed,
                runtime_memory_reservation_used_vm=runtime.memory.reservationUsedForVm,
                runtime_memory_unreserved_for_pool=runtime.memory.unreservedForPool,
                runtime_memory_unreserved_for_vm=runtime.memory.unreservedForVm,
                runtime_memory_overall_usage=runtime.memory.overallUsage,
                runtime_memory_max_usage=runtime.memory.maxUsage,
            ))
        return resource_pool_facts
|
||||
|
||||
|
||||
def main():
    """Entry point: build the module and report resource pool facts."""
    module = AnsibleModule(
        argument_spec=vmware_argument_spec(),
        supports_check_mode=True,
    )

    vmware_rp_mgr = ResourcePoolFactsManager(module)
    module.exit_json(changed=False, resource_pool_facts=vmware_rp_mgr.gather_rp_facts())


if __name__ == "__main__":
    main()
|
@ -1 +0,0 @@
|
||||
vmware_tag_info.py
|
@ -1,189 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_target_canonical_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_target_canonical_info) instead.
|
||||
short_description: Return canonical (NAA) from an ESXi host system
|
||||
description:
|
||||
- This module can be used to gather facts about canonical (NAA) from an ESXi host based on SCSI target ID.
|
||||
|
||||
version_added: "2.0"
|
||||
author:
|
||||
- Joseph Callen (@jcpowermac)
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
requirements:
|
||||
- Tested on vSphere 5.5 and 6.5
|
||||
- PyVmomi installed
|
||||
options:
|
||||
target_id:
|
||||
description:
|
||||
- The target id based on order of scsi device.
|
||||
- version 2.6 onwards, this parameter is optional.
|
||||
required: False
|
||||
type: int
|
||||
cluster_name:
|
||||
description:
|
||||
- Name of the cluster.
|
||||
- Facts about all SCSI devices for all host system in the given cluster is returned.
|
||||
- This parameter is required, if C(esxi_hostname) is not provided.
|
||||
version_added: 2.6
|
||||
type: str
|
||||
esxi_hostname:
|
||||
description:
|
||||
- Name of the ESXi host system.
|
||||
- Facts about all SCSI devices for the given ESXi host system is returned.
|
||||
- This parameter is required, if C(cluster_name) is not provided.
|
||||
version_added: 2.6
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Get Canonical name of particular target on particular ESXi host system
|
||||
vmware_target_canonical_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
target_id: 7
|
||||
esxi_hostname: esxi_hostname
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Get Canonical name of all target on particular ESXi host system
|
||||
vmware_target_canonical_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
esxi_hostname: '{{ esxi_hostname }}'
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Get Canonical name of all ESXi hostname on particular Cluster
|
||||
vmware_target_canonical_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
cluster_name: '{{ cluster_name }}'
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r"""
|
||||
canonical:
|
||||
description: metadata about SCSI Target device
|
||||
returned: if host system and target id is given
|
||||
type: str
|
||||
sample: "mpx.vmhba0:C0:T0:L0"
|
||||
|
||||
scsi_tgt_facts:
|
||||
description: metadata about all SCSI Target devices
|
||||
returned: if host system or cluster is given
|
||||
type: dict
|
||||
sample: {
|
||||
"DC0_C0_H0": {
|
||||
"scsilun_canonical": {
|
||||
"key-vim.host.ScsiDisk-0000000000766d686261303a303a30": "mpx.vmhba0:C0:T0:L0",
|
||||
"key-vim.host.ScsiLun-0005000000766d686261313a303a30": "mpx.vmhba1:C0:T0:L0"
|
||||
},
|
||||
"target_lun_uuid": {
|
||||
"0": "key-vim.host.ScsiDisk-0000000000766d686261303a303a30"
|
||||
}
|
||||
},
|
||||
"DC0_C0_H1": {
|
||||
"scsilun_canonical": {
|
||||
"key-vim.host.ScsiDisk-0000000000766d686261303a303a30": "mpx.vmhba0:C0:T0:L0",
|
||||
"key-vim.host.ScsiLun-0005000000766d686261313a303a30": "mpx.vmhba1:C0:T0:L0"
|
||||
},
|
||||
"target_lun_uuid": {
|
||||
"0": "key-vim.host.ScsiDisk-0000000000766d686261303a303a30"
|
||||
}
|
||||
},
|
||||
}
|
||||
"""
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
|
||||
|
||||
|
||||
class ScsiTargetFactsManager(PyVmomi):
    """Gather facts about SCSI target devices on ESXi hosts."""

    def __init__(self, module):
        """Resolve the list of target hosts from cluster/host parameters."""
        super(ScsiTargetFactsManager, self).__init__(module)
        cluster_name = self.module.params.get('cluster_name')
        self.esxi_hostname = self.module.params.get('esxi_hostname')
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=self.esxi_hostname)

    def gather_scsi_device_facts(self):
        """Gather facts about SCSI target devices and exit the module.

        Exits with ``canonical`` when both ``target_id`` and
        ``esxi_hostname`` were supplied; otherwise exits with per-host
        ``scsi_tgt_facts``.
        """
        scsi_tgt_facts = {}
        target_id = self.module.params['target_id']

        for host in self.hosts:
            # BUGFIX: the original created these dicts once outside the loop
            # and assigned the same objects to every host entry, so each
            # host's facts accumulated every other host's devices. Build
            # fresh maps per host instead.
            scsilun_canonical = {}
            target_lun_uuid = {}

            # Associate the scsiLun key with the canonicalName (NAA)
            for scsilun in host.config.storageDevice.scsiLun:
                scsilun_canonical[scsilun.key] = scsilun.canonicalName

            # Associate target number with LUN uuid
            for target in host.config.storageDevice.scsiTopology.adapter[0].target:
                for lun in target.lun:
                    target_lun_uuid[target.target] = lun.scsiLun

            scsi_tgt_facts[host.name] = dict(scsilun_canonical=scsilun_canonical,
                                             target_lun_uuid=target_lun_uuid)

        if target_id is not None and self.esxi_hostname is not None:
            canonical = ''
            # BUGFIX: check membership before indexing; the original indexed
            # scsi_tgt_facts[self.esxi_hostname] first and could raise
            # KeyError when the requested host was not found.
            if self.esxi_hostname in scsi_tgt_facts:
                temp_lun_data = scsi_tgt_facts[self.esxi_hostname]['target_lun_uuid']
                if target_id in temp_lun_data:
                    temp_scsi_data = scsi_tgt_facts[self.esxi_hostname]['scsilun_canonical']
                    canonical = temp_scsi_data[temp_lun_data[target_id]]
            self.module.exit_json(changed=False, canonical=canonical)

        self.module.exit_json(changed=False, scsi_tgt_facts=scsi_tgt_facts)
|
||||
|
||||
|
||||
def main():
    """Entry point: parse arguments and report SCSI target facts."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            target_id=dict(required=False, type='int'),
            cluster_name=dict(type='str', required=False),
            esxi_hostname=dict(type='str', required=False),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        supports_check_mode=True,
    )

    ScsiTargetFactsManager(module).gather_scsi_device_facts()


if __name__ == '__main__':
    main()
|
@ -1 +0,0 @@
|
||||
vmware_vm_info.py
|
@ -1,207 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['deprecated'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_vmkernel_facts
|
||||
deprecated:
|
||||
removed_in: '2.13'
|
||||
why: Deprecated in favour of C(_info) module.
|
||||
alternative: Use M(vmware_vmkernel_info) instead.
|
||||
short_description: Gathers VMKernel facts about an ESXi host
|
||||
description:
|
||||
- This module can be used to gather VMKernel facts about an ESXi host from given ESXi hostname or cluster name.
|
||||
version_added: '2.5'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
cluster_name:
|
||||
description:
|
||||
- Name of the cluster.
|
||||
- VMKernel facts about each ESXi server will be returned for the given cluster.
|
||||
- If C(esxi_hostname) is not given, this parameter is required.
|
||||
type: str
|
||||
esxi_hostname:
|
||||
description:
|
||||
- ESXi hostname.
|
||||
- VMKernel facts about this ESXi server will be returned.
|
||||
- If C(cluster_name) is not given, this parameter is required.
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather VMKernel facts about all ESXi Host in given Cluster
|
||||
vmware_vmkernel_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
cluster_name: cluster_name
|
||||
delegate_to: localhost
|
||||
register: cluster_host_vmks
|
||||
|
||||
- name: Gather VMKernel facts about ESXi Host
|
||||
vmware_vmkernel_facts:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
esxi_hostname: '{{ esxi_hostname }}'
|
||||
delegate_to: localhost
|
||||
register: host_vmks
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
host_vmk_facts:
|
||||
description: metadata about VMKernel present on given host system
|
||||
returned: success
|
||||
type: dict
|
||||
sample:
|
||||
{
|
||||
"10.76.33.208": [
|
||||
{
|
||||
"device": "vmk0",
|
||||
"dhcp": true,
|
||||
"enable_ft": false,
|
||||
"enable_management": true,
|
||||
"enable_vmotion": false,
|
||||
"enable_vsan": false,
|
||||
"ipv4_address": "10.76.33.28",
|
||||
"ipv4_subnet_mask": "255.255.255.0",
|
||||
"key": "key-vim.host.VirtualNic-vmk0",
|
||||
"mac": "52:54:00:12:50:ce",
|
||||
"mtu": 1500,
|
||||
"portgroup": "Management Network",
|
||||
"stack": "defaultTcpipStack"
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
'''
|
||||
|
||||
try:
|
||||
from pyVmomi import vim, vmodl
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class VmkernelFactsManager(PyVmomi):
    """Collect VMKernel (vmk) adapter facts for the selected ESXi hosts."""

    def __init__(self, module):
        super(VmkernelFactsManager, self).__init__(module)
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        # Hosts come either from a whole cluster or a single ESXi hostname.
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        # Maps host name -> {service type -> [vmk device names]}.
        self.service_type_vmks = dict()
        self.get_all_vmks_by_service_type()

    def get_all_vmks_by_service_type(self):
        """Populate self.service_type_vmks for every host and service type."""
        for host in self.hosts:
            self.service_type_vmks[host.name] = dict(vmotion=[], vsan=[], management=[], faultToleranceLogging=[])
            for service_type in self.service_type_vmks[host.name].keys():
                vmks_list = self.query_service_type_for_vmks(host, service_type)
                self.service_type_vmks[host.name][service_type] = vmks_list

    def query_service_type_for_vmks(self, host_system, service_type):
        """Return the VMKernel device names enabled for a service type.

        Args:
            host_system: Host system managed object.
            service_type: Name of service type (e.g. 'vmotion').

        Returns: list of vmk device names belonging to that service type.
        """
        vmks_list = []
        query = None
        try:
            query = host_system.configManager.virtualNicManager.QueryNetConfig(service_type)
        except vim.fault.HostConfigFault as config_fault:
            self.module.fail_json(msg="Failed to get all VMKs for service type %s due to"
                                      " host config fault : %s" % (service_type, to_native(config_fault.msg)))
        except vmodl.fault.InvalidArgument as invalid_argument:
            self.module.fail_json(msg="Failed to get all VMKs for service type %s due to"
                                      " invalid arguments : %s" % (service_type, to_native(invalid_argument.msg)))
        except Exception as e:
            # Bug fix: original message read "...due to<error>" with no
            # separating space before the error text.
            self.module.fail_json(msg="Failed to get all VMKs for service type %s due to"
                                      " %s" % (service_type, to_native(e)))

        if not query.selectedVnic:
            return vmks_list
        # selectedVnic holds keys; map them back to device names via the
        # candidate vnic list.
        selected_vnics = [vnic for vnic in query.selectedVnic]
        vnics_with_service_type = [vnic.device for vnic in query.candidateVnic if vnic.key in selected_vnics]
        return vnics_with_service_type

    def gather_host_vmk_facts(self):
        """Return {host name: [vmk fact dicts]} for every selected host."""
        hosts_facts = {}
        for host in self.hosts:
            host_vmk_facts = []
            host_network_system = host.config.network
            if host_network_system:
                # Reuse the already-fetched network config instead of
                # dereferencing host.config.network a second time.
                for vmk in host_network_system.vnic:
                    host_vmk_facts.append(dict(
                        device=vmk.device,
                        key=vmk.key,
                        portgroup=vmk.portgroup,
                        ipv4_address=vmk.spec.ip.ipAddress,
                        ipv4_subnet_mask=vmk.spec.ip.subnetMask,
                        dhcp=vmk.spec.ip.dhcp,
                        mac=vmk.spec.mac,
                        mtu=vmk.spec.mtu,
                        stack=vmk.spec.netStackInstanceKey,
                        enable_vsan=vmk.device in self.service_type_vmks[host.name]['vsan'],
                        enable_vmotion=vmk.device in self.service_type_vmks[host.name]['vmotion'],
                        enable_management=vmk.device in self.service_type_vmks[host.name]['management'],
                        enable_ft=vmk.device in self.service_type_vmks[host.name]['faultToleranceLogging'],
                    )
                    )
            hosts_facts[host.name] = host_vmk_facts
        return hosts_facts
|
||||
|
||||
|
||||
def main():
    """Module entry point: parse arguments and emit VMKernel facts."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )

    # At least one of cluster_name / esxi_hostname selects the hosts.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        supports_check_mode=True
    )

    facts_manager = VmkernelFactsManager(module)
    module.exit_json(changed=False, host_vmk_facts=facts_manager.gather_host_vmk_facts())


if __name__ == "__main__":
    main()
|
@ -1,161 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['deprecated'],
    'supported_by': 'community'
}


DOCUMENTATION = r'''
---
module: vmware_vswitch_facts
deprecated:
  removed_in: '2.13'
  why: Deprecated in favour of C(_info) module.
  alternative: Use M(vmware_vswitch_info) instead.
short_description: Gathers facts about an ESXi host's vswitch configurations
description:
- This module can be used to gather facts about an ESXi host's vswitch configurations when ESXi hostname or Cluster name is given.
- The vSphere Client shows the value for the number of ports as elastic from vSphere 5.5 and above.
- Other tools like esxcli might show the number of ports as 1536 or 5632.
- See U(https://kb.vmware.com/s/article/2064511) for more details.
version_added: '2.6'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
  cluster_name:
    description:
    - Name of the cluster.
    - Facts about vswitch belonging to every ESXi host systems under this cluster will be returned.
    - If C(esxi_hostname) is not given, this parameter is required.
    type: str
  esxi_hostname:
    description:
    - ESXi hostname to gather facts from.
    - If C(cluster_name) is not given, this parameter is required.
    type: str
extends_documentation_fragment: vmware.documentation
'''

EXAMPLES = r'''
- name: Gather vswitch facts about all ESXi Host in given Cluster
  vmware_vswitch_facts:
    hostname: '{{ vcenter_hostname }}'
    username: '{{ vcenter_username }}'
    password: '{{ vcenter_password }}'
    cluster_name: '{{ cluster_name }}'
  delegate_to: localhost
  register: all_hosts_vswitch_facts

- name: Gather vswitch facts about ESXi Host
  vmware_vswitch_facts:
    hostname: '{{ vcenter_hostname }}'
    username: '{{ vcenter_username }}'
    password: '{{ vcenter_password }}'
    esxi_hostname: '{{ esxi_hostname }}'
  delegate_to: localhost
  register: all_vswitch_facts
'''

RETURN = r'''
hosts_vswitch_facts:
    description: metadata about host's vswitch configuration
    returned: on success
    type: dict
    sample: {
        "10.76.33.218": {
            "vSwitch0": {
                "mtu": 1500,
                "num_ports": 128,
                "pnics": [
                    "vmnic0"
                ]
            },
            "vSwitch_0011": {
                "mtu": 1500,
                "num_ports": 128,
                "pnics": [
                    "vmnic2",
                    "vmnic1"
                ]
            },
        },
    }
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
|
||||
|
||||
|
||||
class VswitchFactsManager(PyVmomi):
    """Gather standard vSwitch facts from the selected ESXi hosts."""

    def __init__(self, module):
        super(VswitchFactsManager, self).__init__(module)
        cluster = self.params.get('cluster_name', None)
        esxi_hostname = self.params.get('esxi_hostname', None)
        self.hosts = self.get_all_host_objs(cluster_name=cluster, esxi_host_name=esxi_hostname)
        if not self.hosts:
            self.module.fail_json(msg="Failed to find host system.")

    @staticmethod
    def serialize_pnics(vswitch_obj):
        """Return plain pnic names such as 'vmnic0'."""
        # Entries look like 'key-vim.host.PhysicalNic-vmnic0'; keep the tail.
        return [pnic_key.split("-", 3)[-1] for pnic_key in vswitch_obj.pnic]

    def gather_vswitch_facts(self):
        """Return {host name: {vswitch name: facts dict}}."""
        hosts_vswitch_facts = dict()
        for host in self.hosts:
            network_manager = host.configManager.networkSystem
            if not network_manager:
                continue
            switches = dict()
            for vswitch in network_manager.networkInfo.vswitch:
                switches[vswitch.name] = dict(
                    pnics=self.serialize_pnics(vswitch),
                    mtu=vswitch.mtu,
                    # Read ports from the spec so the value matches the
                    # vswitch config module (e.g. 128 instead of 5632).
                    num_ports=vswitch.spec.numPorts
                )
            hosts_vswitch_facts[host.name] = switches
        return hosts_vswitch_facts
|
||||
|
||||
|
||||
def main():
    """Module entry point: parse arguments and emit vSwitch facts."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )

    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        supports_check_mode=True
    )

    facts_manager = VswitchFactsManager(module)
    module.exit_json(changed=False, hosts_vswitch_facts=facts_manager.gather_vswitch_facts())


if __name__ == "__main__":
    main()
|
@ -1,247 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright: (c) 2015, VMware, Inc. All Rights Reserved.
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: vca_fw
short_description: add remove firewall rules in a gateway in a vca
description:
  - Adds or removes firewall rules from a gateway in a vca environment
version_added: "2.0"
author:
- Peter Sprygada (@privateip)
options:
    fw_rules:
      description:
        - A list of firewall rules to be added to the gateway, Please see examples on valid entries
      required: True
extends_documentation_fragment: vca.documentation
'''

EXAMPLES = '''

#Add a set of firewall rules

- hosts: localhost
  connection: local
  tasks:
   - vca_fw:
       instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
       vdc_name: 'benz_ansible'
       state: 'absent'
       fw_rules:
         - description: "ben testing"
           source_ip: "Any"
           dest_ip: 192.0.2.23
         - description: "ben testing 2"
           source_ip: 192.0.2.50
           source_port: "Any"
           dest_port: "22"
           dest_ip: 192.0.2.101
           is_enable: "true"
           enable_logging: "false"
           protocol: "Tcp"
           policy: "allow"

'''
|
||||
|
||||
try:
|
||||
from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import FirewallRuleType
|
||||
from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import ProtocolsType
|
||||
except ImportError:
|
||||
# normally set a flag here but it will be caught when testing for
|
||||
# the existence of pyvcloud (see module_utils/vca.py). This just
|
||||
# protects against generating an exception at runtime
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vca import VcaError, vca_argument_spec, vca_login
|
||||
|
||||
|
||||
VALID_PROTO = ['Tcp', 'Udp', 'Icmp', 'Other', 'Any']
VALID_RULE_KEYS = ['policy', 'is_enable', 'enable_logging', 'description',
                   'dest_ip', 'dest_port', 'source_ip', 'source_port',
                   'protocol']


def protocol_to_tuple(protocol):
    """Return the protocol flags as a (Tcp, Udp, Icmp, Other, Any) tuple."""
    getters = (protocol.get_Tcp, protocol.get_Udp, protocol.get_Icmp,
               protocol.get_Other, protocol.get_Any)
    return tuple(getter() for getter in getters)


def protocol_to_string(protocol):
    """Return the name of the first enabled protocol flag, or None."""
    names = ('Tcp', 'Udp', 'Icmp', 'Other', 'Any')
    for flag, name in zip(protocol_to_tuple(protocol), names):
        if flag is True:
            return name
    return None


def protocol_to_type(protocol):
    """Build a ProtocolsType with the named protocol flag enabled.

    Raises VcaError when *protocol* is not a valid attribute name.
    """
    try:
        protocols = ProtocolsType()
        setattr(protocols, protocol, True)
        return protocols
    except AttributeError:
        raise VcaError("The value in protocol is not valid")


def validate_fw_rules(fw_rules):
    """Normalise firewall rule dicts in place and return the list.

    Unknown keys raise VcaError; missing fields get defaults and string
    fields are lower-cased.
    """
    for rule in fw_rules:
        for key in rule.keys():
            if key not in VALID_RULE_KEYS:
                raise VcaError("%s is not a valid key in fw rules, please "
                               "check above.." % key, valid_keys=VALID_RULE_KEYS)

        # Port fields may arrive as ints; coerce to lower-cased strings.
        for field in ('dest_port', 'source_port'):
            rule[field] = str(rule.get(field, 'Any')).lower()
        for field, default in (('dest_ip', 'Any'), ('source_ip', 'Any'),
                               ('protocol', 'Any'), ('policy', 'allow')):
            rule[field] = rule.get(field, default).lower()
        rule['is_enable'] = rule.get('is_enable', True)
        rule['enable_logging'] = rule.get('enable_logging', False)
        rule['description'] = rule.get('description', 'rule added by Ansible')

    return fw_rules


def fw_rules_to_dict(rules):
    """Convert pyvcloud FirewallRuleType objects to normalised dicts."""
    return [
        dict(
            dest_port=rule.get_DestinationPortRange().lower(),
            dest_ip=rule.get_DestinationIp().lower(),
            source_port=rule.get_SourcePortRange().lower(),
            source_ip=rule.get_SourceIp().lower(),
            protocol=protocol_to_string(rule.get_Protocols()).lower(),
            policy=rule.get_Policy().lower(),
            is_enable=rule.get_IsEnabled(),
            enable_logging=rule.get_EnableLogging(),
            description=rule.get_Description()
        )
        for rule in rules
    ]
|
||||
|
||||
|
||||
def create_fw_rule(is_enable, description, policy, protocol, dest_port,
                   dest_ip, source_port, source_ip, enable_logging):
    """Build a pyvcloud FirewallRuleType from normalised rule fields.

    ``protocol`` is a protocol name; protocol_to_type converts it and
    raises VcaError for an invalid name.
    """
    return FirewallRuleType(IsEnabled=is_enable,
                            Description=description,
                            Policy=policy,
                            Protocols=protocol_to_type(protocol),
                            DestinationPortRange=dest_port,
                            DestinationIp=dest_ip,
                            SourcePortRange=source_port,
                            SourceIp=source_ip,
                            EnableLogging=enable_logging)
|
||||
|
||||
|
||||
def main():
    """Entry point for vca_fw: converge gateway firewall rules to fw_rules."""
    argument_spec = vca_argument_spec()
    argument_spec.update(
        dict(
            fw_rules=dict(required=True, type='list'),
            gateway_name=dict(default='gateway'),
            state=dict(default='present', choices=['present', 'absent'])
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    fw_rules = module.params.get('fw_rules')
    gateway_name = module.params.get('gateway_name')
    vdc_name = module.params['vdc_name']

    vca = vca_login(module)

    gateway = vca.get_gateway(vdc_name, gateway_name)
    if not gateway:
        module.fail_json(msg="Not able to find the gateway %s, please check "
                             "the gateway_name param" % gateway_name)

    # Private pyvcloud API; used below to replace a rule in place by index.
    fwservice = gateway._getFirewallService()

    rules = gateway.get_fw_rules()
    current_rules = fw_rules_to_dict(rules)

    try:
        desired_rules = validate_fw_rules(fw_rules)
    except VcaError as e:
        module.fail_json(msg=e.message)

    result = dict(changed=False)
    result['current_rules'] = current_rules
    result['desired_rules'] = desired_rules

    updates = list()
    additions = list()
    deletions = list()

    # Positional diff: desired rule i is compared against current rule i;
    # desired rules beyond the current list become additions.
    for (index, rule) in enumerate(desired_rules):
        try:
            if rule != current_rules[index]:
                updates.append((index, rule))
        except IndexError:
            additions.append(rule)

    eol = len(current_rules) - len(desired_rules)
    if eol > 0:
        # NOTE(review): this slices from index `eol`, so it deletes
        # len(current_rules) - eol == len(desired_rules) trailing rules
        # rather than the `eol` surplus ones; the start index arguably
        # should be len(desired_rules). Confirm before relying on it.
        for rule in current_rules[eol:]:
            deletions.append(rule)

    for rule in additions:
        if not module.check_mode:
            # add_fw_rule expects the capitalised protocol name.
            rule['protocol'] = rule['protocol'].capitalize()
            gateway.add_fw_rule(**rule)
        result['changed'] = True

    for index, rule in updates:
        if not module.check_mode:
            rule = create_fw_rule(**rule)
            fwservice.replace_FirewallRule_at(index, rule)
        result['changed'] = True

    keys = ['protocol', 'dest_port', 'dest_ip', 'source_port', 'source_ip']
    for rule in deletions:
        if not module.check_mode:
            kwargs = dict([(k, v) for k, v in rule.items() if k in keys])
            # NOTE(review): kwargs['protocol'] is a lowercase string here,
            # but protocol_to_string expects a Protocols object with
            # get_Tcp()/... accessors — this call looks like it would raise
            # AttributeError. Confirm before relying on the deletion path.
            kwargs['protocol'] = protocol_to_string(kwargs['protocol'])
            gateway.delete_fw_rule(**kwargs)
        result['changed'] = True

    # Persist all queued firewall changes in a single save.
    if not module.check_mode and result['changed'] is True:
        task = gateway.save_services_configuration()
        if task:
            vca.block_until_completed(task)

    result['rules_updated'] = len(updates)
    result['rules_added'] = len(additions)
    result['rules_deleted'] = len(deletions)

    return module.exit_json(**result)


if __name__ == '__main__':
    main()
|
@ -1,205 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright: (c) 2015, VMware, Inc. All Rights Reserved.
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: vca_nat
short_description: add remove nat rules in a gateway in a vca
description:
  - Adds or removes nat rules from a gateway in a vca environment
version_added: "2.0"
author: Peter Sprygada (@privateip)
options:
    purge_rules:
      description:
        - If set to true, it will delete all rules in the gateway that are not given as parameter to this module.
      type: bool
      default: false
    nat_rules:
      description:
        - A list of rules to be added to the gateway, Please see examples on valid entries
      default: []
extends_documentation_fragment: vca.documentation
'''

EXAMPLES = '''

#An example for a source nat

- hosts: localhost
  connection: local
  tasks:
   - vca_nat:
       instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
       vdc_name: 'benz_ansible'
       state: 'present'
       nat_rules:
         - rule_type: SNAT
           original_ip: 192.0.2.42
           translated_ip: 203.0.113.23

#example for a DNAT
- hosts: localhost
  connection: local
  tasks:
   - vca_nat:
       instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
       vdc_name: 'benz_ansible'
       state: 'present'
       nat_rules:
         - rule_type: DNAT
           original_ip: 203.0.113.23
           original_port: 22
           translated_ip: 192.0.2.42
           translated_port: 22

'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vca import VcaError, vca_argument_spec, vca_login
|
||||
|
||||
|
||||
VALID_RULE_KEYS = ['rule_type', 'original_ip', 'original_port',
                   'translated_ip', 'translated_port', 'protocol']


def validate_nat_rules(nat_rules):
    """Normalise NAT rule dicts in place and return the list.

    Each rule must be a dict restricted to VALID_RULE_KEYS. Missing fields
    default to 'any' ('DNAT' for rule_type) and values are lower-cased.
    Raises VcaError on a non-dict rule or unknown key.
    """
    for rule in nat_rules:
        if not isinstance(rule, dict):
            raise VcaError("nat rules must be a list of dictionaries, "
                           "Please check", valid_keys=VALID_RULE_KEYS)

        for key in rule.keys():
            if key not in VALID_RULE_KEYS:
                raise VcaError("%s is not a valid key in nat rules, please "
                               "check above.." % key, valid_keys=VALID_RULE_KEYS)

        # Ports may arrive as ints; coerce to lower-cased strings.
        for field in ('original_port', 'translated_port'):
            rule[field] = str(rule.get(field, 'any')).lower()
        for field, default in (('original_ip', 'any'), ('translated_ip', 'any'),
                               ('protocol', 'any'), ('rule_type', 'DNAT')):
            rule[field] = rule.get(field, default).lower()

    return nat_rules
|
||||
|
||||
|
||||
def nat_rules_to_dict(nat_rules):
    """Convert pyvcloud NAT rule objects to normalised lowercase dicts.

    Empty port/protocol strings are reported as 'any'.
    """
    normalised = []
    for rule in nat_rules:
        gw_rule = rule.get_GatewayNatRule()
        normalised.append({
            'rule_type': rule.get_RuleType().lower(),
            'original_ip': gw_rule.get_OriginalIp().lower(),
            'original_port': gw_rule.get_OriginalPort().lower() or 'any',
            'translated_ip': gw_rule.get_TranslatedIp().lower(),
            'translated_port': gw_rule.get_TranslatedPort().lower() or 'any',
            'protocol': gw_rule.get_Protocol().lower() or 'any',
        })
    return normalised
|
||||
|
||||
|
||||
def rule_to_string(rule):
    """Render a rule dict as 'key=value, key=value' for display."""
    return ', '.join('%s=%s' % (key, value) for key, value in rule.items())
|
||||
|
||||
|
||||
def main():
    """Entry point for vca_nat: converge gateway NAT rules to nat_rules."""
    argument_spec = vca_argument_spec()
    argument_spec.update(
        dict(
            nat_rules=dict(type='list', default=[]),
            gateway_name=dict(default='gateway'),
            purge_rules=dict(default=False, type='bool'),
            state=dict(default='present', choices=['present', 'absent'])
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    vdc_name = module.params.get('vdc_name')
    nat_rules = module.params['nat_rules']
    gateway_name = module.params['gateway_name']
    purge_rules = module.params['purge_rules']

    # Nothing to do unless we either purge or have rules to apply.
    if not purge_rules and not nat_rules:
        module.fail_json(msg='Must define purge_rules or nat_rules')

    vca = vca_login(module)

    gateway = vca.get_gateway(vdc_name, gateway_name)
    if not gateway:
        module.fail_json(msg="Not able to find the gateway %s, please check "
                             "the gateway_name param" % gateway_name)

    try:
        desired_rules = validate_nat_rules(nat_rules)
    except VcaError as e:
        module.fail_json(msg=e.message)

    rules = gateway.get_nat_rules()

    result = dict(changed=False, rules_purged=0)

    deletions = 0
    additions = 0

    # Optional purge: wipe every existing rule first, then re-read the
    # (now empty) rule list before computing the diff below.
    if purge_rules is True and len(rules) > 0:
        result['rules_purged'] = len(rules)
        deletions = result['rules_purged']
        rules = list()
        if not module.check_mode:
            gateway.del_all_nat_rules()
            task = gateway.save_services_configuration()
            vca.block_until_completed(task)
            rules = gateway.get_nat_rules()
        result['changed'] = True

    current_rules = nat_rules_to_dict(rules)

    result['current_rules'] = current_rules
    result['desired_rules'] = desired_rules

    # Set-style diff on normalised dicts: add anything desired-but-absent.
    for rule in desired_rules:
        if rule not in current_rules:
            additions += 1
            if not module.check_mode:
                gateway.add_nat_rule(**rule)
            result['changed'] = True
    result['rules_added'] = additions

    result['delete_rule'] = list()
    result['delete_rule_rc'] = list()
    # ...and delete anything present-but-undesired.
    for rule in current_rules:
        if rule not in desired_rules:
            deletions += 1
            if not module.check_mode:
                result['delete_rule'].append(rule)
                rc = gateway.del_nat_rule(**rule)
                result['delete_rule_rc'].append(rc)
            result['changed'] = True
    result['rules_deleted'] = deletions

    # Persist additions/deletions in a single save at the end.
    if not module.check_mode and (additions > 0 or deletions > 0):
        task = gateway.save_services_configuration()
        vca.block_until_completed(task)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
@ -1,351 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright: (c) 2015, Ansible, Inc.
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}

DOCUMENTATION = '''
---
module: vca_vapp
short_description: Manages vCloud Air vApp instances.
description:
  - This module will actively manage vCloud Air vApp instances. Instances
    can be created and deleted as well as both deployed and undeployed.
version_added: "2.0"
author:
- Peter Sprygada (@privateip)
notes:
- VMware sold their vCloud Air service in Q2 2017.
- VMware made significant changes to the pyvcloud interface around this time. The C(vca_vapp) module relies on now deprecated code.
- Mileage with C(vca_vapp) may vary as vCloud Director APIs advance.
- A viable alternative may be U(https://github.com/vmware/ansible-module-vcloud-director)
requirements:
- pyvcloud <= 18.2.2
options:
  vapp_name:
    description:
      - The name of the vCloud Air vApp instance
    required: yes
  template_name:
    description:
      - The name of the vApp template to use to create the vApp instance. If
        the I(state) is not `absent` then the I(template_name) value must be
        provided. The I(template_name) must be previously uploaded to the
        catalog specified by I(catalog_name)
  network_name:
    description:
      - The name of the network that should be attached to the virtual machine
        in the vApp. The virtual network specified must already be created in
        the vCloud Air VDC. If the I(state) is not 'absent' then the
        I(network_name) argument must be provided.
  network_mode:
    description:
      - Configures the mode of the network connection.
    default: pool
    choices: ['pool', 'dhcp', 'static']
  vm_name:
    description:
      - The name of the virtual machine instance in the vApp to manage.
  vm_cpus:
    description:
      - The number of vCPUs to configure for the VM in the vApp. If the
        I(vm_name) argument is provided, then this becomes a per VM setting
        otherwise it is applied to all VMs in the vApp.
  vm_memory:
    description:
      - The amount of memory in MB to allocate to VMs in the vApp. If the
        I(vm_name) argument is provided, then this becomes a per VM setting
        otherwise it is applied to all VMs in the vApp.
  operation:
    description:
      - Specifies an operation to be performed on the vApp.
    default: noop
    choices: ['noop', 'poweron', 'poweroff', 'suspend', 'shutdown', 'reboot', 'reset']
  state:
    description:
      - Configures the state of the vApp.
    default: present
    choices: ['present', 'absent', 'deployed', 'undeployed']
  username:
    description:
      - The vCloud Air username to use during authentication
  password:
    description:
      - The vCloud Air password to use during authentication
  org:
    description:
      - The org to login to for creating vapp, mostly set when the service_type is vdc.
  instance_id:
    description:
      - The instance id in a vchs environment to be used for creating the vapp
  host:
    description:
      - The authentication host to be used when service type is vcd.
  api_version:
    description:
      - The api version to be used with the vca
    default: "5.7"
  service_type:
    description:
      - The type of service we are authenticating against
    default: vca
    choices: [ "vca", "vchs", "vcd" ]
  vdc_name:
    description:
      - The name of the virtual data center (VDC) where the vm should be created or contains the vAPP.
extends_documentation_fragment: vca
'''

EXAMPLES = '''
- name: Creates a new vApp in a VCA instance
  vca_vapp:
    vapp_name: tower
    state: present
    template_name: 'Ubuntu Server 12.04 LTS (amd64 20150127)'
    vdc_name: VDC1
    instance_id: '<your instance id here>'
    username: '<your username here>'
    password: '<your password here>'
  delegate_to: localhost
'''
|
||||
|
||||
from ansible.module_utils.vca import VcaAnsibleModule, VcaError
|
||||
|
||||
DEFAULT_VAPP_OPERATION = 'noop'

# Map pyvcloud vApp status strings onto this module's operation vocabulary.
VAPP_STATUS = {
    'Powered off': 'poweroff',
    'Powered on': 'poweron',
    'Suspended': 'suspend'
}

VAPP_STATES = ['present', 'absent', 'deployed', 'undeployed']
VAPP_OPERATIONS = ['poweron', 'poweroff', 'suspend', 'shutdown',
                   'reboot', 'reset', 'noop']
|
||||
|
||||
|
||||
def get_instance(module):
    """Describe the configured vApp: name, state, and (if found) status."""
    name = module.params['vapp_name']
    instance = dict(vapp_name=name, state='absent')
    try:
        vapp = module.get_vapp(name)
        if not vapp:
            return instance
        raw_status = module.vca.get_status(vapp.me.get_status())
        instance['status'] = VAPP_STATUS.get(raw_status, 'unknown')
        if vapp.me.deployed:
            instance['state'] = 'deployed'
        else:
            instance['state'] = 'undeployed'
    except VcaError:
        # Treat lookup failures as "absent" (or partially described).
        pass
    return instance
|
||||
|
||||
|
||||
def create(module):
    """Create the vApp from a catalog template and optionally attach a network.

    Returns the VM details from get_vm_details when a network is attached,
    otherwise None.
    """
    vdc_name = module.params['vdc_name']
    vapp_name = module.params['vapp_name']
    template_name = module.params['template_name']
    catalog_name = module.params['catalog_name']
    network_name = module.params['network_name']
    network_mode = module.params['network_mode']
    vm_name = module.params['vm_name']
    vm_cpus = module.params['vm_cpus']
    vm_memory = module.params['vm_memory']
    # NOTE(review): valid states are present/absent/deployed/undeployed, so
    # comparing with 'deploy' can never be true — deploy is always False
    # here. Looks like it should be 'deployed'; confirm before changing.
    deploy = module.params['state'] == 'deploy'
    poweron = module.params['operation'] == 'poweron'

    # The network is attached afterwards via connect_to_network, so the
    # vApp is created with a hard-coded 'bridged' connection mode here.
    task = module.vca.create_vapp(vdc_name, vapp_name, template_name,
                                  catalog_name, network_name, 'bridged',
                                  vm_name, vm_cpus, vm_memory, deploy, poweron)

    if task is False:
        module.fail('Failed to create vapp: %s' % vapp_name)

    module.vca.block_until_completed(task)

    # Connect the network to the Vapp/VM and return assigned IP
    if network_name is not None:
        vm_ip = connect_to_network(module, vdc_name, vapp_name, network_name, network_mode)
        return vm_ip
|
||||
|
||||
|
||||
def delete(module):
    """Remove the configured vApp from its VDC."""
    params = module.params
    module.vca.delete_vapp(params['vdc_name'], params['vapp_name'])
|
||||
|
||||
|
||||
def do_operation(module):
    """Run the requested power operation on the vApp (or one VM in it)."""
    vapp_name = module.params['vapp_name']
    operation = module.params['operation']

    # An explicit vm_name narrows the operation to a single VM.
    target_vm = None
    vm_name = module.params.get('vm_name')
    if vm_name:
        target_vm = module.get_vm(vapp_name, vm_name)

    # Translate the module vocabulary to the vCloud verb casing.
    verb_map = {'poweron': 'powerOn', 'poweroff': 'powerOff'}
    operation = verb_map.get(operation, operation)

    module.get_vapp(vapp_name).execute('power:%s' % operation, 'post', targetVM=target_vm)
|
||||
|
||||
|
||||
def set_state(module):
    """Deploy or undeploy the vApp according to the requested state."""
    state = module.params['state']
    vapp = module.get_vapp(module.params['vapp_name'])
    if state == 'deployed':
        # deploy(True) also powers the vApp on.
        power_on = module.params['operation'] == 'poweron'
        if not vapp.deploy(power_on):
            module.fail('unable to deploy vapp')
    elif state == 'undeployed':
        operation = module.params['operation']
        if operation == 'poweroff':
            action = 'powerOff'
        elif operation == 'suspend':
            action = 'suspend'
        else:
            # Any other operation means a plain undeploy.
            action = None
        if not vapp.undeploy(action):
            module.fail('unable to undeploy vapp')
|
||||
|
||||
|
||||
def connect_to_network(module, vdc_name, vapp_name, network_name, network_mode):
    """Attach the vApp and its VMs to an org network and return VM details.

    Fails the module if the network or vApp cannot be found, or if any of
    the vCloud tasks does not complete.
    """
    # Materialize a list (not a lazy filter object) so it can be sized and
    # indexed on Python 3.
    nets = [net for net in module.vca.get_networks(vdc_name) if net.name == network_name]
    if len(nets) != 1:
        # fail_json requires the msg keyword; a positional argument raises
        # TypeError instead of reporting the real error.
        module.fail_json(msg="Unable to find network %s " % network_name)

    the_vdc = module.vca.get_vdc(vdc_name)
    the_vapp = module.vca.get_vapp(the_vdc, vapp_name)

    if the_vapp and the_vapp.name != vapp_name:
        module.fail_json(msg="Failed to find vapp named %s" % the_vapp.name)

    # Connect the vApp itself to the network.
    task = the_vapp.connect_to_network(nets[0].name, nets[0].href)
    result = module.vca.block_until_completed(task)

    if result is None:
        module.fail_json(msg="Failed to complete task")

    # Connect the VMs inside the vApp.
    ip_allocation_mode = None
    if network_mode == 'pool':
        ip_allocation_mode = 'POOL'
    elif network_mode == 'dhcp':
        ip_allocation_mode = 'DHCP'

    task = the_vapp.connect_vms(nets[0].name, connection_index=0, ip_allocation_mode=ip_allocation_mode)

    # (The original re-checked the stale previous result here before waiting
    # on the new task; that dead check is removed.)
    result = module.vca.block_until_completed(task)
    if result is None:
        module.fail_json(msg="Failed to complete task")

    # Refresh vApp info and pull the assigned VM IP.
    the_vapp = module.vca.get_vapp(the_vdc, vapp_name)
    if the_vapp is None:
        module.fail_json(msg="Failed to get vapp named %s" % vapp_name)

    return get_vm_details(module)
|
||||
|
||||
|
||||
def get_vm_details(module):
    """Collect guest-customization password and first IP for the vApp's VMs.

    Returns a dict with the optional keys ``vm_admin_password`` and
    ``vm_ip`` (last VM in the vApp wins if there are several).
    """
    vdc_name = module.params['vdc_name']
    vapp_name = module.params['vapp_name']
    the_vdc = module.vca.get_vdc(vdc_name)
    the_vapp = module.vca.get_vapp(the_vdc, vapp_name)
    if the_vapp and the_vapp.name != vapp_name:
        module.fail_json(msg="Failed to find vapp named %s" % the_vapp.name)

    the_vm_details = dict()
    # Namespaced attribute key carrying the NIC's assigned IP address.
    _ip_attr = '{http://www.vmware.com/vcloud/v1.5}ipAddress'

    for vm in the_vapp.me.Children.Vm:
        sections = vm.get_Section()

        # next() over a generator instead of filter(...)[0]: filter objects
        # are not subscriptable on Python 3.
        customization_section = next(
            section for section in sections
            if section.__class__.__name__ == "GuestCustomizationSectionType")
        if customization_section.get_AdminPasswordEnabled():
            the_vm_details["vm_admin_password"] = customization_section.get_AdminPassword()

        virtual_hardware_section = next(
            section for section in sections
            if section.__class__.__name__ == "VirtualHardwareSection_Type")

        # Collect every NIC address; only the first one is reported.
        ips = []
        for item in virtual_hardware_section.get_Item():
            if item.Connection:
                for connection in item.Connection:
                    ip = connection.anyAttributes_.get(_ip_attr)
                    if ip:
                        ips.append(ip)
        if ips:
            the_vm_details["vm_ip"] = ips[0]

    return the_vm_details
|
||||
|
||||
|
||||
def main():
    """Ansible entry point: reconcile the vApp with the requested state."""
    argument_spec = dict(
        vapp_name=dict(required=True),
        vdc_name=dict(required=True),
        template_name=dict(),
        catalog_name=dict(default='Public Catalog'),
        network_name=dict(),
        network_mode=dict(default='pool', choices=['dhcp', 'static', 'pool']),
        vm_name=dict(),
        vm_cpus=dict(),
        vm_memory=dict(),
        operation=dict(default=DEFAULT_VAPP_OPERATION, choices=VAPP_OPERATIONS),
        state=dict(default='present', choices=VAPP_STATES)
    )

    module = VcaAnsibleModule(argument_spec=argument_spec,
                              supports_check_mode=True)

    desired_state = module.params['state']
    requested_op = module.params['operation']

    instance = get_instance(module)

    result = dict(changed=False)

    if instance and desired_state == 'absent':
        # Existing vApp must be removed.
        if not module.check_mode:
            delete(module)
            result['changed'] = True
    elif desired_state != 'absent':
        if instance['state'] == 'absent':
            # vApp does not exist yet: create it.
            if not module.check_mode:
                result['ansible_facts'] = create(module)
                result['changed'] = True
        elif instance['state'] != desired_state and desired_state != 'present':
            # Deployed/undeployed mismatch: converge it.
            if not module.check_mode:
                set_state(module)
                result['changed'] = True

        # The power operation runs independently of the deploy transition.
        if requested_op != instance.get('status') and requested_op != 'noop':
            if not module.check_mode:
                do_operation(module)
                result['changed'] = True
                result['ansible_facts'] = get_vm_details(module)

    return module.exit(**result)
|
||||
|
||||
|
||||
# Entry point when Ansible executes the module as a script.
if __name__ == '__main__':
    main()
|
@ -1,229 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Michael Tipton <mike () ibeta.org>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vcenter_extension
|
||||
short_description: Register/deregister vCenter Extensions
|
||||
description:
|
||||
- This module can be used to register/deregister vCenter Extensions.
|
||||
version_added: 2.8
|
||||
author:
|
||||
- Michael Tipton (@castawayegr)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
extension_key:
|
||||
description:
|
||||
- The extension key of the extension to install or uninstall.
|
||||
required: True
|
||||
type: str
|
||||
version:
|
||||
description:
|
||||
- The version of the extension you are installing or uninstalling.
|
||||
required: True
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Required for C(state=present). The name of the extension you are installing.
|
||||
type: str
|
||||
company:
|
||||
description:
|
||||
- Required for C(state=present). The name of the company that makes the extension.
|
||||
type: str
|
||||
description:
|
||||
description:
|
||||
- Required for C(state=present). A short description of the extension.
|
||||
type: str
|
||||
email:
|
||||
description:
|
||||
- Required for C(state=present). Administrator email to use for extension.
|
||||
type: str
|
||||
url:
|
||||
description:
|
||||
- Required for C(state=present). Link to server hosting extension zip file to install.
|
||||
type: str
|
||||
ssl_thumbprint:
|
||||
description:
|
||||
- Required for C(state=present). SSL thumbprint of the extension hosting server.
|
||||
type: str
|
||||
server_type:
|
||||
description:
|
||||
- Required for C(state=present). Type of server being used to install the extension (SOAP, REST, HTTP, etc.).
|
||||
default: vsphere-client-serenity
|
||||
type: str
|
||||
client_type:
|
||||
description:
|
||||
- Required for C(state=present). Type of client the extension is (win32, .net, linux, etc.).
|
||||
default: vsphere-client-serenity
|
||||
type: str
|
||||
visible:
|
||||
description:
|
||||
- Show the extension in solution manager inside vCenter.
|
||||
default: True
|
||||
type: bool
|
||||
state:
|
||||
description:
|
||||
- Add or remove vCenter Extension.
|
||||
choices: [absent, present]
|
||||
default: present
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Register vCenter Extension
|
||||
vcenter_extension:
|
||||
hostname: "{{ groups['vcsa'][0] }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ site_password }}"
|
||||
extension_key: "{{ extension_key }}"
|
||||
version: "1.0"
|
||||
company: "Acme"
|
||||
name: "Acme Extension"
|
||||
description: "acme management"
|
||||
email: "user@example.com"
|
||||
url: "https://10.0.0.1/ACME-vSphere-web-plugin-1.0.zip"
|
||||
ssl_thumbprint: "{{ ssl_thumbprint }}"
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
register: register_extension
|
||||
|
||||
- name: Deregister vCenter Extension
|
||||
vcenter_extension:
|
||||
hostname: "{{ groups['vcsa'][0] }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ site_password }}"
|
||||
extension_key: "{{ extension_key }}"
|
||||
version: "1.0"
|
||||
state: absent
|
||||
delegate_to: localhost
|
||||
register: deregister_extension
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
result:
|
||||
description: information about performed operation
|
||||
returned: always
|
||||
type: str
|
||||
sample: "'com.acme.Extension' installed."
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
import datetime
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import connect_to_api, vmware_argument_spec
|
||||
|
||||
|
||||
def main():
    """Register or deregister a vCenter extension.

    With state=present the extension described by the module parameters is
    registered (no-op if the key already exists); with state=absent it is
    unregistered (no-op if the key is missing).
    """
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(
        extension_key=dict(type='str', required=True),
        version=dict(type='str', required=True),
        email=dict(type='str', required=False),
        description=dict(type='str', required=False),
        company=dict(type='str', required=False),
        name=dict(type='str', required=False),
        url=dict(type='str', required=False),
        ssl_thumbprint=dict(type='str', required=False),
        client_type=dict(type='str', default='vsphere-client-serenity', required=False),
        server_type=dict(type='str', default='vsphere-client-serenity', required=False),
        # Use a real boolean default instead of the string 'True'.
        visible=dict(type='bool', default=True, required=False),
        state=dict(type='str', default='present', choices=['absent', 'present']),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False,
        required_if=[
            ['state', 'present', ['email', 'description', 'company', 'name', 'url', 'ssl_thumbprint', 'server_type', 'client_type']]
        ]
    )

    state = module.params['state']
    extension_key = module.params['extension_key']
    version = module.params['version']
    email = module.params['email']
    desc = module.params['description']
    name = module.params['name']
    company = module.params['company']
    client_type = module.params['client_type']
    server_type = module.params['server_type']
    url = module.params['url']
    visible = module.params['visible']
    thumbprint = module.params['ssl_thumbprint']

    content = connect_to_api(module, False)
    em = content.extensionManager
    # FindExtension returns None when the key is not registered.
    key_check = em.FindExtension(extension_key)
    results = dict(changed=False, installed=dict())

    if state == 'present' and key_check:
        results['changed'] = False
        results['installed'] = "'%s' is already installed" % (extension_key)

    elif state == 'present' and not key_check:
        # Build the full Extension spec required by RegisterExtension.
        extension = vim.Extension()
        extension.key = extension_key
        extension.company = company
        extension.version = version
        extension.lastHeartbeatTime = datetime.datetime.now()
        description = vim.Description()
        description.label = name
        description.summary = desc
        extension.description = description
        extension.shownInSolutionManager = visible

        client = vim.Extension.ClientInfo()
        client.company = company
        client.version = version
        client.description = description
        client.type = client_type
        client.url = url
        extension.client = [client]

        server = vim.Extension.ServerInfo()
        server.company = company
        server.description = description
        server.type = server_type
        server.adminEmail = email
        server.serverThumbprint = thumbprint
        server.url = url
        extension.server = [server]

        em.RegisterExtension(extension)
        results['changed'] = True
        results['installed'] = "'%s' installed." % (extension_key)

    elif state == 'absent' and key_check:
        em.UnregisterExtension(extension_key)
        results['changed'] = True
        results['installed'] = "'%s' uninstalled." % (extension_key)

    elif state == 'absent' and not key_check:
        results['changed'] = False
        results['installed'] = "'%s' is not installed." % (extension_key)

    module.exit_json(**results)
|
||||
|
||||
|
||||
# Entry point when Ansible executes the module as a script.
if __name__ == '__main__':
    main()
|
@ -1,115 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vcenter_extension_info
|
||||
short_description: Gather info vCenter extensions
|
||||
description:
|
||||
- This module can be used to gather information about vCenter extension.
|
||||
version_added: '2.9'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather info about vCenter Extensions
|
||||
vcenter_extension_info:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
register: ext_info
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
extension_info:
|
||||
description: List of extensions
|
||||
returned: success
|
||||
type: list
|
||||
sample: [
|
||||
{
|
||||
"extension_company": "VMware, Inc.",
|
||||
"extension_key": "com.vmware.vim.ls",
|
||||
"extension_label": "License Services",
|
||||
"extension_last_heartbeat_time": "2018-09-03T09:36:18.003768+00:00",
|
||||
"extension_subject_name": "",
|
||||
"extension_summary": "Provides various license services",
|
||||
"extension_type": "",
|
||||
"extension_version": "5.0"
|
||||
},
|
||||
{
|
||||
"extension_company": "VMware Inc.",
|
||||
"extension_key": "com.vmware.vim.sms",
|
||||
"extension_label": "VMware vCenter Storage Monitoring Service",
|
||||
"extension_last_heartbeat_time": "2018-09-03T09:36:18.005730+00:00",
|
||||
"extension_subject_name": "",
|
||||
"extension_summary": "Storage Monitoring and Reporting",
|
||||
"extension_type": "",
|
||||
"extension_version": "5.5"
|
||||
}
|
||||
]
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
|
||||
|
||||
|
||||
class VmwareExtManager(PyVmomi):
    """Gathers information about the extensions registered in vCenter."""

    def __init__(self, module):
        super(VmwareExtManager, self).__init__(module)

    def gather_plugin_info(self):
        """Exit the module with a list of per-extension fact dicts."""
        result = dict(changed=False, extension_info=[])
        ext_manager = self.content.extensionManager
        if not ext_manager:
            # No extension manager available: report an empty list.
            self.module.exit_json(**result)

        result['extension_info'] = [
            dict(
                extension_label=ext.description.label,
                extension_summary=ext.description.summary,
                extension_key=ext.key,
                extension_company=ext.company,
                extension_version=ext.version,
                extension_type=ext.type if ext.type else '',
                extension_subject_name=ext.subjectName if ext.subjectName else '',
                extension_last_heartbeat_time=ext.lastHeartbeatTime,
            )
            for ext in ext_manager.extensionList
        ]

        self.module.exit_json(**result)
|
||||
|
||||
|
||||
def main():
    """Ansible entry point: gather vCenter extension information."""
    module = AnsibleModule(
        argument_spec=vmware_argument_spec(),
        supports_check_mode=True,
    )

    # gather_plugin_info() exits the module itself via exit_json.
    VmwareExtManager(module).gather_plugin_info()
|
||||
|
||||
|
||||
# Entry point when Ansible executes the module as a script.
if __name__ == "__main__":
    main()
|
@ -1,387 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vcenter_folder
|
||||
short_description: Manage folders on given datacenter
|
||||
description:
|
||||
- This module can be used to create, delete, move and rename folder on then given datacenter.
|
||||
- This module is only supported for vCenter.
|
||||
version_added: '2.5'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
- Christian Kotte (@ckotte) <christian.kotte@gmx.de>
|
||||
- Jan Meerkamp (@meerkampdvv)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
datacenter:
|
||||
description:
|
||||
- Name of the datacenter.
|
||||
required: True
|
||||
aliases: ['datacenter_name']
|
||||
type: str
|
||||
folder_name:
|
||||
description:
|
||||
- Name of folder to be managed.
|
||||
- This is case sensitive parameter.
|
||||
- Folder name should be under 80 characters. This is a VMware restriction.
|
||||
required: True
|
||||
type: str
|
||||
parent_folder:
|
||||
description:
|
||||
- Name of the parent folder under which new folder needs to be created.
|
||||
- This is case sensitive parameter.
|
||||
- "If user wants to create a folder under '/DC0/vm/vm_folder', this value will be 'vm_folder'."
|
||||
- "If user wants to create a folder under '/DC0/vm/folder1/folder2', this value will be 'folder1/folder2'."
|
||||
required: False
|
||||
type: str
|
||||
folder_type:
|
||||
description:
|
||||
- This is type of folder.
|
||||
- "If set to C(vm), then 'VM and Template Folder' is created under datacenter."
|
||||
- "If set to C(host), then 'Host and Cluster Folder' is created under datacenter."
|
||||
- "If set to C(datastore), then 'Storage Folder' is created under datacenter."
|
||||
- "If set to C(network), then 'Network Folder' is created under datacenter."
|
||||
- This parameter is required, if C(state) is set to C(present) and parent_folder is absent.
|
||||
- This option is ignored, if C(parent_folder) is set.
|
||||
default: vm
|
||||
type: str
|
||||
required: False
|
||||
choices: [ datastore, host, network, vm ]
|
||||
state:
|
||||
description:
|
||||
- State of folder.
|
||||
- If set to C(present) without parent folder parameter, then folder with C(folder_type) is created.
|
||||
- If set to C(present) with parent folder parameter, then folder in created under parent folder. C(folder_type) is ignored.
|
||||
- If set to C(absent), then folder is unregistered and destroyed.
|
||||
default: present
|
||||
choices: [ present, absent ]
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Create a VM folder on given datacenter
|
||||
vcenter_folder:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter_name: datacenter_name
|
||||
folder_name: sample_vm_folder
|
||||
folder_type: vm
|
||||
state: present
|
||||
register: vm_folder_creation_result
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Create a datastore folder on given datacenter
|
||||
vcenter_folder:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter_name: datacenter_name
|
||||
folder_name: sample_datastore_folder
|
||||
folder_type: datastore
|
||||
state: present
|
||||
register: datastore_folder_creation_result
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Create a sub folder under VM folder on given datacenter
|
||||
vcenter_folder:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter_name: datacenter_name
|
||||
folder_name: sample_sub_folder
|
||||
parent_folder: vm_folder
|
||||
state: present
|
||||
register: sub_folder_creation_result
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Delete a VM folder on given datacenter
|
||||
vcenter_folder:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter_name: datacenter_name
|
||||
folder_name: sample_vm_folder
|
||||
folder_type: vm
|
||||
state: absent
|
||||
register: vm_folder_deletion_result
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
result:
|
||||
description: The detail about the new folder
|
||||
returned: On success
|
||||
type: complex
|
||||
contains:
|
||||
path:
|
||||
description: the full path of the new folder
|
||||
type: str
|
||||
msg:
|
||||
description: string stating about result
|
||||
type: str
|
||||
'''
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError as import_err:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datacenter_by_name, wait_for_task, get_all_objs
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class VmwareFolderManager(PyVmomi):
    """Creates and deletes vm/host/datastore/network folders in a datacenter.

    ensure() implements the module's state machine and always exits the
    module itself (exit_json/fail_json); it never returns to the caller.
    """

    def __init__(self, module):
        super(VmwareFolderManager, self).__init__(module)
        datacenter_name = self.params.get('datacenter', None)
        self.datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter_name)
        if self.datacenter_obj is None:
            self.module.fail_json(msg="Failed to find datacenter %s" % datacenter_name)

        # Map folder_type to the datacenter's corresponding root folder.
        self.datacenter_folder_type = {
            'vm': self.datacenter_obj.vmFolder,
            'host': self.datacenter_obj.hostFolder,
            'datastore': self.datacenter_obj.datastoreFolder,
            'network': self.datacenter_obj.networkFolder,
        }

    def ensure(self):
        """
        Manage internal state management
        """
        state = self.module.params.get('state')
        datacenter_name = self.module.params.get('datacenter')
        folder_type = self.module.params.get('folder_type')
        folder_name = self.module.params.get('folder_name')
        parent_folder = self.module.params.get('parent_folder', None)
        results = {'changed': False, 'result': {}}
        if state == 'present':
            # Check if the folder already exists
            p_folder_obj = None
            if parent_folder:
                if "/" in parent_folder:
                    # Multi-level parent path: walk each component in turn,
                    # failing fast on the first missing folder.
                    parent_folder_parts = parent_folder.strip('/').split('/')
                    p_folder_obj = None
                    for part in parent_folder_parts:
                        part_folder_obj = self.get_folder(datacenter_name=datacenter_name,
                                                          folder_name=part,
                                                          folder_type=folder_type,
                                                          parent_folder=p_folder_obj)
                        if not part_folder_obj:
                            self.module.fail_json(msg="Could not find folder %s" % part)
                        p_folder_obj = part_folder_obj
                    child_folder_obj = self.get_folder(datacenter_name=datacenter_name,
                                                       folder_name=folder_name,
                                                       folder_type=folder_type,
                                                       parent_folder=p_folder_obj)
                    if child_folder_obj:
                        # Idempotent no-op: folder already present.
                        results['result'] = "Folder %s already exists under" \
                                            " parent folder %s" % (folder_name, parent_folder)
                        self.module.exit_json(**results)
                else:
                    # Single-level parent folder lookup.
                    p_folder_obj = self.get_folder(datacenter_name=datacenter_name,
                                                   folder_name=parent_folder,
                                                   folder_type=folder_type)

                    if not p_folder_obj:
                        self.module.fail_json(msg="Parent folder %s does not exist" % parent_folder)

                    # Check if folder exists under parent folder
                    child_folder_obj = self.get_folder(datacenter_name=datacenter_name,
                                                       folder_name=folder_name,
                                                       folder_type=folder_type,
                                                       parent_folder=p_folder_obj)
                    if child_folder_obj:
                        results['result']['path'] = self.get_folder_path(child_folder_obj)
                        results['result'] = "Folder %s already exists under" \
                                            " parent folder %s" % (folder_name, parent_folder)
                        self.module.exit_json(**results)
            else:
                # No parent folder given: look directly under the
                # datacenter's folder_type root.
                folder_obj = self.get_folder(datacenter_name=datacenter_name,
                                             folder_name=folder_name,
                                             folder_type=folder_type)

                if folder_obj:
                    results['result']['path'] = self.get_folder_path(folder_obj)
                    results['result']['msg'] = "Folder %s already exists" % folder_name
                    self.module.exit_json(**results)

            # Create a new folder
            try:
                if parent_folder and p_folder_obj:
                    if self.module.check_mode:
                        results['msg'] = "Folder '%s' of type '%s' under '%s' will be created." % \
                                         (folder_name, folder_type, parent_folder)
                    else:
                        new_folder = p_folder_obj.CreateFolder(folder_name)
                        results['result']['path'] = self.get_folder_path(new_folder)
                        results['result']['msg'] = "Folder '%s' of type '%s' under '%s' created" \
                                                   " successfully." % (folder_name, folder_type, parent_folder)
                    results['changed'] = True
                elif not parent_folder and not p_folder_obj:
                    if self.module.check_mode:
                        results['msg'] = "Folder '%s' of type '%s' will be created." % (folder_name, folder_type)
                    else:
                        new_folder = self.datacenter_folder_type[folder_type].CreateFolder(folder_name)
                        results['result']['msg'] = "Folder '%s' of type '%s' created successfully." % (folder_name, folder_type)
                        results['result']['path'] = self.get_folder_path(new_folder)
                    results['changed'] = True
            except vim.fault.DuplicateName as duplicate_name:
                # To be consistent with the other vmware modules, We decided to accept this error
                # and the playbook should simply carry on with other tasks.
                # User will have to take care of this exception
                # https://github.com/ansible/ansible/issues/35388#issuecomment-362283078
                results['changed'] = False
                results['msg'] = "Failed to create folder as another object has same name" \
                                 " in the same target folder : %s" % to_native(duplicate_name.msg)
            except vim.fault.InvalidName as invalid_name:
                self.module.fail_json(msg="Failed to create folder as folder name is not a valid "
                                          "entity name : %s" % to_native(invalid_name.msg))
            except Exception as general_exc:
                self.module.fail_json(msg="Failed to create folder due to generic"
                                          " exception : %s " % to_native(general_exc))
            self.module.exit_json(**results)
        elif state == 'absent':
            # Check if the folder already exists
            p_folder_obj = None
            if parent_folder:
                if "/" in parent_folder:
                    # Multi-level parent path: walk each component, as above.
                    parent_folder_parts = parent_folder.strip('/').split('/')
                    p_folder_obj = None
                    for part in parent_folder_parts:
                        part_folder_obj = self.get_folder(datacenter_name=datacenter_name,
                                                          folder_name=part,
                                                          folder_type=folder_type,
                                                          parent_folder=p_folder_obj)
                        if not part_folder_obj:
                            self.module.fail_json(msg="Could not find folder %s" % part)
                        p_folder_obj = part_folder_obj
                    folder_obj = self.get_folder(datacenter_name=datacenter_name,
                                                 folder_name=folder_name,
                                                 folder_type=folder_type,
                                                 parent_folder=p_folder_obj)
                else:
                    p_folder_obj = self.get_folder(datacenter_name=datacenter_name,
                                                   folder_name=parent_folder,
                                                   folder_type=folder_type)

                    if not p_folder_obj:
                        self.module.fail_json(msg="Parent folder %s does not exist" % parent_folder)

                    # Check if folder exists under parent folder
                    folder_obj = self.get_folder(datacenter_name=datacenter_name,
                                                 folder_name=folder_name,
                                                 folder_type=folder_type,
                                                 parent_folder=p_folder_obj)
            else:
                folder_obj = self.get_folder(datacenter_name=datacenter_name,
                                             folder_name=folder_name,
                                             folder_type=folder_type)
            if folder_obj:
                try:
                    if parent_folder:
                        if self.module.check_mode:
                            results['changed'] = True
                            results['msg'] = "Folder '%s' of type '%s' under '%s' will be removed." % \
                                             (folder_name, folder_type, parent_folder)
                        else:
                            # VM folders must be unregistered before destroy;
                            # other folder types are destroyed directly.
                            if folder_type == 'vm':
                                task = folder_obj.UnregisterAndDestroy()
                            else:
                                task = folder_obj.Destroy()
                            results['changed'], results['msg'] = wait_for_task(task=task)
                    else:
                        if self.module.check_mode:
                            results['changed'] = True
                            results['msg'] = "Folder '%s' of type '%s' will be removed." % (folder_name, folder_type)
                        else:
                            if folder_type == 'vm':
                                task = folder_obj.UnregisterAndDestroy()
                            else:
                                task = folder_obj.Destroy()
                            results['changed'], results['msg'] = wait_for_task(task=task)
                except vim.fault.ConcurrentAccess as concurrent_access:
                    self.module.fail_json(msg="Failed to remove folder as another client"
                                              " modified folder before this operation : %s" % to_native(concurrent_access.msg))
                except vim.fault.InvalidState as invalid_state:
                    self.module.fail_json(msg="Failed to remove folder as folder is in"
                                              " invalid state : %s" % to_native(invalid_state.msg))
                except Exception as gen_exec:
                    self.module.fail_json(msg="Failed to remove folder due to generic"
                                              " exception %s " % to_native(gen_exec))
            self.module.exit_json(**results)

    def get_folder(self, datacenter_name, folder_name, folder_type, parent_folder=None):
        """
        Get managed object of folder by name
        Returns: Managed object of folder by name

        """
        # Searching without a parent scans all folders; the childType check
        # restricts matches to folders of the requested folder_type.
        folder_objs = get_all_objs(self.content, [vim.Folder], parent_folder)
        for folder in folder_objs:
            if parent_folder:
                if folder.name == folder_name and \
                   self.datacenter_folder_type[folder_type].childType == folder.childType:
                    return folder
            else:
                # Without a parent, also require the folder to live directly
                # under this datacenter's folder_type root.
                if folder.name == folder_name and \
                   self.datacenter_folder_type[folder_type].childType == folder.childType and \
                   folder.parent.parent.name == datacenter_name:  # e.g. folder.parent.parent.name == /DC01/host/folder
                    return folder

        return None
|
||||
|
||||
|
||||
def main():
    """Ansible entry point: validate parameters and run the folder manager."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datacenter=dict(type='str', required=True, aliases=['datacenter_name']),
        folder_name=dict(type='str', required=True),
        parent_folder=dict(type='str', required=False),
        state=dict(type='str',
                   choices=['present', 'absent'],
                   default='present'),
        folder_type=dict(type='str',
                         default='vm',
                         choices=['datastore', 'host', 'network', 'vm'],
                         required=False),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    # vSphere caps folder names at 80 characters.
    if len(module.params.get('folder_name')) > 79:
        module.fail_json(msg="Failed to manage folder as folder_name can only contain 80 characters.")

    folder_manager = VmwareFolderManager(module)
    if not folder_manager.is_vcenter():
        # Folder management is a vCenter-only feature.
        module.fail_json(msg="Module vcenter_folder is meant for vCenter, hostname %s "
                             "is not vCenter server." % module.params.get('hostname'))
    folder_manager.ensure()
|
||||
|
||||
|
||||
# Entry point when Ansible executes the module as a script.
if __name__ == "__main__":
    main()
|
@ -1,275 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright: (c) 2017, Dag Wieers (@dagwieers) <dag@wieers.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
module: vcenter_license
|
||||
short_description: Manage VMware vCenter license keys
|
||||
description:
|
||||
- Add and delete vCenter, ESXi server license keys.
|
||||
version_added: '2.4'
|
||||
author:
|
||||
- Dag Wieers (@dagwieers)
|
||||
requirements:
|
||||
- pyVmomi
|
||||
options:
|
||||
labels:
|
||||
description:
|
||||
- The optional labels of the license key to manage in vSphere vCenter.
|
||||
- This is a dictionary with key/value pairs.
|
||||
default: {
|
||||
'source': 'ansible'
|
||||
}
|
||||
type: dict
|
||||
license:
|
||||
description:
|
||||
- The license key to manage in vSphere vCenter.
|
||||
required: yes
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Whether to add (C(present)) or remove (C(absent)) the license key.
|
||||
choices: [absent, present]
|
||||
default: present
|
||||
type: str
|
||||
esxi_hostname:
|
||||
description:
|
||||
- The hostname of the ESXi server to which the specified license will be assigned.
|
||||
- This parameter is optional.
|
||||
version_added: '2.8'
|
||||
type: str
|
||||
datacenter:
|
||||
description:
|
||||
- The datacenter name to use for the operation.
|
||||
type: str
|
||||
version_added: '2.9'
|
||||
cluster_name:
|
||||
description:
|
||||
- Name of the cluster to apply vSAN license.
|
||||
type: str
|
||||
version_added: '2.9'
|
||||
notes:
|
||||
- This module will also auto-assign the current vCenter to the license key
|
||||
if the product matches the license key, and vCenter is currently assigned
|
||||
an evaluation license only.
|
||||
- The evaluation license (00000-00000-00000-00000-00000) is not listed
|
||||
when unused.
|
||||
- If C(esxi_hostname) is specified, then will assign the C(license) key to
|
||||
the ESXi host.
|
||||
extends_documentation_fragment: vmware.vcenter_documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Add a new vCenter license
|
||||
vcenter_license:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
license: f600d-21ae3-5592b-249e0-cc341
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Remove an (unused) vCenter license
|
||||
vcenter_license:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
license: f600d-21ae3-5592b-249e0-cc341
|
||||
state: absent
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Add ESXi license and assign to the ESXi host
|
||||
vcenter_license:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
esxi_hostname: '{{ esxi_hostname }}'
|
||||
license: f600d-21ae3-5592b-249e0-dd502
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Add vSAN license and assign to the given cluster
|
||||
vcenter_license:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter: '{{ datacenter_name }}'
|
||||
cluster_name: '{{ cluster_name }}'
|
||||
license: f600d-21ae3-5592b-249e0-dd502
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
licenses:
|
||||
description: list of license keys after module executed
|
||||
returned: always
|
||||
type: list
|
||||
sample:
|
||||
- f600d-21ae3-5592b-249e0-cc341
|
||||
- 143cc-0e942-b2955-3ea12-d006f
|
||||
'''
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, find_hostsystem_by_name
|
||||
|
||||
|
||||
class VcenterLicenseMgr(PyVmomi):
    """Small helper around pyVmomi's LicenseManager entry collections."""

    def __init__(self, module):
        super(VcenterLicenseMgr, self).__init__(module)

    def find_key(self, licenses, license):
        """Return the license entry whose licenseKey equals *license*, or None."""
        return next((entry for entry in licenses if entry.licenseKey == license), None)

    def list_keys(self, licenses):
        """Return the license keys of all entries, skipping evaluation keys.

        Entries whose ``used`` attribute is None are treated as the evaluation
        license and filtered out of the listing.
        """
        return [entry.licenseKey for entry in licenses if entry.used is not None]
|
||||
|
||||
|
||||
def main():
    """Entry point: add/assign or remove a vCenter/ESXi license key."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(
        labels=dict(type='dict', default=dict(source='ansible')),
        license=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        esxi_hostname=dict(type='str'),
        datacenter=dict(type='str'),
        cluster_name=dict(type='str'),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    license = module.params['license']
    state = module.params['state']

    # Convert the 'labels' dict into vim.KeyValue objects for AddLicense().
    # FIXME: This does not seem to work on vCenter v6.0
    labels = []
    for k in module.params['labels']:
        kv = vim.KeyValue()
        kv.key = k
        kv.value = module.params['labels'][k]
        labels.append(kv)

    result = dict(
        changed=False,
        diff=dict(),
    )

    pyv = VcenterLicenseMgr(module)
    # License management requires a vCenter endpoint, not a bare ESXi host.
    if not pyv.is_vcenter():
        module.fail_json(msg="vcenter_license is meant for vCenter, hostname %s "
                             "is not vCenter server." % module.params.get('hostname'))

    lm = pyv.content.licenseManager

    # Snapshot the current key list for the result and --diff output.
    result['licenses'] = pyv.list_keys(lm.licenses)
    if module._diff:
        result['diff']['before'] = '\n'.join(result['licenses']) + '\n'

    if state == 'present':
        if license not in result['licenses']:
            result['changed'] = True
            if module.check_mode:
                # Simulate the addition for --check / --diff output only.
                result['licenses'].append(license)
            else:
                lm.AddLicense(license, labels)

        # Re-read the key so it can be assigned to an entity below.
        # NOTE(review): in check mode a brand-new key is never actually added,
        # so find_key() returns None and the module fails -- confirm intended.
        key = pyv.find_key(lm.licenses, license)
        if key is not None:
            lam = lm.licenseAssignmentManager
            assigned_license = None
            datacenter = module.params['datacenter']
            datacenter_obj = None
            if datacenter:
                datacenter_obj = pyv.find_datacenter_by_name(datacenter)
                if not datacenter_obj:
                    module.fail_json(msg="Unable to find the datacenter %(datacenter)s" % module.params)

            # Select the assignment target entity, in priority order:
            # named cluster > the vCenter itself > a named ESXi host.
            cluster = module.params['cluster_name']
            if cluster:
                cluster_obj = pyv.find_cluster_by_name(cluster_name=cluster, datacenter_name=datacenter_obj)
                if not cluster_obj:
                    msg = "Unable to find the cluster %(cluster_name)s"
                    if datacenter:
                        msg += " in datacenter %(datacenter)s"
                    module.fail_json(msg=msg % module.params)
                entityId = cluster_obj._moId
            # assign to current vCenter, if esxi_hostname is not specified
            elif module.params['esxi_hostname'] is None:
                entityId = pyv.content.about.instanceUuid
                # Warn when the key's product name does not match this vCenter.
                if pyv.content.about.name not in key.name:
                    module.warn('License key "%s" (%s) is not suitable for "%s"' % (license, key.name, pyv.content.about.name))
            # assign to ESXi server
            else:
                esxi_host = find_hostsystem_by_name(pyv.content, module.params['esxi_hostname'])
                if esxi_host is None:
                    module.fail_json(msg='Cannot find the specified ESXi host "%s".' % module.params['esxi_hostname'])
                entityId = esxi_host._moId
                # e.g., key.editionKey is "esx.enterprisePlus.cpuPackage", not sure all keys are in this format
                if 'esx' not in key.editionKey:
                    module.warn('License key "%s" edition "%s" is not suitable for ESXi server' % (license, key.editionKey))

            try:
                assigned_license = lam.QueryAssignedLicenses(entityId=entityId)
            except Exception as e:
                module.fail_json(msg='Could not query vCenter "%s" assigned license info due to %s.' % (entityId, to_native(e)))

            # Only (re)assign when the entity holds no license yet or holds a
            # different key than the requested one.
            if not assigned_license or (len(assigned_license) != 0 and assigned_license[0].assignedLicense.licenseKey != license):
                try:
                    lam.UpdateAssignedLicense(entity=entityId, licenseKey=license)
                except Exception:
                    module.fail_json(msg='Could not assign "%s" (%s) to vCenter.' % (license, key.name))
                result['changed'] = True
            result['licenses'] = pyv.list_keys(lm.licenses)
        else:
            module.fail_json(msg='License "%s" is not existing or can not be added' % license)
        if module._diff:
            result['diff']['after'] = '\n'.join(result['licenses']) + '\n'

    elif state == 'absent' and license in result['licenses']:

        # Check if key is in use
        key = pyv.find_key(lm.licenses, license)
        if key.used > 0:
            module.fail_json(msg='Cannot remove key "%s", still in use %s time(s).' % (license, key.used))

        result['changed'] = True
        if module.check_mode:
            # Simulate the removal for --check / --diff output only.
            result['licenses'].remove(license)
        else:
            lm.RemoveLicense(license)
            result['licenses'] = pyv.list_keys(lm.licenses)
        if module._diff:
            result['diff']['after'] = '\n'.join(result['licenses']) + '\n'

    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
@ -1,126 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_about_info
|
||||
short_description: Provides information about VMware server to which user is connecting to
|
||||
description:
|
||||
- This module can be used to gather information about VMware server to which user is trying to connect.
|
||||
version_added: '2.9'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Provide information about vCenter
|
||||
vmware_about_info:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
delegate_to: localhost
|
||||
register: vcenter_about_info
|
||||
|
||||
- name: Provide information about a standalone ESXi server
|
||||
vmware_about_info:
|
||||
hostname: '{{ esxi_hostname }}'
|
||||
username: '{{ esxi_username }}'
|
||||
password: '{{ esxi_password }}'
|
||||
delegate_to: localhost
|
||||
register: esxi_about_info
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
about_info:
|
||||
description:
|
||||
- dict about VMware server
|
||||
returned: success
|
||||
type: str
|
||||
sample:
|
||||
{
|
||||
"api_type": "VirtualCenter",
|
||||
"api_version": "6.5",
|
||||
"build": "5973321",
|
||||
"instance_uuid": "dbed6e0c-bd88-4ef6-b594-21283e1c677f",
|
||||
"license_product_name": "VMware VirtualCenter Server",
|
||||
"license_product_version": "6.0",
|
||||
"locale_build": "000",
|
||||
"locale_version": "INTL",
|
||||
"os_type": "darwin-amd64",
|
||||
"product_full_name": "VMware vCenter Server 6.5.0 build-5973321",
|
||||
"product_line_id": "vpx",
|
||||
"product_name": "VMware vCenter Server (govmomi simulator)",
|
||||
"vendor": "VMware, Inc.",
|
||||
"version": "6.5.0"
|
||||
}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
|
||||
|
||||
|
||||
class VmwareAboutManager(PyVmomi):
    """Report the connected server's ServiceInstance 'about' information."""

    # Maps result keys in about_info to attribute names on the 'about' object.
    _ABOUT_ATTRS = (
        ('product_name', 'name'),
        ('product_full_name', 'fullName'),
        ('vendor', 'vendor'),
        ('version', 'version'),
        ('build', 'build'),
        ('locale_version', 'localeVersion'),
        ('locale_build', 'localeBuild'),
        ('os_type', 'osType'),
        ('product_line_id', 'productLineId'),
        ('api_type', 'apiType'),
        ('api_version', 'apiVersion'),
        ('instance_uuid', 'instanceUuid'),
        ('license_product_name', 'licenseProductName'),
        ('license_product_version', 'licenseProductVersion'),
    )

    def __init__(self, module):
        super(VmwareAboutManager, self).__init__(module)

    def gather_about_info(self):
        """Exit the module with the server's about information (never changes state)."""
        content = self.content
        if not content:
            # No service content available: report an empty result rather than fail.
            self.module.exit_json(changed=False, about_info=dict())

        about = content.about
        info = dict((key, getattr(about, attr)) for key, attr in self._ABOUT_ATTRS)
        self.module.exit_json(changed=False, about_info=info)
|
||||
|
||||
|
||||
def main():
    """Entry point: connect and emit the server's 'about' information."""
    module = AnsibleModule(
        argument_spec=vmware_argument_spec(),
        supports_check_mode=True,
    )
    VmwareAboutManager(module).gather_about_info()


if __name__ == "__main__":
    main()
|
@ -1,330 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_category
|
||||
short_description: Manage VMware categories
|
||||
description:
|
||||
- This module can be used to create / delete / update VMware categories.
|
||||
- Tag feature is introduced in vSphere 6 version, so this module is not supported in the earlier versions of vSphere.
|
||||
- All variables and VMware object names are case sensitive.
|
||||
version_added: '2.7'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
- vSphere Automation SDK
|
||||
options:
|
||||
category_name:
|
||||
description:
|
||||
- The name of category to manage.
|
||||
required: True
|
||||
type: str
|
||||
category_description:
|
||||
description:
|
||||
- The category description.
|
||||
- This is required only if C(state) is set to C(present).
|
||||
- This parameter is ignored, when C(state) is set to C(absent).
|
||||
default: ''
|
||||
type: str
|
||||
category_cardinality:
|
||||
description:
|
||||
- The category cardinality.
|
||||
- This parameter is ignored, when updating existing category.
|
||||
choices: ['multiple', 'single']
|
||||
default: 'multiple'
|
||||
type: str
|
||||
new_category_name:
|
||||
description:
|
||||
- The new name for an existing category.
|
||||
- This value is used while updating an existing category.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- The state of category.
|
||||
- If set to C(present) and category does not exists, then category is created.
|
||||
- If set to C(present) and category exists, then category is updated.
|
||||
- If set to C(absent) and category exists, then category is deleted.
|
||||
- If set to C(absent) and category does not exists, no action is taken.
|
||||
- Process of updating category only allows name, description change.
|
||||
default: 'present'
|
||||
choices: [ 'present', 'absent' ]
|
||||
type: str
|
||||
associable_object_types:
|
||||
description:
|
||||
- List of object types that can be associated with the given category.
|
||||
choices:
|
||||
- All objects
|
||||
- Cluster
|
||||
- Content Library
|
||||
- Datacenter
|
||||
- Datastore
|
||||
- Datastore Cluster
|
||||
- Distributed Port Group
|
||||
- Distributed Switch
|
||||
- Folder
|
||||
- Host
|
||||
- Library item
|
||||
- Network
|
||||
- Resource Pool
|
||||
- vApp
|
||||
- Virtual Machine
|
||||
version_added: '2.10'
|
||||
type: list
|
||||
elements: str
|
||||
extends_documentation_fragment: vmware_rest_client.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Create a category
|
||||
vmware_category:
|
||||
hostname: "{{ vcenter_server }}"
|
||||
username: "{{ vcenter_user }}"
|
||||
password: "{{ vcenter_pass }}"
|
||||
category_name: Sample_Cat_0001
|
||||
category_description: Sample Description
|
||||
category_cardinality: 'multiple'
|
||||
state: present
|
||||
|
||||
- name: Rename category
|
||||
vmware_category:
|
||||
hostname: "{{ vcenter_server }}"
|
||||
username: "{{ vcenter_user }}"
|
||||
password: "{{ vcenter_pass }}"
|
||||
category_name: Sample_Category_0001
|
||||
new_category_name: Sample_Category_0002
|
||||
state: present
|
||||
|
||||
- name: Update category description
|
||||
vmware_category:
|
||||
hostname: "{{ vcenter_server }}"
|
||||
username: "{{ vcenter_user }}"
|
||||
password: "{{ vcenter_pass }}"
|
||||
category_name: Sample_Category_0001
|
||||
category_description: Some fancy description
|
||||
state: present
|
||||
|
||||
- name: Delete category
|
||||
vmware_category:
|
||||
hostname: "{{ vcenter_server }}"
|
||||
username: "{{ vcenter_user }}"
|
||||
password: "{{ vcenter_pass }}"
|
||||
category_name: Sample_Category_0002
|
||||
state: absent
|
||||
|
||||
- name: Create category with 2 associable object types
|
||||
vmware_category:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
validate_certs: False
|
||||
category_name: 'Sample_Category_0003'
|
||||
category_description: 'sample description'
|
||||
associable_object_types:
|
||||
- Datastore
|
||||
- Cluster
|
||||
state: present
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
category_results:
|
||||
description: dictionary of category metadata
|
||||
returned: on success
|
||||
type: dict
|
||||
sample: {
|
||||
"category_id": "urn:vmomi:InventoryServiceCategory:d7120bda-9fa5-4f92-9d71-aa1acff2e5a8:GLOBAL",
|
||||
"msg": "Category NewCat_0001 updated."
|
||||
}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware_rest_client import VmwareRestClient
|
||||
try:
|
||||
from com.vmware.cis.tagging_client import CategoryModel
|
||||
from com.vmware.vapi.std.errors_client import Error
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
class VmwareCategory(VmwareRestClient):
    """Create, update and delete vSphere tag categories via the vAPI REST client."""

    def __init__(self, module):
        super(VmwareCategory, self).__init__(module)
        # vAPI tagging Category service handle.
        self.category_service = self.api_client.tagging.Category
        # Cache of existing categories, keyed by category name.
        self.global_categories = dict()
        self.category_name = self.params.get('category_name')
        self.get_all_categories()

    def ensure_state(self):
        """Manage internal states of categories.

        Dispatches on (desired state, current state) to the matching handler;
        every handler exits the module itself via exit_json/fail_json.
        """
        desired_state = self.params.get('state')
        states = {
            'present': {
                'present': self.state_update_category,
                'absent': self.state_create_category,
            },
            'absent': {
                'present': self.state_delete_category,
                'absent': self.state_unchanged,
            }
        }
        states[desired_state][self.check_category_status()]()

    def state_create_category(self):
        """Create category."""
        category_spec = self.category_service.CreateSpec()
        category_spec.name = self.category_name
        category_spec.description = self.params.get('category_description')

        if self.params.get('category_cardinality') == 'single':
            category_spec.cardinality = CategoryModel.Cardinality.SINGLE
        else:
            category_spec.cardinality = CategoryModel.Cardinality.MULTIPLE

        associable_object_types = self.params.get('associable_object_types')

        # 'All objects' clears the set entirely: an empty associable-types set
        # leaves the category attachable to any object type.
        obj_types_set = []
        if associable_object_types:
            for obj_type in associable_object_types:
                if obj_type.lower() == 'all objects':
                    obj_types_set = []
                    break
                else:
                    obj_types_set.append(obj_type)

        category_spec.associable_types = set(obj_types_set)

        try:
            category_id = self.category_service.create(category_spec)
        except Error as error:
            self.module.fail_json(msg="%s" % self.get_error_message(error))

        if category_id:
            self.module.exit_json(changed=True,
                                  category_results=dict(msg="Category '%s' created." % category_spec.name,
                                                        category_id=category_id))
        self.module.exit_json(changed=False,
                              category_results=dict(msg="No category created", category_id=''))

    def state_unchanged(self):
        """Return unchanged state."""
        self.module.exit_json(changed=False)

    def state_update_category(self):
        """Update category: apply description and/or name changes if any."""
        category_id = self.global_categories[self.category_name]['category_id']
        changed = False
        results = dict(msg="Category %s is unchanged." % self.category_name,
                       category_id=category_id)

        category_update_spec = self.category_service.UpdateSpec()
        # Collects one True per field that actually changed.
        change_list = []
        old_cat_desc = self.global_categories[self.category_name]['category_description']
        new_cat_desc = self.params.get('category_description')
        if new_cat_desc and new_cat_desc != old_cat_desc:
            category_update_spec.description = new_cat_desc
            results['msg'] = 'Category %s updated.' % self.category_name
            change_list.append(True)

        new_cat_name = self.params.get('new_category_name')
        # Refuse to rename onto a name that is already taken.
        # NOTE(review): this also fires when new_category_name equals the
        # current name (a no-op rename) -- confirm that is intended.
        if new_cat_name in self.global_categories:
            self.module.fail_json(msg="Unable to rename %s as %s already"
                                      " exists in configuration." % (self.category_name, new_cat_name))
        old_cat_name = self.global_categories[self.category_name]['category_name']

        if new_cat_name and new_cat_name != old_cat_name:
            category_update_spec.name = new_cat_name
            results['msg'] = 'Category %s updated.' % self.category_name
            change_list.append(True)

        # Only call the API when at least one field actually changed.
        if any(change_list):
            try:
                self.category_service.update(category_id, category_update_spec)
                changed = True
            except Error as error:
                self.module.fail_json(msg="%s" % self.get_error_message(error))

        self.module.exit_json(changed=changed,
                              category_results=results)

    def state_delete_category(self):
        """Delete category."""
        category_id = self.global_categories[self.category_name]['category_id']
        try:
            self.category_service.delete(category_id=category_id)
        except Error as error:
            self.module.fail_json(msg="%s" % self.get_error_message(error))
        self.module.exit_json(changed=True,
                              category_results=dict(msg="Category '%s' deleted." % self.category_name,
                                                    category_id=category_id))

    def check_category_status(self):
        """
        Check if category exists or not
        Returns: 'present' if category found, else 'absent'

        """
        if self.category_name in self.global_categories:
            return 'present'
        else:
            return 'absent'

    def get_all_categories(self):
        """Retrieve all category information into self.global_categories."""
        for category in self.category_service.list():
            category_obj = self.category_service.get(category)
            self.global_categories[category_obj.name] = dict(
                category_description=category_obj.description,
                category_used_by=category_obj.used_by,
                category_cardinality=str(category_obj.cardinality),
                category_associable_types=category_obj.associable_types,
                category_id=category_obj.id,
                category_name=category_obj.name,
            )
|
||||
|
||||
|
||||
def main():
    """Entry point for the vmware_category module."""
    argument_spec = VmwareRestClient.vmware_client_argument_spec()
    argument_spec.update(
        category_name=dict(type='str', required=True),
        category_description=dict(type='str', default='', required=False),
        category_cardinality=dict(type='str', choices=["multiple", "single"], default="multiple"),
        new_category_name=dict(type='str'),
        state=dict(type='str', choices=['present', 'absent'], default='present'),
        associable_object_types=dict(
            type='list',
            choices=[
                'All objects', 'Folder', 'Cluster',
                'Datacenter', 'Datastore', 'Datastore Cluster',
                'Distributed Port Group', 'Distributed Switch',
                'Host', 'Content Library', 'Library item', 'Network',
                'Resource Pool', 'vApp', 'Virtual Machine',
            ],
            # Ansible argument specs take the element type as the string
            # name 'str', not the builtin type object (was: elements=str),
            # matching "elements: str" in DOCUMENTATION.
            elements='str',
        ),
    )
    module = AnsibleModule(argument_spec=argument_spec)

    vmware_category = VmwareCategory(module)
    vmware_category.ensure_state()


if __name__ == '__main__':
    main()
|
@ -1,128 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_category_info
|
||||
short_description: Gather info about VMware tag categories
|
||||
description:
|
||||
- This module can be used to gather information about VMware tag categories.
|
||||
- Tag feature is introduced in vSphere 6 version, so this module is not supported in earlier versions of vSphere.
|
||||
- All variables and VMware object names are case sensitive.
|
||||
version_added: '2.9'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
- vSphere Automation SDK
|
||||
extends_documentation_fragment: vmware_rest_client.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather info about tag categories
|
||||
vmware_category_info:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
delegate_to: localhost
|
||||
register: all_tag_category_info
|
||||
|
||||
- name: Gather category id from given tag category
|
||||
vmware_category_info:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
delegate_to: localhost
|
||||
register: tag_category_results
|
||||
|
||||
- set_fact:
|
||||
category_id: "{{ item.category_id }}"
|
||||
loop: "{{ tag_category_results.tag_category_info|json_query(query) }}"
|
||||
vars:
|
||||
query: "[?category_name==`Category0001`]"
|
||||
- debug: var=category_id
|
||||
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
tag_category_info:
|
||||
description: metadata of tag categories
|
||||
returned: always
|
||||
type: list
|
||||
sample: [
|
||||
{
|
||||
"category_associable_types": [],
|
||||
"category_cardinality": "MULTIPLE",
|
||||
"category_description": "awesome description",
|
||||
"category_id": "urn:vmomi:InventoryServiceCategory:e785088d-6981-4b1c-9fb8-1100c3e1f742:GLOBAL",
|
||||
"category_name": "Category0001",
|
||||
"category_used_by": []
|
||||
},
|
||||
{
|
||||
"category_associable_types": [
|
||||
"VirtualMachine"
|
||||
],
|
||||
"category_cardinality": "SINGLE",
|
||||
"category_description": "another awesome description",
|
||||
"category_id": "urn:vmomi:InventoryServiceCategory:ae5b7c6c-e622-4671-9b96-76e93adb70f2:GLOBAL",
|
||||
"category_name": "template_tag",
|
||||
"category_used_by": []
|
||||
}
|
||||
]
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware_rest_client import VmwareRestClient
|
||||
|
||||
|
||||
class VmwareCategoryInfoManager(VmwareRestClient):
    """Read-only reporter for vSphere tag category metadata."""

    def __init__(self, module):
        super(VmwareCategoryInfoManager, self).__init__(module)
        # vAPI tagging Category service handle.
        self.category_service = self.api_client.tagging.Category

    def get_all_tag_categories(self):
        """Exit the module with metadata for every tag category."""
        category_objs = (self.category_service.get(category_id)
                         for category_id in self.category_service.list())
        categories = [
            dict(
                category_description=cat.description,
                category_used_by=cat.used_by,
                category_cardinality=str(cat.cardinality),
                category_associable_types=cat.associable_types,
                category_id=cat.id,
                category_name=cat.name,
            )
            for cat in category_objs
        ]
        self.module.exit_json(changed=False, tag_category_info=categories)
|
||||
|
||||
|
||||
def main():
    """Entry point: gather information about all tag categories."""
    module = AnsibleModule(
        argument_spec=VmwareRestClient.vmware_client_argument_spec(),
        supports_check_mode=True,
    )
    VmwareCategoryInfoManager(module).get_all_tag_categories()


if __name__ == '__main__':
    main()
|
@ -1,227 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2017, IBM Corp
|
||||
# Author(s): Andreas Nafpliotis <nafpliot@de.ibm.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_cfg_backup
|
||||
short_description: Backup / Restore / Reset ESXi host configuration
|
||||
description:
|
||||
- This module can be used to perform various operations related to backup, restore and reset of ESXi host configuration.
|
||||
version_added: "2.5"
|
||||
author:
|
||||
- Andreas Nafpliotis (@nafpliot-ibm)
|
||||
notes:
|
||||
- Tested on ESXi 6.0
|
||||
- Works only for ESXi hosts
|
||||
- For configuration load or reset, the host will be switched automatically to maintenance mode.
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi installed
|
||||
options:
|
||||
esxi_hostname:
|
||||
description:
|
||||
- Name of ESXi server. This is required only if authentication against a vCenter is done.
|
||||
required: False
|
||||
type: str
|
||||
dest:
|
||||
description:
|
||||
- The destination where the ESXi configuration bundle will be saved. The I(dest) can be a folder or a file.
|
||||
- If I(dest) is a folder, the backup file will be saved in the folder with the default filename generated from the ESXi server.
|
||||
- If I(dest) is a file, the backup file will be saved with that filename. The file extension will always be .tgz.
|
||||
type: path
|
||||
src:
|
||||
description:
|
||||
- The file containing the ESXi configuration that will be restored.
|
||||
type: path
|
||||
state:
|
||||
description:
|
||||
- If C(saved), the .tgz backup bundle will be saved in I(dest).
|
||||
- If C(absent), the host configuration will be reset to default values.
|
||||
- If C(loaded), the backup file in I(src) will be loaded to the ESXi host rewriting the hosts settings.
|
||||
choices: [saved, absent, loaded]
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Save the ESXi configuration locally by authenticating directly against the ESXi host
|
||||
vmware_cfg_backup:
|
||||
hostname: '{{ esxi_hostname }}'
|
||||
username: '{{ esxi_username }}'
|
||||
password: '{{ esxi_password }}'
|
||||
state: saved
|
||||
dest: /tmp/
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Save the ESXi configuration locally by authenticating against the vCenter and selecting the ESXi host
|
||||
vmware_cfg_backup:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
esxi_hostname: '{{ esxi_hostname }}'
|
||||
username: '{{ esxi_username }}'
|
||||
password: '{{ esxi_password }}'
|
||||
state: saved
|
||||
dest: /tmp/
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
dest_file:
|
||||
description: The full path of where the file holding the ESXi configurations was stored
|
||||
returned: changed
|
||||
type: str
|
||||
sample: /tmp/configBundle-esxi.host.domain.tgz
|
||||
'''
|
||||
|
||||
import os
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, get_all_objs, wait_for_task, PyVmomi
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.urls import open_url
|
||||
from ansible.module_utils.six.moves.urllib.error import HTTPError
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class VMwareConfigurationBackup(PyVmomi):
|
||||
def __init__(self, module):
    """Collect the module parameters and resolve the target ESXi host."""
    super(VMwareConfigurationBackup, self).__init__(module)
    params = self.module.params
    # Cache every parameter this module works with.
    self.state = params['state']
    self.dest = params['dest']
    self.src = params['src']
    self.hostname = params['hostname']
    self.username = params['username']
    self.password = params['password']
    self.validate_certs = params['validate_certs']
    # Only set when connecting through vCenter rather than directly to ESXi.
    self.esxi_hostname = params.get('esxi_hostname', None)
    self.host = self.find_host_system()
|
||||
|
||||
def find_host_system(self):
    """Return the vim.HostSystem object to operate on.

    When ``esxi_hostname`` was supplied (vCenter connection), look the host
    up by name and fail the module if it cannot be found.  Otherwise fall
    back to the first host system in the inventory (direct ESXi connection),
    failing cleanly instead of raising IndexError when none exists.
    """
    if self.esxi_hostname:
        host_system_obj = self.find_hostsystem_by_name(host_name=self.esxi_hostname)
        if host_system_obj:
            return host_system_obj
        else:
            self.module.fail_json(msg="Failed to find ESXi %s" % self.esxi_hostname)

    host_system = get_all_objs(self.content, [vim.HostSystem])
    if not host_system:
        # Previously this fell through to list(...)[0] and raised an
        # unhandled IndexError when the inventory had no host systems.
        self.module.fail_json(msg="Failed to find any ESXi host system to operate on.")
    return list(host_system)[0]
|
||||
|
||||
def process_state(self):
    """Dispatch to the handler matching the requested state.

    Each handler terminates the module via exit_json/fail_json, so at
    most one of them ever runs.
    """
    handlers = {
        'saved': self.save_configuration,
        'absent': self.reset_configuration,
        'loaded': self.load_configuration,
    }
    handler = handlers.get(self.state)
    if handler is not None:
        handler()
|
||||
|
||||
def load_configuration(self):
    """Upload the backup bundle in ``src`` and restore it on the host.

    The host is put into maintenance mode first (required by the restore
    API); on restore failure maintenance mode is left before reporting.
    """
    if not os.path.isfile(self.src):
        self.module.fail_json(msg="Source file {0} does not exist".format(self.src))

    url = self.host.configManager.firmwareSystem.QueryFirmwareConfigUploadURL()
    url = url.replace('*', self.host.name)
    # find manually the url if there is a redirect because urllib2 -per RFC- doesn't do automatic redirects for PUT requests
    try:
        open_url(url=url, method='HEAD', validate_certs=self.validate_certs)
    except HTTPError as e:
        url = e.geturl()

    try:
        # 'src_file' instead of the builtin-shadowing name 'file'; the
        # unused 'request =' bindings from the original were dropped.
        with open(self.src, 'rb') as src_file:
            data = src_file.read()
        open_url(url=url, data=data, method='PUT', validate_certs=self.validate_certs,
                 url_username=self.username, url_password=self.password, force_basic_auth=True)
    except Exception as e:
        self.module.fail_json(msg=to_native(e))

    if not self.host.runtime.inMaintenanceMode:
        self.enter_maintenance()
    try:
        self.host.configManager.firmwareSystem.RestoreFirmwareConfiguration(force=True)
        self.module.exit_json(changed=True)
    except Exception as e:
        self.exit_maintenance()
        self.module.fail_json(msg=to_native(e))
|
||||
|
||||
def reset_configuration(self):
    """Reset the ESXi host configuration to factory defaults."""
    # The firmware reset API requires the host to be in maintenance mode.
    if not self.host.runtime.inMaintenanceMode:
        self.enter_maintenance()
    try:
        self.host.configManager.firmwareSystem.ResetFirmwareToFactoryDefaults()
        self.module.exit_json(changed=True)
    except Exception as error:
        # Leave maintenance mode so the host stays usable, then report.
        self.exit_maintenance()
        self.module.fail_json(msg=to_native(error))
|
||||
|
||||
def save_configuration(self):
    """Download the host's configuration bundle to ``dest``.

    ``dest`` may be an existing directory (the server-generated filename
    is kept) or a filename (forced to a .tgz extension).  Exits the module
    with the final path in ``dest_file``.
    """
    url = self.host.configManager.firmwareSystem.BackupFirmwareConfiguration()
    url = url.replace('*', self.host.name)
    if os.path.isdir(self.dest):
        # Keep the filename generated by the ESXi server.
        filename = url.rsplit('/', 1)[1]
        self.dest = os.path.join(self.dest, filename)
    else:
        filename, file_extension = os.path.splitext(self.dest)
        if file_extension != ".tgz":
            self.dest = filename + ".tgz"
    try:
        request = open_url(url=url, validate_certs=self.validate_certs)
        # 'backup_file' instead of the builtin-shadowing name 'file'.
        with open(self.dest, "wb") as backup_file:
            backup_file.write(request.read())
        self.module.exit_json(changed=True, dest_file=self.dest)
    except IOError as e:
        self.module.fail_json(msg="Failed to write backup file. Ensure that "
                                  "the dest path exists and is writable. Details : %s" % to_native(e))
    except Exception as e:
        self.module.fail_json(msg=to_native(e))
|
||||
|
||||
def enter_maintenance(self):
    """Put the host into maintenance mode, failing the module on error."""
    try:
        maintenance_task = self.host.EnterMaintenanceMode_Task(timeout=15)
        wait_for_task(maintenance_task)
    except Exception as error:
        self.module.fail_json(
            msg="Failed to enter maintenance mode."
                " Ensure that there are no powered on machines on the host. %s" % to_native(error))
|
||||
|
||||
def exit_maintenance(self):
    """Take the host out of maintenance mode, failing the module on error."""
    try:
        maintenance_task = self.host.ExitMaintenanceMode_Task(timeout=15)
        wait_for_task(maintenance_task)
    except Exception as generic_exc:
        self.module.fail_json(msg="Failed to exit maintenance mode due to %s" % to_native(generic_exc))
|
||||
|
||||
|
||||
def main():
    """Entry point: build the argument spec and run the backup module."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dest=dict(required=False, type='path'),
        esxi_hostname=dict(required=False, type='str'),
        src=dict(required=False, type='str' if False else 'path'),
        state=dict(required=True, choices=['saved', 'absent', 'loaded'], type='str'),
    )
    # dest/src are only mandatory for the states that actually use them.
    required_if = [
        ('state', 'saved', ['dest']),
        ('state', 'loaded', ['src']),
    ]

    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=required_if,
                           supports_check_mode=False)

    vmware_cfg_backup = VMwareConfigurationBackup(module)
    vmware_cfg_backup.process_state()


if __name__ == '__main__':
    main()
|
@ -1,590 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_cluster
|
||||
short_description: Manage VMware vSphere clusters
|
||||
description:
|
||||
- Adds or removes VMware vSphere clusters.
|
||||
- Although this module can manage DRS, HA and VSAN related configurations, this functionality is deprecated and will be removed in 2.12.
|
||||
- To manage DRS, HA and VSAN related configurations, use the new modules vmware_cluster_drs, vmware_cluster_ha and vmware_cluster_vsan.
|
||||
- All values and VMware object names are case sensitive.
|
||||
version_added: '2.0'
|
||||
author:
|
||||
- Joseph Callen (@jcpowermac)
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
requirements:
|
||||
- Tested on ESXi 5.5 and 6.5.
|
||||
- PyVmomi installed.
|
||||
options:
|
||||
cluster_name:
|
||||
description:
|
||||
- The name of the cluster to be managed.
|
||||
type: str
|
||||
required: yes
|
||||
datacenter:
|
||||
description:
|
||||
- The name of the datacenter.
|
||||
type: str
|
||||
required: yes
|
||||
aliases: [ datacenter_name ]
|
||||
ignore_drs:
|
||||
description:
|
||||
- If set to C(yes), DRS will not be configured; all explicit and default DRS related configurations will be ignored.
|
||||
type: bool
|
||||
default: 'no'
|
||||
version_added: 2.9
|
||||
ignore_ha:
|
||||
description:
|
||||
- If set to C(yes), HA will not be configured; all explicit and default HA related configurations will be ignored.
|
||||
type: bool
|
||||
default: 'no'
|
||||
version_added: 2.9
|
||||
ignore_vsan:
|
||||
description:
|
||||
- If set to C(yes), VSAN will not be configured; all explicit and default VSAN related configurations will be ignored.
|
||||
type: bool
|
||||
default: 'no'
|
||||
version_added: 2.9
|
||||
enable_drs:
|
||||
description:
|
||||
- If set to C(yes), will enable DRS when the cluster is created.
|
||||
type: bool
|
||||
default: 'no'
|
||||
drs_enable_vm_behavior_overrides:
|
||||
description:
|
||||
- Determines whether DRS Behavior overrides for individual virtual machines are enabled.
|
||||
- If set to C(True), overrides C(drs_default_vm_behavior).
|
||||
type: bool
|
||||
default: True
|
||||
version_added: 2.8
|
||||
drs_default_vm_behavior:
|
||||
description:
|
||||
- Specifies the cluster-wide default DRS behavior for virtual machines.
|
||||
- If set to C(partiallyAutomated), then vCenter generate recommendations for virtual machine migration and
|
||||
for the placement with a host. vCenter automatically implement placement at power on.
|
||||
- If set to C(manual), then vCenter generate recommendations for virtual machine migration and
|
||||
for the placement with a host. vCenter should not implement the recommendations automatically.
|
||||
- If set to C(fullyAutomated), then vCenter should automate both the migration of virtual machines
|
||||
and their placement with a host at power on.
|
||||
default: fullyAutomated
|
||||
choices: [ fullyAutomated, manual, partiallyAutomated ]
|
||||
version_added: 2.8
|
||||
drs_vmotion_rate:
|
||||
description:
|
||||
- Threshold for generated ClusterRecommendations.
|
||||
default: 3
|
||||
choices: [ 1, 2, 3, 4, 5 ]
|
||||
version_added: 2.8
|
||||
enable_ha:
|
||||
description:
|
||||
- If set to C(yes) will enable HA when the cluster is created.
|
||||
type: bool
|
||||
default: 'no'
|
||||
ha_host_monitoring:
|
||||
description:
|
||||
- Indicates whether HA restarts virtual machines after a host fails.
|
||||
- If set to C(enabled), HA restarts virtual machines after a host fails.
|
||||
- If set to C(disabled), HA does not restart virtual machines after a host fails.
|
||||
- If C(enable_ha) is set to C(no), then this value is ignored.
|
||||
choices: [ 'enabled', 'disabled' ]
|
||||
default: 'enabled'
|
||||
version_added: 2.8
|
||||
ha_vm_monitoring:
|
||||
description:
|
||||
- Indicates the state of virtual machine health monitoring service.
|
||||
- If set to C(vmAndAppMonitoring), HA response to both virtual machine and application heartbeat failure.
|
||||
- If set to C(vmMonitoringDisabled), virtual machine health monitoring is disabled.
|
||||
- If set to C(vmMonitoringOnly), HA response to virtual machine heartbeat failure.
|
||||
- If C(enable_ha) is set to C(no), then this value is ignored.
|
||||
choices: ['vmAndAppMonitoring', 'vmMonitoringOnly', 'vmMonitoringDisabled']
|
||||
default: 'vmMonitoringDisabled'
|
||||
version_added: 2.8
|
||||
ha_failover_level:
|
||||
description:
|
||||
- Number of host failures that should be tolerated, still guaranteeing sufficient resources to
|
||||
restart virtual machines on available hosts.
|
||||
- Accepts integer values only.
|
||||
default: 2
|
||||
version_added: 2.8
|
||||
ha_admission_control_enabled:
|
||||
description:
|
||||
- Determines if strict admission control is enabled.
|
||||
- It is recommended to set this parameter to C(True), please refer documentation for more details.
|
||||
default: True
|
||||
type: bool
|
||||
version_added: 2.8
|
||||
ha_vm_failure_interval:
|
||||
description:
|
||||
- The number of seconds after which virtual machine is declared as failed
|
||||
if no heartbeat has been received.
|
||||
- This setting is only valid if C(ha_vm_monitoring) is set to, either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
|
||||
- Unit is seconds.
|
||||
default: 30
|
||||
version_added: 2.8
|
||||
ha_vm_min_up_time:
|
||||
description:
|
||||
- The number of seconds for the virtual machine's heartbeats to stabilize after
|
||||
the virtual machine has been powered on.
|
||||
- This setting is only valid if C(ha_vm_monitoring) is set to, either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
|
||||
- Unit is seconds.
|
||||
default: 120
|
||||
version_added: 2.8
|
||||
ha_vm_max_failures:
|
||||
description:
|
||||
- Maximum number of failures and automated resets allowed during the time
|
||||
that C(ha_vm_max_failure_window) specifies.
|
||||
- This setting is only valid if C(ha_vm_monitoring) is set to, either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
|
||||
default: 3
|
||||
version_added: 2.8
|
||||
ha_vm_max_failure_window:
|
||||
description:
|
||||
- The number of seconds for the window during which up to C(ha_vm_max_failures) resets
|
||||
can occur before automated responses stop.
|
||||
- This setting is only valid if C(ha_vm_monitoring) is set to, either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
|
||||
- Unit is seconds.
|
||||
- Default specifies no failure window.
|
||||
default: -1
|
||||
version_added: 2.8
|
||||
ha_restart_priority:
|
||||
description:
|
||||
- Determines the preference that HA gives to a virtual machine if sufficient capacity is not available
|
||||
to power on all failed virtual machines.
|
||||
- This setting is only valid if C(ha_vm_monitoring) is set to, either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
|
||||
- If set to C(disabled), then HA is disabled for this virtual machine.
|
||||
- If set to C(high), then virtual machine with this priority have a higher chance of powering on after a failure,
|
||||
when there is insufficient capacity on hosts to meet all virtual machine needs.
|
||||
- If set to C(medium), then virtual machine with this priority have an intermediate chance of powering on after a failure,
|
||||
when there is insufficient capacity on hosts to meet all virtual machine needs.
|
||||
- If set to C(low), then virtual machine with this priority have a lower chance of powering on after a failure,
|
||||
when there is insufficient capacity on hosts to meet all virtual machine needs.
|
||||
default: 'medium'
|
||||
version_added: 2.8
|
||||
choices: [ 'disabled', 'high', 'low', 'medium' ]
|
||||
enable_vsan:
|
||||
description:
|
||||
- If set to C(yes) will enable vSAN when the cluster is created.
|
||||
type: bool
|
||||
default: 'no'
|
||||
vsan_auto_claim_storage:
|
||||
description:
|
||||
- Determines whether the VSAN service is configured to automatically claim local storage
|
||||
on VSAN-enabled hosts in the cluster.
|
||||
type: bool
|
||||
default: False
|
||||
version_added: 2.8
|
||||
state:
|
||||
description:
|
||||
- Create C(present) or remove C(absent) a VMware vSphere cluster.
|
||||
choices: [ absent, present ]
|
||||
default: present
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r"""
|
||||
- name: Create Cluster
|
||||
vmware_cluster:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter_name: datacenter
|
||||
cluster_name: cluster
|
||||
enable_ha: yes
|
||||
enable_drs: yes
|
||||
enable_vsan: yes
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Create Cluster with additional changes
|
||||
vmware_cluster:
|
||||
hostname: "{{ vcenter_server }}"
|
||||
username: "{{ vcenter_user }}"
|
||||
password: "{{ vcenter_pass }}"
|
||||
validate_certs: no
|
||||
datacenter_name: DC0
|
||||
cluster_name: "{{ cluster_name }}"
|
||||
enable_ha: True
|
||||
ha_vm_monitoring: vmMonitoringOnly
|
||||
enable_drs: True
|
||||
drs_default_vm_behavior: partiallyAutomated
|
||||
enable_vsan: True
|
||||
register: cl_result
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Delete Cluster
|
||||
vmware_cluster:
|
||||
hostname: "{{ vcenter_server }}"
|
||||
username: "{{ vcenter_user }}"
|
||||
password: "{{ vcenter_pass }}"
|
||||
datacenter_name: datacenter
|
||||
cluster_name: cluster
|
||||
enable_ha: yes
|
||||
enable_drs: yes
|
||||
enable_vsan: yes
|
||||
state: absent
|
||||
delegate_to: localhost
|
||||
"""
|
||||
|
||||
RETURN = r"""#
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim, vmodl
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import (PyVmomi, TaskError, find_datacenter_by_name,
|
||||
vmware_argument_spec, wait_for_task)
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class VMwareCluster(PyVmomi):
|
||||
def __init__(self, module):
    """Cache the module parameters; datacenter/cluster objects are
    resolved later by check_cluster_configuration()."""
    super(VMwareCluster, self).__init__(module)
    params = module.params
    self.cluster_name = params['cluster_name']
    self.datacenter_name = params['datacenter']
    self.ignore_drs = params['ignore_drs']
    self.ignore_ha = params['ignore_ha']
    self.ignore_vsan = params['ignore_vsan']
    self.enable_drs = params['enable_drs']
    self.enable_ha = params['enable_ha']
    self.enable_vsan = params['enable_vsan']
    self.desired_state = params['state']
    # Filled in by check_cluster_configuration().
    self.datacenter = None
    self.cluster = None
|
||||
|
||||
def process_state(self):
    """
    Route to the handler matching (desired state, current state).
    """
    current_state = self.check_cluster_configuration()
    if self.desired_state == 'absent':
        # Destroy only when the cluster actually exists.
        action = self.state_destroy_cluster if current_state == 'present' else self.state_exit_unchanged
    else:  # desired_state == 'present'
        # Update an existing cluster, otherwise create a new one.
        action = self.state_update_cluster if current_state == 'present' else self.state_create_cluster
    action()
|
||||
|
||||
def configure_ha(self):
    """
    Build the cluster DAS (HA) configuration spec from module parameters.
    Returns: vim.cluster.DasConfigInfo
    """
    msg = 'Configuring HA using vmware_cluster module is deprecated and will be removed in version 2.12. ' \
          'Please use vmware_cluster_ha module for the new functionality.'
    self.module.deprecate(msg, '2.12')

    das_config = vim.cluster.DasConfigInfo()
    das_config.enabled = self.enable_ha
    das_config.admissionControlPolicy = vim.cluster.FailoverLevelAdmissionControlPolicy()
    das_config.admissionControlPolicy.failoverLevel = self.params.get('ha_failover_level')

    ha_vm_monitoring = self.params.get('ha_vm_monitoring')
    das_vm_config = None
    # Per-VM default settings only apply when VM monitoring is enabled.
    if ha_vm_monitoring in ('vmMonitoringOnly', 'vmAndAppMonitoring'):
        tools_settings = vim.cluster.VmToolsMonitoringSettings()
        tools_settings.enabled = True
        tools_settings.vmMonitoring = ha_vm_monitoring
        tools_settings.failureInterval = self.params.get('ha_vm_failure_interval')
        tools_settings.minUpTime = self.params.get('ha_vm_min_up_time')
        tools_settings.maxFailures = self.params.get('ha_vm_max_failures')
        tools_settings.maxFailureWindow = self.params.get('ha_vm_max_failure_window')

        das_vm_config = vim.cluster.DasVmSettings()
        das_vm_config.restartPriority = self.params.get('ha_restart_priority')
        das_vm_config.isolationResponse = None
        das_vm_config.vmToolsMonitoringSettings = tools_settings

    das_config.admissionControlEnabled = self.params.get('ha_admission_control_enabled')
    das_config.hostMonitoring = self.params.get('ha_host_monitoring')
    das_config.vmMonitoring = ha_vm_monitoring
    das_config.defaultVmSettings = das_vm_config

    return das_config
|
||||
|
||||
def configure_drs(self):
    """
    Build the cluster DRS configuration spec from module parameters.
    Returns: vim.cluster.DrsConfigInfo
    """
    msg = 'Configuring DRS using vmware_cluster module is deprecated and will be removed in version 2.12. ' \
          'Please use vmware_cluster_drs module for the new functionality.'
    self.module.deprecate(msg, '2.12')

    drs_spec = vim.cluster.DrsConfigInfo()
    drs_spec.enabled = self.enable_drs
    drs_spec.enableVmBehaviorOverrides = self.params.get('drs_enable_vm_behavior_overrides')
    drs_spec.defaultVmBehavior = self.params.get('drs_default_vm_behavior')
    drs_spec.vmotionRate = self.params.get('drs_vmotion_rate')
    return drs_spec
|
||||
|
||||
def configure_vsan(self):
    """
    Build the cluster VSAN configuration spec from module parameters.
    Returns: vim.vsan.cluster.ConfigInfo
    """
    msg = 'Configuring VSAN using vmware_cluster module is deprecated and will be removed in version 2.12. ' \
          'Please use vmware_cluster_vsan module for the new functionality.'
    self.module.deprecate(msg, '2.12')

    vsan_spec = vim.vsan.cluster.ConfigInfo()
    vsan_spec.enabled = self.enable_vsan
    vsan_spec.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo()
    vsan_spec.defaultConfig.autoClaimStorage = self.params.get('vsan_auto_claim_storage')
    return vsan_spec
|
||||
|
||||
def state_create_cluster(self):
    """
    Create a cluster with the requested HA/DRS/VSAN configuration.
    """
    try:
        spec = vim.cluster.ConfigSpecEx()
        # Only attach the sub-specs the user did not ask us to ignore.
        if not self.ignore_ha:
            spec.dasConfig = self.configure_ha()
        if not self.ignore_drs:
            spec.drsConfig = self.configure_drs()
        if self.enable_vsan and not self.ignore_vsan:
            spec.vsanConfig = self.configure_vsan()
        if not self.module.check_mode:
            self.datacenter.hostFolder.CreateClusterEx(self.cluster_name, spec)
        self.module.exit_json(changed=True)
    except vim.fault.DuplicateName:
        # To match other vmware_* modules
        pass
    except vmodl.fault.InvalidArgument as invalid_args:
        self.module.fail_json(msg="Cluster configuration specification"
                                  " parameter is invalid : %s" % to_native(invalid_args.msg))
    except vim.fault.InvalidName as invalid_name:
        self.module.fail_json(msg="'%s' is an invalid name for a"
                                  " cluster : %s" % (self.cluster_name,
                                                     to_native(invalid_name.msg)))
    except vmodl.fault.NotSupported as not_supported:
        # This should never happen
        self.module.fail_json(msg="Trying to create a cluster on an incorrect"
                                  " folder object : %s" % to_native(not_supported.msg))
    except vmodl.RuntimeFault as runtime_fault:
        self.module.fail_json(msg=to_native(runtime_fault.msg))
    except vmodl.MethodFault as method_fault:
        # This should never happen either
        self.module.fail_json(msg=to_native(method_fault.msg))
    except Exception as generic_exc:
        self.module.fail_json(msg="Failed to create cluster"
                                  " due to generic exception %s" % to_native(generic_exc))
|
||||
|
||||
def state_destroy_cluster(self):
    """
    Destroy the existing cluster.
    """
    changed, result = True, None

    try:
        if not self.module.check_mode:
            destroy_task = self.cluster.Destroy_Task()
            changed, result = wait_for_task(destroy_task)
        self.module.exit_json(changed=changed, result=result)
    except vim.fault.VimFault as vim_fault:
        self.module.fail_json(msg=to_native(vim_fault.msg))
    except vmodl.RuntimeFault as runtime_fault:
        self.module.fail_json(msg=to_native(runtime_fault.msg))
    except vmodl.MethodFault as method_fault:
        self.module.fail_json(msg=to_native(method_fault.msg))
    except Exception as generic_exc:
        self.module.fail_json(msg="Failed to destroy cluster"
                                  " due to generic exception %s" % to_native(generic_exc))
|
||||
|
||||
def state_exit_unchanged(self):
    """Terminate the module reporting that nothing changed."""
    self.module.exit_json(changed=False)
|
||||
|
||||
def state_update_cluster(self):
    """
    Reconfigure an existing cluster when any managed setting drifted.

    A Reconfigure task is submitted only when at least one non-ignored
    subsystem (HA/DRS/VSAN) differs from the requested parameters.
    """
    changed, result = False, None
    cluster_config_spec = vim.cluster.ConfigSpecEx()
    diff = False  # Triggers Reconfigure Task only when there is a change
    # Check the ignore flags FIRST so an ignored subsystem's current
    # configuration is never inspected.  The original evaluated
    # check_*_config_diff() before testing the flag, doing pointless
    # work for subsystems the user asked us to leave alone.
    if not self.ignore_ha and self.check_ha_config_diff():
        cluster_config_spec.dasConfig = self.configure_ha()
        diff = True
    if not self.ignore_drs and self.check_drs_config_diff():
        cluster_config_spec.drsConfig = self.configure_drs()
        diff = True
    if not self.ignore_vsan and self.check_vsan_config_diff():
        cluster_config_spec.vsanConfig = self.configure_vsan()
        diff = True

    try:
        if not self.module.check_mode and diff:
            task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
            changed, result = wait_for_task(task)
        self.module.exit_json(changed=changed, result=result)
    except vmodl.RuntimeFault as runtime_fault:
        self.module.fail_json(msg=to_native(runtime_fault.msg))
    except vmodl.MethodFault as method_fault:
        self.module.fail_json(msg=to_native(method_fault.msg))
    except TaskError as task_e:
        self.module.fail_json(msg=to_native(task_e))
    except Exception as generic_exc:
        self.module.fail_json(msg="Failed to update cluster"
                                  " due to generic exception %s" % to_native(generic_exc))
|
||||
|
||||
def check_ha_config_diff(self):
    """
    Check HA configuration diff.
    Returns: True if there is diff, else False
    """
    das_config = self.cluster.configurationEx.dasConfig
    vm_settings = das_config.defaultVmSettings
    tools_settings = vm_settings.vmToolsMonitoringSettings
    # NOTE: the original compared admissionControlPolicy.failoverLevel
    # twice in this chain; the duplicate comparison has been dropped.
    if das_config.enabled != self.enable_ha or \
            das_config.admissionControlPolicy.failoverLevel != self.params.get('ha_failover_level') or \
            das_config.vmMonitoring != self.params.get('ha_vm_monitoring') or \
            das_config.hostMonitoring != self.params.get('ha_host_monitoring') or \
            das_config.admissionControlEnabled != self.params.get('ha_admission_control_enabled') or \
            vm_settings.restartPriority != self.params.get('ha_restart_priority') or \
            tools_settings.vmMonitoring != self.params.get('ha_vm_monitoring') or \
            tools_settings.failureInterval != self.params.get('ha_vm_failure_interval') or \
            tools_settings.minUpTime != self.params.get('ha_vm_min_up_time') or \
            tools_settings.maxFailures != self.params.get('ha_vm_max_failures') or \
            tools_settings.maxFailureWindow != self.params.get('ha_vm_max_failure_window'):
        return True
    return False
|
||||
|
||||
def check_drs_config_diff(self):
    """
    Return True when the cluster's current DRS settings differ from the
    requested module parameters, else False.
    """
    drs_config = self.cluster.configurationEx.drsConfig

    current = (
        drs_config.enabled,
        drs_config.enableVmBehaviorOverrides,
        drs_config.defaultVmBehavior,
        drs_config.vmotionRate,
    )
    desired = (
        self.enable_drs,
        self.params.get('drs_enable_vm_behavior_overrides'),
        self.params.get('drs_default_vm_behavior'),
        self.params.get('drs_vmotion_rate'),
    )
    return current != desired
|
||||
|
||||
def check_vsan_config_diff(self):
    """
    Return True when the cluster's current VSAN settings differ from the
    requested module parameters, else False.
    """
    vsan_config = self.cluster.configurationEx.vsanConfigInfo

    current = (vsan_config.enabled, vsan_config.defaultConfig.autoClaimStorage)
    desired = (self.enable_vsan, self.params.get('vsan_auto_claim_storage'))
    return current != desired
|
||||
|
||||
def check_cluster_configuration(self):
    """
    Resolve the datacenter and cluster objects.
    Returns: 'present' if the cluster exists, else 'absent'
    """
    try:
        self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
        if self.datacenter is None:
            self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name)
        self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name)
        return 'absent' if self.cluster is None else 'present'
    except vmodl.RuntimeFault as runtime_fault:
        self.module.fail_json(msg=to_native(runtime_fault.msg))
    except vmodl.MethodFault as method_fault:
        self.module.fail_json(msg=to_native(method_fault.msg))
    except Exception as generic_exc:
        self.module.fail_json(msg="Failed to check configuration"
                                  " due to generic exception %s" % to_native(generic_exc))
|
||||
|
||||
|
||||
def main():
    """Entry point: build the argument spec and run the cluster module."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(
        cluster_name=dict(type='str', required=True),
        datacenter=dict(type='str', required=True, aliases=['datacenter_name']),
        state=dict(type='str',
                   default='present',
                   choices=['absent', 'present']),
        # DRS
        ignore_drs=dict(type='bool', default=False),
        enable_drs=dict(type='bool', default=False),
        drs_enable_vm_behavior_overrides=dict(type='bool', default=True),
        drs_default_vm_behavior=dict(type='str',
                                     choices=['fullyAutomated', 'manual', 'partiallyAutomated'],
                                     default='fullyAutomated'),
        # Explicit list instead of range(1, 6): a range object renders
        # poorly in generated docs and validation error messages.
        drs_vmotion_rate=dict(type='int',
                              choices=[1, 2, 3, 4, 5],
                              default=3),
        # HA
        ignore_ha=dict(type='bool', default=False),
        enable_ha=dict(type='bool', default=False),
        ha_failover_level=dict(type='int', default=2),
        ha_host_monitoring=dict(type='str',
                                default='enabled',
                                choices=['enabled', 'disabled']),
        # HA VM Monitoring related parameters
        ha_vm_monitoring=dict(type='str',
                              choices=['vmAndAppMonitoring', 'vmMonitoringOnly', 'vmMonitoringDisabled'],
                              default='vmMonitoringDisabled'),
        ha_vm_failure_interval=dict(type='int', default=30),
        ha_vm_min_up_time=dict(type='int', default=120),
        ha_vm_max_failures=dict(type='int', default=3),
        ha_vm_max_failure_window=dict(type='int', default=-1),

        ha_restart_priority=dict(type='str',
                                 choices=['high', 'low', 'medium', 'disabled'],
                                 default='medium'),
        ha_admission_control_enabled=dict(type='bool', default=True),
        # VSAN
        ignore_vsan=dict(type='bool', default=False),
        enable_vsan=dict(type='bool', default=False),
        vsan_auto_claim_storage=dict(type='bool', default=False),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    vmware_cluster = VMwareCluster(module)
    vmware_cluster.process_state()


if __name__ == '__main__':
    main()
|
@ -1,238 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_cluster_drs
|
||||
short_description: Manage Distributed Resource Scheduler (DRS) on VMware vSphere clusters
|
||||
description:
|
||||
- Manages DRS on VMware vSphere clusters.
|
||||
- All values and VMware object names are case sensitive.
|
||||
version_added: '2.9'
|
||||
author:
|
||||
- Joseph Callen (@jcpowermac)
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
requirements:
|
||||
- Tested on ESXi 5.5 and 6.5.
|
||||
- PyVmomi installed.
|
||||
options:
|
||||
cluster_name:
|
||||
description:
|
||||
- The name of the cluster to be managed.
|
||||
type: str
|
||||
required: yes
|
||||
datacenter:
|
||||
description:
|
||||
- The name of the datacenter.
|
||||
type: str
|
||||
required: yes
|
||||
aliases: [ datacenter_name ]
|
||||
enable_drs:
|
||||
description:
|
||||
- Whether to enable DRS.
|
||||
type: bool
|
||||
default: 'no'
|
||||
drs_enable_vm_behavior_overrides:
|
||||
description:
|
||||
- Whether DRS Behavior overrides for individual virtual machines are enabled.
|
||||
- If set to C(True), overrides C(drs_default_vm_behavior).
|
||||
type: bool
|
||||
default: True
|
||||
drs_default_vm_behavior:
|
||||
description:
|
||||
- Specifies the cluster-wide default DRS behavior for virtual machines.
|
||||
- If set to C(partiallyAutomated), vCenter generates recommendations for virtual machine migration and
|
||||
for the placement with a host, then automatically implements placement recommendations at power on.
|
||||
- If set to C(manual), then vCenter generates recommendations for virtual machine migration and
|
||||
for the placement with a host, but does not implement the recommendations automatically.
|
||||
- If set to C(fullyAutomated), then vCenter automates both the migration of virtual machines
|
||||
and their placement with a host at power on.
|
||||
type: str
|
||||
default: fullyAutomated
|
||||
choices: [ fullyAutomated, manual, partiallyAutomated ]
|
||||
drs_vmotion_rate:
|
||||
description:
|
||||
- Threshold for generated ClusterRecommendations.
|
||||
type: int
|
||||
default: 3
|
||||
choices: [ 1, 2, 3, 4, 5 ]
|
||||
advanced_settings:
|
||||
version_added: "2.10"
|
||||
description:
|
||||
- A dictionary of advanced DRS settings.
|
||||
default: {}
|
||||
type: dict
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r"""
|
||||
- name: Enable DRS
|
||||
vmware_cluster_drs:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter_name: datacenter
|
||||
cluster_name: cluster
|
||||
enable_drs: yes
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Enable DRS and distribute a more even number of virtual machines across hosts for availability
|
||||
vmware_cluster_drs:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter_name: datacenter
|
||||
cluster_name: cluster
|
||||
enable_drs: yes
|
||||
advanced_settings:
|
||||
'TryBalanceVmsPerHost': '1'
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Enable DRS and set default VM behavior to partially automated
|
||||
vmware_cluster_drs:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
datacenter_name: DC0
|
||||
cluster_name: "{{ cluster_name }}"
|
||||
enable_drs: True
|
||||
drs_default_vm_behavior: partiallyAutomated
|
||||
delegate_to: localhost
|
||||
"""
|
||||
|
||||
RETURN = r"""#
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim, vmodl
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import (PyVmomi, TaskError, find_datacenter_by_name,
|
||||
vmware_argument_spec, wait_for_task, option_diff)
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class VMwareCluster(PyVmomi):
|
||||
def __init__(self, module):
|
||||
super(VMwareCluster, self).__init__(module)
|
||||
self.cluster_name = module.params['cluster_name']
|
||||
self.datacenter_name = module.params['datacenter']
|
||||
self.enable_drs = module.params['enable_drs']
|
||||
self.datacenter = None
|
||||
self.cluster = None
|
||||
|
||||
self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
|
||||
if self.datacenter is None:
|
||||
self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name)
|
||||
|
||||
self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name)
|
||||
if self.cluster is None:
|
||||
self.module.fail_json(msg="Cluster %s does not exist." % self.cluster_name)
|
||||
|
||||
self.advanced_settings = self.params.get('advanced_settings')
|
||||
if self.advanced_settings:
|
||||
self.changed_advanced_settings = option_diff(self.advanced_settings, self.cluster.configurationEx.drsConfig.option)
|
||||
else:
|
||||
self.changed_advanced_settings = None
|
||||
|
||||
def check_drs_config_diff(self):
|
||||
"""
|
||||
Check DRS configuration diff
|
||||
Returns: True if there is diff, else False
|
||||
|
||||
"""
|
||||
drs_config = self.cluster.configurationEx.drsConfig
|
||||
|
||||
if drs_config.enabled != self.enable_drs or \
|
||||
drs_config.enableVmBehaviorOverrides != self.params.get('drs_enable_vm_behavior_overrides') or \
|
||||
drs_config.defaultVmBehavior != self.params.get('drs_default_vm_behavior') or \
|
||||
drs_config.vmotionRate != self.params.get('drs_vmotion_rate'):
|
||||
return True
|
||||
|
||||
if self.changed_advanced_settings:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def configure_drs(self):
|
||||
"""
|
||||
Manage DRS configuration
|
||||
|
||||
"""
|
||||
changed, result = False, None
|
||||
|
||||
if self.check_drs_config_diff():
|
||||
if not self.module.check_mode:
|
||||
cluster_config_spec = vim.cluster.ConfigSpecEx()
|
||||
cluster_config_spec.drsConfig = vim.cluster.DrsConfigInfo()
|
||||
cluster_config_spec.drsConfig.enabled = self.enable_drs
|
||||
cluster_config_spec.drsConfig.enableVmBehaviorOverrides = self.params.get('drs_enable_vm_behavior_overrides')
|
||||
cluster_config_spec.drsConfig.defaultVmBehavior = self.params.get('drs_default_vm_behavior')
|
||||
cluster_config_spec.drsConfig.vmotionRate = self.params.get('drs_vmotion_rate')
|
||||
|
||||
if self.changed_advanced_settings:
|
||||
cluster_config_spec.drsConfig.option = self.changed_advanced_settings
|
||||
|
||||
try:
|
||||
task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
|
||||
changed, result = wait_for_task(task)
|
||||
except vmodl.RuntimeFault as runtime_fault:
|
||||
self.module.fail_json(msg=to_native(runtime_fault.msg))
|
||||
except vmodl.MethodFault as method_fault:
|
||||
self.module.fail_json(msg=to_native(method_fault.msg))
|
||||
except TaskError as task_e:
|
||||
self.module.fail_json(msg=to_native(task_e))
|
||||
except Exception as generic_exc:
|
||||
self.module.fail_json(msg="Failed to update cluster"
|
||||
" due to generic exception %s" % to_native(generic_exc))
|
||||
else:
|
||||
changed = True
|
||||
|
||||
self.module.exit_json(changed=changed, result=result)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = vmware_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
cluster_name=dict(type='str', required=True),
|
||||
datacenter=dict(type='str', required=True, aliases=['datacenter_name']),
|
||||
# DRS
|
||||
enable_drs=dict(type='bool', default=False),
|
||||
drs_enable_vm_behavior_overrides=dict(type='bool', default=True),
|
||||
drs_default_vm_behavior=dict(type='str',
|
||||
choices=['fullyAutomated', 'manual', 'partiallyAutomated'],
|
||||
default='fullyAutomated'),
|
||||
drs_vmotion_rate=dict(type='int',
|
||||
choices=range(1, 6),
|
||||
default=3),
|
||||
advanced_settings=dict(type='dict', default=dict(), required=False),
|
||||
))
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
vmware_cluster_drs = VMwareCluster(module)
|
||||
vmware_cluster_drs.configure_drs()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,469 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_cluster_ha
|
||||
short_description: Manage High Availability (HA) on VMware vSphere clusters
|
||||
description:
|
||||
- Manages HA configuration on VMware vSphere clusters.
|
||||
- All values and VMware object names are case sensitive.
|
||||
version_added: '2.9'
|
||||
author:
|
||||
- Joseph Callen (@jcpowermac)
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
requirements:
|
||||
- Tested on ESXi 5.5 and 6.5.
|
||||
- PyVmomi installed.
|
||||
options:
|
||||
cluster_name:
|
||||
description:
|
||||
- The name of the cluster to be managed.
|
||||
type: str
|
||||
required: yes
|
||||
datacenter:
|
||||
description:
|
||||
- The name of the datacenter.
|
||||
type: str
|
||||
required: yes
|
||||
aliases: [ datacenter_name ]
|
||||
enable_ha:
|
||||
description:
|
||||
- Whether to enable HA.
|
||||
type: bool
|
||||
default: 'no'
|
||||
ha_host_monitoring:
|
||||
description:
|
||||
- Whether HA restarts virtual machines after a host fails.
|
||||
- If set to C(enabled), HA restarts virtual machines after a host fails.
|
||||
- If set to C(disabled), HA does not restart virtual machines after a host fails.
|
||||
- If C(enable_ha) is set to C(no), then this value is ignored.
|
||||
type: str
|
||||
choices: [ 'enabled', 'disabled' ]
|
||||
default: 'enabled'
|
||||
ha_vm_monitoring:
|
||||
description:
|
||||
- State of virtual machine health monitoring service.
|
||||
- If set to C(vmAndAppMonitoring), HA response to both virtual machine and application heartbeat failure.
|
||||
- If set to C(vmMonitoringDisabled), virtual machine health monitoring is disabled.
|
||||
- If set to C(vmMonitoringOnly), HA response to virtual machine heartbeat failure.
|
||||
- If C(enable_ha) is set to C(no), then this value is ignored.
|
||||
type: str
|
||||
choices: ['vmAndAppMonitoring', 'vmMonitoringOnly', 'vmMonitoringDisabled']
|
||||
default: 'vmMonitoringDisabled'
|
||||
host_isolation_response:
|
||||
description:
|
||||
- Indicates whether or VMs should be powered off if a host determines that it is isolated from the rest of the compute resource.
|
||||
- If set to C(none), do not power off VMs in the event of a host network isolation.
|
||||
- If set to C(powerOff), power off VMs in the event of a host network isolation.
|
||||
- If set to C(shutdown), shut down VMs guest operating system in the event of a host network isolation.
|
||||
type: str
|
||||
choices: ['none', 'powerOff', 'shutdown']
|
||||
default: 'none'
|
||||
slot_based_admission_control:
|
||||
description:
|
||||
- Configure slot based admission control policy.
|
||||
- C(slot_based_admission_control), C(reservation_based_admission_control) and C(failover_host_admission_control) are mutually exclusive.
|
||||
suboptions:
|
||||
failover_level:
|
||||
description:
|
||||
- Number of host failures that should be tolerated.
|
||||
type: int
|
||||
required: true
|
||||
type: dict
|
||||
reservation_based_admission_control:
|
||||
description:
|
||||
- Configure reservation based admission control policy.
|
||||
- C(slot_based_admission_control), C(reservation_based_admission_control) and C(failover_host_admission_control) are mutually exclusive.
|
||||
suboptions:
|
||||
failover_level:
|
||||
description:
|
||||
- Number of host failures that should be tolerated.
|
||||
type: int
|
||||
required: true
|
||||
auto_compute_percentages:
|
||||
description:
|
||||
- By default, C(failover_level) is used to calculate C(cpu_failover_resources_percent) and C(memory_failover_resources_percent).
|
||||
If a user wants to override the percentage values, he has to set this field to false.
|
||||
type: bool
|
||||
default: true
|
||||
cpu_failover_resources_percent:
|
||||
description:
|
||||
- Percentage of CPU resources in the cluster to reserve for failover.
|
||||
Ignored if C(auto_compute_percentages) is not set to false.
|
||||
type: int
|
||||
default: 50
|
||||
memory_failover_resources_percent:
|
||||
description:
|
||||
- Percentage of memory resources in the cluster to reserve for failover.
|
||||
Ignored if C(auto_compute_percentages) is not set to false.
|
||||
type: int
|
||||
default: 50
|
||||
type: dict
|
||||
failover_host_admission_control:
|
||||
description:
|
||||
- Configure dedicated failover hosts.
|
||||
- C(slot_based_admission_control), C(reservation_based_admission_control) and C(failover_host_admission_control) are mutually exclusive.
|
||||
suboptions:
|
||||
failover_hosts:
|
||||
description:
|
||||
- List of dedicated failover hosts.
|
||||
type: list
|
||||
required: true
|
||||
type: dict
|
||||
ha_vm_failure_interval:
|
||||
description:
|
||||
- The number of seconds after which virtual machine is declared as failed
|
||||
if no heartbeat has been received.
|
||||
- This setting is only valid if C(ha_vm_monitoring) is set to, either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
|
||||
- Unit is seconds.
|
||||
type: int
|
||||
default: 30
|
||||
ha_vm_min_up_time:
|
||||
description:
|
||||
- The number of seconds for the virtual machine's heartbeats to stabilize after
|
||||
the virtual machine has been powered on.
|
||||
- Valid only when I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
|
||||
- Unit is seconds.
|
||||
type: int
|
||||
default: 120
|
||||
ha_vm_max_failures:
|
||||
description:
|
||||
- Maximum number of failures and automated resets allowed during the time
|
||||
that C(ha_vm_max_failure_window) specifies.
|
||||
- Valid only when I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
|
||||
type: int
|
||||
default: 3
|
||||
ha_vm_max_failure_window:
|
||||
description:
|
||||
- The number of seconds for the window during which up to C(ha_vm_max_failures) resets
|
||||
can occur before automated responses stop.
|
||||
- Valid only when I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
|
||||
- Unit is seconds.
|
||||
- Default specifies no failure window.
|
||||
type: int
|
||||
default: -1
|
||||
ha_restart_priority:
|
||||
description:
|
||||
- Priority HA gives to a virtual machine if sufficient capacity is not available
|
||||
to power on all failed virtual machines.
|
||||
- Valid only if I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
|
||||
- If set to C(disabled), then HA is disabled for this virtual machine.
|
||||
- If set to C(high), then virtual machine with this priority have a higher chance of powering on after a failure,
|
||||
when there is insufficient capacity on hosts to meet all virtual machine needs.
|
||||
- If set to C(medium), then virtual machine with this priority have an intermediate chance of powering on after a failure,
|
||||
when there is insufficient capacity on hosts to meet all virtual machine needs.
|
||||
- If set to C(low), then virtual machine with this priority have a lower chance of powering on after a failure,
|
||||
when there is insufficient capacity on hosts to meet all virtual machine needs.
|
||||
type: str
|
||||
default: 'medium'
|
||||
choices: [ 'disabled', 'high', 'low', 'medium' ]
|
||||
advanced_settings:
|
||||
version_added: "2.10"
|
||||
description:
|
||||
- A dictionary of advanced HA settings.
|
||||
default: {}
|
||||
type: dict
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r"""
|
||||
- name: Enable HA without admission control
|
||||
vmware_cluster_ha:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter_name: datacenter
|
||||
cluster_name: cluster
|
||||
enable_ha: yes
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Enable HA and VM monitoring without admission control
|
||||
vmware_cluster_ha:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
datacenter_name: DC0
|
||||
cluster_name: "{{ cluster_name }}"
|
||||
enable_ha: True
|
||||
ha_vm_monitoring: vmMonitoringOnly
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Enable HA with admission control reserving 50% of resources for HA
|
||||
vmware_cluster_ha:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter_name: datacenter
|
||||
cluster_name: cluster
|
||||
enable_ha: yes
|
||||
reservation_based_admission_control:
|
||||
auto_compute_percentages: False
|
||||
failover_level: 1
|
||||
cpu_failover_resources_percent: 50
|
||||
memory_failover_resources_percent: 50
|
||||
delegate_to: localhost
|
||||
"""
|
||||
|
||||
RETURN = r"""#
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim, vmodl
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import (PyVmomi, TaskError, find_datacenter_by_name,
|
||||
vmware_argument_spec, wait_for_task, option_diff)
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class VMwareCluster(PyVmomi):
|
||||
def __init__(self, module):
|
||||
super(VMwareCluster, self).__init__(module)
|
||||
self.cluster_name = module.params['cluster_name']
|
||||
self.datacenter_name = module.params['datacenter']
|
||||
self.enable_ha = module.params['enable_ha']
|
||||
self.datacenter = None
|
||||
self.cluster = None
|
||||
self.host_isolation_response = getattr(vim.cluster.DasVmSettings.IsolationResponse, self.params.get('host_isolation_response'))
|
||||
|
||||
if self.enable_ha and (
|
||||
self.params.get('slot_based_admission_control') or
|
||||
self.params.get('reservation_based_admission_control') or
|
||||
self.params.get('failover_host_admission_control')):
|
||||
self.ha_admission_control = True
|
||||
else:
|
||||
self.ha_admission_control = False
|
||||
|
||||
self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
|
||||
if self.datacenter is None:
|
||||
self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name)
|
||||
|
||||
self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name)
|
||||
if self.cluster is None:
|
||||
self.module.fail_json(msg="Cluster %s does not exist." % self.cluster_name)
|
||||
|
||||
self.advanced_settings = self.params.get('advanced_settings')
|
||||
if self.advanced_settings:
|
||||
self.changed_advanced_settings = option_diff(self.advanced_settings, self.cluster.configurationEx.dasConfig.option)
|
||||
else:
|
||||
self.changed_advanced_settings = None
|
||||
|
||||
def get_failover_hosts(self):
|
||||
"""
|
||||
Get failover hosts for failover_host_admission_control policy
|
||||
Returns: List of ESXi hosts sorted by name
|
||||
|
||||
"""
|
||||
policy = self.params.get('failover_host_admission_control')
|
||||
hosts = []
|
||||
all_hosts = dict((h.name, h) for h in self.get_all_hosts_by_cluster(self.cluster_name))
|
||||
for host in policy.get('failover_hosts'):
|
||||
if host in all_hosts:
|
||||
hosts.append(all_hosts.get(host))
|
||||
else:
|
||||
self.module.fail_json(msg="Host %s is not a member of cluster %s." % (host, self.cluster_name))
|
||||
hosts.sort(key=lambda h: h.name)
|
||||
return hosts
|
||||
|
||||
def check_ha_config_diff(self):
|
||||
"""
|
||||
Check HA configuration diff
|
||||
Returns: True if there is diff, else False
|
||||
|
||||
"""
|
||||
das_config = self.cluster.configurationEx.dasConfig
|
||||
if das_config.enabled != self.enable_ha:
|
||||
return True
|
||||
|
||||
if self.enable_ha and (
|
||||
das_config.vmMonitoring != self.params.get('ha_vm_monitoring') or
|
||||
das_config.hostMonitoring != self.params.get('ha_host_monitoring') or
|
||||
das_config.admissionControlEnabled != self.ha_admission_control or
|
||||
das_config.defaultVmSettings.restartPriority != self.params.get('ha_restart_priority') or
|
||||
das_config.defaultVmSettings.isolationResponse != self.host_isolation_response or
|
||||
das_config.defaultVmSettings.vmToolsMonitoringSettings.vmMonitoring != self.params.get('ha_vm_monitoring') or
|
||||
das_config.defaultVmSettings.vmToolsMonitoringSettings.failureInterval != self.params.get('ha_vm_failure_interval') or
|
||||
das_config.defaultVmSettings.vmToolsMonitoringSettings.minUpTime != self.params.get('ha_vm_min_up_time') or
|
||||
das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailures != self.params.get('ha_vm_max_failures') or
|
||||
das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailureWindow != self.params.get('ha_vm_max_failure_window')):
|
||||
return True
|
||||
|
||||
if self.ha_admission_control:
|
||||
if self.params.get('slot_based_admission_control'):
|
||||
policy = self.params.get('slot_based_admission_control')
|
||||
if not isinstance(das_config.admissionControlPolicy, vim.cluster.FailoverLevelAdmissionControlPolicy) or \
|
||||
das_config.admissionControlPolicy.failoverLevel != policy.get('failover_level'):
|
||||
return True
|
||||
elif self.params.get('reservation_based_admission_control'):
|
||||
policy = self.params.get('reservation_based_admission_control')
|
||||
auto_compute_percentages = policy.get('auto_compute_percentages')
|
||||
if not isinstance(das_config.admissionControlPolicy, vim.cluster.FailoverResourcesAdmissionControlPolicy) or \
|
||||
das_config.admissionControlPolicy.autoComputePercentages != auto_compute_percentages or \
|
||||
das_config.admissionControlPolicy.failoverLevel != policy.get('failover_level'):
|
||||
return True
|
||||
if not auto_compute_percentages:
|
||||
if das_config.admissionControlPolicy.cpuFailoverResourcesPercent != policy.get('cpu_failover_resources_percent') or \
|
||||
das_config.admissionControlPolicy.memoryFailoverResourcesPercent != policy.get('memory_failover_resources_percent'):
|
||||
return True
|
||||
elif self.params.get('failover_host_admission_control'):
|
||||
policy = self.params.get('failover_host_admission_control')
|
||||
if not isinstance(das_config.admissionControlPolicy, vim.cluster.FailoverHostAdmissionControlPolicy):
|
||||
return True
|
||||
das_config.admissionControlPolicy.failoverHosts.sort(key=lambda h: h.name)
|
||||
if das_config.admissionControlPolicy.failoverHosts != self.get_failover_hosts():
|
||||
return True
|
||||
|
||||
if self.changed_advanced_settings:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def configure_ha(self):
|
||||
"""
|
||||
Manage HA Configuration
|
||||
|
||||
"""
|
||||
changed, result = False, None
|
||||
|
||||
if self.check_ha_config_diff():
|
||||
if not self.module.check_mode:
|
||||
cluster_config_spec = vim.cluster.ConfigSpecEx()
|
||||
cluster_config_spec.dasConfig = vim.cluster.DasConfigInfo()
|
||||
cluster_config_spec.dasConfig.enabled = self.enable_ha
|
||||
|
||||
if self.enable_ha:
|
||||
vm_tool_spec = vim.cluster.VmToolsMonitoringSettings()
|
||||
vm_tool_spec.enabled = True
|
||||
vm_tool_spec.vmMonitoring = self.params.get('ha_vm_monitoring')
|
||||
vm_tool_spec.failureInterval = self.params.get('ha_vm_failure_interval')
|
||||
vm_tool_spec.minUpTime = self.params.get('ha_vm_min_up_time')
|
||||
vm_tool_spec.maxFailures = self.params.get('ha_vm_max_failures')
|
||||
vm_tool_spec.maxFailureWindow = self.params.get('ha_vm_max_failure_window')
|
||||
|
||||
das_vm_config = vim.cluster.DasVmSettings()
|
||||
das_vm_config.restartPriority = self.params.get('ha_restart_priority')
|
||||
das_vm_config.isolationResponse = self.host_isolation_response
|
||||
das_vm_config.vmToolsMonitoringSettings = vm_tool_spec
|
||||
cluster_config_spec.dasConfig.defaultVmSettings = das_vm_config
|
||||
|
||||
cluster_config_spec.dasConfig.admissionControlEnabled = self.ha_admission_control
|
||||
|
||||
if self.ha_admission_control:
|
||||
if self.params.get('slot_based_admission_control'):
|
||||
cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverLevelAdmissionControlPolicy()
|
||||
policy = self.params.get('slot_based_admission_control')
|
||||
cluster_config_spec.dasConfig.admissionControlPolicy.failoverLevel = policy.get('failover_level')
|
||||
elif self.params.get('reservation_based_admission_control'):
|
||||
cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverResourcesAdmissionControlPolicy()
|
||||
policy = self.params.get('reservation_based_admission_control')
|
||||
auto_compute_percentages = policy.get('auto_compute_percentages')
|
||||
cluster_config_spec.dasConfig.admissionControlPolicy.autoComputePercentages = auto_compute_percentages
|
||||
cluster_config_spec.dasConfig.admissionControlPolicy.failoverLevel = policy.get('failover_level')
|
||||
if not auto_compute_percentages:
|
||||
cluster_config_spec.dasConfig.admissionControlPolicy.cpuFailoverResourcesPercent = \
|
||||
policy.get('cpu_failover_resources_percent')
|
||||
cluster_config_spec.dasConfig.admissionControlPolicy.memoryFailoverResourcesPercent = \
|
||||
policy.get('memory_failover_resources_percent')
|
||||
elif self.params.get('failover_host_admission_control'):
|
||||
cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverHostAdmissionControlPolicy()
|
||||
policy = self.params.get('failover_host_admission_control')
|
||||
cluster_config_spec.dasConfig.admissionControlPolicy.failoverHosts = self.get_failover_hosts()
|
||||
|
||||
cluster_config_spec.dasConfig.hostMonitoring = self.params.get('ha_host_monitoring')
|
||||
cluster_config_spec.dasConfig.vmMonitoring = self.params.get('ha_vm_monitoring')
|
||||
|
||||
if self.changed_advanced_settings:
|
||||
cluster_config_spec.dasConfig.option = self.changed_advanced_settings
|
||||
|
||||
try:
|
||||
task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
|
||||
changed, result = wait_for_task(task)
|
||||
except vmodl.RuntimeFault as runtime_fault:
|
||||
self.module.fail_json(msg=to_native(runtime_fault.msg))
|
||||
except vmodl.MethodFault as method_fault:
|
||||
self.module.fail_json(msg=to_native(method_fault.msg))
|
||||
except TaskError as task_e:
|
||||
self.module.fail_json(msg=to_native(task_e))
|
||||
except Exception as generic_exc:
|
||||
self.module.fail_json(msg="Failed to update cluster"
|
||||
" due to generic exception %s" % to_native(generic_exc))
|
||||
else:
|
||||
changed = True
|
||||
|
||||
self.module.exit_json(changed=changed, result=result)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = vmware_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
cluster_name=dict(type='str', required=True),
|
||||
datacenter=dict(type='str', required=True, aliases=['datacenter_name']),
|
||||
# HA
|
||||
enable_ha=dict(type='bool', default=False),
|
||||
ha_host_monitoring=dict(type='str',
|
||||
default='enabled',
|
||||
choices=['enabled', 'disabled']),
|
||||
host_isolation_response=dict(type='str',
|
||||
default='none',
|
||||
choices=['none', 'powerOff', 'shutdown']),
|
||||
advanced_settings=dict(type='dict', default=dict(), required=False),
|
||||
# HA VM Monitoring related parameters
|
||||
ha_vm_monitoring=dict(type='str',
|
||||
choices=['vmAndAppMonitoring', 'vmMonitoringOnly', 'vmMonitoringDisabled'],
|
||||
default='vmMonitoringDisabled'),
|
||||
ha_vm_failure_interval=dict(type='int', default=30),
|
||||
ha_vm_min_up_time=dict(type='int', default=120),
|
||||
ha_vm_max_failures=dict(type='int', default=3),
|
||||
ha_vm_max_failure_window=dict(type='int', default=-1),
|
||||
|
||||
ha_restart_priority=dict(type='str',
|
||||
choices=['high', 'low', 'medium', 'disabled'],
|
||||
default='medium'),
|
||||
# HA Admission Control related parameters
|
||||
slot_based_admission_control=dict(type='dict', options=dict(
|
||||
failover_level=dict(type='int', required=True),
|
||||
)),
|
||||
reservation_based_admission_control=dict(type='dict', options=dict(
|
||||
auto_compute_percentages=dict(type='bool', default=True),
|
||||
failover_level=dict(type='int', required=True),
|
||||
cpu_failover_resources_percent=dict(type='int', default=50),
|
||||
memory_failover_resources_percent=dict(type='int', default=50),
|
||||
)),
|
||||
failover_host_admission_control=dict(type='dict', options=dict(
|
||||
failover_hosts=dict(type='list', elements='str', required=True),
|
||||
)),
|
||||
))
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
mutually_exclusive=[
|
||||
['slot_based_admission_control', 'reservation_based_admission_control', 'failover_host_admission_control']
|
||||
]
|
||||
)
|
||||
|
||||
vmware_cluster_ha = VMwareCluster(module)
|
||||
vmware_cluster_ha.configure_ha()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,290 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_cluster_info
|
||||
short_description: Gather info about clusters available in given vCenter
|
||||
description:
|
||||
- This module can be used to gather information about clusters in VMWare infrastructure.
|
||||
- All values and VMware object names are case sensitive.
|
||||
- This module was called C(vmware_cluster_facts) before Ansible 2.9. The usage did not change.
|
||||
version_added: '2.6'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
- Christian Neugum (@digifuchsi)
|
||||
notes:
|
||||
- Tested on vSphere 6.5, 6.7
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
datacenter:
|
||||
description:
|
||||
- Datacenter to search for cluster/s.
|
||||
- This parameter is required, if C(cluster_name) is not supplied.
|
||||
required: False
|
||||
type: str
|
||||
cluster_name:
|
||||
description:
|
||||
- Name of the cluster.
|
||||
- If set, information of this cluster will be returned.
|
||||
- This parameter is required, if C(datacenter) is not supplied.
|
||||
required: False
|
||||
type: str
|
||||
show_tag:
|
||||
description:
|
||||
- Tags related to cluster are shown if set to C(True).
|
||||
default: False
|
||||
type: bool
|
||||
version_added: 2.9
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather cluster info from given datacenter
|
||||
vmware_cluster_info:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter: ha-datacenter
|
||||
validate_certs: no
|
||||
delegate_to: localhost
|
||||
register: cluster_info
|
||||
|
||||
- name: Gather info from datacenter about specific cluster
|
||||
vmware_cluster_info:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
cluster_name: DC0_C0
|
||||
delegate_to: localhost
|
||||
register: cluster_info
|
||||
|
||||
- name: Gather info from datacenter about specific cluster with tags
|
||||
vmware_cluster_info:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
cluster_name: DC0_C0
|
||||
show_tag: True
|
||||
delegate_to: localhost
|
||||
register: cluster_info
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
clusters:
|
||||
description: metadata about the available clusters
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"DC0_C0": {
|
||||
"drs_default_vm_behavior": null,
|
||||
"drs_enable_vm_behavior_overrides": null,
|
||||
"drs_vmotion_rate": null,
|
||||
"enable_ha": null,
|
||||
"enabled_drs": true,
|
||||
"enabled_vsan": false,
|
||||
"ha_admission_control_enabled": null,
|
||||
"ha_failover_level": null,
|
||||
"ha_host_monitoring": null,
|
||||
"ha_restart_priority": null,
|
||||
"ha_vm_failure_interval": null,
|
||||
"ha_vm_max_failure_window": null,
|
||||
"ha_vm_max_failures": null,
|
||||
"ha_vm_min_up_time": null,
|
||||
"ha_vm_monitoring": null,
|
||||
"ha_vm_tools_monitoring": null,
|
||||
"vsan_auto_claim_storage": false,
|
||||
"hosts": [
|
||||
{
|
||||
"name": "esxi01.vsphere.local",
|
||||
"folder": "/DC0/host/DC0_C0",
|
||||
},
|
||||
{
|
||||
"name": "esxi02.vsphere.local",
|
||||
"folder": "/DC0/host/DC0_C0",
|
||||
},
|
||||
{
|
||||
"name": "esxi03.vsphere.local",
|
||||
"folder": "/DC0/host/DC0_C0",
|
||||
},
|
||||
{
|
||||
"name": "esxi04.vsphere.local",
|
||||
"folder": "/DC0/host/DC0_C0",
|
||||
},
|
||||
],
|
||||
"tags": [
|
||||
{
|
||||
"category_id": "urn:vmomi:InventoryServiceCategory:9fbf83de-7903-442e-8004-70fd3940297c:GLOBAL",
|
||||
"category_name": "sample_cluster_cat_0001",
|
||||
"description": "",
|
||||
"id": "urn:vmomi:InventoryServiceTag:93d680db-b3a6-4834-85ad-3e9516e8fee8:GLOBAL",
|
||||
"name": "sample_cluster_tag_0001"
|
||||
}
|
||||
],
|
||||
},
|
||||
}
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, find_datacenter_by_name, find_cluster_by_name
|
||||
from ansible.module_utils.vmware_rest_client import VmwareRestClient
|
||||
|
||||
|
||||
class VmwreClusterInfoManager(PyVmomi):
|
||||
def __init__(self, module):
|
||||
super(VmwreClusterInfoManager, self).__init__(module)
|
||||
datacenter = self.params.get('datacenter')
|
||||
cluster_name = self.params.get('cluster_name')
|
||||
self.cluster_objs = []
|
||||
if datacenter:
|
||||
datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter)
|
||||
if datacenter_obj is None:
|
||||
self.module.fail_json(msg="Failed to find datacenter '%s'" % datacenter)
|
||||
self.cluster_objs = self.get_all_cluster_objs(parent=datacenter_obj)
|
||||
elif cluster_name:
|
||||
cluster_obj = find_cluster_by_name(self.content, cluster_name=cluster_name)
|
||||
if cluster_obj is None:
|
||||
self.module.fail_json(msg="Failed to find cluster '%s'" % cluster_name)
|
||||
|
||||
self.cluster_objs = [cluster_obj]
|
||||
|
||||
def get_all_cluster_objs(self, parent):
|
||||
"""
|
||||
Get all cluster managed objects from given parent object
|
||||
Args:
|
||||
parent: Managed objected of datacenter or host folder
|
||||
|
||||
Returns: List of host managed objects
|
||||
|
||||
"""
|
||||
cluster_objs = []
|
||||
if isinstance(parent, vim.Datacenter):
|
||||
folder = parent.hostFolder
|
||||
else:
|
||||
folder = parent
|
||||
|
||||
for child in folder.childEntity:
|
||||
if isinstance(child, vim.Folder):
|
||||
cluster_objs = cluster_objs + self.get_all_cluster_objs(child)
|
||||
if isinstance(child, vim.ClusterComputeResource):
|
||||
cluster_objs.append(child)
|
||||
return cluster_objs
|
||||
|
||||
def gather_cluster_info(self):
|
||||
"""
|
||||
Gather information about cluster
|
||||
"""
|
||||
results = dict(changed=False, clusters=dict())
|
||||
for cluster in self.cluster_objs:
|
||||
# Default values
|
||||
ha_failover_level = None
|
||||
ha_restart_priority = None
|
||||
ha_vm_tools_monitoring = None
|
||||
ha_vm_min_up_time = None
|
||||
ha_vm_max_failures = None
|
||||
ha_vm_max_failure_window = None
|
||||
ha_vm_failure_interval = None
|
||||
enabled_vsan = False
|
||||
vsan_auto_claim_storage = False
|
||||
hosts = []
|
||||
|
||||
# Hosts
|
||||
for host in cluster.host:
|
||||
hosts.append({
|
||||
'name': host.name,
|
||||
'folder': self.get_vm_path(self.content, host),
|
||||
})
|
||||
|
||||
# HA
|
||||
das_config = cluster.configurationEx.dasConfig
|
||||
if das_config.admissionControlPolicy:
|
||||
ha_failover_level = das_config.admissionControlPolicy.failoverLevel
|
||||
if das_config.defaultVmSettings:
|
||||
ha_restart_priority = das_config.defaultVmSettings.restartPriority,
|
||||
ha_vm_tools_monitoring = das_config.defaultVmSettings.vmToolsMonitoringSettings.vmMonitoring,
|
||||
ha_vm_min_up_time = das_config.defaultVmSettings.vmToolsMonitoringSettings.minUpTime,
|
||||
ha_vm_max_failures = das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailures,
|
||||
ha_vm_max_failure_window = das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailureWindow,
|
||||
ha_vm_failure_interval = das_config.defaultVmSettings.vmToolsMonitoringSettings.failureInterval,
|
||||
|
||||
# DRS
|
||||
drs_config = cluster.configurationEx.drsConfig
|
||||
|
||||
# VSAN
|
||||
if hasattr(cluster.configurationEx, 'vsanConfig'):
|
||||
vsan_config = cluster.configurationEx.vsanConfig
|
||||
enabled_vsan = vsan_config.enabled,
|
||||
vsan_auto_claim_storage = vsan_config.defaultConfig.autoClaimStorage,
|
||||
|
||||
tag_info = []
|
||||
if self.params.get('show_tag'):
|
||||
vmware_client = VmwareRestClient(self.module)
|
||||
tag_info = vmware_client.get_tags_for_cluster(cluster_mid=cluster._moId)
|
||||
|
||||
results['clusters'][cluster.name] = dict(
|
||||
hosts=hosts,
|
||||
enable_ha=das_config.enabled,
|
||||
ha_failover_level=ha_failover_level,
|
||||
ha_vm_monitoring=das_config.vmMonitoring,
|
||||
ha_host_monitoring=das_config.hostMonitoring,
|
||||
ha_admission_control_enabled=das_config.admissionControlEnabled,
|
||||
ha_restart_priority=ha_restart_priority,
|
||||
ha_vm_tools_monitoring=ha_vm_tools_monitoring,
|
||||
ha_vm_min_up_time=ha_vm_min_up_time,
|
||||
ha_vm_max_failures=ha_vm_max_failures,
|
||||
ha_vm_max_failure_window=ha_vm_max_failure_window,
|
||||
ha_vm_failure_interval=ha_vm_failure_interval,
|
||||
enabled_drs=drs_config.enabled,
|
||||
drs_enable_vm_behavior_overrides=drs_config.enableVmBehaviorOverrides,
|
||||
drs_default_vm_behavior=drs_config.defaultVmBehavior,
|
||||
drs_vmotion_rate=drs_config.vmotionRate,
|
||||
enabled_vsan=enabled_vsan,
|
||||
vsan_auto_claim_storage=vsan_auto_claim_storage,
|
||||
tags=tag_info,
|
||||
)
|
||||
|
||||
self.module.exit_json(**results)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = vmware_argument_spec()
|
||||
argument_spec.update(
|
||||
datacenter=dict(type='str'),
|
||||
cluster_name=dict(type='str'),
|
||||
show_tag=dict(type='bool', default=False),
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
required_one_of=[
|
||||
['cluster_name', 'datacenter'],
|
||||
],
|
||||
supports_check_mode=True,
|
||||
)
|
||||
if module._name == 'vmware_cluster_facts':
|
||||
module.deprecate("The 'vmware_cluster_facts' module has been renamed to 'vmware_cluster_info'", version='2.13')
|
||||
|
||||
pyv = VmwreClusterInfoManager(module)
|
||||
pyv.gather_cluster_info()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,178 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_cluster_vsan
|
||||
short_description: Manages virtual storage area network (vSAN) configuration on VMware vSphere clusters
|
||||
description:
|
||||
- Manages vSAN on VMware vSphere clusters.
|
||||
- All values and VMware object names are case sensitive.
|
||||
version_added: '2.9'
|
||||
author:
|
||||
- Joseph Callen (@jcpowermac)
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
requirements:
|
||||
- Tested on ESXi 5.5 and 6.5.
|
||||
- PyVmomi installed.
|
||||
options:
|
||||
cluster_name:
|
||||
description:
|
||||
- The name of the cluster to be managed.
|
||||
type: str
|
||||
required: yes
|
||||
datacenter:
|
||||
description:
|
||||
- The name of the datacenter.
|
||||
type: str
|
||||
required: yes
|
||||
aliases: [ datacenter_name ]
|
||||
enable_vsan:
|
||||
description:
|
||||
- Whether to enable vSAN.
|
||||
type: bool
|
||||
default: 'no'
|
||||
vsan_auto_claim_storage:
|
||||
description:
|
||||
- Whether the VSAN service is configured to automatically claim local storage
|
||||
on VSAN-enabled hosts in the cluster.
|
||||
type: bool
|
||||
default: False
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r"""
|
||||
- name: Enable vSAN
|
||||
vmware_cluster_vsan:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter_name: datacenter
|
||||
cluster_name: cluster
|
||||
enable_vsan: yes
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Enable vSAN and claim storage automatically
|
||||
vmware_cluster_vsan:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
datacenter_name: DC0
|
||||
cluster_name: "{{ cluster_name }}"
|
||||
enable_vsan: True
|
||||
vsan_auto_claim_storage: True
|
||||
delegate_to: localhost
|
||||
"""
|
||||
|
||||
RETURN = r"""#
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim, vmodl
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import (PyVmomi, TaskError, find_datacenter_by_name,
|
||||
vmware_argument_spec, wait_for_task)
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class VMwareCluster(PyVmomi):
|
||||
def __init__(self, module):
|
||||
super(VMwareCluster, self).__init__(module)
|
||||
self.cluster_name = module.params['cluster_name']
|
||||
self.datacenter_name = module.params['datacenter']
|
||||
self.enable_vsan = module.params['enable_vsan']
|
||||
self.datacenter = None
|
||||
self.cluster = None
|
||||
|
||||
self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
|
||||
if self.datacenter is None:
|
||||
self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name)
|
||||
|
||||
self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name)
|
||||
if self.cluster is None:
|
||||
self.module.fail_json(msg="Cluster %s does not exist." % self.cluster_name)
|
||||
|
||||
def check_vsan_config_diff(self):
|
||||
"""
|
||||
Check VSAN configuration diff
|
||||
Returns: True if there is diff, else False
|
||||
|
||||
"""
|
||||
vsan_config = self.cluster.configurationEx.vsanConfigInfo
|
||||
|
||||
if vsan_config.enabled != self.enable_vsan or \
|
||||
vsan_config.defaultConfig.autoClaimStorage != self.params.get('vsan_auto_claim_storage'):
|
||||
return True
|
||||
return False
|
||||
|
||||
def configure_vsan(self):
|
||||
"""
|
||||
Manage VSAN configuration
|
||||
|
||||
"""
|
||||
changed, result = False, None
|
||||
|
||||
if self.check_vsan_config_diff():
|
||||
if not self.module.check_mode:
|
||||
cluster_config_spec = vim.cluster.ConfigSpecEx()
|
||||
cluster_config_spec.vsanConfig = vim.vsan.cluster.ConfigInfo()
|
||||
cluster_config_spec.vsanConfig.enabled = self.enable_vsan
|
||||
cluster_config_spec.vsanConfig.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo()
|
||||
cluster_config_spec.vsanConfig.defaultConfig.autoClaimStorage = self.params.get('vsan_auto_claim_storage')
|
||||
try:
|
||||
task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
|
||||
changed, result = wait_for_task(task)
|
||||
except vmodl.RuntimeFault as runtime_fault:
|
||||
self.module.fail_json(msg=to_native(runtime_fault.msg))
|
||||
except vmodl.MethodFault as method_fault:
|
||||
self.module.fail_json(msg=to_native(method_fault.msg))
|
||||
except TaskError as task_e:
|
||||
self.module.fail_json(msg=to_native(task_e))
|
||||
except Exception as generic_exc:
|
||||
self.module.fail_json(msg="Failed to update cluster"
|
||||
" due to generic exception %s" % to_native(generic_exc))
|
||||
else:
|
||||
changed = True
|
||||
|
||||
self.module.exit_json(changed=changed, result=result)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = vmware_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
cluster_name=dict(type='str', required=True),
|
||||
datacenter=dict(type='str', required=True, aliases=['datacenter_name']),
|
||||
# VSAN
|
||||
enable_vsan=dict(type='bool', default=False),
|
||||
vsan_auto_claim_storage=dict(type='bool', default=False),
|
||||
))
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
vmware_cluster_vsan = VMwareCluster(module)
|
||||
vmware_cluster_vsan.configure_vsan()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,273 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2019, Ansible Project
|
||||
# Copyright: (c) 2019, Pavan Bidkar <pbidkar@vmware.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_content_deploy_template
|
||||
short_description: Deploy Virtual Machine from template stored in content library.
|
||||
description:
|
||||
- Module to deploy virtual machine from template in content library.
|
||||
- Content Library feature is introduced in vSphere 6.0 version.
|
||||
- vmtx templates feature is introduced in vSphere 67U1 and APIs for clone template from content library in 67U2.
|
||||
- This module does not work with vSphere version older than 67U2.
|
||||
- All variables and VMware object names are case sensitive.
|
||||
version_added: '2.9'
|
||||
author:
|
||||
- Pavan Bidkar (@pgbidkar)
|
||||
notes:
|
||||
- Tested on vSphere 6.7 U3
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
- vSphere Automation SDK
|
||||
options:
|
||||
template:
|
||||
description:
|
||||
- The name of template from which VM to be deployed.
|
||||
type: str
|
||||
required: True
|
||||
aliases: ['template_src']
|
||||
name:
|
||||
description:
|
||||
- The name of the VM to be deployed.
|
||||
type: str
|
||||
required: True
|
||||
aliases: ['vm_name']
|
||||
datacenter:
|
||||
description:
|
||||
- Name of the datacenter, where VM to be deployed.
|
||||
type: str
|
||||
required: True
|
||||
datastore:
|
||||
description:
|
||||
- Name of the datastore to store deployed VM and disk.
|
||||
type: str
|
||||
required: True
|
||||
folder:
|
||||
description:
|
||||
- Name of the folder in datacenter in which to place deployed VM.
|
||||
type: str
|
||||
required: True
|
||||
host:
|
||||
description:
|
||||
- Name of the ESX Host in datacenter in which to place deployed VM.
|
||||
type: str
|
||||
required: True
|
||||
resource_pool:
|
||||
description:
|
||||
- Name of the resourcepool in datacenter in which to place deployed VM.
|
||||
type: str
|
||||
required: False
|
||||
cluster:
|
||||
description:
|
||||
- Name of the cluster in datacenter in which to place deployed VM.
|
||||
type: str
|
||||
required: False
|
||||
state:
|
||||
description:
|
||||
- The state of Virtual Machine deployed from template in content library.
|
||||
- If set to C(present) and VM does not exists, then VM is created.
|
||||
- If set to C(present) and VM exists, no action is taken.
|
||||
- If set to C(poweredon) and VM does not exists, then VM is created with powered on state.
|
||||
- If set to C(poweredon) and VM exists, no action is taken.
|
||||
type: str
|
||||
required: False
|
||||
default: 'present'
|
||||
choices: [ 'present', 'poweredon' ]
|
||||
extends_documentation_fragment: vmware_rest_client.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Deploy Virtual Machine from template in content library
|
||||
vmware_content_deploy_template:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
template: rhel_test_template
|
||||
datastore: Shared_NFS_Volume
|
||||
folder: vm
|
||||
datacenter: Sample_DC_1
|
||||
name: Sample_VM
|
||||
resource_pool: test_rp
|
||||
validate_certs: False
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Deploy Virtual Machine from template in content library with PowerON State
|
||||
vmware_content_deploy_template:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
template: rhel_test_template
|
||||
datastore: Shared_NFS_Volume
|
||||
folder: vm
|
||||
datacenter: Sample_DC_1
|
||||
name: Sample_VM
|
||||
resource_pool: test_rp
|
||||
validate_certs: False
|
||||
state: poweredon
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
vm_deploy_info:
|
||||
description: Virtual machine deployment message and vm_id
|
||||
returned: on success
|
||||
type: dict
|
||||
sample: {
|
||||
"msg": "Deployed Virtual Machine 'Sample_VM'.",
|
||||
"vm_id": "vm-1009"
|
||||
}
|
||||
'''
|
||||
|
||||
import uuid
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware_rest_client import VmwareRestClient
|
||||
from ansible.module_utils.vmware import PyVmomi
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
HAS_VAUTOMATION_PYTHON_SDK = False
|
||||
try:
|
||||
from com.vmware.vcenter.vm_template_client import LibraryItems
|
||||
HAS_VAUTOMATION_PYTHON_SDK = True
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
class VmwareContentDeployTemplate(VmwareRestClient):
|
||||
def __init__(self, module):
|
||||
"""Constructor."""
|
||||
super(VmwareContentDeployTemplate, self).__init__(module)
|
||||
self.template_service = self.api_client.vcenter.vm_template.LibraryItems
|
||||
self.template_name = self.params.get('template')
|
||||
self.vm_name = self.params.get('name')
|
||||
self.datacenter = self.params.get('datacenter')
|
||||
self.datastore = self.params.get('datastore')
|
||||
self.folder = self.params.get('folder')
|
||||
self.resourcepool = self.params.get('resource_pool')
|
||||
self.cluster = self.params.get('cluster')
|
||||
self.host = self.params.get('host')
|
||||
|
||||
def deploy_vm_from_template(self, power_on=False):
|
||||
# Find the datacenter by the given datacenter name
|
||||
self.datacenter_id = self.get_datacenter_by_name(datacenter_name=self.datacenter)
|
||||
if not self.datacenter_id:
|
||||
self.module.fail_json(msg="Failed to find the datacenter %s" % self.datacenter)
|
||||
# Find the datastore by the given datastore name
|
||||
self.datastore_id = self.get_datastore_by_name(self.datacenter, self.datastore)
|
||||
if not self.datastore_id:
|
||||
self.module.fail_json(msg="Failed to find the datastore %s" % self.datastore)
|
||||
# Find the LibraryItem (Template) by the given LibraryItem name
|
||||
self.library_item_id = self.get_library_item_by_name(self.template_name)
|
||||
if not self.library_item_id:
|
||||
self.module.fail_json(msg="Failed to find the library Item %s" % self.template_name)
|
||||
# Find the folder by the given folder name
|
||||
self.folder_id = self.get_folder_by_name(self.datacenter, self.folder)
|
||||
if not self.folder_id:
|
||||
self.module.fail_json(msg="Failed to find the folder %s" % self.folder)
|
||||
# Find the Host by given HostName
|
||||
self.host_id = self.get_host_by_name(self.datacenter, self.host)
|
||||
if not self.host_id:
|
||||
self.module.fail_json(msg="Failed to find the Host %s" % self.host)
|
||||
# Find the resourcepool by the given resourcepool name
|
||||
self.resourcepool_id = None
|
||||
if self.resourcepool:
|
||||
self.resourcepool_id = self.get_resource_pool_by_name(self.datacenter, self.resourcepool)
|
||||
if not self.resourcepool_id:
|
||||
self.module.fail_json(msg="Failed to find the resource_pool %s" % self.resourcepool)
|
||||
# Find the Cluster by the given Cluster name
|
||||
self.cluster_id = None
|
||||
if self.cluster:
|
||||
self.cluster_id = self.get_cluster_by_name(self.datacenter, self.cluster)
|
||||
if not self.cluster_id:
|
||||
self.module.fail_json(msg="Failed to find the Cluster %s" % self.cluster)
|
||||
# Create VM placement specs
|
||||
self.placement_spec = LibraryItems.DeployPlacementSpec(folder=self.folder_id,
|
||||
host=self.host_id
|
||||
)
|
||||
if self.resourcepool_id or self.cluster_id:
|
||||
self.placement_spec.resource_pool = self.resourcepool_id
|
||||
self.placement_spec.cluster = self.cluster_id
|
||||
self.vm_home_storage_spec = LibraryItems.DeploySpecVmHomeStorage(datastore=to_native(self.datastore_id))
|
||||
self.disk_storage_spec = LibraryItems.DeploySpecDiskStorage(datastore=to_native(self.datastore_id))
|
||||
self.deploy_spec = LibraryItems.DeploySpec(name=self.vm_name,
|
||||
placement=self.placement_spec,
|
||||
vm_home_storage=self.vm_home_storage_spec,
|
||||
disk_storage=self.disk_storage_spec,
|
||||
powered_on=power_on
|
||||
)
|
||||
vm_id = self.template_service.deploy(self.library_item_id, self.deploy_spec)
|
||||
if vm_id:
|
||||
self.module.exit_json(
|
||||
changed=True,
|
||||
vm_deploy_info=dict(
|
||||
msg="Deployed Virtual Machine '%s'." % self.vm_name,
|
||||
vm_id=vm_id,
|
||||
)
|
||||
)
|
||||
self.module.exit_json(changed=False,
|
||||
vm_deploy_info=dict(msg="Virtual Machine deployment failed", vm_id=''))
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = VmwareRestClient.vmware_client_argument_spec()
|
||||
argument_spec.update(
|
||||
state=dict(type='str', default='present',
|
||||
choices=['present', 'poweredon']),
|
||||
template=dict(type='str', aliases=['template_src'], required=True),
|
||||
name=dict(type='str', required=True, aliases=['vm_name']),
|
||||
datacenter=dict(type='str', required=True),
|
||||
datastore=dict(type='str', required=True),
|
||||
folder=dict(type='str', required=True),
|
||||
host=dict(type='str', required=True),
|
||||
resource_pool=dict(type='str', required=False),
|
||||
cluster=dict(type='str', required=False),
|
||||
)
|
||||
module = AnsibleModule(argument_spec=argument_spec,
|
||||
supports_check_mode=True)
|
||||
result = {'failed': False, 'changed': False}
|
||||
pyv = PyVmomi(module=module)
|
||||
vm = pyv.get_vm()
|
||||
if vm:
|
||||
module.exit_json(
|
||||
changed=False,
|
||||
vm_deploy_info=dict(
|
||||
msg="Virtual Machine '%s' already Exists." % module.params['name'],
|
||||
vm_id=vm._moId,
|
||||
)
|
||||
)
|
||||
vmware_contentlib_create = VmwareContentDeployTemplate(module)
|
||||
if module.params['state'] in ['present']:
|
||||
if module.check_mode:
|
||||
result.update(
|
||||
vm_name=module.params['name'],
|
||||
changed=True,
|
||||
desired_operation='Create VM with PowerOff State',
|
||||
)
|
||||
module.exit_json(**result)
|
||||
vmware_contentlib_create.deploy_vm_from_template()
|
||||
if module.params['state'] == 'poweredon':
|
||||
if module.check_mode:
|
||||
result.update(
|
||||
vm_name=module.params['name'],
|
||||
changed=True,
|
||||
desired_operation='Create VM with PowerON State',
|
||||
)
|
||||
module.exit_json(**result)
|
||||
vmware_contentlib_create.deploy_vm_from_template(power_on=True)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,154 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2019, Ansible Project
|
||||
# Copyright: (c) 2019, Pavan Bidkar <pbidkar@vmware.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_content_library_info
|
||||
short_description: Gather information about VMWare Content Library
|
||||
description:
|
||||
- Module to list the content libraries.
|
||||
- Module to get information about specific content library.
|
||||
- Content Library feature is introduced in vSphere 6.0 version, so this module is not supported in the earlier versions of vSphere.
|
||||
- All variables and VMware object names are case sensitive.
|
||||
version_added: '2.9'
|
||||
author:
|
||||
- Pavan Bidkar (@pgbidkar)
|
||||
notes:
|
||||
- Tested on vSphere 6.5, 6.7
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
- vSphere Automation SDK
|
||||
options:
|
||||
library_id:
|
||||
description:
|
||||
- content library id for which details needs to be fetched.
|
||||
type: str
|
||||
required: False
|
||||
extends_documentation_fragment: vmware_rest_client.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Get List of Content Libraries
|
||||
vmware_content_library_info:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Get information about content library
|
||||
vmware_content_library_info:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
library_id: '13b0f060-f4d3-4f84-b61f-0fe1b0c0a5a8'
|
||||
validate_certs: no
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
content_lib_details:
|
||||
description: list of content library metadata
|
||||
returned: on success
|
||||
type: list
|
||||
sample: [
|
||||
{
|
||||
"library_creation_time": "2019-07-02T11:50:52.242000",
|
||||
"library_description": "new description",
|
||||
"library_id": "13b0f060-f4d3-4f84-b61f-0fe1b0c0a5a8",
|
||||
"library_name": "demo-local-lib",
|
||||
"library_publish_info": {
|
||||
"authentication_method": "NONE",
|
||||
"persist_json_enabled": false,
|
||||
"publish_url": null,
|
||||
"published": false,
|
||||
"user_name": null
|
||||
},
|
||||
"library_server_guid": "0fd5813b-aac7-4b92-9fb7-f18f16565613",
|
||||
"library_type": "LOCAL",
|
||||
"library_version": "3"
|
||||
}
|
||||
]
|
||||
content_libs:
|
||||
description: list of content libraries
|
||||
returned: on success
|
||||
type: list
|
||||
sample: [
|
||||
"ded9c4d5-0dcd-4837-b1d8-af7398511e33",
|
||||
"36b72549-14ed-4b5f-94cb-6213fecacc02"
|
||||
]
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware_rest_client import VmwareRestClient
|
||||
|
||||
|
||||
class VmwareContentLibInfo(VmwareRestClient):
|
||||
def __init__(self, module):
|
||||
"""Constructor."""
|
||||
super(VmwareContentLibInfo, self).__init__(module)
|
||||
self.content_service = self.api_client
|
||||
self.library_info = []
|
||||
|
||||
def get_all_content_libs(self):
|
||||
"""Method to retrieve List of content libraries."""
|
||||
self.module.exit_json(changed=False, content_libs=self.content_service.content.LocalLibrary.list())
|
||||
|
||||
def get_content_lib_details(self, library_id):
|
||||
"""Method to retrieve Details of contentlib with library_id"""
|
||||
try:
|
||||
lib_details = self.content_service.content.LocalLibrary.get(library_id)
|
||||
except Exception as e:
|
||||
self.module.fail_json(exists=False, msg="%s" % self.get_error_message(e))
|
||||
lib_publish_info = dict(
|
||||
persist_json_enabled=lib_details.publish_info.persist_json_enabled,
|
||||
authentication_method=lib_details.publish_info.authentication_method,
|
||||
publish_url=lib_details.publish_info.publish_url,
|
||||
published=lib_details.publish_info.published,
|
||||
user_name=lib_details.publish_info.user_name
|
||||
)
|
||||
self.library_info.append(
|
||||
dict(
|
||||
library_name=lib_details.name,
|
||||
library_description=lib_details.description,
|
||||
library_id=lib_details.id,
|
||||
library_type=lib_details.type,
|
||||
library_creation_time=lib_details.creation_time,
|
||||
library_server_guid=lib_details.server_guid,
|
||||
library_version=lib_details.version,
|
||||
library_publish_info=lib_publish_info
|
||||
)
|
||||
)
|
||||
|
||||
self.module.exit_json(exists=False, changed=False, content_lib_details=self.library_info)
|
||||
|
||||
|
||||
def main():
    """Module entry point: report one library's details or list all libraries."""
    spec = VmwareRestClient.vmware_client_argument_spec()
    spec.update(library_id=dict(type='str', required=False))
    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)

    info_gatherer = VmwareContentLibInfo(module)
    library_id = module.params.get('library_id')
    if library_id:
        info_gatherer.get_content_lib_details(library_id)
    else:
        info_gatherer.get_all_content_libs()


if __name__ == '__main__':
    main()
|
@ -1,290 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2019, Ansible Project
|
||||
# Copyright: (c) 2019, Pavan Bidkar <pbidkar@vmware.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_content_library_manager
|
||||
short_description: Create, update and delete VMware content library
|
||||
description:
|
||||
- Module to manage VMware content Library
|
||||
- Content Library feature is introduced in vSphere 6.0 version, so this module is not supported in the earlier versions of vSphere.
|
||||
- All variables and VMware object names are case sensitive.
|
||||
version_added: '2.9'
|
||||
author:
|
||||
- Pavan Bidkar (@pgbidkar)
|
||||
notes:
|
||||
- Tested on vSphere 6.5, 6.7
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
- vSphere Automation SDK
|
||||
options:
|
||||
library_name:
|
||||
description:
|
||||
- The name of VMware content library to manage.
|
||||
type: str
|
||||
required: True
|
||||
library_description:
|
||||
description:
|
||||
- The content library description.
|
||||
- This is required only if C(state) is set to C(present).
|
||||
- This parameter is ignored, when C(state) is set to C(absent).
|
||||
- Process of updating content library only allows description change.
|
||||
type: str
|
||||
required: False
|
||||
default: ''
|
||||
library_type:
|
||||
description:
|
||||
- The content library type.
|
||||
- This is required only if C(state) is set to C(present).
|
||||
- This parameter is ignored, when C(state) is set to C(absent).
|
||||
type: str
|
||||
required: False
|
||||
default: 'local'
|
||||
choices: [ 'local', 'subscribed' ]
|
||||
datastore_name:
|
||||
description:
|
||||
- Name of the datastore on which backing content library is created.
|
||||
- This is required only if C(state) is set to C(present).
|
||||
- This parameter is ignored, when C(state) is set to C(absent).
|
||||
- Currently only datastore backing creation is supported.
|
||||
type: str
|
||||
required: False
|
||||
aliases: ['datastore']
|
||||
state:
|
||||
description:
|
||||
- The state of content library.
|
||||
    - If set to C(present) and library does not exist, then content library is created.
|
||||
- If set to C(present) and library exists, then content library is updated.
|
||||
- If set to C(absent) and library exists, then content library is deleted.
|
||||
    - If set to C(absent) and library does not exist, no action is taken.
|
||||
type: str
|
||||
required: False
|
||||
default: 'present'
|
||||
choices: [ 'present', 'absent' ]
|
||||
extends_documentation_fragment: vmware_rest_client.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Create Content Library
|
||||
vmware_content_library_manager:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
library_name: test-content-lib
|
||||
library_description: 'Library with Datastore Backing'
|
||||
library_type: local
|
||||
datastore_name: datastore
|
||||
validate_certs: False
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Update Content Library
|
||||
vmware_content_library_manager:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
library_name: test-content-lib
|
||||
library_description: 'Library with Datastore Backing'
|
||||
validate_certs: no
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Delete Content Library
|
||||
vmware_content_library_manager:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
library_name: test-content-lib
|
||||
validate_certs: no
|
||||
state: absent
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
content_library_info:
|
||||
description: library creation success and library_id
|
||||
returned: on success
|
||||
type: dict
|
||||
sample: {
|
||||
"library_id": "d0b92fa9-7039-4f29-8e9c-0debfcb22b72",
|
||||
"library_description": 'Test description',
|
||||
"library_type": 'LOCAL',
|
||||
"msg": "Content Library 'demo-local-lib-4' created.",
|
||||
}
|
||||
'''
|
||||
|
||||
import uuid
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware_rest_client import VmwareRestClient
|
||||
from ansible.module_utils.vmware import PyVmomi
|
||||
|
||||
HAS_VAUTOMATION_PYTHON_SDK = False
|
||||
try:
|
||||
from com.vmware.content_client import LibraryModel
|
||||
from com.vmware.content.library_client import StorageBacking
|
||||
HAS_VAUTOMATION_PYTHON_SDK = True
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
class VmwareContentLibCreate(VmwareRestClient):
    """Create, update and delete a VMware content library via the vSphere REST API."""

    def __init__(self, module):
        """Constructor."""
        super(VmwareContentLibCreate, self).__init__(module)
        # The REST API client doubles as the content service entry point.
        self.content_service = self.api_client
        # name -> dict of existing local libraries; filled by get_all_libraries().
        self.local_libraries = dict()
        self.library_name = self.params.get('library_name')
        self.library_description = self.params.get('library_description')
        self.library_type = self.params.get('library_type')
        self.library_types = dict()
        self.datastore_name = self.params.get('datastore_name')
        self.get_all_libraries()
        # Separate pyVmomi (SOAP) connection, used only to resolve the datastore.
        self.pyv = PyVmomi(module=module)

    def process_state(self):
        """
        Dispatch to the handler for (desired state, current state).

        Each handler terminates the module via exit_json()/fail_json().
        """
        self.desired_state = self.params.get('state')
        # Outer key: requested 'state' parameter; inner key: whether the
        # library currently exists ('present'/'absent').
        library_states = {
            'absent': {
                'present': self.state_destroy_library,
                'absent': self.state_exit_unchanged,
            },
            'present': {
                'present': self.state_update_library,
                'absent': self.state_create_library,
            }
        }
        library_states[self.desired_state][self.check_content_library_status()]()

    def get_all_libraries(self):
        """Populate self.local_libraries with details of every local library."""
        content_libs = self.content_service.content.LocalLibrary.list()
        if content_libs:
            for content_lib in content_libs:
                # list() returns ids only; fetch each library's full model.
                lib_details = self.content_service.content.LocalLibrary.get(content_lib)
                self.local_libraries[lib_details.name] = dict(
                    lib_name=lib_details.name,
                    lib_description=lib_details.description,
                    lib_id=lib_details.id,
                    lib_type=lib_details.type
                )

    def check_content_library_status(self):
        """
        Check if the content library exists or not.

        Returns: 'present' if library found, else 'absent'
        """
        ret = 'present' if self.library_name in self.local_libraries else 'absent'
        return ret

    def state_create_library(self):
        """Create the content library backed by the given datastore and exit."""
        # Find the datastore by the given datastore name
        datastore_id = self.pyv.find_datastore_by_name(datastore_name=self.datastore_name)
        if not datastore_id:
            self.module.fail_json(msg="Failed to find the datastore %s" % self.datastore_name)
        self.datastore_id = datastore_id._moId
        # Build the storage backing for the library to be created
        storage_backings = []
        storage_backing = StorageBacking(type=StorageBacking.Type.DATASTORE, datastore_id=self.datastore_id)
        storage_backings.append(storage_backing)

        # Build the specification for the library to be created
        create_spec = LibraryModel()
        create_spec.name = self.library_name
        create_spec.description = self.library_description
        self.library_types = {'local': create_spec.LibraryType.LOCAL,
                              'subscribed': create_spec.LibraryType.SUBSCRIBED}
        create_spec.type = self.library_types[self.library_type]
        create_spec.storage_backings = storage_backings

        # Create a local content library backed by the VC datastore; the client
        # token makes the create request idempotent on retries.
        library_id = self.content_service.content.LocalLibrary.create(create_spec=create_spec,
                                                                      client_token=str(uuid.uuid4()))
        if library_id:
            self.module.exit_json(
                changed=True,
                content_library_info=dict(
                    msg="Content Library '%s' created." % create_spec.name,
                    library_id=library_id,
                    library_description=self.library_description,
                    library_type=create_spec.type,
                )
            )
        # Only reached when the service returned no id (exit_json above raises).
        self.module.exit_json(changed=False,
                              content_library_info=dict(msg="Content Library not created. Datastore and library_type required", library_id=''))

    def state_update_library(self):
        """
        Update the existing content library and exit.

        Only the description can be changed; anything else is reported unchanged.
        """
        changed = False
        library_id = self.local_libraries[self.library_name]['lib_id']
        content_library_info = dict(msg="Content Library %s is unchanged." % self.library_name, library_id=library_id)
        library_update_spec = LibraryModel()
        library_desc = self.local_libraries[self.library_name]['lib_description']
        desired_lib_desc = self.params.get('library_description')
        if library_desc != desired_lib_desc:
            library_update_spec.description = desired_lib_desc
            self.content_service.content.LocalLibrary.update(library_id, library_update_spec)
            content_library_info['msg'] = 'Content Library %s updated.' % self.library_name
            changed = True

        self.module.exit_json(changed=changed, content_library_info=content_library_info)

    def state_destroy_library(self):
        """Delete the existing content library and exit with changed=True."""
        library_id = self.local_libraries[self.library_name]['lib_id']
        self.content_service.content.LocalLibrary.delete(library_id=library_id)
        self.module.exit_json(
            changed=True,
            content_library_info=dict(
                msg="Content Library '%s' deleted." % self.library_name,
                library_id=library_id
            )
        )

    def state_exit_unchanged(self):
        """Exit the module reporting no change (library already absent)."""
        self.module.exit_json(changed=False)
|
||||
|
||||
|
||||
def main():
    """Module entry point: build the argument spec and converge library state."""
    spec = VmwareRestClient.vmware_client_argument_spec()
    extra_options = dict(
        library_name=dict(type='str', required=False),
        library_description=dict(type='str', required=False),
        library_type=dict(type='str', required=False, choices=['local', 'subscribed'], default='local'),
        datastore_name=dict(type='str', required=False, aliases=['datastore']),
        state=dict(type='str', choices=['present', 'absent'], default='present', required=False),
    )
    spec.update(extra_options)
    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)

    manager = VmwareContentLibCreate(module)
    manager.process_state()


if __name__ == '__main__':
    main()
|
@ -1,171 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_datacenter
|
||||
short_description: Manage VMware vSphere Datacenters
|
||||
description:
|
||||
- This module can be used to manage (create, delete) VMware vSphere Datacenters.
|
||||
version_added: 2.0
|
||||
author:
|
||||
- Joseph Callen (@jcpowermac)
|
||||
- Kamil Szczygiel (@kamsz)
|
||||
notes:
|
||||
- Tested on vSphere 6.0, 6.5
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
datacenter_name:
|
||||
description:
|
||||
- The name of the datacenter the cluster will be created in.
|
||||
required: True
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- If the datacenter should be present or absent.
|
||||
choices: [ present, absent ]
|
||||
default: present
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create Datacenter
|
||||
vmware_datacenter:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter_name: '{{ datacenter_name }}'
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Delete Datacenter
|
||||
vmware_datacenter:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter_name: '{{ datacenter_name }}'
|
||||
state: absent
|
||||
delegate_to: localhost
|
||||
register: datacenter_delete_result
|
||||
'''
|
||||
|
||||
RETURN = """#
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim, vmodl
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import PyVmomi, find_datacenter_by_name, vmware_argument_spec, wait_for_task
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class VmwareDatacenterManager(PyVmomi):
    """Manage (create/delete) a vSphere datacenter under the root folder."""

    def __init__(self, module):
        super(VmwareDatacenterManager, self).__init__(module)
        self.datacenter_name = self.params.get('datacenter_name')
        # Resolved once up front; None means the datacenter does not exist yet.
        self.datacenter_obj = self.get_datacenter()

    def ensure(self):
        """Converge the datacenter to the requested 'state' and exit the module."""
        state = self.module.params.get('state')

        if state == 'present':
            self.create_datacenter()

        if state == 'absent':
            self.destroy_datacenter()

    def get_datacenter(self):
        """Return the datacenter managed object or None; fail the module on API errors."""
        try:
            datacenter_obj = find_datacenter_by_name(self.content, self.datacenter_name)
            return datacenter_obj
        except (vmodl.MethodFault, vmodl.RuntimeFault) as runtime_fault:
            self.module.fail_json(msg="Failed to get datacenter '%s'"
                                      " due to : %s" % (self.datacenter_name,
                                                        to_native(runtime_fault.msg)))
        except Exception as generic_exc:
            self.module.fail_json(msg="Failed to get datacenter"
                                      " '%s' due to generic error: %s" % (self.datacenter_name,
                                                                          to_native(generic_exc)))

    def create_datacenter(self):
        """Create the datacenter if it is absent, then exit the module.

        Honors check mode: no API call is made, but the predicted change
        is still reported.
        """
        folder = self.content.rootFolder
        changed = False
        try:
            if not self.datacenter_obj:
                # Bug fix: previously check mode always reported changed=False,
                # which broke '--check' prediction. Report the would-be change
                # and only skip the actual API call.
                changed = True
                if not self.module.check_mode:
                    folder.CreateDatacenter(name=self.datacenter_name)
            self.module.exit_json(changed=changed)
        except vim.fault.DuplicateName:
            # Datacenter appeared concurrently: treat as idempotent success.
            self.module.exit_json(changed=changed)
        except vim.fault.InvalidName as invalid_name:
            self.module.fail_json(msg="Specified datacenter name '%s' is an"
                                      " invalid name : %s" % (self.datacenter_name,
                                                              to_native(invalid_name.msg)))
        except vmodl.fault.NotSupported as not_supported:
            # This should never happen
            self.module.fail_json(msg="Trying to create a datacenter '%s' on"
                                      " an incorrect folder object : %s" % (self.datacenter_name,
                                                                            to_native(not_supported.msg)))
        except (vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault:
            self.module.fail_json(msg="Failed to create a datacenter"
                                      " '%s' due to : %s" % (self.datacenter_name,
                                                             to_native(runtime_fault.msg)))
        except Exception as generic_exc:
            self.module.fail_json(msg="Failed to create a datacenter"
                                      " '%s' due to generic error: %s" % (self.datacenter_name,
                                                                          to_native(generic_exc)))

    def destroy_datacenter(self):
        """Delete the datacenter if it is present, then exit the module.

        Honors check mode: no API call is made, but the predicted change
        is still reported.
        """
        results = dict(changed=False)
        try:
            if self.datacenter_obj:
                # Bug fix: report the would-be deletion in check mode as well,
                # so '--check' correctly predicts changed=True.
                results['changed'] = True
                if not self.module.check_mode:
                    task = self.datacenter_obj.Destroy_Task()
                    changed, result = wait_for_task(task)
                    results['changed'] = changed
                    results['result'] = result
            self.module.exit_json(**results)
        except (vim.fault.VimFault, vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault:
            self.module.fail_json(msg="Failed to delete a datacenter"
                                      " '%s' due to : %s" % (self.datacenter_name,
                                                             to_native(runtime_fault.msg)))
        except Exception as generic_exc:
            self.module.fail_json(msg="Failed to delete a datacenter"
                                      " '%s' due to generic error: %s" % (self.datacenter_name,
                                                                          to_native(generic_exc)))
|
||||
|
||||
|
||||
def main():
    """Module entry point: parse arguments and converge datacenter state."""
    spec = vmware_argument_spec()
    spec.update(
        dict(
            datacenter_name=dict(required=True, type='str'),
            state=dict(default='present', choices=['present', 'absent'], type='str')
        )
    )
    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)

    manager = VmwareDatacenterManager(module)
    manager.ensure()


if __name__ == '__main__':
    main()
|
@ -1,309 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2018, Ansible Project
|
||||
# Copyright (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_datastore_cluster
|
||||
short_description: Manage VMware vSphere datastore clusters
|
||||
description:
|
||||
- This module can be used to add and delete datastore cluster in given VMware environment.
|
||||
- All parameters and VMware object values are case sensitive.
|
||||
version_added: 2.6
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.0, 6.5
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
datacenter_name:
|
||||
description:
|
||||
- The name of the datacenter.
|
||||
- You must specify either a C(datacenter_name) or a C(folder).
|
||||
- Mutually exclusive with C(folder) parameter.
|
||||
required: False
|
||||
aliases: [ datacenter ]
|
||||
type: str
|
||||
datastore_cluster_name:
|
||||
description:
|
||||
- The name of the datastore cluster.
|
||||
required: True
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- If the datastore cluster should be present or absent.
|
||||
choices: [ present, absent ]
|
||||
default: present
|
||||
type: str
|
||||
folder:
|
||||
description:
|
||||
- Destination folder, absolute path to place datastore cluster in.
|
||||
- The folder should include the datacenter.
|
||||
- This parameter is case sensitive.
|
||||
- You must specify either a C(folder) or a C(datacenter_name).
|
||||
- 'Examples:'
|
||||
- ' folder: /datacenter1/datastore'
|
||||
- ' folder: datacenter1/datastore'
|
||||
- ' folder: /datacenter1/datastore/folder1'
|
||||
- ' folder: datacenter1/datastore/folder1'
|
||||
- ' folder: /folder1/datacenter1/datastore'
|
||||
- ' folder: folder1/datacenter1/datastore'
|
||||
- ' folder: /folder1/datacenter1/datastore/folder2'
|
||||
required: False
|
||||
version_added: '2.9'
|
||||
type: str
|
||||
enable_sdrs:
|
||||
description:
|
||||
- Whether or not storage DRS is enabled.
|
||||
default: False
|
||||
type: bool
|
||||
required: False
|
||||
version_added: '2.10'
|
||||
automation_level:
|
||||
description:
|
||||
- Run SDRS automated or manually.
|
||||
choices: [ automated, manual ]
|
||||
default: manual
|
||||
type: str
|
||||
required: False
|
||||
version_added: '2.10'
|
||||
keep_vmdks_together:
|
||||
description:
|
||||
- Specifies whether or not each VM in this datastore cluster should have its virtual disks on the same datastore by default.
|
||||
default: True
|
||||
type: bool
|
||||
required: False
|
||||
version_added: '2.10'
|
||||
loadbalance_interval:
|
||||
description:
|
||||
- Specify the interval in minutes that storage DRS runs to load balance among datastores.
|
||||
default: 480
|
||||
type: int
|
||||
required: False
|
||||
version_added: '2.10'
|
||||
enable_io_loadbalance:
|
||||
description:
|
||||
- Whether or not storage DRS takes into account storage I/O workload when making load balancing and initial placement recommendations.
|
||||
default: False
|
||||
type: bool
|
||||
required: False
|
||||
version_added: '2.10'
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create datastore cluster and enable SDRS
|
||||
vmware_datastore_cluster:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter_name: '{{ datacenter_name }}'
|
||||
datastore_cluster_name: '{{ datastore_cluster_name }}'
|
||||
enable_sdrs: True
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Create datastore cluster using folder
|
||||
vmware_datastore_cluster:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
folder: '/{{ datacenter_name }}/datastore/ds_folder'
|
||||
datastore_cluster_name: '{{ datastore_cluster_name }}'
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Delete datastore cluster
|
||||
vmware_datastore_cluster:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter_name: '{{ datacenter_name }}'
|
||||
datastore_cluster_name: '{{ datastore_cluster_name }}'
|
||||
state: absent
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
result:
|
||||
description: information about datastore cluster operation
|
||||
returned: always
|
||||
type: str
|
||||
sample: "Datastore cluster 'DSC2' created successfully."
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class VMwareDatastoreClusterManager(PyVmomi):
    """Manage a vSphere datastore cluster (storage pod) and its SDRS settings."""

    def __init__(self, module):
        super(VMwareDatastoreClusterManager, self).__init__(module)
        # Resolve the parent folder either from an explicit inventory path
        # or from the datacenter's default datastore folder.
        folder = self.params['folder']
        if folder:
            self.folder_obj = self.content.searchIndex.FindByInventoryPath(folder)
            if not self.folder_obj:
                self.module.fail_json(msg="Failed to find the folder specified by %(folder)s" % self.params)
        else:
            datacenter_name = self.params.get('datacenter_name')
            datacenter_obj = self.find_datacenter_by_name(datacenter_name)
            if not datacenter_obj:
                self.module.fail_json(msg="Failed to find datacenter '%s' required"
                                          " for managing datastore cluster." % datacenter_name)
            self.folder_obj = datacenter_obj.datastoreFolder

        self.datastore_cluster_name = self.params.get('datastore_cluster_name')
        # None when the cluster does not exist yet.
        self.datastore_cluster_obj = self.find_datastore_cluster_by_name(self.datastore_cluster_name)

    def ensure(self):
        """
        Converge the datastore cluster to the requested state and exit the module.

        When the cluster exists and state=present, reconcile each SDRS setting
        individually, building a lazily-created PodConfigSpec that contains
        only the settings that actually differ.
        """
        results = dict(changed=False, result='')
        state = self.module.params.get('state')
        enable_sdrs = self.params.get('enable_sdrs')
        automation_level = self.params.get('automation_level')
        keep_vmdks_together = self.params.get('keep_vmdks_together')
        enable_io_loadbalance = self.params.get('enable_io_loadbalance')
        loadbalance_interval = self.params.get('loadbalance_interval')

        if self.datastore_cluster_obj:
            if state == 'present':
                results['result'] = "Datastore cluster '%s' already available." % self.datastore_cluster_name
                # podConfigSpec stays None unless at least one setting differs;
                # a non-None spec is the signal that a reconfigure is needed.
                sdrs_spec = vim.storageDrs.ConfigSpec()
                sdrs_spec.podConfigSpec = None
                if enable_sdrs != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled:
                    if not sdrs_spec.podConfigSpec:
                        sdrs_spec.podConfigSpec = vim.storageDrs.PodConfigSpec()
                    sdrs_spec.podConfigSpec.enabled = enable_sdrs
                    results['result'] = results['result'] + " Changed SDRS to '%s'." % enable_sdrs
                if automation_level != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.defaultVmBehavior:
                    if not sdrs_spec.podConfigSpec:
                        sdrs_spec.podConfigSpec = vim.storageDrs.PodConfigSpec()
                    sdrs_spec.podConfigSpec.defaultVmBehavior = automation_level
                    results['result'] = results['result'] + " Changed automation level to '%s'." % automation_level
                if keep_vmdks_together != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.defaultIntraVmAffinity:
                    if not sdrs_spec.podConfigSpec:
                        sdrs_spec.podConfigSpec = vim.storageDrs.PodConfigSpec()
                    sdrs_spec.podConfigSpec.defaultIntraVmAffinity = keep_vmdks_together
                    results['result'] = results['result'] + " Changed VMDK affinity to '%s'." % keep_vmdks_together
                if enable_io_loadbalance != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.ioLoadBalanceEnabled:
                    if not sdrs_spec.podConfigSpec:
                        sdrs_spec.podConfigSpec = vim.storageDrs.PodConfigSpec()
                    sdrs_spec.podConfigSpec.ioLoadBalanceEnabled = enable_io_loadbalance
                    results['result'] = results['result'] + " Changed I/O workload balancing to '%s'." % enable_io_loadbalance
                if loadbalance_interval != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.loadBalanceInterval:
                    if not sdrs_spec.podConfigSpec:
                        sdrs_spec.podConfigSpec = vim.storageDrs.PodConfigSpec()
                    sdrs_spec.podConfigSpec.loadBalanceInterval = loadbalance_interval
                    results['result'] = results['result'] + " Changed load balance interval to '%s' minutes." % loadbalance_interval
                if sdrs_spec.podConfigSpec:
                    if not self.module.check_mode:
                        try:
                            task = self.content.storageResourceManager.ConfigureStorageDrsForPod_Task(pod=self.datastore_cluster_obj,
                                                                                                      spec=sdrs_spec, modify=True)
                            changed, result = wait_for_task(task)
                        except Exception as generic_exc:
                            self.module.fail_json(msg="Failed to configure datastore cluster"
                                                      " '%s' due to %s" % (self.datastore_cluster_name,
                                                                           to_native(generic_exc)))
                    else:
                        # Check mode: report the change without applying it.
                        changed = True
                    results['changed'] = changed
            elif state == 'absent':
                # Delete datastore cluster
                if not self.module.check_mode:
                    task = self.datastore_cluster_obj.Destroy_Task()
                    changed, result = wait_for_task(task)
                else:
                    changed = True
                if changed:
                    results['result'] = "Datastore cluster '%s' deleted successfully." % self.datastore_cluster_name
                    results['changed'] = changed
                else:
                    self.module.fail_json(msg="Failed to delete datastore cluster '%s'." % self.datastore_cluster_name)
        else:
            if state == 'present':
                # Create datastore cluster, then apply the requested SDRS
                # configuration in a second step.
                if not self.module.check_mode:
                    try:
                        self.datastore_cluster_obj = self.folder_obj.CreateStoragePod(name=self.datastore_cluster_name)
                    except Exception as generic_exc:
                        self.module.fail_json(msg="Failed to create datastore cluster"
                                                  " '%s' due to %s" % (self.datastore_cluster_name,
                                                                       to_native(generic_exc)))
                    try:
                        sdrs_spec = vim.storageDrs.ConfigSpec()
                        sdrs_spec.podConfigSpec = vim.storageDrs.PodConfigSpec()
                        sdrs_spec.podConfigSpec.enabled = enable_sdrs
                        sdrs_spec.podConfigSpec.defaultVmBehavior = automation_level
                        sdrs_spec.podConfigSpec.defaultIntraVmAffinity = keep_vmdks_together
                        sdrs_spec.podConfigSpec.ioLoadBalanceEnabled = enable_io_loadbalance
                        sdrs_spec.podConfigSpec.loadBalanceInterval = loadbalance_interval
                        task = self.content.storageResourceManager.ConfigureStorageDrsForPod_Task(pod=self.datastore_cluster_obj, spec=sdrs_spec, modify=True)
                        changed, result = wait_for_task(task)
                    except Exception as generic_exc:
                        self.module.fail_json(msg="Failed to configure datastore cluster"
                                                  " '%s' due to %s" % (self.datastore_cluster_name,
                                                                       to_native(generic_exc)))
                results['changed'] = True
                results['result'] = "Datastore cluster '%s' created successfully." % self.datastore_cluster_name
            elif state == 'absent':
                results['result'] = "Datastore cluster '%s' not available or already deleted." % self.datastore_cluster_name
        self.module.exit_json(**results)
|
||||
|
||||
|
||||
def main():
    """Module entry point: parse arguments and converge datastore cluster state."""
    spec = vmware_argument_spec()
    extra_options = dict(
        datacenter_name=dict(type='str', required=False, aliases=['datacenter']),
        datastore_cluster_name=dict(type='str', required=True),
        state=dict(default='present', choices=['present', 'absent'], type='str'),
        folder=dict(type='str', required=False),
        enable_sdrs=dict(type='bool', default=False, required=False),
        keep_vmdks_together=dict(type='bool', default=True, required=False),
        automation_level=dict(type='str', choices=['automated', 'manual'], default='manual'),
        enable_io_loadbalance=dict(type='bool', default=False, required=False),
        loadbalance_interval=dict(type='int', default=480, required=False),
    )
    spec.update(extra_options)
    # Exactly one of datacenter_name / folder must be supplied.
    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ['datacenter_name', 'folder'],
        ],
        required_one_of=[
            ['datacenter_name', 'folder'],
        ]
    )

    manager = VMwareDatastoreClusterManager(module)
    manager.ensure()


if __name__ == '__main__':
    main()
|
@ -1,342 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2017, Tim Rightnour <thegarbledone@gmail.com>
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_datastore_info
|
||||
short_description: Gather info about datastores available in given vCenter
|
||||
description:
|
||||
- This module can be used to gather information about datastores in VMWare infrastructure.
|
||||
- All values and VMware object names are case sensitive.
|
||||
- This module was called C(vmware_datastore_facts) before Ansible 2.9. The usage did not change.
|
||||
version_added: 2.5
|
||||
author:
|
||||
- Tim Rightnour (@garbled1)
|
||||
notes:
|
||||
- Tested on vSphere 5.5, 6.0 and 6.5
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the datastore to match.
|
||||
- If set, information of specific datastores are returned.
|
||||
required: False
|
||||
type: str
|
||||
datacenter:
|
||||
description:
|
||||
- Datacenter to search for datastores.
|
||||
- This parameter is required, if C(cluster) is not supplied.
|
||||
required: False
|
||||
aliases: ['datacenter_name']
|
||||
type: str
|
||||
cluster:
|
||||
description:
|
||||
- Cluster to search for datastores.
|
||||
- If set, information of datastores belonging this clusters will be returned.
|
||||
- This parameter is required, if C(datacenter) is not supplied.
|
||||
required: False
|
||||
type: str
|
||||
gather_nfs_mount_info:
|
||||
description:
|
||||
- Gather mount information of NFS datastores.
|
||||
- Disabled per default because this slows down the execution if you have a lot of datastores.
|
||||
- Only valid when C(schema) is C(summary).
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 2.8
|
||||
gather_vmfs_mount_info:
|
||||
description:
|
||||
- Gather mount information of VMFS datastores.
|
||||
- Disabled per default because this slows down the execution if you have a lot of datastores.
|
||||
- Only valid when C(schema) is C(summary).
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 2.8
|
||||
schema:
|
||||
description:
|
||||
- Specify the output schema desired.
|
||||
- The 'summary' output schema is the legacy output from the module
|
||||
- The 'vsphere' output schema is the vSphere API class definition
|
||||
which requires pyvmomi>6.7.1
|
||||
choices: ['summary', 'vsphere']
|
||||
default: 'summary'
|
||||
type: str
|
||||
version_added: '2.10'
|
||||
properties:
|
||||
description:
|
||||
- Specify the properties to retrieve.
|
||||
- If not specified, all properties are retrieved (deeply).
|
||||
- Results are returned in a structure identical to the vsphere API.
|
||||
- 'Example:'
|
||||
- ' properties: ['
|
||||
- ' "name",'
|
||||
- ' "info.vmfs.ssd",'
|
||||
- ' "capability.vsanSparseSupported",'
|
||||
- ' "overallStatus"'
|
||||
- ' ]'
|
||||
- Only valid when C(schema) is C(vsphere).
|
||||
type: list
|
||||
required: False
|
||||
version_added: '2.10'
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather info from standalone ESXi server having datacenter as 'ha-datacenter'
|
||||
vmware_datastore_info:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter_name: '{{ datacenter_name }}'
|
||||
validate_certs: no
|
||||
delegate_to: localhost
|
||||
register: info
|
||||
|
||||
- name: Gather info from datacenter about specific datastore
|
||||
vmware_datastore_info:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter_name: '{{ datacenter_name }}'
|
||||
name: datastore1
|
||||
delegate_to: localhost
|
||||
register: info
|
||||
|
||||
- name: Gather some info from a datastore using the vSphere API output schema
|
||||
vmware_datastore_info:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter_name: '{{ datacenter_name }}'
|
||||
schema: vsphere
|
||||
properties:
|
||||
- name
|
||||
- info.vmfs.ssd
|
||||
- capability.vsanSparseSupported
|
||||
- overallStatus
|
||||
delegate_to: localhost
|
||||
register: info
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
datastores:
|
||||
description: metadata about the available datastores
|
||||
returned: always
|
||||
type: list
|
||||
sample: [
|
||||
{
|
||||
"accessible": false,
|
||||
"capacity": 42681237504,
|
||||
"datastore_cluster": "datacluster0",
|
||||
"freeSpace": 39638269952,
|
||||
"maintenanceMode": "normal",
|
||||
"multipleHostAccess": false,
|
||||
"name": "datastore2",
|
||||
"provisioned": 12289211488,
|
||||
"type": "VMFS",
|
||||
"uncommitted": 9246243936,
|
||||
"url": "ds:///vmfs/volumes/5a69b18a-c03cd88c-36ae-5254001249ce/",
|
||||
"vmfs_blockSize": 1024,
|
||||
"vmfs_uuid": "5a69b18a-c03cd88c-36ae-5254001249ce",
|
||||
"vmfs_version": "6.81"
|
||||
},
|
||||
{
|
||||
"accessible": true,
|
||||
"capacity": 5497558138880,
|
||||
"datastore_cluster": "datacluster0",
|
||||
"freeSpace": 4279000641536,
|
||||
"maintenanceMode": "normal",
|
||||
"multipleHostAccess": true,
|
||||
"name": "datastore3",
|
||||
"nfs_path": "/vol/datastore3",
|
||||
"nfs_server": "nfs_server1",
|
||||
"provisioned": 1708109410304,
|
||||
"type": "NFS",
|
||||
"uncommitted": 489551912960,
|
||||
"url": "ds:///vmfs/volumes/420b3e73-67070776/"
|
||||
},
|
||||
]
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import (PyVmomi, vmware_argument_spec, get_all_objs,
|
||||
find_cluster_by_name, get_parent_datacenter)
|
||||
|
||||
|
||||
class VMwareHostDatastore(PyVmomi):
    """Collect datastore information, either as the legacy 'summary' dicts or
    as raw vSphere API objects serialized via ``to_json``."""

    def __init__(self, module):
        super(VMwareHostDatastore, self).__init__(module)
        self.gather_nfs_mount_info = self.module.params['gather_nfs_mount_info']
        self.gather_vmfs_mount_info = self.module.params['gather_vmfs_mount_info']
        self.schema = self.module.params['schema']
        self.properties = self.module.params['properties']

    def check_datastore_host(self, esxi_host, datastore):
        """Return the mount info of ``datastore`` as seen from ``esxi_host``.

        Fails the module if the host cannot be found; returns None when the
        datastore is not mounted on that host.
        """
        esxi = self.find_hostsystem_by_name(esxi_host)
        if esxi is None:
            self.module.fail_json(msg="Failed to find ESXi hostname %s " % esxi_host)
        storage_system = esxi.configManager.storageSystem
        for host_mount_info in storage_system.fileSystemVolumeInfo.mountInfo:
            if host_mount_info.volume.name == datastore:
                return host_mount_info
        return None

    def _summarize(self, datastore):
        """Convert one datastore object into the legacy 'summary' dict."""
        summary = datastore.summary
        datastore_summary = dict()
        datastore_summary['accessible'] = summary.accessible
        datastore_summary['capacity'] = summary.capacity
        datastore_summary['name'] = summary.name
        datastore_summary['freeSpace'] = summary.freeSpace
        datastore_summary['maintenanceMode'] = summary.maintenanceMode
        datastore_summary['multipleHostAccess'] = summary.multipleHostAccess
        datastore_summary['type'] = summary.type
        if self.gather_nfs_mount_info and summary.type.startswith("NFS"):
            # get mount info from the first ESXi host attached to this NFS datastore
            host_mount_info = self.check_datastore_host(summary.datastore.host[0].key.name, summary.name)
            datastore_summary['nfs_server'] = host_mount_info.volume.remoteHost
            datastore_summary['nfs_path'] = host_mount_info.volume.remotePath
        if self.gather_vmfs_mount_info and summary.type == "VMFS":
            # get mount info from the first ESXi host attached to this VMFS datastore
            host_mount_info = self.check_datastore_host(summary.datastore.host[0].key.name, summary.name)
            datastore_summary['vmfs_blockSize'] = host_mount_info.volume.blockSize
            datastore_summary['vmfs_version'] = host_mount_info.volume.version
            datastore_summary['vmfs_uuid'] = host_mount_info.volume.uuid
        # vcsim does not return uncommitted; normalize to 0 so the
        # 'provisioned' arithmetic below never sees None.
        if not summary.uncommitted:
            summary.uncommitted = 0
        datastore_summary['uncommitted'] = summary.uncommitted
        datastore_summary['url'] = summary.url
        # Calculated values
        datastore_summary['provisioned'] = summary.capacity - summary.freeSpace + summary.uncommitted
        datastore_summary['datastore_cluster'] = 'N/A'
        if isinstance(datastore.parent, vim.StoragePod):
            datastore_summary['datastore_cluster'] = datastore.parent.name
        return datastore_summary

    def build_datastore_list(self, datastore_list):
        """Build the result list from ``datastore_list``, honoring the
        optional exact-name filter and the requested output schema."""
        datastores = []
        name_filter = self.module.params['name']
        for datastore in datastore_list:
            if self.schema == 'summary':
                entry = self._summarize(datastore)
                if name_filter and entry['name'] != name_filter:
                    continue
                datastores.append(entry)
            else:
                if name_filter and datastore.name != name_filter:
                    continue
                datastores.append(self.to_json(datastore, self.properties))
        return datastores
|
||||
|
||||
|
||||
class PyVmomiCache(object):
    """Cache of vSphere object lookups that are requested repeatedly but not
    modified, optionally scoped to a single datacenter."""

    def __init__(self, content, dc_name=None):
        self.content = content
        self.dc_name = dc_name
        self.clusters = {}
        self.parent_datacenters = {}

    def get_all_objs(self, content, types, confine_to_datacenter=True):
        """Wrapper around the module-level get_all_objs that can restrict the
        result to objects living in this cache's datacenter."""
        found = get_all_objs(content, types)
        if not confine_to_datacenter:
            return found
        if hasattr(found, 'items'):
            # Resource pools come back as a dictionary keyed by object.
            for obj in tuple(found):
                if get_parent_datacenter(obj).name != self.dc_name:
                    del found[obj]
            return found
        # Everything else should be a plain list.
        return [obj for obj in found if get_parent_datacenter(obj).name == self.dc_name]
|
||||
|
||||
|
||||
class PyVmomiHelper(PyVmomi):
    """Thin lookup layer that resolves datastores from vCenter/ESXi."""

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)
        self.cache = PyVmomiCache(self.content, dc_name=self.params['datacenter'])

    def lookup_datastore(self, confine_to_datacenter):
        """Return all datastore objects, optionally confined to the
        configured datacenter."""
        return self.cache.get_all_objs(self.content, [vim.Datastore], confine_to_datacenter)

    def lookup_datastore_by_cluster(self):
        """Return the datastores attached to the requested cluster; fail the
        module if the cluster does not exist."""
        cluster = find_cluster_by_name(self.content, self.params['cluster'])
        if not cluster:
            self.module.fail_json(msg='Failed to find cluster "%(cluster)s"' % self.params)
        return cluster.datastore
|
||||
|
||||
|
||||
def main():
    """Entry point for the vmware_datastore_info module."""
    spec = vmware_argument_spec()
    spec.update(
        name=dict(type='str'),
        datacenter=dict(type='str', aliases=['datacenter_name']),
        cluster=dict(type='str'),
        gather_nfs_mount_info=dict(type='bool', default=False),
        gather_vmfs_mount_info=dict(type='bool', default=False),
        schema=dict(type='str', choices=['summary', 'vsphere'], default='summary'),
        properties=dict(type='list')
    )
    module = AnsibleModule(argument_spec=spec,
                           supports_check_mode=True
                           )

    # Keep the deprecated 'facts' alias working until its removal release.
    if module._name == 'vmware_datastore_facts':
        module.deprecate("The 'vmware_datastore_facts' module has been renamed to 'vmware_datastore_info'", version='2.13')

    pyv = PyVmomiHelper(module)

    # Scope of the search: a single cluster, one datacenter, or everything.
    if module.params['cluster']:
        candidates = pyv.lookup_datastore_by_cluster()
    elif module.params['datacenter']:
        candidates = pyv.lookup_datastore(confine_to_datacenter=True)
    else:
        candidates = pyv.lookup_datastore(confine_to_datacenter=False)

    datastores = VMwareHostDatastore(module).build_datastore_list(candidates)

    module.exit_json(changed=False, datastores=datastores)


if __name__ == '__main__':
    main()
|
@ -1,218 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_datastore_maintenancemode
|
||||
short_description: Place a datastore into maintenance mode
|
||||
description:
|
||||
- This module can be used to manage maintenance mode of a datastore.
|
||||
author:
|
||||
- "Abhijeet Kasurde (@Akasurde)"
|
||||
version_added: 2.6
|
||||
notes:
|
||||
- Tested on vSphere 5.5, 6.0 and 6.5
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
datastore:
|
||||
description:
|
||||
- Name of datastore to manage.
|
||||
- If C(datastore_cluster) or C(cluster_name) are not set, this parameter is required.
|
||||
type: str
|
||||
datastore_cluster:
|
||||
description:
|
||||
- Name of the datastore cluster from all child datastores to be managed.
|
||||
- If C(datastore) or C(cluster_name) are not set, this parameter is required.
|
||||
type: str
|
||||
cluster_name:
|
||||
description:
|
||||
- Name of the cluster where datastore is connected to.
|
||||
- If multiple datastores are connected to the given cluster, then all datastores will be managed by C(state).
|
||||
- If C(datastore) or C(datastore_cluster) are not set, this parameter is required.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- If set to C(present), then enter datastore into maintenance mode.
|
||||
- If set to C(present) and datastore is already in maintenance mode, then no action will be taken.
|
||||
- If set to C(absent) and datastore is in maintenance mode, then exit maintenance mode.
|
||||
- If set to C(absent) and datastore is not in maintenance mode, then no action will be taken.
|
||||
choices: [ present, absent ]
|
||||
default: present
|
||||
required: False
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Enter datastore into Maintenance Mode
|
||||
vmware_datastore_maintenancemode:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datastore: '{{ datastore_name }}'
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Enter all datastores under cluster into Maintenance Mode
|
||||
vmware_datastore_maintenancemode:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
cluster_name: '{{ cluster_name }}'
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Enter all datastores under datastore cluster into Maintenance Mode
|
||||
vmware_datastore_maintenancemode:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datastore_cluster: '{{ datastore_cluster_name }}'
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Exit datastore into Maintenance Mode
|
||||
vmware_datastore_maintenancemode:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datastore: '{{ datastore_name }}'
|
||||
state: absent
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
datastore_status:
|
||||
description: Action taken for datastore
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"ds_226_01": "Datastore 'ds_226_01' is already in maintenance mode."
|
||||
}
|
||||
'''
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import (PyVmomi, vmware_argument_spec, wait_for_task,
|
||||
find_cluster_by_name, get_all_objs)
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class VmwareDatastoreMaintenanceMgr(PyVmomi):
    """Enter or exit maintenance mode for a single datastore, all datastores
    of a compute cluster, or all datastores of a datastore cluster."""

    def __init__(self, module):
        super(VmwareDatastoreMaintenanceMgr, self).__init__(module)
        datastore_name = self.params.get('datastore')
        cluster_name = self.params.get('cluster_name')
        datastore_cluster = self.params.get('datastore_cluster')
        # Datastores to operate on, resolved from whichever selector was given.
        self.datastore_objs = []
        if datastore_name:
            ds = self.find_datastore_by_name(datastore_name=datastore_name)
            if not ds:
                self.module.fail_json(msg='Failed to find datastore "%(datastore)s".' % self.params)
            self.datastore_objs = [ds]
        elif cluster_name:
            cluster = find_cluster_by_name(self.content, cluster_name)
            if not cluster:
                self.module.fail_json(msg='Failed to find cluster "%(cluster_name)s".' % self.params)
            self.datastore_objs = cluster.datastore
        elif datastore_cluster:
            # BUGFIX: get_all_objs() returns a mapping of every StoragePod in
            # the inventory; the old code called .childEntity on that mapping
            # (which can never work) and never filtered by name. Select the
            # pod whose name matches the requested datastore cluster.
            datastore_cluster_obj = None
            for pod in get_all_objs(self.content, [vim.StoragePod]):
                if pod.name == datastore_cluster:
                    datastore_cluster_obj = pod
                    break
            if not datastore_cluster_obj:
                self.module.fail_json(msg='Failed to find datastore cluster "%(datastore_cluster)s".' % self.params)
            for datastore in datastore_cluster_obj.childEntity:
                self.datastore_objs.append(datastore)
        else:
            self.module.fail_json(msg="Please select one of 'cluster_name', 'datastore' or 'datastore_cluster'.")
        self.state = self.params.get('state')

    def ensure(self):
        """Apply the requested maintenance-mode state to every selected
        datastore and exit the module with per-datastore results."""
        datastore_results = dict()
        change_datastore_list = []
        for datastore in self.datastore_objs:
            changed = False
            if self.state == 'present' and datastore.summary.maintenanceMode != 'normal':
                datastore_results[datastore.name] = "Datastore '%s' is already in maintenance mode." % datastore.name
                # BUGFIX: was 'break', which silently skipped every remaining
                # datastore as soon as one was already in the desired state.
                continue
            elif self.state == 'absent' and datastore.summary.maintenanceMode == 'normal':
                datastore_results[datastore.name] = "Datastore '%s' is not in maintenance mode." % datastore.name
                # BUGFIX: was 'break' (see above).
                continue

            try:
                if self.state == 'present':
                    storage_replacement_result = datastore.DatastoreEnterMaintenanceMode()
                    task = storage_replacement_result.task
                else:
                    task = datastore.DatastoreExitMaintenanceMode_Task()

                success, result = wait_for_task(task)

                if success:
                    changed = True
                    if self.state == 'present':
                        datastore_results[datastore.name] = "Datastore '%s' entered in maintenance mode." % datastore.name
                    else:
                        datastore_results[datastore.name] = "Datastore '%s' exited from maintenance mode." % datastore.name
            except vim.fault.InvalidState as invalid_state:
                if self.state == 'present':
                    msg = "Unable to enter datastore '%s' in" % datastore.name
                else:
                    msg = "Unable to exit datastore '%s' from" % datastore.name
                msg += " maintenance mode due to : %s" % to_native(invalid_state.msg)
                self.module.fail_json(msg=msg)
            except Exception as exc:
                if self.state == 'present':
                    msg = "Unable to enter datastore '%s' in" % datastore.name
                else:
                    msg = "Unable to exit datastore '%s' from" % datastore.name
                msg += " maintenance mode due to generic exception : %s" % to_native(exc)
                self.module.fail_json(msg=msg)
            change_datastore_list.append(changed)

        # Report changed=True if any datastore actually transitioned.
        self.module.exit_json(changed=any(change_datastore_list), datastore_status=datastore_results)
|
||||
|
||||
|
||||
def main():
    """Entry point for the vmware_datastore_maintenancemode module."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datastore=dict(type='str', required=False),
        cluster_name=dict(type='str', required=False),
        datastore_cluster=dict(type='str', required=False),
        state=dict(type='str', default='present', choices=['present', 'absent']),
    )

    # Exactly one selector must be supplied to identify the datastore(s).
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['datastore', 'cluster_name', 'datastore_cluster'],
        ],
    )

    VmwareDatastoreMaintenanceMgr(module=module).ensure()


if __name__ == '__main__':
    main()
|
@ -1,704 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2017, Matt Martz <matt@sivel.net>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
author: 'Matt Martz (@sivel)'
|
||||
short_description: 'Deploys a VMware virtual machine from an OVF or OVA file'
|
||||
description:
|
||||
- 'This module can be used to deploy a VMware VM from an OVF or OVA file'
|
||||
module: vmware_deploy_ovf
|
||||
notes: []
|
||||
options:
|
||||
allow_duplicates:
|
||||
default: "yes"
|
||||
description:
|
||||
- Whether or not to allow duplicate VM names. ESXi allows duplicates, vCenter may not.
|
||||
type: bool
|
||||
datacenter:
|
||||
default: ha-datacenter
|
||||
description:
|
||||
- Datacenter to deploy to.
|
||||
type: str
|
||||
cluster:
|
||||
description:
|
||||
- Cluster to deploy to.
|
||||
type: str
|
||||
datastore:
|
||||
default: datastore1
|
||||
description:
|
||||
- Datastore to deploy to.
|
||||
- "You can also specify datastore storage cluster. version_added: 2.9"
|
||||
type: str
|
||||
deployment_option:
|
||||
description:
|
||||
- The key of the chosen deployment option.
|
||||
type: str
|
||||
disk_provisioning:
|
||||
choices:
|
||||
- flat
|
||||
- eagerZeroedThick
|
||||
- monolithicSparse
|
||||
- twoGbMaxExtentSparse
|
||||
- twoGbMaxExtentFlat
|
||||
- thin
|
||||
- sparse
|
||||
- thick
|
||||
- seSparse
|
||||
- monolithicFlat
|
||||
default: thin
|
||||
description:
|
||||
- Disk provisioning type.
|
||||
type: str
|
||||
fail_on_spec_warnings:
|
||||
description:
|
||||
- Cause the module to treat OVF Import Spec warnings as errors.
|
||||
default: "no"
|
||||
type: bool
|
||||
folder:
|
||||
description:
|
||||
- Absolute path of folder to place the virtual machine.
|
||||
- If not specified, defaults to the value of C(datacenter.vmFolder).
|
||||
- 'Examples:'
|
||||
- ' folder: /ha-datacenter/vm'
|
||||
- ' folder: ha-datacenter/vm'
|
||||
- ' folder: /datacenter1/vm'
|
||||
- ' folder: datacenter1/vm'
|
||||
- ' folder: /datacenter1/vm/folder1'
|
||||
- ' folder: datacenter1/vm/folder1'
|
||||
- ' folder: /folder1/datacenter1/vm'
|
||||
- ' folder: folder1/datacenter1/vm'
|
||||
- ' folder: /folder1/datacenter1/vm/folder2'
|
||||
type: str
|
||||
inject_ovf_env:
|
||||
description:
|
||||
- Force the given properties to be inserted into an OVF Environment and injected through VMware Tools.
|
||||
version_added: "2.8"
|
||||
type: bool
|
||||
name:
|
||||
description:
|
||||
- Name of the VM to work with.
|
||||
- Virtual machine names in vCenter are not necessarily unique, which may be problematic.
|
||||
type: str
|
||||
networks:
|
||||
default:
|
||||
VM Network: VM Network
|
||||
description:
|
||||
- 'C(key: value) mapping of OVF network name, to the vCenter network name.'
|
||||
type: dict
|
||||
ovf:
|
||||
description:
|
||||
- 'Path to OVF or OVA file to deploy.'
|
||||
aliases:
|
||||
- ova
|
||||
power_on:
|
||||
default: true
|
||||
description:
|
||||
- 'Whether or not to power on the virtual machine after creation.'
|
||||
type: bool
|
||||
properties:
|
||||
description:
|
||||
- The assignment of values to the properties found in the OVF as key value pairs.
|
||||
type: dict
|
||||
resource_pool:
|
||||
default: Resources
|
||||
description:
|
||||
- Resource Pool to deploy to.
|
||||
type: str
|
||||
wait:
|
||||
default: true
|
||||
description:
|
||||
- 'Wait for the host to power on.'
|
||||
type: bool
|
||||
wait_for_ip_address:
|
||||
default: false
|
||||
description:
|
||||
- Wait until vCenter detects an IP address for the VM.
|
||||
- This requires vmware-tools (vmtoolsd) to properly work after creation.
|
||||
type: bool
|
||||
requirements:
|
||||
- pyvmomi
|
||||
version_added: "2.7"
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- vmware_deploy_ovf:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
ovf: /path/to/ubuntu-16.04-amd64.ovf
|
||||
wait_for_ip_address: true
|
||||
delegate_to: localhost
|
||||
|
||||
# Deploys a new VM named 'NewVM' in specific datacenter/cluster, with network mapping taken from variable and using ova template from an absolute path
|
||||
- vmware_deploy_ovf:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter: Datacenter1
|
||||
cluster: Cluster1
|
||||
datastore: vsandatastore
|
||||
name: NewVM
|
||||
networks: "{u'VM Network':u'{{ ProvisioningNetworkLabel }}'}"
|
||||
validate_certs: no
|
||||
power_on: no
|
||||
ovf: /absolute/path/to/template/mytemplate.ova
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
|
||||
RETURN = r'''
|
||||
instance:
|
||||
description: metadata about the new virtual machine
|
||||
returned: always
|
||||
type: dict
|
||||
sample: None
|
||||
'''
|
||||
|
||||
import io
|
||||
import os
|
||||
import sys
|
||||
import tarfile
|
||||
import time
|
||||
import traceback
|
||||
|
||||
import xml.etree.ElementTree as ET
|
||||
|
||||
from threading import Thread
|
||||
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.six import string_types
|
||||
from ansible.module_utils.urls import generic_urlparse, open_url, urlparse, urlunparse
|
||||
from ansible.module_utils.vmware import (find_network_by_name, find_vm_by_name, PyVmomi,
|
||||
gather_vm_facts, vmware_argument_spec, wait_for_task, wait_for_vm_ip)
|
||||
try:
|
||||
from ansible.module_utils.vmware import vim
|
||||
from pyVmomi import vmodl
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
def path_exists(value):
    """Argument validator: expand env vars and '~' in *value* and require the
    resulting filesystem path to exist.

    Returns the expanded path; raises ValueError when it does not exist.
    """
    if not isinstance(value, string_types):
        value = str(value)
    expanded = os.path.expanduser(os.path.expandvars(value))
    if os.path.exists(expanded):
        return expanded
    raise ValueError('%s is not a valid path' % expanded)
|
||||
|
||||
|
||||
class ProgressReader(io.FileIO):
    """File reader that keeps a running total of bytes read, so an upload
    thread can report its progress."""

    def __init__(self, name, mode='r', closefd=True):
        # Total number of bytes handed out by read() so far.
        self.bytes_read = 0
        io.FileIO.__init__(self, name, mode=mode, closefd=closefd)

    def read(self, size=10240):
        data = io.FileIO.read(self, size)
        self.bytes_read += len(data)
        return data
|
||||
|
||||
|
||||
class TarFileProgressReader(tarfile.ExFileObject):
    """Reader for a member inside a tar archive (OVA) that tracks bytes read
    and can be used as a context manager."""

    def __init__(self, *args):
        # Total number of bytes handed out by read() so far.
        self.bytes_read = 0
        tarfile.ExFileObject.__init__(self, *args)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Best-effort close; never mask an in-flight exception with a
        # secondary failure from close().
        try:
            self.close()
        except Exception:
            pass

    def read(self, size=10240):
        data = tarfile.ExFileObject.read(self, size)
        self.bytes_read += len(data)
        return data
|
||||
|
||||
|
||||
class VMDKUploader(Thread):
    """Background thread that streams one file (a VMDK, or any other OVF
    artifact when ``create=True``) to the upload URL of an import lease."""

    def __init__(self, vmdk, url, validate_certs=True, tarinfo=None, create=False):
        Thread.__init__(self)

        self.vmdk = vmdk

        # When reading out of an OVA the size comes from the tar header,
        # otherwise from the file on disk.
        if tarinfo:
            self.size = tarinfo.size
        else:
            self.size = os.stat(vmdk).st_size

        self.url = url
        self.validate_certs = validate_certs
        self.tarinfo = tarinfo

        self.f = None  # active reader; exposes bytes_read for progress
        self.e = None  # sys.exc_info() captured from the worker thread

        self._create = create

    @property
    def bytes_read(self):
        try:
            return self.f.bytes_read
        except AttributeError:
            # Upload has not started yet; no reader is open.
            return 0

    def _request_opts(self):
        '''
        Requests for vmdk files differ from other file types. Build the request options here to handle that
        '''
        headers = {
            'Content-Length': self.size,
            'Content-Type': 'application/octet-stream',
        }

        if self._create:
            # Non-VMDK
            method = 'PUT'
            headers['Overwrite'] = 't'
        else:
            # VMDK
            method = 'POST'
            headers['Content-Type'] = 'application/x-vnd.vmware-streamVmdk'

        return {
            'method': method,
            'headers': headers,
        }

    def _open_url(self):
        open_url(self.url, data=self.f, validate_certs=self.validate_certs, **self._request_opts())

    def run(self):
        # Any failure is recorded in self.e for the joining thread to inspect.
        try:
            if self.tarinfo:
                with TarFileProgressReader(self.vmdk, self.tarinfo) as self.f:
                    self._open_url()
            else:
                with ProgressReader(self.vmdk, 'rb') as self.f:
                    self._open_url()
        except Exception:
            self.e = sys.exc_info()
|
||||
|
||||
|
||||
class VMwareDeployOvf(PyVmomi):
    """Drive an OVF/OVA deployment against vCenter/ESXi.

    Expected call order (see main()): upload() -> complete() -> deploy().
    upload() internally resolves inventory objects, builds the import spec,
    and acquires the HttpNfcLease before streaming the files.
    """

    def __init__(self, module):
        super(VMwareDeployOvf, self).__init__(module)
        self.module = module
        self.params = module.params

        # Inventory objects, resolved lazily by get_objects().
        self.datastore = None
        self.datacenter = None
        self.resource_pool = None
        self.network_mappings = []

        # Set by get_ovf_descriptor(); self.tar stays None for plain .ovf input.
        self.ovf_descriptor = None
        self.tar = None

        # Set by get_lease().
        self.lease = None
        self.import_spec = None
        self.entity = None

    def get_objects(self):
        """Resolve datacenter, datastore, resource pool and network mappings from params.

        Calls module.fail_json (which exits) if any object cannot be located.
        Returns the tuple (datastore, datacenter, resource_pool, network_mappings).
        """
        self.datacenter = self.find_datacenter_by_name(self.params['datacenter'])
        if not self.datacenter:
            self.module.fail_json(msg='%(datacenter)s could not be located' % self.params)

        self.datastore = None
        datastore_cluster_obj = self.find_datastore_cluster_by_name(self.params['datastore'])
        if datastore_cluster_obj:
            # 'datastore' named a datastore cluster: pick the accessible,
            # non-maintenance member with the most free space.
            datastore = None
            datastore_freespace = 0
            for ds in datastore_cluster_obj.childEntity:
                if isinstance(ds, vim.Datastore) and ds.summary.freeSpace > datastore_freespace:
                    # If datastore field is provided, filter destination datastores
                    if ds.summary.maintenanceMode != 'normal' or not ds.summary.accessible:
                        continue
                    datastore = ds
                    datastore_freespace = ds.summary.freeSpace
            if datastore:
                self.datastore = datastore
        else:
            self.datastore = self.find_datastore_by_name(self.params['datastore'], self.datacenter)

        if not self.datastore:
            self.module.fail_json(msg='%(datastore)s could not be located' % self.params)

        if self.params['cluster']:
            resource_pools = []
            # NOTE(review): datacenter_name= is passed the datacenter *object*,
            # not its name -- confirm against find_cluster_by_name's contract.
            cluster = self.find_cluster_by_name(self.params['cluster'], datacenter_name=self.datacenter)
            if cluster is None:
                self.module.fail_json(msg="Unable to find cluster '%(cluster)s'" % self.params)
            self.resource_pool = self.find_resource_pool_by_cluster(self.params['resource_pool'], cluster=cluster)
        else:
            self.resource_pool = self.find_resource_pool_by_name(self.params['resource_pool'])

        if not self.resource_pool:
            self.module.fail_json(msg='%(resource_pool)s could not be located' % self.params)

        for key, value in self.params['networks'].items():
            network = find_network_by_name(self.content, value)
            if not network:
                self.module.fail_json(msg='%(network)s could not be located' % self.params)
            network_mapping = vim.OvfManager.NetworkMapping()
            network_mapping.name = key
            network_mapping.network = network
            self.network_mappings.append(network_mapping)

        return self.datastore, self.datacenter, self.resource_pool, self.network_mappings

    def get_ovf_descriptor(self):
        """Read the OVF XML descriptor from a plain .ovf file or from inside an OVA tarball."""
        if tarfile.is_tarfile(self.params['ovf']):
            # OVA input: keep the tarfile handle open so upload() can later
            # stream disk members straight out of the archive.
            self.tar = tarfile.open(self.params['ovf'])
            ovf = None
            for candidate in self.tar.getmembers():
                dummy, ext = os.path.splitext(candidate.name)
                if ext.lower() == '.ovf':
                    ovf = candidate
                    break
            if not ovf:
                self.module.fail_json(msg='Could not locate OVF file in %(ovf)s' % self.params)

            self.ovf_descriptor = to_native(self.tar.extractfile(ovf).read())
        else:
            with open(self.params['ovf']) as f:
                self.ovf_descriptor = f.read()

        return self.ovf_descriptor

    def get_lease(self):
        """Build and validate the import spec, then start the HttpNfcLease.

        Exits early (via exit_json) when a duplicate VM exists and
        allow_duplicates is false, or in check mode. Returns
        (lease, import_spec) once the lease reaches the 'ready' state.
        """
        datastore, datacenter, resource_pool, network_mappings = self.get_objects()

        params = {
            'diskProvisioning': self.params['disk_provisioning'],
        }
        if self.params['name']:
            params['entityName'] = self.params['name']
        if network_mappings:
            params['networkMapping'] = network_mappings
        if self.params['deployment_option']:
            params['deploymentOption'] = self.params['deployment_option']
        if self.params['properties']:
            params['propertyMapping'] = []
            for key, value in self.params['properties'].items():
                property_mapping = vim.KeyValue()
                property_mapping.key = key
                # Booleans are stringified explicitly; other values pass through as-is.
                property_mapping.value = str(value) if isinstance(value, bool) else value
                params['propertyMapping'].append(property_mapping)

        if self.params['folder']:
            folder = self.content.searchIndex.FindByInventoryPath(self.params['folder'])
            if not folder:
                self.module.fail_json(msg="Unable to find the specified folder %(folder)s" % self.params)
        else:
            folder = datacenter.vmFolder

        spec_params = vim.OvfManager.CreateImportSpecParams(**params)

        ovf_descriptor = self.get_ovf_descriptor()

        self.import_spec = self.content.ovfManager.CreateImportSpec(
            ovf_descriptor,
            resource_pool,
            datastore,
            spec_params
        )

        errors = [to_native(e.msg) for e in getattr(self.import_spec, 'error', [])]
        if self.params['fail_on_spec_warnings']:
            # Escalate validation warnings to hard failures when requested.
            errors.extend(
                (to_native(w.msg) for w in getattr(self.import_spec, 'warning', []))
            )
        if errors:
            self.module.fail_json(
                msg='Failure validating OVF import spec: %s' % '. '.join(errors)
            )

        for warning in getattr(self.import_spec, 'warning', []):
            self.module.warn('Problem validating OVF import spec: %s' % to_native(warning.msg))

        if not self.params['allow_duplicates']:
            name = self.import_spec.importSpec.configSpec.name
            match = find_vm_by_name(self.content, name, folder=folder)
            if match:
                # VM with this name already exists: report it, unchanged.
                self.module.exit_json(instance=gather_vm_facts(self.content, match), changed=False)

        if self.module.check_mode:
            # NOTE(review): 'name' is only bound in the allow_duplicates=False
            # branch above; with allow_duplicates=True this line raises
            # NameError in check mode -- confirm and fix upstream.
            self.module.exit_json(changed=True, instance={'hw_name': name})

        try:
            self.lease = resource_pool.ImportVApp(
                self.import_spec.importSpec,
                folder
            )
        except vmodl.fault.SystemError as e:
            self.module.fail_json(
                msg='Failed to start import: %s' % to_native(e.msg)
            )

        # Poll until vCenter marks the lease ready to accept uploads.
        while self.lease.state != vim.HttpNfcLease.State.ready:
            time.sleep(0.1)

        self.entity = self.lease.info.entity

        return self.lease, self.import_spec

    def _normalize_url(self, url):
        '''
        The hostname in URLs from vmware may be ``*`` update it accordingly
        '''
        url_parts = generic_urlparse(urlparse(url))
        if url_parts.hostname == '*':
            # '*' means "the host you connected to": substitute our hostname,
            # preserving any explicit port from the lease URL.
            if url_parts.port:
                url_parts.netloc = '%s:%d' % (self.params['hostname'], url_parts.port)
            else:
                url_parts.netloc = self.params['hostname']

        return urlunparse(url_parts.as_list())

    def upload(self):
        """Upload every file referenced by the import spec to its lease device URL.

        Runs the uploads sequentially in worker threads, reporting aggregate
        progress on the lease so it does not time out; aborts the lease and
        fails the module on any error.
        """
        if self.params['ovf'] is None:
            self.module.fail_json(msg="OVF path is required for upload operation.")

        ovf_dir = os.path.dirname(self.params['ovf'])

        lease, import_spec = self.get_lease()

        uploaders = []

        for file_item in import_spec.fileItem:
            # Match each spec file item to the upload URL on the lease.
            device_upload_url = None
            for device_url in lease.info.deviceUrl:
                if file_item.deviceId == device_url.importKey:
                    device_upload_url = self._normalize_url(device_url.url)
                    break

            if not device_upload_url:
                lease.HttpNfcLeaseAbort(
                    vmodl.fault.SystemError(reason='Failed to find deviceUrl for file %s' % file_item.path)
                )
                self.module.fail_json(
                    msg='Failed to find deviceUrl for file %s' % file_item.path
                )

            vmdk_tarinfo = None
            if self.tar:
                # OVA source: the uploader streams the member out of the tarball.
                vmdk = self.tar
                try:
                    vmdk_tarinfo = self.tar.getmember(file_item.path)
                except KeyError:
                    lease.HttpNfcLeaseAbort(
                        vmodl.fault.SystemError(reason='Failed to find VMDK file %s in OVA' % file_item.path)
                    )
                    self.module.fail_json(
                        msg='Failed to find VMDK file %s in OVA' % file_item.path
                    )
            else:
                # OVF source: the disk file sits next to the descriptor on disk.
                vmdk = os.path.join(ovf_dir, file_item.path)
                try:
                    path_exists(vmdk)
                except ValueError:
                    lease.HttpNfcLeaseAbort(
                        vmodl.fault.SystemError(reason='Failed to find VMDK file at %s' % vmdk)
                    )
                    self.module.fail_json(
                        msg='Failed to find VMDK file at %s' % vmdk
                    )

            uploaders.append(
                VMDKUploader(
                    vmdk,
                    device_upload_url,
                    self.params['validate_certs'],
                    tarinfo=vmdk_tarinfo,
                    create=file_item.create
                )
            )

        total_size = sum(u.size for u in uploaders)
        total_bytes_read = [0] * len(uploaders)
        for i, uploader in enumerate(uploaders):
            uploader.start()
            while uploader.is_alive():
                time.sleep(0.1)
                total_bytes_read[i] = uploader.bytes_read
                # Progress keep-alive: without this the NFC lease can expire.
                lease.HttpNfcLeaseProgress(int(100.0 * sum(total_bytes_read) / total_size))

            if uploader.e:
                # Worker stashed sys.exc_info(); abort the lease and surface it.
                lease.HttpNfcLeaseAbort(
                    vmodl.fault.SystemError(reason='%s' % to_native(uploader.e[1]))
                )
                self.module.fail_json(
                    msg='%s' % to_native(uploader.e[1]),
                    exception=''.join(traceback.format_tb(uploader.e[2]))
                )

    def complete(self):
        """Signal vCenter that all uploads finished so it finalizes the import."""
        self.lease.HttpNfcLeaseComplete()

    def inject_ovf_env(self):
        """Write the OVF environment XML into the VM's guestinfo.ovfEnv extraConfig key."""
        attrib = {
            'xmlns': 'http://schemas.dmtf.org/ovf/environment/1',
            'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
            'xmlns:oe': 'http://schemas.dmtf.org/ovf/environment/1',
            'xmlns:ve': 'http://www.vmware.com/schema/ovfenv',
            'oe:id': '',
            've:esxId': self.entity._moId
        }
        env = ET.Element('Environment', **attrib)

        platform = ET.SubElement(env, 'PlatformSection')
        ET.SubElement(platform, 'Kind').text = self.content.about.name
        ET.SubElement(platform, 'Version').text = self.content.about.version
        ET.SubElement(platform, 'Vendor').text = self.content.about.vendor
        ET.SubElement(platform, 'Locale').text = 'US'

        prop_section = ET.SubElement(env, 'PropertySection')
        for key, value in self.params['properties'].items():
            params = {
                'oe:key': key,
                'oe:value': str(value) if isinstance(value, bool) else value
            }
            ET.SubElement(prop_section, 'Property', **params)

        opt = vim.option.OptionValue()
        opt.key = 'guestinfo.ovfEnv'
        opt.value = '<?xml version="1.0" encoding="UTF-8"?>' + to_native(ET.tostring(env))

        config_spec = vim.vm.ConfigSpec()
        config_spec.extraConfig = [opt]

        task = self.entity.ReconfigVM_Task(config_spec)
        wait_for_task(task)

    def deploy(self):
        """Optionally inject the OVF environment and power on the VM; return VM facts."""
        facts = {}

        if self.params['inject_ovf_env']:
            self.inject_ovf_env()

        if self.params['power_on']:
            task = self.entity.PowerOn()
            if self.params['wait']:
                wait_for_task(task)
                if self.params['wait_for_ip_address']:
                    _facts = wait_for_vm_ip(self.content, self.entity)
                    if not _facts:
                        self.module.fail_json(msg='Waiting for IP address timed out')
                    facts.update(_facts)

        # Fall back to a plain facts gather when nothing was collected above.
        if not facts:
            facts.update(gather_vm_facts(self.content, self.entity))

        return facts
|
||||
|
||||
|
||||
def main():
    """Module entry point: parse arguments, deploy the OVF/OVA, and return VM facts."""
    argument_spec = vmware_argument_spec()
    argument_spec.update({
        'name': {},
        'datastore': {
            'default': 'datastore1',
        },
        'datacenter': {
            'default': 'ha-datacenter',
        },
        'cluster': {
            'default': None,
        },
        'deployment_option': {
            'default': None,
        },
        'folder': {
            'default': None,
        },
        'inject_ovf_env': {
            'default': False,
            'type': 'bool',
        },
        'resource_pool': {
            'default': 'Resources',
        },
        'networks': {
            'default': {
                'VM Network': 'VM Network',
            },
            'type': 'dict',
        },
        'ovf': {
            # NOTE(review): a callable as 'type' makes path_exists the option
            # validator -- confirm it raises on a missing file.
            'type': path_exists,
            'aliases': ['ova'],
        },
        'disk_provisioning': {
            'choices': [
                'flat',
                'eagerZeroedThick',
                'monolithicSparse',
                'twoGbMaxExtentSparse',
                'twoGbMaxExtentFlat',
                'thin',
                'sparse',
                'thick',
                'seSparse',
                'monolithicFlat'
            ],
            'default': 'thin',
        },
        'power_on': {
            'type': 'bool',
            'default': True,
        },
        'properties': {
            'type': 'dict',
        },
        'wait': {
            'type': 'bool',
            'default': True,
        },
        'wait_for_ip_address': {
            'type': 'bool',
            'default': False,
        },
        'allow_duplicates': {
            'type': 'bool',
            'default': True,
        },
        'fail_on_spec_warnings': {
            'type': 'bool',
            'default': False,
        },
    })
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    deploy_ovf = VMwareDeployOvf(module)
    # Upload the files, close out the NFC lease, then power-on/gather facts.
    deploy_ovf.upload()
    deploy_ovf.complete()
    facts = deploy_ovf.deploy()

    module.exit_json(instance=facts, changed=True)
|
||||
|
||||
|
||||
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
@ -1,584 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Karsten Kaj Jakobsen <kj@patientsky.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
author:
|
||||
- "Karsten Kaj Jakobsen (@karstenjakobsen)"
|
||||
description:
|
||||
- "This module can be used to create VM/Host groups in a given cluster. Creates a vm group if C(vms) is set. Creates a host group if C(hosts) is set."
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
module: vmware_drs_group
|
||||
notes:
|
||||
- "Tested on vSphere 6.5 and 6.7"
|
||||
options:
|
||||
cluster_name:
|
||||
description:
|
||||
- "Cluster to create vm/host group."
|
||||
required: true
|
||||
type: str
|
||||
datacenter:
|
||||
aliases:
|
||||
- datacenter_name
|
||||
description:
|
||||
- "Datacenter to search for given cluster. If not set, we use first cluster we encounter with C(cluster_name)."
|
||||
required: false
|
||||
type: str
|
||||
group_name:
|
||||
description:
|
||||
- "The name of the group to create or remove."
|
||||
required: true
|
||||
type: str
|
||||
hosts:
|
||||
description:
|
||||
- "List of hosts to create in group."
|
||||
- "Required only if C(vms) is not set."
|
||||
required: false
|
||||
type: list
|
||||
state:
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
default: present
|
||||
description:
|
||||
- "If set to C(present) and the group doesn't exists then the group will be created."
|
||||
- "If set to C(absent) and the group exists then the group will be deleted."
|
||||
required: true
|
||||
type: str
|
||||
vms:
|
||||
description:
|
||||
- "List of vms to create in group."
|
||||
- "Required only if C(hosts) is not set."
|
||||
required: false
|
||||
type: list
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
short_description: "Creates vm/host group in a given cluster."
|
||||
version_added: "2.8"
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
---
|
||||
- name: "Create DRS VM group"
|
||||
delegate_to: localhost
|
||||
vmware_drs_group:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
cluster_name: DC0_C0
|
||||
datacenter_name: DC0
|
||||
group_name: TEST_VM_01
|
||||
vms:
|
||||
- DC0_C0_RP0_VM0
|
||||
- DC0_C0_RP0_VM1
|
||||
state: present
|
||||
|
||||
- name: "Create DRS Host group"
|
||||
delegate_to: localhost
|
||||
vmware_drs_group:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
cluster_name: DC0_C0
|
||||
datacenter_name: DC0
|
||||
group_name: TEST_HOST_01
|
||||
hosts:
|
||||
- DC0_C0_H0
|
||||
- DC0_C0_H1
|
||||
- DC0_C0_H2
|
||||
state: present
|
||||
|
||||
- name: "Delete DRS Host group"
|
||||
delegate_to: localhost
|
||||
vmware_drs_group:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
cluster_name: DC0_C0
|
||||
datacenter_name: DC0
|
||||
group_name: TEST_HOST_01
|
||||
state: absent
|
||||
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
drs_group_facts:
|
||||
description: Metadata about DRS group created
|
||||
returned: always
|
||||
type: dict
|
||||
sample:
|
||||
"drs_group_facts": {
|
||||
"changed": true,
|
||||
"failed": false,
|
||||
"msg": "Created host group TEST_HOST_01 successfully",
|
||||
"result": {
|
||||
"DC0_C0": [
|
||||
{
|
||||
"group_name": "TEST_HOST_01",
|
||||
"hosts": [
|
||||
"DC0_C0_H0",
|
||||
"DC0_C0_H1",
|
||||
"DC0_C0_H2"
|
||||
],
|
||||
"type": "host"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import (PyVmomi, vmware_argument_spec,
|
||||
wait_for_task, find_cluster_by_name,
|
||||
find_vm_by_id, find_datacenter_by_name, find_vm_by_name)
|
||||
|
||||
|
||||
class VmwareDrsGroupManager(PyVmomi):
    """
    Class to manage DRS groups

    All lookups and the operation to perform ('add'/'edit'/'remove') are
    decided in __init__; create_drs_group()/delete_drs_group() then apply it.
    State is kept in name-mangled private attributes and exposed through
    get_msg()/get_result()/get_changed().
    """

    def __init__(self, module, cluster_name, group_name, state,
                 datacenter_name=None, vm_list=None, host_list=None):
        """
        Init
        """

        super(VmwareDrsGroupManager, self).__init__(module)

        self.__datacenter_name = datacenter_name
        self.__datacenter_obj = None
        self.__cluster_name = cluster_name
        self.__cluster_obj = None
        self.__group_name = group_name
        self.__group_obj = None
        self.__operation = None
        self.__vm_list = vm_list
        self.__vm_obj_list = []
        self.__host_list = host_list
        self.__host_obj_list = []
        self.__msg = 'Nothing to see here...'
        self.__result = dict()
        self.__changed = False
        self.__state = state

        if datacenter_name is not None:

            self.__datacenter_obj = find_datacenter_by_name(self.content, self.__datacenter_name)

            # In check mode a missing datacenter is tolerated so dry runs can proceed.
            if self.__datacenter_obj is None and module.check_mode is False:
                raise Exception("Datacenter '%s' not found" % self.__datacenter_name)

        self.__cluster_obj = find_cluster_by_name(content=self.content,
                                                  cluster_name=self.__cluster_name,
                                                  datacenter=self.__datacenter_obj)

        # Throw error if cluster does not exist
        if self.__cluster_obj is None:
            if module.check_mode is False:
                raise Exception("Cluster '%s' not found" % self.__cluster_name)
        else:
            # get group
            self.__group_obj = self.__get_group_by_name()
            # Set result here. If nothing is to be updated, result is already set
            self.__set_result(self.__group_obj)

        # Dont populate lists if we are deleting group
        if state == 'present':

            # An existing group is edited in place; otherwise a new one is added.
            if self.__group_obj:
                self.__operation = 'edit'
            else:
                self.__operation = 'add'

            if self.__vm_list is not None:
                self.__set_vm_obj_list(vm_list=self.__vm_list)

            if self.__host_list is not None:
                self.__set_host_obj_list(host_list=self.__host_list)
        else:
            self.__operation = 'remove'

    def get_msg(self):
        """
        Returns message for Ansible result
        Args: none

        Returns: string
        """
        return self.__msg

    def get_result(self):
        """
        Returns result for Ansible
        Args: none

        Returns: dict
        """
        return self.__result

    def __set_result(self, group_obj):
        """
        Creates result for successful run
        Args:
            group_obj: group object

        Returns: None

        """
        self.__result = dict()

        if (self.__cluster_obj is not None) and (group_obj is not None):
            # Result is keyed by cluster name; value is a list of group facts.
            self.__result[self.__cluster_obj.name] = []
            self.__result[self.__cluster_obj.name].append(self.__normalize_group_data(group_obj))

    def get_changed(self):
        """
        Returns if anything changed
        Args: none

        Returns: boolean
        """
        return self.__changed

    def __set_vm_obj_list(self, vm_list=None, cluster_obj=None):
        """
        Function populate vm object list from list of vms
        Args:
            vm_list: List of vm names

        Returns: None

        """

        if vm_list is None:
            vm_list = self.__vm_list

        if cluster_obj is None:
            cluster_obj = self.__cluster_obj

        if vm_list is not None:

            for vm in vm_list:

                # Lookups are skipped entirely in check mode.
                if self.module.check_mode is False:

                    # Get host data
                    vm_obj = find_vm_by_id(content=self.content, vm_id=vm,
                                           vm_id_type='vm_name', cluster=cluster_obj)

                    if vm_obj is None:
                        raise Exception("VM %s does not exist in cluster %s" % (vm,
                                                                                self.__cluster_name))

                    self.__vm_obj_list.append(vm_obj)

    def __set_host_obj_list(self, host_list=None):
        """
        Function populate host object list from list of hostnames
        Args:
            host_list: List of host names

        Returns: None

        """

        if host_list is None:
            host_list = self.__host_list

        if host_list is not None:

            for host in host_list:

                if self.module.check_mode is False:

                    # Get host data
                    host_obj = self.find_hostsystem_by_name(host)

                    if host_obj is None and self.module.check_mode is False:
                        raise Exception("ESXi host %s does not exist in cluster %s" % (host, self.__cluster_name))

                    self.__host_obj_list.append(host_obj)

    def __get_group_by_name(self, group_name=None, cluster_obj=None):
        """
        Function to get group by name
        Args:
            group_name: Name of group
            cluster_obj: vim Cluster object

        Returns: Group Object if found or None

        """

        if group_name is None:
            group_name = self.__group_name

        if cluster_obj is None:
            cluster_obj = self.__cluster_obj

        # Allow for group check even if dry run
        if self.module.check_mode and cluster_obj is None:
            return None

        for group in cluster_obj.configurationEx.group:
            if group.name == group_name:
                return group

        # No group found
        return None

    def __populate_vm_host_list(self, group_name=None, cluster_obj=None, host_group=False):
        """
        Return all VM/Host names using given group name
        Args:
            group_name: group name
            cluster_obj: Cluster managed object
            host_group: True if we want only host name from group

        Returns: List of VM/Host names belonging to given group object

        """
        obj_name_list = []

        if group_name is None:
            group_name = self.__group_name

        if cluster_obj is None:
            cluster_obj = self.__cluster_obj

        if not all([group_name, cluster_obj]):
            return obj_name_list

        # NOTE(review): the names are read from the cached self.__group_obj,
        # not re-fetched via group_name/cluster_obj -- confirm that is intended.
        group = self.__group_obj

        if not host_group and isinstance(group, vim.cluster.VmGroup):
            obj_name_list = [vm.name for vm in group.vm]

        elif host_group and isinstance(group, vim.cluster.HostGroup):
            obj_name_list = [host.name for host in group.host]

        return obj_name_list

    def __check_if_vms_hosts_changed(self, group_name=None, cluster_obj=None, host_group=False):
        """
        Function to check if VMs/Hosts changed
        Args:
            group_name: Name of group
            cluster_obj: vim Cluster object
            host_group: True if we want to check hosts, else check vms

        Returns: Bool

        """

        if group_name is None:
            group_name = self.__group_name

        if cluster_obj is None:
            cluster_obj = self.__cluster_obj

        list_a = self.__host_list if host_group else self.__vm_list
        list_b = self.__populate_vm_host_list(host_group=host_group)

        # By casting lists as a set, you remove duplicates and order doesn't count. Comparing sets is also much faster and more efficient than comparing lists.
        if set(list_a) == set(list_b):
            return False
        else:
            return True

    def __create_host_group(self):
        # Create or update a DRS host group via a cluster reconfigure task.

        # Check if anything has changed when editing
        if self.__operation == 'add' or (self.__operation == 'edit' and self.__check_if_vms_hosts_changed(host_group=True)):

            group = vim.cluster.HostGroup()
            group.name = self.__group_name
            group.host = self.__host_obj_list

            group_spec = vim.cluster.GroupSpec(info=group, operation=self.__operation)
            config_spec = vim.cluster.ConfigSpecEx(groupSpec=[group_spec])

            if not self.module.check_mode:
                task = self.__cluster_obj.ReconfigureEx(config_spec, modify=True)
                wait_for_task(task)

            # Set new result since something changed
            self.__set_result(group)
            self.__changed = True

        if self.__operation == 'edit':
            self.__msg = "Updated host group %s successfully" % (self.__group_name)
        else:
            self.__msg = "Created host group %s successfully" % (self.__group_name)

    def __create_vm_group(self):
        # Create or update a DRS VM group via a cluster reconfigure task.

        # Check if anything has changed when editing
        if self.__operation == 'add' or (self.__operation == 'edit' and self.__check_if_vms_hosts_changed()):

            group = vim.cluster.VmGroup()

            group.name = self.__group_name
            group.vm = self.__vm_obj_list

            group_spec = vim.cluster.GroupSpec(info=group, operation=self.__operation)
            config_spec = vim.cluster.ConfigSpecEx(groupSpec=[group_spec])

            # Check if dry run
            if not self.module.check_mode:
                task = self.__cluster_obj.ReconfigureEx(config_spec, modify=True)
                wait_for_task(task)

            self.__set_result(group)
            self.__changed = True

        if self.__operation == 'edit':
            self.__msg = "Updated vm group %s successfully" % (self.__group_name)
        else:
            self.__msg = "Created vm group %s successfully" % (self.__group_name)

    def __normalize_group_data(self, group_obj):
        """
        Return human readable group spec
        Args:
            group_obj: Group object

        Returns: DRS group object fact

        """
        if not all([group_obj]):
            return {}

        # Check if group is a host group
        if hasattr(group_obj, 'host'):
            return dict(
                group_name=group_obj.name,
                hosts=self.__host_list,
                type="host"
            )
        else:
            return dict(
                group_name=group_obj.name,
                vms=self.__vm_list,
                type="vm"
            )

    def create_drs_group(self):
        """
        Function to create a DRS host/vm group
        """

        # Group kind is inferred from which list was supplied (they are
        # mutually exclusive at the module level).
        if self.__vm_list is None:
            self.__create_host_group()
        elif self.__host_list is None:
            self.__create_vm_group()
        else:
            raise Exception('Failed, no hosts or vms defined')

    def delete_drs_group(self):
        """
        Function to delete a DRS host/vm group
        """

        if self.__group_obj is not None:

            self.__changed = True

            # Check if dry run
            if not self.module.check_mode:

                group_spec = vim.cluster.GroupSpec(removeKey=self.__group_name, operation=self.__operation)
                config_spec = vim.cluster.ConfigSpecEx(groupSpec=[group_spec])

                task = self.__cluster_obj.ReconfigureEx(config_spec, modify=True)
                wait_for_task(task)

        # Dont throw error if group does not exist. Simply set changed = False
        if self.__changed:
            self.__msg = "Deleted group `%s` successfully" % (self.__group_name)
        else:
            self.__msg = "DRS group `%s` does not exists or already deleted" % (self.__group_name)
|
||||
|
||||
|
||||
def main():
    """
    Main function

    Parses module arguments, drives VmwareDrsGroupManager for the requested
    state, and reports msg/changed/result back to Ansible.
    """

    argument_spec = vmware_argument_spec()

    argument_spec.update(
        state=dict(type='str', default='present', choices=['present', 'absent']),
        datacenter=dict(type='str', required=False, aliases=['datacenter_name']),
        cluster_name=dict(type='str', required=True),
        group_name=dict(type='str', required=True),
        vms=dict(type='list'),
        hosts=dict(type='list')
    )

    required_if = [
        ['state', 'absent', ['group_name']]
    ]

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=required_if,
        supports_check_mode=True,
        mutually_exclusive=[['vms', 'hosts']],
        required_one_of=[['vms', 'hosts']]
    )

    try:
        # Create instance of VmwareDrsGroupManager
        vmware_drs_group = VmwareDrsGroupManager(module=module,
                                                 datacenter_name=module.params.get('datacenter', None),
                                                 cluster_name=module.params['cluster_name'],
                                                 group_name=module.params['group_name'],
                                                 vm_list=module.params['vms'],
                                                 host_list=module.params['hosts'],
                                                 state=module.params['state'])

        if module.params['state'] == 'present':
            # Add DRS group
            vmware_drs_group.create_drs_group()
        elif module.params['state'] == 'absent':
            # Delete DRS group
            vmware_drs_group.delete_drs_group()

        # Set results
        results = dict(msg=vmware_drs_group.get_msg(),
                       failed=False,
                       changed=vmware_drs_group.get_changed(),
                       result=vmware_drs_group.get_result())

    except Exception as error:
        # Any failure raised by the manager is reported as a module failure.
        results = dict(failed=True, msg="Error: %s" % error)

    if results['failed']:
        module.fail_json(**results)
    else:
        module.exit_json(**results)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -1,282 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Karsten Kaj Jakobsen <kj@patientsky.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
author:
|
||||
- "Karsten Kaj Jakobsen (@karstenjakobsen)"
|
||||
description:
|
||||
- "This module can be used to gather information about DRS VM/HOST groups from the given cluster."
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
module: vmware_drs_group_info
|
||||
notes:
|
||||
- "Tested on vSphere 6.5 and 6.7"
|
||||
options:
|
||||
cluster_name:
|
||||
description:
|
||||
- "Cluster to search for VM/Host groups."
|
||||
- "If set, information of DRS groups belonging this cluster will be returned."
|
||||
- "Not needed if C(datacenter) is set."
|
||||
required: false
|
||||
type: str
|
||||
datacenter:
|
||||
aliases:
|
||||
- datacenter_name
|
||||
description:
|
||||
- "Datacenter to search for DRS VM/Host groups."
|
||||
required: true
|
||||
type: str
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
short_description: "Gathers info about DRS VM/Host groups on the given cluster"
|
||||
version_added: "2.9"
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
---
|
||||
- name: "Gather DRS info about given Cluster"
|
||||
register: cluster_drs_group_info
|
||||
vmware_drs_group_info:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
cluster_name: "{{ cluster_name }}"
|
||||
datacenter: "{{ datacenter }}"
|
||||
delegate_to: localhost
|
||||
|
||||
- name: "Gather DRS group info about all clusters in given datacenter"
|
||||
register: cluster_drs_group_info
|
||||
vmware_drs_group_info:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
datacenter: "{{ datacenter }}"
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
drs_group_info:
|
||||
description: Metadata about DRS group from given cluster / datacenter
|
||||
returned: always
|
||||
type: dict
|
||||
sample:
|
||||
"drs_group_info": {
|
||||
"DC0_C0": [
|
||||
{
|
||||
"group_name": "GROUP_HOST_S01",
|
||||
"hosts": [
|
||||
"vm-01.zone",
|
||||
"vm-02.zone"
|
||||
],
|
||||
"type": "host"
|
||||
},
|
||||
{
|
||||
"group_name": "GROUP_HOST_S02",
|
||||
"hosts": [
|
||||
"vm-03.zone",
|
||||
"vm-04.zone"
|
||||
],
|
||||
"type": "host"
|
||||
},
|
||||
{
|
||||
"group_name": "GROUP_VM_S01",
|
||||
"type": "vm",
|
||||
"vms": [
|
||||
"test-node01"
|
||||
]
|
||||
},
|
||||
{
|
||||
"group_name": "GROUP_VM_S02",
|
||||
"type": "vm",
|
||||
"vms": [
|
||||
"test-node02"
|
||||
]
|
||||
}
|
||||
],
|
||||
"DC0_C1": []
|
||||
}
|
||||
'''
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datacenter_by_name, get_all_objs
|
||||
|
||||
|
||||
class VmwareDrsGroupInfoManager(PyVmomi):
    """Collect DRS VM/Host group information from a single cluster or from
    every cluster in a datacenter."""

    def __init__(self, module, datacenter_name, cluster_name=None):
        """Resolve the list of cluster objects to inspect.

        Args:
            module: AnsibleModule instance.
            datacenter_name: Datacenter whose clusters should all be scanned,
                or None.
            cluster_name: Single cluster to scan, or None.

        Raises:
            Exception: If the named datacenter or cluster cannot be found.
        """
        super(VmwareDrsGroupInfoManager, self).__init__(module)

        self.__datacenter_name = datacenter_name
        self.__datacenter_obj = None
        self.__cluster_name = cluster_name
        self.__cluster_obj = None
        self.__msg = 'Nothing to see here...'
        self.__result = dict()
        self.__changed = False
        # Robustness fix: always initialize the list so gather_info() cannot
        # hit an AttributeError when neither datacenter_name nor cluster_name
        # is supplied (the module's required_one_of normally prevents that,
        # but this class should not depend on it).
        self.cluster_obj_list = []

        if datacenter_name:
            datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter_name)
            if datacenter_obj:
                folder = datacenter_obj.hostFolder
                self.cluster_obj_list = get_all_objs(self.content, [vim.ClusterComputeResource], folder)
            else:
                raise Exception("Datacenter '%s' not found" % self.__datacenter_name)

        if cluster_name:
            cluster_obj = self.find_cluster_by_name(cluster_name=self.__cluster_name)
            if cluster_obj is None:
                raise Exception("Cluster '%s' not found" % self.__cluster_name)
            self.cluster_obj_list = [cluster_obj]

    def get_result(self):
        """Return the gathered group-information dictionary."""
        return self.__result

    def __set_result(self, result):
        """Store the gathered group information.

        Args:
            result: Dict mapping cluster name -> list of normalized group dicts.

        Returns: None
        """
        self.__result = result

    def __get_all_from_group(self, group_obj, host_group=False):
        """Return all VM / Host names belonging to the given DRS group.

        Args:
            group_obj: DRS group managed object.
            host_group: True to extract host names, False for VM names.

        Returns: List of member names; empty when group_obj is falsy or is
            not of the type matching the requested member kind.
        """
        obj_name_list = []

        if not group_obj:
            return obj_name_list

        if not host_group and isinstance(group_obj, vim.cluster.VmGroup):
            obj_name_list = [vm.name for vm in group_obj.vm]
        elif host_group and isinstance(group_obj, vim.cluster.HostGroup):
            obj_name_list = [host.name for host in group_obj.host]

        return obj_name_list

    def __normalize_group_data(self, group_obj):
        """Return a human-readable spec for the given DRS group.

        Args:
            group_obj: DRS group managed object.

        Returns: Dict with 'group_name', member list ('hosts' or 'vms') and
            'type' ('host' or 'vm'); empty dict for a falsy group_obj.
        """
        if not group_obj:
            return {}

        # Host groups expose a 'host' attribute; VM groups do not.
        if hasattr(group_obj, 'host'):
            return dict(
                group_name=group_obj.name,
                hosts=self.__get_all_from_group(group_obj=group_obj, host_group=True),
                type="host"
            )
        return dict(
            group_name=group_obj.name,
            vms=self.__get_all_from_group(group_obj=group_obj),
            type="vm"
        )

    def gather_info(self):
        """Gather DRS group information for every selected cluster.

        Populates the internal result dict (cluster name -> list of
        normalized group dicts); retrieve it via get_result().
        """
        cluster_group_info = dict()

        for cluster_obj in self.cluster_obj_list:
            cluster_group_info[cluster_obj.name] = [
                self.__normalize_group_data(drs_group)
                for drs_group in cluster_obj.configurationEx.group
            ]

        self.__set_result(cluster_group_info)
|
||||
|
||||
|
||||
def main():
    """Entry point: gather DRS VM/Host group info and exit the module."""
    spec = vmware_argument_spec()
    spec.update(
        datacenter=dict(type='str', required=False, aliases=['datacenter_name']),
        cluster_name=dict(type='str', required=False),
    )

    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=True,
        required_one_of=[['cluster_name', 'datacenter']],
        mutually_exclusive=[['cluster_name', 'datacenter']],
    )

    try:
        # Build the manager; it resolves the cluster list up front.
        manager = VmwareDrsGroupInfoManager(
            module=module,
            datacenter_name=module.params.get('datacenter'),
            cluster_name=module.params.get('cluster_name', None))

        manager.gather_info()

        results = dict(failed=False,
                       drs_group_info=manager.get_result())
    except Exception as error:
        results = dict(failed=True, msg="Error: %s" % error)

    if results['failed']:
        module.fail_json(**results)
    module.exit_json(**results)


if __name__ == "__main__":
    main()
|
@ -1,262 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_drs_rule_info
|
||||
short_description: Gathers info about DRS rule on the given cluster
|
||||
description:
|
||||
- 'This module can be used to gather information about DRS VM-VM and VM-HOST rules from the given cluster.'
|
||||
version_added: '2.9'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
cluster_name:
|
||||
description:
|
||||
- Name of the cluster.
|
||||
- DRS information for the given cluster will be returned.
|
||||
- This is required parameter if C(datacenter) parameter is not provided.
|
||||
type: str
|
||||
datacenter:
|
||||
description:
|
||||
- Name of the datacenter.
|
||||
- DRS information for all the clusters from the given datacenter will be returned.
|
||||
- This is required parameter if C(cluster_name) parameter is not provided.
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather DRS info about given Cluster
|
||||
vmware_drs_rule_info:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
cluster_name: '{{ cluster_name }}'
|
||||
delegate_to: localhost
|
||||
register: cluster_drs_info
|
||||
|
||||
- name: Gather DRS info about all Clusters in given datacenter
|
||||
vmware_drs_rule_info:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter: '{{ datacenter_name }}'
|
||||
delegate_to: localhost
|
||||
register: datacenter_drs_info
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
drs_rule_info:
|
||||
description: metadata about DRS rule from given cluster / datacenter
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"DC0_C0": [
|
||||
{
|
||||
"rule_affinity": false,
|
||||
"rule_enabled": true,
|
||||
"rule_key": 1,
|
||||
"rule_mandatory": true,
|
||||
"rule_name": "drs_rule_0001",
|
||||
"rule_type": "vm_vm_rule",
|
||||
"rule_uuid": "52be5061-665a-68dc-3d25-85cd2d37e114",
|
||||
"rule_vms": [
|
||||
"VM_65",
|
||||
"VM_146"
|
||||
]
|
||||
},
|
||||
],
|
||||
"DC1_C1": [
|
||||
{
|
||||
"rule_affine_host_group_name": "host_group_1",
|
||||
"rule_affine_hosts": [
|
||||
"10.76.33.204"
|
||||
],
|
||||
"rule_anti_affine_host_group_name": null,
|
||||
"rule_anti_affine_hosts": [],
|
||||
"rule_enabled": true,
|
||||
"rule_key": 1,
|
||||
"rule_mandatory": false,
|
||||
"rule_name": "vm_host_rule_0001",
|
||||
"rule_type": "vm_host_rule",
|
||||
"rule_uuid": "52687108-4d3a-76f2-d29c-b708c40dbe40",
|
||||
"rule_vm_group_name": "test_vm_group_1",
|
||||
"rule_vms": [
|
||||
"VM_8916",
|
||||
"VM_4010"
|
||||
]
|
||||
}
|
||||
],
|
||||
}
|
||||
'''
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datacenter_by_name, get_all_objs
|
||||
|
||||
|
||||
class VmwareDrsInfoManager(PyVmomi):
    """Gather DRS VM-VM and VM-Host rule information from one cluster or all
    clusters in a datacenter."""

    def __init__(self, module):
        """Resolve the list of cluster objects to inspect from the module's
        'datacenter' / 'cluster_name' parameters, failing the module if the
        named object cannot be found."""
        super(VmwareDrsInfoManager, self).__init__(module)

        datacenter_name = self.params.get('datacenter', None)
        if datacenter_name:
            datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter_name)
            self.cluster_obj_list = []
            if datacenter_obj:
                # Scan every cluster under the datacenter's host folder.
                folder = datacenter_obj.hostFolder
                self.cluster_obj_list = get_all_objs(self.content, [vim.ClusterComputeResource], folder)
            else:
                self.module.fail_json(changed=False, msg="Datacenter '%s' not found" % datacenter_name)

        # NOTE(review): if neither 'datacenter' nor 'cluster_name' is set,
        # cluster_obj_list is never initialized; presumably the module's
        # required_one_of guards this — confirm against main().
        cluster_name = self.params.get('cluster_name', None)
        if cluster_name:
            cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name)
            if cluster_obj is None:
                self.module.fail_json(changed=False, msg="Cluster '%s' not found" % cluster_name)
            else:
                self.cluster_obj_list = [cluster_obj]

    def get_all_from_group(self, group_name=None, cluster_obj=None, hostgroup=False):
        """
        Return all VM / Host names using given group name
        Args:
            group_name: Rule name
            cluster_obj: Cluster managed object
            hostgroup: True if we want only host name from group

        Returns: List of VM / Host names belonging to given group object
            (empty when group_name or cluster_obj is missing, or when no
            group of the requested kind matches the name)

        """
        obj_name_list = []
        if not all([group_name, cluster_obj]):
            return obj_name_list

        for group in cluster_obj.configurationEx.group:
            if group.name == group_name:
                # Only extract members when the group's type matches the
                # requested kind (VM group vs host group).
                if not hostgroup and isinstance(group, vim.cluster.VmGroup):
                    obj_name_list = [vm.name for vm in group.vm]
                    break
                elif hostgroup and isinstance(group, vim.cluster.HostGroup):
                    obj_name_list = [host.name for host in group.host]
                    break

        return obj_name_list

    @staticmethod
    def normalize_vm_vm_rule_spec(rule_obj=None):
        """
        Return human readable rule spec
        Args:
            rule_obj: Rule managed object

        Returns: Dictionary with DRS VM VM Rule info; empty dict when
            rule_obj is None

        """
        if rule_obj is None:
            return {}
        # An affinity rule keeps VMs together; an anti-affinity rule is
        # represented by a different vim type, hence the isinstance check.
        return dict(rule_key=rule_obj.key,
                    rule_enabled=rule_obj.enabled,
                    rule_name=rule_obj.name,
                    rule_mandatory=rule_obj.mandatory,
                    rule_uuid=rule_obj.ruleUuid,
                    rule_vms=[vm.name for vm in rule_obj.vm],
                    rule_type="vm_vm_rule",
                    rule_affinity=True if isinstance(rule_obj, vim.cluster.AffinityRuleSpec) else False,
                    )

    def normalize_vm_host_rule_spec(self, rule_obj=None, cluster_obj=None):
        """
        Return human readable rule spec
        Args:
            rule_obj: Rule managed object
            cluster_obj: Cluster managed object

        Returns: Dictionary with DRS VM HOST Rule info; empty dict when
            either argument is missing

        """
        if not all([rule_obj, cluster_obj]):
            return {}
        # VM-Host rules reference groups by name; resolve each referenced
        # group to its member names via get_all_from_group().
        return dict(rule_key=rule_obj.key,
                    rule_enabled=rule_obj.enabled,
                    rule_name=rule_obj.name,
                    rule_mandatory=rule_obj.mandatory,
                    rule_uuid=rule_obj.ruleUuid,
                    rule_vm_group_name=rule_obj.vmGroupName,
                    rule_affine_host_group_name=rule_obj.affineHostGroupName,
                    rule_anti_affine_host_group_name=rule_obj.antiAffineHostGroupName,
                    rule_vms=self.get_all_from_group(group_name=rule_obj.vmGroupName,
                                                     cluster_obj=cluster_obj),
                    rule_affine_hosts=self.get_all_from_group(group_name=rule_obj.affineHostGroupName,
                                                              cluster_obj=cluster_obj,
                                                              hostgroup=True),
                    rule_anti_affine_hosts=self.get_all_from_group(group_name=rule_obj.antiAffineHostGroupName,
                                                                   cluster_obj=cluster_obj,
                                                                   hostgroup=True),
                    rule_type="vm_host_rule",
                    )

    def gather_drs_rule_info(self):
        """
        Gather DRS rule information about given cluster
        Returns: Dictionary of clusters with DRS information
            (cluster name -> list of normalized rule dicts)

        """
        cluster_rule_info = dict()
        for cluster_obj in self.cluster_obj_list:
            cluster_rule_info[cluster_obj.name] = []
            for drs_rule in cluster_obj.configuration.rule:
                # VM-Host rules carry group references and need the cluster
                # to resolve them; all other rules are VM-VM rules.
                if isinstance(drs_rule, vim.cluster.VmHostRuleInfo):
                    cluster_rule_info[cluster_obj.name].append(self.normalize_vm_host_rule_spec(
                        rule_obj=drs_rule,
                        cluster_obj=cluster_obj))
                else:
                    cluster_rule_info[cluster_obj.name].append(self.normalize_vm_vm_rule_spec(rule_obj=drs_rule))

        return cluster_rule_info
|
||||
|
||||
|
||||
def main():
    """Entry point: gather DRS rule info and exit the module."""
    spec = vmware_argument_spec()
    spec.update(
        datacenter=dict(type='str', required=False),
        cluster_name=dict(type='str', required=False),
    )

    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[
            ['cluster_name', 'datacenter'],
        ],
        supports_check_mode=True,
    )

    # Info gathering never changes state, so changed is always False.
    manager = VmwareDrsInfoManager(module)
    rule_info = manager.gather_drs_rule_info()
    module.exit_json(changed=False, drs_rule_info=rule_info)


if __name__ == "__main__":
    main()
|
@ -1,295 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# Copyright: (c) 2019, VMware Inc.
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_dvs_host
|
||||
short_description: Add or remove a host from distributed virtual switch
|
||||
description:
|
||||
- Manage a host system from distributed virtual switch.
|
||||
version_added: 2.0
|
||||
author:
|
||||
- Joseph Callen (@jcpowermac)
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
- Joseph Andreatta (@vmwjoseph)
|
||||
notes:
|
||||
- Tested on vSphere 5.5
|
||||
requirements:
|
||||
- "python >= 2.7"
|
||||
- PyVmomi
|
||||
options:
|
||||
esxi_hostname:
|
||||
description:
|
||||
- The ESXi hostname.
|
||||
required: True
|
||||
type: str
|
||||
switch_name:
|
||||
description:
|
||||
- The name of the Distributed vSwitch.
|
||||
required: True
|
||||
type: str
|
||||
vmnics:
|
||||
description:
|
||||
- The ESXi hosts vmnics to use with the Distributed vSwitch.
|
||||
required: True
|
||||
type: list
|
||||
state:
|
||||
description:
|
||||
- If the host should be present or absent attached to the vSwitch.
|
||||
choices: [ present, absent ]
|
||||
required: True
|
||||
default: 'present'
|
||||
type: str
|
||||
vendor_specific_config:
|
||||
description:
|
||||
- List of key,value dictionaries for the Vendor Specific Configuration.
|
||||
- 'Element attributes are:'
|
||||
- '- C(key) (str): Key of setting. (default: None)'
|
||||
- '- C(value) (str): Value of setting. (default: None)'
|
||||
required: False
|
||||
version_added: '2.9'
|
||||
type: list
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Add Host to dVS
|
||||
vmware_dvs_host:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
esxi_hostname: '{{ esxi_hostname }}'
|
||||
switch_name: dvSwitch
|
||||
vmnics:
|
||||
- vmnic0
|
||||
- vmnic1
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Add Host to dVS/enable learnswitch (https://labs.vmware.com/flings/learnswitch)
|
||||
vmware_dvs_host:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
esxi_hostname: '{{ esxi_hostname }}'
|
||||
switch_name: dvSwitch
|
||||
vendor_specific_config:
|
||||
- key: com.vmware.netoverlay.layer1
|
||||
value: learnswitch
|
||||
vmnics:
|
||||
- vmnic0
|
||||
- vmnic1
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
try:
|
||||
from collections import Counter
|
||||
HAS_COLLECTIONS_COUNTER = True
|
||||
except ImportError as e:
|
||||
HAS_COLLECTIONS_COUNTER = False
|
||||
|
||||
try:
|
||||
from pyVmomi import vim, vmodl
|
||||
except ImportError as e:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import (PyVmomi, find_dvs_by_name, find_hostsystem_by_name,
|
||||
vmware_argument_spec, wait_for_task)
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
class VMwareDvsHost(PyVmomi):
    """Add, update or remove an ESXi host on a distributed virtual switch."""

    def __init__(self, module):
        """Cache module parameters; managed objects are resolved later in
        check_dvs_host_state().

        Args:
            module: AnsibleModule instance.
        """
        super(VMwareDvsHost, self).__init__(module)
        # Fix: the original assigned self.dv_switch = None twice; once is enough.
        self.dv_switch = None
        self.uplink_portgroup = None
        self.host = None
        self.nic = None
        self.state = self.module.params['state']
        self.switch_name = self.module.params['switch_name']
        self.esxi_hostname = self.module.params['esxi_hostname']
        self.vmnics = self.module.params['vmnics']
        self.vendor_specific_config = self.module.params['vendor_specific_config']

    def process_state(self):
        """Dispatch to the handler matching (desired state, current state)
        and fail the module on any vSphere fault."""
        dvs_host_states = {
            'absent': {
                'present': self.state_destroy_dvs_host,
                'absent': self.state_exit_unchanged,
            },
            'present': {
                'update': self.state_update_dvs_host,
                'present': self.state_exit_unchanged,
                'absent': self.state_create_dvs_host,
            }
        }

        try:
            dvs_host_states[self.state][self.check_dvs_host_state()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=to_native(runtime_fault.msg))
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=to_native(method_fault.msg))
        except Exception as e:
            self.module.fail_json(msg=to_native(e))

    def find_dvs_uplink_pg(self):
        """Return the switch's uplink port group, or None when it has none.

        There should only always be a single uplink port group on
        a distributed virtual switch.
        """
        dvs_uplink_pg = self.dv_switch.config.uplinkPortgroup[0] if len(self.dv_switch.config.uplinkPortgroup) else None
        return dvs_uplink_pg

    def modify_dvs_host(self, operation):
        """Reconfigure the DVS host membership.

        Args:
            operation: One of "edit", "add" or "remove".

        Returns:
            (changed, result) tuple from wait_for_task().
        """
        changed, result = False, None
        spec = vim.DistributedVirtualSwitch.ConfigSpec()
        spec.configVersion = self.dv_switch.config.configVersion
        spec.host = [vim.dvs.HostMember.ConfigSpec()]
        spec.host[0].operation = operation
        spec.host[0].host = self.host
        if self.vendor_specific_config:
            config = list()
            for item in self.vendor_specific_config:
                config.append(vim.dvs.KeyedOpaqueBlob(key=item['key'], opaqueData=item['value']))
            spec.host[0].vendorSpecificConfig = config

        # Only "edit" and "add" carry pNIC backing; "remove" does not.
        if operation in ("edit", "add"):
            spec.host[0].backing = vim.dvs.HostMember.PnicBacking()

            for count, nic in enumerate(self.vmnics):
                spec.host[0].backing.pnicSpec.append(vim.dvs.HostMember.PnicSpec())
                spec.host[0].backing.pnicSpec[count].pnicDevice = nic
                spec.host[0].backing.pnicSpec[count].uplinkPortgroupKey = self.uplink_portgroup.key

        try:
            task = self.dv_switch.ReconfigureDvs_Task(spec)
            changed, result = wait_for_task(task)
        except vmodl.fault.NotSupported as not_supported:
            self.module.fail_json(msg="Failed to configure DVS host %s as it is not"
                                      " compatible with the VDS version." % self.esxi_hostname,
                                  details=to_native(not_supported.msg))
        return changed, result

    def state_destroy_dvs_host(self):
        """Remove the host from the switch (honors check mode) and exit."""
        operation, changed, result = ("remove", True, None)

        if not self.module.check_mode:
            changed, result = self.modify_dvs_host(operation)
        self.module.exit_json(changed=changed, result=to_native(result))

    def state_exit_unchanged(self):
        """Exit reporting no change."""
        self.module.exit_json(changed=False)

    def state_update_dvs_host(self):
        """Edit the host's membership on the switch (honors check mode) and exit."""
        operation, changed, result = ("edit", True, None)

        if not self.module.check_mode:
            changed, result = self.modify_dvs_host(operation)
        self.module.exit_json(changed=changed, result=to_native(result))

    def state_create_dvs_host(self):
        """Add the host to the switch (honors check mode) and exit."""
        operation, changed, result = ("add", True, None)

        if not self.module.check_mode:
            changed, result = self.modify_dvs_host(operation)
        self.module.exit_json(changed=changed, result=to_native(result))

    def find_host_attached_dvs(self):
        """Return the HostSystem already attached to the switch with the
        configured hostname, or None when not attached."""
        for dvs_host_member in self.dv_switch.config.host:
            if dvs_host_member.config.host.name == self.esxi_hostname:
                return dvs_host_member.config.host

        return None

    def check_uplinks(self):
        """Return True when the host's currently-backed pNICs match the
        requested vmnics exactly (order-insensitive, multiset compare)."""
        pnic_device = []

        for dvs_host_member in self.dv_switch.config.host:
            if dvs_host_member.config.host == self.host:
                for pnicSpec in dvs_host_member.config.backing.pnicSpec:
                    pnic_device.append(pnicSpec.pnicDevice)

        return Counter(pnic_device) == Counter(self.vmnics)

    def check_dvs_host_state(self):
        """Resolve switch, uplink port group and host objects, then return
        the host's current state on the switch: 'absent', 'present' or
        'update' (attached but with different uplinks).

        Fails the module when the switch, its uplink port group, or the
        ESXi host cannot be found.
        """
        self.dv_switch = find_dvs_by_name(self.content, self.switch_name)

        if self.dv_switch is None:
            self.module.fail_json(msg="A distributed virtual switch %s "
                                      "does not exist" % self.switch_name)

        self.uplink_portgroup = self.find_dvs_uplink_pg()

        if self.uplink_portgroup is None:
            self.module.fail_json(msg="An uplink portgroup does not exist on"
                                      " the distributed virtual switch %s" % self.switch_name)

        self.host = self.find_host_attached_dvs()

        if self.host is None:
            # We still need the HostSystem object to add the host
            # to the distributed vswitch
            self.host = find_hostsystem_by_name(self.content, self.esxi_hostname)
            if self.host is None:
                self.module.fail_json(msg="The esxi_hostname %s does not exist "
                                          "in vCenter" % self.esxi_hostname)
            return 'absent'
        else:
            if self.check_uplinks():
                return 'present'
            else:
                return 'update'
|
||||
|
||||
|
||||
def main():
    """Entry point: validate parameters and drive the DVS host state machine."""
    spec = vmware_argument_spec()
    spec.update(
        dict(
            esxi_hostname=dict(required=True, type='str'),
            switch_name=dict(required=True, type='str'),
            vmnics=dict(required=True, type='list'),
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            vendor_specific_config=dict(
                type='list',
                elements='dict',
                required=False,
                options=dict(
                    key=dict(type='str', required=True),
                    value=dict(type='str', required=True),
                ),
            ),
        )
    )

    module = AnsibleModule(argument_spec=spec,
                           supports_check_mode=True)

    # check_uplinks() relies on collections.Counter; bail out early if the
    # import failed at load time.
    if not HAS_COLLECTIONS_COUNTER:
        module.fail_json(msg='collections.Counter from Python-2.7 is required for this module')

    host_manager = VMwareDvsHost(module)
    host_manager.process_state()


if __name__ == '__main__':
    main()
|
@ -1,527 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
|
||||
# Copyright: (c) 2017-2018, Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_dvs_portgroup
|
||||
short_description: Create or remove a Distributed vSwitch portgroup.
|
||||
description:
|
||||
- Create or remove a Distributed vSwitch portgroup.
|
||||
version_added: 2.0
|
||||
author:
|
||||
- Joseph Callen (@jcpowermac)
|
||||
- Philippe Dellaert (@pdellaert) <philippe@dellaert.org>
|
||||
notes:
|
||||
- Tested on vSphere 5.5
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
portgroup_name:
|
||||
description:
|
||||
- The name of the portgroup that is to be created or deleted.
|
||||
required: True
|
||||
type: str
|
||||
switch_name:
|
||||
description:
|
||||
- The name of the distributed vSwitch the port group should be created on.
|
||||
required: True
|
||||
type: str
|
||||
vlan_id:
|
||||
description:
|
||||
- The VLAN ID that should be configured with the portgroup, use 0 for no VLAN.
|
||||
- 'If C(vlan_trunk) is configured to be I(true), this can be a combination of multiple ranges and numbers, example: 1-200, 205, 400-4094.'
|
||||
- The valid C(vlan_id) range is from 0 to 4094. Overlapping ranges are allowed.
|
||||
required: True
|
||||
type: str
|
||||
num_ports:
|
||||
description:
|
||||
- The number of ports the portgroup should contain.
|
||||
required: True
|
||||
type: int
|
||||
portgroup_type:
|
||||
description:
|
||||
- See VMware KB 1022312 regarding portgroup types.
|
||||
required: True
|
||||
choices:
|
||||
- 'earlyBinding'
|
||||
- 'lateBinding'
|
||||
- 'ephemeral'
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Determines if the portgroup should be present or not.
|
||||
required: True
|
||||
type: str
|
||||
choices:
|
||||
- 'present'
|
||||
- 'absent'
|
||||
version_added: '2.5'
|
||||
vlan_trunk:
|
||||
description:
|
||||
- Indicates whether this is a VLAN trunk or not.
|
||||
required: False
|
||||
default: False
|
||||
type: bool
|
||||
version_added: '2.5'
|
||||
network_policy:
|
||||
description:
|
||||
- Dictionary which configures the different security values for portgroup.
|
||||
- 'Valid attributes are:'
|
||||
- '- C(promiscuous) (bool): indicates whether promiscuous mode is allowed. (default: false)'
|
||||
- '- C(forged_transmits) (bool): indicates whether forged transmits are allowed. (default: false)'
|
||||
- '- C(mac_changes) (bool): indicates whether mac changes are allowed. (default: false)'
|
||||
required: False
|
||||
version_added: '2.5'
|
||||
default: {
|
||||
promiscuous: False,
|
||||
forged_transmits: False,
|
||||
mac_changes: False,
|
||||
}
|
||||
type: dict
|
||||
teaming_policy:
|
||||
description:
|
||||
- Dictionary which configures the different teaming values for portgroup.
|
||||
- 'Valid attributes are:'
|
||||
- '- C(load_balance_policy) (string): Network adapter teaming policy. (default: loadbalance_srcid)'
|
||||
- ' - choices: [ loadbalance_ip, loadbalance_srcmac, loadbalance_srcid, loadbalance_loadbased, failover_explicit]'
|
||||
- ' - "loadbalance_loadbased" is available from version 2.6 and onwards'
|
||||
- '- C(inbound_policy) (bool): Indicate whether or not the teaming policy is applied to inbound frames as well. (default: False)'
|
||||
- '- C(notify_switches) (bool): Indicate whether or not to notify the physical switch if a link fails. (default: True)'
|
||||
- '- C(rolling_order) (bool): Indicate whether or not to use a rolling policy when restoring links. (default: False)'
|
||||
required: False
|
||||
version_added: '2.5'
|
||||
default: {
|
||||
'notify_switches': True,
|
||||
'load_balance_policy': 'loadbalance_srcid',
|
||||
'inbound_policy': False,
|
||||
'rolling_order': False
|
||||
}
|
||||
type: dict
|
||||
port_policy:
|
||||
description:
|
||||
- Dictionary which configures the advanced policy settings for the portgroup.
|
||||
- 'Valid attributes are:'
|
||||
- '- C(block_override) (bool): indicates if the block policy can be changed per port. (default: true)'
|
||||
- '- C(ipfix_override) (bool): indicates if the ipfix policy can be changed per port. (default: false)'
|
||||
- '- C(live_port_move) (bool): indicates if a live port can be moved in or out of the portgroup. (default: false)'
|
||||
- '- C(network_rp_override) (bool): indicates if the network resource pool can be changed per port. (default: false)'
|
||||
- '- C(port_config_reset_at_disconnect) (bool): indicates if the configuration of a port is reset automatically after disconnect. (default: true)'
|
||||
- '- C(security_override) (bool): indicates if the security policy can be changed per port. (default: false)'
|
||||
- '- C(shaping_override) (bool): indicates if the shaping policy can be changed per port. (default: false)'
|
||||
- '- C(traffic_filter_override) (bool): indicates if the traffic filter can be changed per port. (default: false)'
|
||||
- '- C(uplink_teaming_override) (bool): indicates if the uplink teaming policy can be changed per port. (default: false)'
|
||||
- '- C(vendor_config_override) (bool): indicates if the vendor config can be changed per port. (default: false)'
|
||||
- '- C(vlan_override) (bool): indicates if the vlan can be changed per port. (default: false)'
|
||||
required: False
|
||||
version_added: '2.5'
|
||||
default: {
|
||||
'traffic_filter_override': False,
|
||||
'network_rp_override': False,
|
||||
'live_port_move': False,
|
||||
'security_override': False,
|
||||
'vendor_config_override': False,
|
||||
'port_config_reset_at_disconnect': True,
|
||||
'uplink_teaming_override': False,
|
||||
'block_override': True,
|
||||
'shaping_override': False,
|
||||
'vlan_override': False,
|
||||
'ipfix_override': False
|
||||
}
|
||||
type: dict
|
||||
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create vlan portgroup
|
||||
vmware_dvs_portgroup:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
portgroup_name: vlan-123-portrgoup
|
||||
switch_name: dvSwitch
|
||||
vlan_id: 123
|
||||
num_ports: 120
|
||||
portgroup_type: earlyBinding
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Create vlan trunk portgroup
|
||||
vmware_dvs_portgroup:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
portgroup_name: vlan-trunk-portrgoup
|
||||
switch_name: dvSwitch
|
||||
vlan_id: 1-1000, 1005, 1100-1200
|
||||
vlan_trunk: True
|
||||
num_ports: 120
|
||||
portgroup_type: earlyBinding
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Create no-vlan portgroup
|
||||
vmware_dvs_portgroup:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
portgroup_name: no-vlan-portrgoup
|
||||
switch_name: dvSwitch
|
||||
vlan_id: 0
|
||||
num_ports: 120
|
||||
portgroup_type: earlyBinding
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Create vlan portgroup with all security and port policies
|
||||
vmware_dvs_portgroup:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
portgroup_name: vlan-123-portrgoup
|
||||
switch_name: dvSwitch
|
||||
vlan_id: 123
|
||||
num_ports: 120
|
||||
portgroup_type: earlyBinding
|
||||
state: present
|
||||
network_policy:
|
||||
promiscuous: yes
|
||||
forged_transmits: yes
|
||||
mac_changes: yes
|
||||
port_policy:
|
||||
block_override: yes
|
||||
ipfix_override: yes
|
||||
live_port_move: yes
|
||||
network_rp_override: yes
|
||||
port_config_reset_at_disconnect: yes
|
||||
security_override: yes
|
||||
shaping_override: yes
|
||||
traffic_filter_override: yes
|
||||
uplink_teaming_override: yes
|
||||
vendor_config_override: yes
|
||||
vlan_override: yes
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
try:
|
||||
from pyVmomi import vim, vmodl
|
||||
except ImportError as e:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import (PyVmomi, find_dvs_by_name, find_dvspg_by_name,
|
||||
vmware_argument_spec, wait_for_task)
|
||||
|
||||
|
||||
class VMwareDvsPortgroup(PyVmomi):
    """Create, update or delete a portgroup on a VMware distributed virtual switch (DVS)."""

    def __init__(self, module):
        super(VMwareDvsPortgroup, self).__init__(module)
        # Both are resolved later by check_dvspg_state().
        self.dvs_portgroup = None
        self.dv_switch = None

    def create_vlan_list(self):
        """Parse the 'vlan_id' module parameter into a sorted list of (start, end) tuples.

        The parameter is a comma separated string where each element is either a
        single VLAN id or a 'start-end' range. Single ids are normalized to
        (id, id) tuples so callers can treat everything as ranges. Invalid
        input aborts the module via fail_json.
        """
        vlan_id_list = []
        for vlan_id_splitted in self.module.params['vlan_id'].split(','):
            vlans = vlan_id_splitted.split('-')
            if len(vlans) > 2:
                self.module.fail_json(msg="Invalid VLAN range %s." % vlan_id_splitted)
            if len(vlans) == 2:
                vlan_id_start = vlans[0].strip()
                vlan_id_end = vlans[1].strip()
                if not vlan_id_start.isdigit():
                    self.module.fail_json(msg="Invalid VLAN %s." % vlan_id_start)
                if not vlan_id_end.isdigit():
                    self.module.fail_json(msg="Invalid VLAN %s." % vlan_id_end)
                vlan_id_start = int(vlan_id_start)
                vlan_id_end = int(vlan_id_end)
                # Valid VLAN ids are 0..4094 inclusive.
                if vlan_id_start not in range(0, 4095) or vlan_id_end not in range(0, 4095):
                    self.module.fail_json(msg="vlan_id range %s specified is incorrect. The valid vlan_id range is from 0 to 4094." % vlan_id_splitted)
                vlan_id_list.append((vlan_id_start, vlan_id_end))
            else:
                vlan_id = vlans[0].strip()
                if not vlan_id.isdigit():
                    self.module.fail_json(msg="Invalid VLAN %s." % vlan_id)
                vlan_id = int(vlan_id)
                vlan_id_list.append((vlan_id, vlan_id))

        vlan_id_list.sort()

        return vlan_id_list

    def build_config(self):
        """Translate module parameters into a vim DVPortgroup ConfigSpec."""
        config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()

        # Basic config
        config.name = self.module.params['portgroup_name']
        config.numPorts = self.module.params['num_ports']

        # Default port config: VLAN (trunk ranges or a single id) and security policy.
        config.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
        if self.module.params['vlan_trunk']:
            config.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec()
            config.defaultPortConfig.vlan.vlanId = list(map(lambda x: vim.NumericRange(start=x[0], end=x[1]), self.create_vlan_list()))
        else:
            config.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
            config.defaultPortConfig.vlan.vlanId = int(self.module.params['vlan_id'])
        config.defaultPortConfig.vlan.inherited = False
        config.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
        config.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=self.module.params['network_policy']['promiscuous'])
        config.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=self.module.params['network_policy']['forged_transmits'])
        config.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=self.module.params['network_policy']['mac_changes'])

        # Teaming Policy
        teamingPolicy = vim.dvs.VmwareDistributedVirtualSwitch.UplinkPortTeamingPolicy()
        teamingPolicy.policy = vim.StringPolicy(value=self.module.params['teaming_policy']['load_balance_policy'])
        teamingPolicy.reversePolicy = vim.BoolPolicy(value=self.module.params['teaming_policy']['inbound_policy'])
        teamingPolicy.notifySwitches = vim.BoolPolicy(value=self.module.params['teaming_policy']['notify_switches'])
        teamingPolicy.rollingOrder = vim.BoolPolicy(value=self.module.params['teaming_policy']['rolling_order'])
        config.defaultPortConfig.uplinkTeamingPolicy = teamingPolicy

        # PG policy (advanced_policy): per-port override permissions.
        config.policy = vim.dvs.VmwareDistributedVirtualSwitch.VMwarePortgroupPolicy()
        config.policy.blockOverrideAllowed = self.module.params['port_policy']['block_override']
        config.policy.ipfixOverrideAllowed = self.module.params['port_policy']['ipfix_override']
        config.policy.livePortMovingAllowed = self.module.params['port_policy']['live_port_move']
        config.policy.networkResourcePoolOverrideAllowed = self.module.params['port_policy']['network_rp_override']
        config.policy.portConfigResetAtDisconnect = self.module.params['port_policy']['port_config_reset_at_disconnect']
        config.policy.securityPolicyOverrideAllowed = self.module.params['port_policy']['security_override']
        config.policy.shapingOverrideAllowed = self.module.params['port_policy']['shaping_override']
        config.policy.trafficFilterOverrideAllowed = self.module.params['port_policy']['traffic_filter_override']
        config.policy.uplinkTeamingOverrideAllowed = self.module.params['port_policy']['uplink_teaming_override']
        config.policy.vendorConfigOverrideAllowed = self.module.params['port_policy']['vendor_config_override']
        config.policy.vlanOverrideAllowed = self.module.params['port_policy']['vlan_override']

        # PG Type
        config.type = self.module.params['portgroup_type']

        return config

    def process_state(self):
        """Dispatch on (requested state, current state) and run the matching transition."""
        dvspg_states = {
            'absent': {
                'present': self.state_destroy_dvspg,
                'absent': self.state_exit_unchanged,
            },
            'present': {
                'update': self.state_update_dvspg,
                'present': self.state_exit_unchanged,
                'absent': self.state_create_dvspg,
            }
        }
        try:
            dvspg_states[self.module.params['state']][self.check_dvspg_state()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            self.module.fail_json(msg=str(e))

    def update_port_group(self):
        """Reconfigure the existing portgroup; returns (changed, task result)."""
        config = self.build_config()
        # configVersion must match the current object or vCenter rejects the update.
        config.configVersion = self.dvs_portgroup.config.configVersion
        task = self.dvs_portgroup.ReconfigureDVPortgroup_Task(config)
        changed, result = wait_for_task(task)
        return changed, result

    def create_port_group(self):
        """Create the portgroup on the resolved dvswitch; returns (changed, task result)."""
        config = self.build_config()
        task = self.dv_switch.AddDVPortgroup_Task([config])
        changed, result = wait_for_task(task)
        return changed, result

    def state_destroy_dvspg(self):
        """Delete the portgroup (no-op task in check mode) and exit the module."""
        changed = True
        result = None

        if not self.module.check_mode:
            task = self.dvs_portgroup.Destroy_Task()
            changed, result = wait_for_task(task)
        self.module.exit_json(changed=changed, result=str(result))

    def state_exit_unchanged(self):
        """Exit reporting no change."""
        self.module.exit_json(changed=False)

    def state_update_dvspg(self):
        """Update the portgroup (skipped in check mode) and exit the module."""
        changed = True
        result = None

        if not self.module.check_mode:
            changed, result = self.update_port_group()
        self.module.exit_json(changed=changed, result=str(result))

    def state_create_dvspg(self):
        """Create the portgroup (skipped in check mode) and exit the module."""
        changed = True
        result = None

        if not self.module.check_mode:
            changed, result = self.create_port_group()
        self.module.exit_json(changed=changed, result=str(result))

    def check_dvspg_state(self):
        """Return 'absent', 'present' or 'update' for the portgroup vs. requested config."""
        self.dv_switch = find_dvs_by_name(self.content, self.module.params['switch_name'])

        if self.dv_switch is None:
            self.module.fail_json(msg="A distributed virtual switch with name %s does not exist" % self.module.params['switch_name'])
        self.dvs_portgroup = find_dvspg_by_name(self.dv_switch, self.module.params['portgroup_name'])

        if self.dvs_portgroup is None:
            return 'absent'

        # Check config
        # Basic config
        if self.dvs_portgroup.config.numPorts != self.module.params['num_ports']:
            return 'update'

        # Default port config
        defaultPortConfig = self.dvs_portgroup.config.defaultPortConfig
        if self.module.params['vlan_trunk']:
            if not isinstance(defaultPortConfig.vlan, vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec):
                return 'update'
            # BUGFIX: map() returns an iterator on Python 3, so comparing it
            # against a list was always unequal and every run reported 'update'.
            # Materialize it into a list before comparing.
            if list(map(lambda x: (x.start, x.end), defaultPortConfig.vlan.vlanId)) != self.create_vlan_list():
                return 'update'
        else:
            if not isinstance(defaultPortConfig.vlan, vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec):
                return 'update'
            if defaultPortConfig.vlan.vlanId != int(self.module.params['vlan_id']):
                return 'update'

        if defaultPortConfig.securityPolicy.allowPromiscuous.value != self.module.params['network_policy']['promiscuous'] or \
                defaultPortConfig.securityPolicy.forgedTransmits.value != self.module.params['network_policy']['forged_transmits'] or \
                defaultPortConfig.securityPolicy.macChanges.value != self.module.params['network_policy']['mac_changes']:
            return 'update'

        # Teaming Policy
        teamingPolicy = self.dvs_portgroup.config.defaultPortConfig.uplinkTeamingPolicy
        if teamingPolicy.policy.value != self.module.params['teaming_policy']['load_balance_policy'] or \
                teamingPolicy.reversePolicy.value != self.module.params['teaming_policy']['inbound_policy'] or \
                teamingPolicy.notifySwitches.value != self.module.params['teaming_policy']['notify_switches'] or \
                teamingPolicy.rollingOrder.value != self.module.params['teaming_policy']['rolling_order']:
            return 'update'

        # PG policy (advanced_policy)
        policy = self.dvs_portgroup.config.policy
        if policy.blockOverrideAllowed != self.module.params['port_policy']['block_override'] or \
                policy.ipfixOverrideAllowed != self.module.params['port_policy']['ipfix_override'] or \
                policy.livePortMovingAllowed != self.module.params['port_policy']['live_port_move'] or \
                policy.networkResourcePoolOverrideAllowed != self.module.params['port_policy']['network_rp_override'] or \
                policy.portConfigResetAtDisconnect != self.module.params['port_policy']['port_config_reset_at_disconnect'] or \
                policy.securityPolicyOverrideAllowed != self.module.params['port_policy']['security_override'] or \
                policy.shapingOverrideAllowed != self.module.params['port_policy']['shaping_override'] or \
                policy.trafficFilterOverrideAllowed != self.module.params['port_policy']['traffic_filter_override'] or \
                policy.uplinkTeamingOverrideAllowed != self.module.params['port_policy']['uplink_teaming_override'] or \
                policy.vendorConfigOverrideAllowed != self.module.params['port_policy']['vendor_config_override'] or \
                policy.vlanOverrideAllowed != self.module.params['port_policy']['vlan_override']:
            return 'update'

        # PG Type
        if self.dvs_portgroup.config.type != self.module.params['portgroup_type']:
            return 'update'

        return 'present'
|
||||
|
||||
|
||||
def main():
    """Build the argument spec, construct the module object and run the state machine."""
    load_balance_choices = [
        'loadbalance_ip',
        'loadbalance_srcmac',
        'loadbalance_srcid',
        'loadbalance_loadbased',
        'failover_explicit',
    ]

    # Sub-specs are built as named locals so the top-level spec stays readable.
    network_policy_spec = dict(
        type='dict',
        options=dict(
            promiscuous=dict(type='bool', default=False),
            forged_transmits=dict(type='bool', default=False),
            mac_changes=dict(type='bool', default=False)
        ),
        default=dict(
            promiscuous=False,
            forged_transmits=False,
            mac_changes=False
        )
    )

    teaming_policy_spec = dict(
        type='dict',
        options=dict(
            inbound_policy=dict(type='bool', default=False),
            notify_switches=dict(type='bool', default=True),
            rolling_order=dict(type='bool', default=False),
            load_balance_policy=dict(type='str',
                                     default='loadbalance_srcid',
                                     choices=load_balance_choices,
                                     )
        ),
        default=dict(
            inbound_policy=False,
            notify_switches=True,
            rolling_order=False,
            load_balance_policy='loadbalance_srcid',
        ),
    )

    port_policy_spec = dict(
        type='dict',
        options=dict(
            block_override=dict(type='bool', default=True),
            ipfix_override=dict(type='bool', default=False),
            live_port_move=dict(type='bool', default=False),
            network_rp_override=dict(type='bool', default=False),
            port_config_reset_at_disconnect=dict(type='bool', default=True),
            security_override=dict(type='bool', default=False),
            shaping_override=dict(type='bool', default=False),
            traffic_filter_override=dict(type='bool', default=False),
            uplink_teaming_override=dict(type='bool', default=False),
            vendor_config_override=dict(type='bool', default=False),
            vlan_override=dict(type='bool', default=False)
        ),
        default=dict(
            block_override=True,
            ipfix_override=False,
            live_port_move=False,
            network_rp_override=False,
            port_config_reset_at_disconnect=True,
            security_override=False,
            shaping_override=False,
            traffic_filter_override=False,
            uplink_teaming_override=False,
            vendor_config_override=False,
            vlan_override=False
        )
    )

    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            portgroup_name=dict(required=True, type='str'),
            switch_name=dict(required=True, type='str'),
            vlan_id=dict(required=True, type='str'),
            num_ports=dict(required=True, type='int'),
            portgroup_type=dict(required=True, choices=['earlyBinding', 'lateBinding', 'ephemeral'], type='str'),
            state=dict(required=True, choices=['present', 'absent'], type='str'),
            vlan_trunk=dict(type='bool', default=False),
            network_policy=network_policy_spec,
            teaming_policy=teaming_policy_spec,
            port_policy=port_policy_spec,
        )
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    vmware_dvs_portgroup = VMwareDvsPortgroup(module)
    vmware_dvs_portgroup.process_state()
|
||||
|
||||
|
||||
# Run the module only when this file is executed directly, not when imported.
if __name__ == '__main__':
    main()
|
@ -1,213 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_dvs_portgroup_find
|
||||
short_description: Find portgroup(s) in a VMware environment
|
||||
description:
|
||||
- Find portgroup(s) based on different criteria such as distributed vSwitch, VLAN id or a string in the name.
|
||||
version_added: 2.9
|
||||
author:
|
||||
- David Martinez (@dx0xm)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.7
|
||||
- PyVmomi
|
||||
options:
|
||||
dvswitch:
|
||||
description:
|
||||
- Name of a distributed vSwitch to look for.
|
||||
type: str
|
||||
vlanid:
|
||||
description:
|
||||
- VLAN id can be any number between 1 and 4094.
|
||||
- This search criterion looks into VLAN ranges to find possible matches.
|
||||
required: false
|
||||
type: int
|
||||
name:
|
||||
description:
|
||||
- string to check inside the name of the portgroup.
|
||||
- Basic containment check using python C(in) operation.
|
||||
type: str
|
||||
show_uplink:
|
||||
description:
|
||||
- Show or hide uplink portgroups.
|
||||
- Only relevant when C(vlanid) is supplied.
|
||||
type: bool
|
||||
default: False
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Get all portgroups in dvswitch vDS
|
||||
vmware_dvs_portgroup_find:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
dvswitch: 'vDS'
|
||||
validate_certs: no
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Confirm if vlan 15 is present
|
||||
vmware_dvs_portgroup_find:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
vlanid: '15'
|
||||
validate_certs: no
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
dvs_portgroups:
|
||||
description: basic details of portgroups found
|
||||
returned: on success
|
||||
type: list
|
||||
sample: [
|
||||
{
|
||||
"dvswitch": "vDS",
|
||||
"name": "N-51",
|
||||
"pvlan": true,
|
||||
"trunk": true,
|
||||
"vlan_id": "0"
|
||||
}
|
||||
]
|
||||
'''
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError as e:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, find_dvs_by_name
|
||||
|
||||
|
||||
class DVSPortgroupFindManager(PyVmomi):
    """Collect DVS portgroups matching the user supplied name/dvswitch/vlan filters."""

    def __init__(self, module):
        super(DVSPortgroupFindManager, self).__init__(module)
        self.dvs_name = self.params['dvswitch']
        self.vlan = self.params['vlanid']
        # Only perform VLAN matching when the user actually supplied a vlanid.
        self.cmp_vlans = True if self.vlan else False
        self.pgs = self.find_portgroups_by_name(self.content, self.module.params['name'])

        if self.dvs_name:
            self.pgs = self.find_portgroups_by_dvs(self.pgs, self.dvs_name)

    def find_portgroups_by_name(self, content, name=None):
        """Return all DVS portgroups, optionally filtered by a name substring."""
        vimtype = [vim.dvs.DistributedVirtualPortgroup]
        container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
        if not name:
            obj = container.view
        else:
            obj = []
            for c in container.view:
                if name in c.name:
                    obj.append(c)

        return obj

    def find_portgroups_by_dvs(self, pgl, dvs):
        """Filter a portgroup list to those whose owning dvswitch name contains *dvs*."""
        obj = []
        for c in pgl:
            if dvs in c.config.distributedVirtualSwitch.name:
                obj.append(c)

        return obj

    def vlan_match(self, pgup, userup, vlanlst):
        """Return True when the portgroup matches the requested VLAN id.

        Args:
            pgup: whether the portgroup is an uplink portgroup.
            userup: the user's show_uplink setting.
            vlanlst: list of VLAN strings, each either 'N' or 'N-M'.
        """
        res = False
        if pgup and userup:
            return True

        for ln in vlanlst:
            if '-' in ln:
                arr = ln.split('-')
                # BUGFIX: the bounds are strings, so the old comparison
                # 'arr[0] < self.vlan' raised TypeError on Python 3 (str vs int)
                # and also excluded the range endpoints. Compare numerically,
                # inclusive of both endpoints.
                if int(arr[0]) <= self.vlan <= int(arr[1]):
                    res = True
            elif ln == str(self.vlan):
                res = True

        return res

    def get_dvs_portgroup(self):
        """Return a list of dicts describing the matching portgroups."""
        pgroups = self.pgs

        pglist = []
        for pg in pgroups:
            trunk = False
            pvlan = False
            vlanInfo = pg.config.defaultPortConfig.vlan
            cl1 = vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec
            cl2 = vim.dvs.VmwareDistributedVirtualSwitch.PvlanSpec
            vlan_id_list = []
            if isinstance(vlanInfo, cl1):
                trunk = True
                for item in vlanInfo.vlanId:
                    if item.start == item.end:
                        vlan_id_list.append(str(item.start))
                    else:
                        vlan_id_list.append(str(item.start) + '-' + str(item.end))
            elif isinstance(vlanInfo, cl2):
                pvlan = True
                vlan_id_list.append(str(vlanInfo.pvlanId))
            else:
                vlan_id_list.append(str(vlanInfo.vlanId))

            # Both branches previously built an identical dict; keep a single
            # construction site and only gate on the VLAN filter when requested
            # (short-circuit preserves the original "filter only if cmp_vlans"
            # behavior).
            if not self.cmp_vlans or self.vlan_match(pg.config.uplink, self.module.params['show_uplink'], vlan_id_list):
                pglist.append(dict(
                    name=pg.name,
                    trunk=trunk,
                    pvlan=pvlan,
                    vlan_id=','.join(vlan_id_list),
                    dvswitch=pg.config.distributedVirtualSwitch.name))

        return pglist
|
||||
|
||||
|
||||
def main():
    """Entry point: find matching DVS portgroups and exit with the result list."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dvswitch=dict(type='str', required=False),
        vlanid=dict(type='int', required=False),
        name=dict(type='str', required=False),
        show_uplink=dict(type='bool', default=False),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            # BUGFIX: the trigger value must be the boolean True, not the
            # string 'True' (a bool parameter never equals the string, so
            # the requirement never fired), and the required parameters must
            # be given as a list, not a bare string.
            ['show_uplink', True, ['vlanid']]
        ]
    )

    dvs_pg_mgr = DVSPortgroupFindManager(module)
    module.exit_json(changed=False,
                     dvs_portgroups=dvs_pg_mgr.get_dvs_portgroup())
|
||||
|
||||
|
||||
# Run the module only when this file is executed directly, not when imported.
if __name__ == "__main__":
    main()
|
@ -1,275 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_dvs_portgroup_info
|
||||
short_description: Gathers info about DVS portgroup configurations
|
||||
description:
|
||||
- This module can be used to gather information about DVS portgroup configurations.
|
||||
version_added: '2.9'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
datacenter:
|
||||
description:
|
||||
- Name of the datacenter.
|
||||
required: true
|
||||
type: str
|
||||
dvswitch:
|
||||
description:
|
||||
- Name of a dvswitch to look for.
|
||||
required: false
|
||||
type: str
|
||||
version_added: "2.9"
|
||||
show_network_policy:
|
||||
description:
|
||||
- Show or hide network policies of DVS portgroup.
|
||||
type: bool
|
||||
default: True
|
||||
show_port_policy:
|
||||
description:
|
||||
- Show or hide port policies of DVS portgroup.
|
||||
type: bool
|
||||
default: True
|
||||
show_teaming_policy:
|
||||
description:
|
||||
- Show or hide teaming policies of DVS portgroup.
|
||||
type: bool
|
||||
default: True
|
||||
show_vlan_info:
|
||||
description:
|
||||
- Show or hide vlan information of the DVS portgroup.
|
||||
type: bool
|
||||
default: False
|
||||
version_added: "2.9"
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Get info about DVPG
|
||||
vmware_dvs_portgroup_info:
|
||||
hostname: "{{ vcenter_server }}"
|
||||
username: "{{ vcenter_user }}"
|
||||
password: "{{ vcenter_pass }}"
|
||||
validate_certs: no
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
register: dvpg_info
|
||||
|
||||
- name: Get number of ports for portgroup 'dvpg_001' in 'dvs_001'
|
||||
debug:
|
||||
msg: "{{ item.num_ports }}"
|
||||
with_items:
|
||||
- "{{ dvpg_info.dvs_portgroup_info['dvs_001'] | json_query(query) }}"
|
||||
vars:
|
||||
query: "[?portgroup_name=='dvpg_001']"
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
dvs_portgroup_info:
|
||||
description: metadata about DVS portgroup configuration
|
||||
returned: on success
|
||||
type: dict
|
||||
sample: {
|
||||
"dvs_0":[
|
||||
{
|
||||
"description": null,
|
||||
"dvswitch_name": "dvs_001",
|
||||
"network_policy": {
|
||||
"forged_transmits": false,
|
||||
"mac_changes": false,
|
||||
"promiscuous": false
|
||||
},
|
||||
"num_ports": 8,
|
||||
"port_policy": {
|
||||
"block_override": true,
|
||||
"ipfix_override": false,
|
||||
"live_port_move": false,
|
||||
"network_rp_override": false,
|
||||
"port_config_reset_at_disconnect": true,
|
||||
"security_override": false,
|
||||
"shaping_override": false,
|
||||
"traffic_filter_override": false,
|
||||
"uplink_teaming_override": false,
|
||||
"vendor_config_override": false,
|
||||
"vlan_override": false
|
||||
},
|
||||
"portgroup_name": "dvpg_001",
|
||||
"teaming_policy": {
|
||||
"inbound_policy": true,
|
||||
"notify_switches": true,
|
||||
"policy": "loadbalance_srcid",
|
||||
"rolling_order": false
|
||||
},
|
||||
"vlan_info": {
|
||||
"trunk": false,
|
||||
"pvlan": false,
|
||||
"vlan_id": 0
|
||||
},
|
||||
"type": "earlyBinding"
|
||||
},
|
||||
]
|
||||
}
|
||||
'''
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError as e:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, get_all_objs, find_dvs_by_name
|
||||
|
||||
|
||||
class DVSPortgroupInfoManager(PyVmomi):
    """Gather configuration details for DVS portgroups in a datacenter."""

    def __init__(self, module):
        super(DVSPortgroupInfoManager, self).__init__(module)
        self.dc_name = self.params['datacenter']
        self.dvs_name = self.params['dvswitch']

        datacenter = self.find_datacenter_by_name(self.dc_name)
        if datacenter is None:
            self.module.fail_json(msg="Failed to find the datacenter %s" % self.dc_name)
        if self.dvs_name:
            # User specified specific dvswitch name to gather information
            dvsn = find_dvs_by_name(self.content, self.dvs_name)
            if dvsn is None:
                self.module.fail_json(msg="Failed to find the dvswitch %s" % self.dvs_name)

            self.dvsls = [dvsn]
        else:
            # default behaviour, gather information about all dvswitches
            self.dvsls = get_all_objs(self.content, [vim.DistributedVirtualSwitch], folder=datacenter.networkFolder)

    def get_vlan_info(self, vlan_obj=None):
        """
        Return vlan information from given object
        Args:
            vlan_obj: vlan managed object
        Returns: Dict of vlan details of the specific object
        """

        vdret = dict()
        if not vlan_obj:
            return vdret

        if isinstance(vlan_obj, vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec):
            # Trunk portgroup: render each NumericRange as 'N' or 'N-M'.
            vlan_id_list = []
            for vli in vlan_obj.vlanId:
                if vli.start == vli.end:
                    vlan_id_list.append(str(vli.start))
                else:
                    vlan_id_list.append(str(vli.start) + "-" + str(vli.end))
            vdret = dict(trunk=True, pvlan=False, vlan_id=vlan_id_list)
        elif isinstance(vlan_obj, vim.dvs.VmwareDistributedVirtualSwitch.PvlanSpec):
            vdret = dict(trunk=False, pvlan=True, vlan_id=str(vlan_obj.pvlanId))
        else:
            vdret = dict(trunk=False, pvlan=False, vlan_id=str(vlan_obj.vlanId))

        return vdret

    def gather_dvs_portgroup_info(self):
        """Return a dict keyed by dvswitch name, each value a list of portgroup detail dicts."""
        dvs_lists = self.dvsls
        result = dict()
        for dvs in dvs_lists:
            result[dvs.name] = list()
            for dvs_pg in dvs.portgroup:
                network_policy = dict()
                teaming_policy = dict()
                port_policy = dict()
                vlan_info = dict()

                # Consistency: the method previously mixed self.module.params
                # and self.params for the same dict; self.params is used
                # uniformly here.
                if self.params['show_network_policy'] and dvs_pg.config.defaultPortConfig.securityPolicy:
                    network_policy = dict(
                        forged_transmits=dvs_pg.config.defaultPortConfig.securityPolicy.forgedTransmits.value,
                        promiscuous=dvs_pg.config.defaultPortConfig.securityPolicy.allowPromiscuous.value,
                        mac_changes=dvs_pg.config.defaultPortConfig.securityPolicy.macChanges.value
                    )
                if self.params['show_teaming_policy']:
                    # govcsim does not have uplinkTeamingPolicy, remove this check once
                    # PR https://github.com/vmware/govmomi/pull/1524 merged.
                    if dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy:
                        teaming_policy = dict(
                            policy=dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.policy.value,
                            inbound_policy=dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.reversePolicy.value,
                            notify_switches=dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.notifySwitches.value,
                            rolling_order=dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.rollingOrder.value,
                        )

                if self.params['show_port_policy']:
                    # govcsim does not have port policy
                    if dvs_pg.config.policy:
                        port_policy = dict(
                            block_override=dvs_pg.config.policy.blockOverrideAllowed,
                            ipfix_override=dvs_pg.config.policy.ipfixOverrideAllowed,
                            live_port_move=dvs_pg.config.policy.livePortMovingAllowed,
                            network_rp_override=dvs_pg.config.policy.networkResourcePoolOverrideAllowed,
                            port_config_reset_at_disconnect=dvs_pg.config.policy.portConfigResetAtDisconnect,
                            security_override=dvs_pg.config.policy.securityPolicyOverrideAllowed,
                            shaping_override=dvs_pg.config.policy.shapingOverrideAllowed,
                            traffic_filter_override=dvs_pg.config.policy.trafficFilterOverrideAllowed,
                            uplink_teaming_override=dvs_pg.config.policy.uplinkTeamingOverrideAllowed,
                            vendor_config_override=dvs_pg.config.policy.vendorConfigOverrideAllowed,
                            vlan_override=dvs_pg.config.policy.vlanOverrideAllowed
                        )

                if self.params['show_vlan_info']:
                    vlan_info = self.get_vlan_info(dvs_pg.config.defaultPortConfig.vlan)

                dvpg_details = dict(
                    portgroup_name=dvs_pg.name,
                    num_ports=dvs_pg.config.numPorts,
                    dvswitch_name=dvs_pg.config.distributedVirtualSwitch.name,
                    description=dvs_pg.config.description,
                    type=dvs_pg.config.type,
                    teaming_policy=teaming_policy,
                    port_policy=port_policy,
                    network_policy=network_policy,
                    vlan_info=vlan_info,
                )
                result[dvs.name].append(dvpg_details)

        return result
|
||||
|
||||
|
||||
def main():
    """Entry point: gather DVS portgroup information and exit with the results."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datacenter=dict(type='str', required=True),
        show_network_policy=dict(type='bool', default=True),
        show_teaming_policy=dict(type='bool', default=True),
        show_port_policy=dict(type='bool', default=True),
        # Declare the type explicitly: the DOCUMENTATION block states
        # 'type: str' and the sibling module declares it the same way.
        dvswitch=dict(type='str', required=False),
        show_vlan_info=dict(type='bool', default=False),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    dvs_pg_mgr = DVSPortgroupInfoManager(module)
    module.exit_json(changed=False,
                     dvs_portgroup_info=dvs_pg_mgr.gather_dvs_portgroup_info())
|
||||
|
||||
|
||||
# Run the module only when this file is executed directly, not when imported.
if __name__ == "__main__":
    main()
|
@ -1,754 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_dvswitch
|
||||
short_description: Create or remove a Distributed Switch
|
||||
description:
|
||||
- This module can be used to create, remove a Distributed Switch.
|
||||
version_added: 2.0
|
||||
author:
|
||||
- Joseph Callen (@jcpowermac)
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
- Christian Kotte (@ckotte)
|
||||
notes:
|
||||
- Tested on vSphere 6.5 and 6.7
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
datacenter_name:
|
||||
description:
|
||||
- The name of the datacenter that will contain the Distributed Switch.
|
||||
- This parameter is optional, if C(folder) is provided.
|
||||
- Mutually exclusive with C(folder) parameter.
|
||||
required: False
|
||||
aliases: ['datacenter']
|
||||
type: str
|
||||
switch_name:
|
||||
description:
|
||||
- The name of the distributed vSwitch to create or remove.
|
||||
required: True
|
||||
aliases: ['switch', 'dvswitch']
|
||||
type: str
|
||||
switch_version:
|
||||
description:
|
||||
- The version of the Distributed Switch to create.
|
||||
- Can be 6.0.0, 5.5.0, 5.1.0, 5.0.0 with a vCenter running vSphere 6.0 and 6.5.
|
||||
- Can be 6.6.0, 6.5.0, 6.0.0 with a vCenter running vSphere 6.7.
|
||||
- The version must match the version of the ESXi hosts you want to connect.
|
||||
- The version of the vCenter server is used if not specified.
|
||||
- Required only if C(state) is set to C(present).
|
||||
version_added: 2.5
|
||||
choices: ['5.0.0', '5.1.0', '5.5.0', '6.0.0', '6.5.0', '6.6.0']
|
||||
aliases: ['version']
|
||||
type: str
|
||||
mtu:
|
||||
description:
|
||||
- The switch maximum transmission unit.
|
||||
- Required parameter for C(state) both C(present) and C(absent), before Ansible 2.6 version.
|
||||
- Required only if C(state) is set to C(present), for Ansible 2.6 and onwards.
|
||||
- Accepts value between 1280 to 9000 (both inclusive).
|
||||
type: int
|
||||
default: 1500
|
||||
multicast_filtering_mode:
|
||||
description:
|
||||
- The multicast filtering mode.
|
||||
- 'C(basic) mode: multicast traffic for virtual machines is forwarded according to the destination MAC address of the multicast group.'
|
||||
- 'C(snooping) mode: the Distributed Switch provides IGMP and MLD snooping according to RFC 4541.'
|
||||
type: str
|
||||
choices: ['basic', 'snooping']
|
||||
default: 'basic'
|
||||
version_added: 2.8
|
||||
uplink_quantity:
|
||||
description:
|
||||
- Quantity of uplink per ESXi host added to the Distributed Switch.
|
||||
- The uplink quantity can be increased or decreased, but a decrease will only be successful if the uplink isn't used by a portgroup.
|
||||
- Required parameter for C(state) both C(present) and C(absent), before Ansible 2.6 version.
|
||||
- Required only if C(state) is set to C(present), for Ansible 2.6 and onwards.
|
||||
type: int
|
||||
uplink_prefix:
|
||||
description:
|
||||
- The prefix used for the naming of the uplinks.
|
||||
- Only valid if the Distributed Switch will be created. Not used if the Distributed Switch is already present.
|
||||
- Uplinks are created as Uplink 1, Uplink 2, etc. by default.
|
||||
default: 'Uplink '
|
||||
version_added: 2.8
|
||||
type: str
|
||||
discovery_proto:
|
||||
description:
|
||||
- Link discovery protocol between Cisco and Link Layer discovery.
|
||||
- Required parameter for C(state) both C(present) and C(absent), before Ansible 2.6 version.
|
||||
- Required only if C(state) is set to C(present), for Ansible 2.6 and onwards.
|
||||
- 'C(cdp): Use Cisco Discovery Protocol (CDP).'
|
||||
- 'C(lldp): Use Link Layer Discovery Protocol (LLDP).'
|
||||
- 'C(disabled): Do not use a discovery protocol.'
|
||||
choices: ['cdp', 'lldp', 'disabled']
|
||||
default: 'cdp'
|
||||
aliases: [ 'discovery_protocol' ]
|
||||
type: str
|
||||
discovery_operation:
|
||||
description:
|
||||
- Select the discovery operation.
|
||||
- Required parameter for C(state) both C(present) and C(absent), before Ansible 2.6 version.
|
||||
- Required only if C(state) is set to C(present), for Ansible 2.6 and onwards.
|
||||
choices: ['both', 'advertise', 'listen']
|
||||
default: 'listen'
|
||||
type: str
|
||||
contact:
|
||||
description:
|
||||
- Dictionary which configures administrator contact name and description for the Distributed Switch.
|
||||
- 'Valid attributes are:'
|
||||
- '- C(name) (str): Administrator name.'
|
||||
- '- C(description) (str): Description or other details.'
|
||||
type: dict
|
||||
version_added: 2.8
|
||||
description:
|
||||
description:
|
||||
- Description of the Distributed Switch.
|
||||
type: str
|
||||
version_added: 2.8
|
||||
health_check:
|
||||
description:
|
||||
- Dictionary which configures Health Check for the Distributed Switch.
|
||||
- 'Valid attributes are:'
|
||||
- '- C(vlan_mtu) (bool): VLAN and MTU health check. (default: False)'
|
||||
- '- C(teaming_failover) (bool): Teaming and failover health check. (default: False)'
|
||||
- '- C(vlan_mtu_interval) (int): VLAN and MTU health check interval (minutes). (default: 0)'
|
||||
- '- The default for C(vlan_mtu_interval) is 1 in the vSphere Client if the VLAN and MTU health check is enabled.'
|
||||
- '- C(teaming_failover_interval) (int): Teaming and failover health check interval (minutes). (default: 0)'
|
||||
- '- The default for C(teaming_failover_interval) is 1 in the vSphere Client if the Teaming and failover health check is enabled.'
|
||||
type: dict
|
||||
default: {
|
||||
vlan_mtu: False,
|
||||
teaming_failover: False,
|
||||
vlan_mtu_interval: 0,
|
||||
teaming_failover_interval: 0,
|
||||
}
|
||||
version_added: 2.8
|
||||
state:
|
||||
description:
|
||||
- If set to C(present) and the Distributed Switch doesn't exists then the Distributed Switch will be created.
|
||||
- If set to C(absent) and the Distributed Switch exists then the Distributed Switch will be deleted.
|
||||
default: 'present'
|
||||
choices: ['present', 'absent']
|
||||
type: str
|
||||
folder:
|
||||
description:
|
||||
- Destination folder, absolute path to place dvswitch in.
|
||||
- The folder should include the datacenter.
|
||||
- This parameter is case sensitive.
|
||||
- This parameter is optional, if C(datacenter) is provided.
|
||||
- 'Examples:'
|
||||
- ' folder: /datacenter1/network'
|
||||
- ' folder: datacenter1/network'
|
||||
- ' folder: /datacenter1/network/folder1'
|
||||
- ' folder: datacenter1/network/folder1'
|
||||
- ' folder: /folder1/datacenter1/network'
|
||||
- ' folder: folder1/datacenter1/network'
|
||||
- ' folder: /folder1/datacenter1/network/folder2'
|
||||
required: False
|
||||
type: str
|
||||
version_added: 2.9
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create dvSwitch
|
||||
vmware_dvswitch:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter: '{{ datacenter }}'
|
||||
switch: dvSwitch
|
||||
version: 6.0.0
|
||||
mtu: 9000
|
||||
uplink_quantity: 2
|
||||
discovery_protocol: lldp
|
||||
discovery_operation: both
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Create dvSwitch with all options
|
||||
vmware_dvswitch:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter: '{{ datacenter }}'
|
||||
switch: dvSwitch
|
||||
version: 6.5.0
|
||||
mtu: 9000
|
||||
uplink_quantity: 2
|
||||
uplink_prefix: 'Uplink_'
|
||||
discovery_protocol: cdp
|
||||
discovery_operation: both
|
||||
multicast_filtering_mode: snooping
|
||||
health_check:
|
||||
vlan_mtu: true
|
||||
vlan_mtu_interval: 1
|
||||
teaming_failover: true
|
||||
teaming_failover_interval: 1
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Delete dvSwitch
|
||||
vmware_dvswitch:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter: '{{ datacenter }}'
|
||||
switch: dvSwitch
|
||||
state: absent
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
result:
|
||||
description: information about performed operation
|
||||
returned: always
|
||||
type: str
|
||||
sample: {
|
||||
"changed": false,
|
||||
"contact": null,
|
||||
"contact_details": null,
|
||||
"description": null,
|
||||
"discovery_operation": "both",
|
||||
"discovery_protocol": "cdp",
|
||||
"dvswitch": "test",
|
||||
"health_check_teaming": false,
|
||||
"health_check_teaming_interval": 0,
|
||||
"health_check_vlan": false,
|
||||
"health_check_vlan_interval": 0,
|
||||
"mtu": 9000,
|
||||
"multicast_filtering_mode": "basic",
|
||||
"result": "DVS already configured properly",
|
||||
"uplink_quantity": 2,
|
||||
"uplinks": [
|
||||
"Uplink_1",
|
||||
"Uplink_2"
|
||||
],
|
||||
"version": "6.6.0"
|
||||
}
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim, vmodl
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.vmware import (
|
||||
PyVmomi, TaskError, find_dvs_by_name, vmware_argument_spec, wait_for_task
|
||||
)
|
||||
|
||||
|
||||
class VMwareDvSwitch(PyVmomi):
    """Manage a Distributed Virtual Switch: create, reconfigure, or delete it."""

    def __init__(self, module):
        super(VMwareDvSwitch, self).__init__(module)
        self.dvs = None

        self.switch_name = self.module.params['switch_name']
        self.switch_version = self.module.params['switch_version']
        # vCenter 6.7 reports '6.7.0' but the newest switch version it creates is 6.6.0.
        if self.content.about.version == '6.7.0':
            self.vcenter_switch_version = '6.6.0'
        else:
            self.vcenter_switch_version = self.content.about.version
        folder = self.params['folder']
        if folder:
            self.folder_obj = self.content.searchIndex.FindByInventoryPath(folder)
            if not self.folder_obj:
                self.module.fail_json(msg="Failed to find the folder specified by %(folder)s" % self.params)
        else:
            datacenter_name = self.params.get('datacenter_name')
            datacenter_obj = self.find_datacenter_by_name(datacenter_name)
            if not datacenter_obj:
                self.module.fail_json(msg="Failed to find datacenter '%s' required"
                                          " for managing distributed vSwitch." % datacenter_name)
            self.folder_obj = datacenter_obj.networkFolder

        self.mtu = self.module.params['mtu']
        # MTU sanity check
        if not 1280 <= self.mtu <= 9000:
            self.module.fail_json(
                msg="MTU value should be between 1280 and 9000 (both inclusive), provided %d." % self.mtu
            )
        self.multicast_filtering_mode = self.module.params['multicast_filtering_mode']
        self.uplink_quantity = self.module.params['uplink_quantity']
        self.uplink_prefix = self.module.params['uplink_prefix']
        self.discovery_protocol = self.module.params['discovery_proto']
        self.discovery_operation = self.module.params['discovery_operation']
        # TODO: add port mirroring
        self.health_check_vlan = self.params['health_check'].get('vlan_mtu')
        self.health_check_vlan_interval = self.params['health_check'].get('vlan_mtu_interval')
        self.health_check_teaming = self.params['health_check'].get('teaming_failover')
        self.health_check_teaming_interval = self.params['health_check'].get('teaming_failover_interval')
        if self.params['contact']:
            self.contact_name = self.params['contact'].get('name')
            # BUG FIX: the suboption is declared as 'description' in the argument
            # spec (see main()); the previous .get('details') never matched a key,
            # so contact details were silently dropped.
            self.contact_details = self.params['contact'].get('description')
        else:
            self.contact_name = None
            self.contact_details = None
        self.description = self.module.params['description']
        self.state = self.module.params['state']

    def process_state(self):
        """Dispatch to create/update/delete/no-op based on desired vs. current state."""
        dvs_states = {
            'absent': {
                'present': self.destroy_dvswitch,
                'absent': self.exit_unchanged,
            },
            'present': {
                'present': self.update_dvswitch,
                'absent': self.create_dvswitch,
            }
        }

        try:
            dvs_states[self.state][self.check_dvs()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=to_native(runtime_fault.msg))
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=to_native(method_fault.msg))
        except Exception as e:
            self.module.fail_json(msg=to_native(e))

    def check_dvs(self):
        """Return 'present' if the switch exists in the target folder, else 'absent'.

        Side effect: caches the found switch object in self.dvs.
        """
        self.dvs = find_dvs_by_name(self.content, self.switch_name, folder=self.folder_obj)
        if self.dvs is None:
            return 'absent'
        return 'present'

    def create_dvswitch(self):
        """Create a DVS and apply multicast/health-check settings, then exit the module."""
        changed = True
        results = dict(changed=changed)

        spec = vim.DistributedVirtualSwitch.CreateSpec()
        spec.configSpec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
        # Name
        results['dvswitch'] = self.switch_name
        spec.configSpec.name = self.switch_name
        # MTU
        results['mtu'] = self.mtu
        spec.configSpec.maxMtu = self.mtu
        # Discovery Protocol type and operation
        results['discovery_protocol'] = self.discovery_protocol
        results['discovery_operation'] = self.discovery_operation
        spec.configSpec.linkDiscoveryProtocolConfig = self.create_ldp_spec()
        # Administrator contact
        results['contact'] = self.contact_name
        results['contact_details'] = self.contact_details
        if self.contact_name or self.contact_details:
            spec.contact = self.create_contact_spec()
        # Description
        results['description'] = self.description
        if self.description:
            spec.description = self.description
        # Uplinks
        results['uplink_quantity'] = self.uplink_quantity
        spec.configSpec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
        for count in range(1, self.uplink_quantity + 1):
            spec.configSpec.uplinkPortPolicy.uplinkPortName.append("%s%d" % (self.uplink_prefix, count))
        results['uplinks'] = spec.configSpec.uplinkPortPolicy.uplinkPortName
        # Version
        results['version'] = self.switch_version
        if self.switch_version:
            spec.productInfo = self.create_product_spec(self.switch_version)

        if self.module.check_mode:
            result = "DVS would be created"
        else:
            # Create DVS
            network_folder = self.folder_obj
            task = network_folder.CreateDVS_Task(spec)
            try:
                wait_for_task(task)
            except TaskError as invalid_argument:
                self.module.fail_json(
                    msg="Failed to create DVS : %s" % to_native(invalid_argument)
                )
            # Find new DVS
            self.dvs = find_dvs_by_name(self.content, self.switch_name)
            changed_multicast = False
            spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
            # Use the same version in the new spec; the version will be increased
            # by one by the API automatically.
            spec.configVersion = self.dvs.config.configVersion
            # Set multicast filtering mode (cannot be set at creation time, so
            # reconfigure right after the switch exists).
            results['multicast_filtering_mode'] = self.multicast_filtering_mode
            multicast_filtering_mode = self.get_api_mc_filtering_mode(self.multicast_filtering_mode)
            if self.dvs.config.multicastFilteringMode != multicast_filtering_mode:
                changed_multicast = True
                # NOTE: the original code assigned this value a second time
                # immediately afterwards; the redundant duplicate was removed.
                spec.multicastFilteringMode = multicast_filtering_mode
            if changed_multicast:
                self.update_dvs_config(self.dvs, spec)
            # Set Health Check config
            results['health_check_vlan'] = self.health_check_vlan
            results['health_check_teaming'] = self.health_check_teaming
            result = self.check_health_check_config(self.dvs.config.healthCheckConfig)
            changed_health_check = result[1]
            if changed_health_check:
                self.update_health_check_config(self.dvs, result[0])
            result = "DVS created"
        self.module.exit_json(changed=changed, result=to_native(result))

    def create_ldp_spec(self):
        """Create the Link Discovery Protocol config spec.

        'disabled' maps to protocol 'cdp' with operation 'none', which is how
        the API expresses a disabled discovery protocol.
        """
        ldp_config_spec = vim.host.LinkDiscoveryProtocolConfig()
        if self.discovery_protocol == 'disabled':
            ldp_config_spec.protocol = 'cdp'
            ldp_config_spec.operation = 'none'
        else:
            ldp_config_spec.protocol = self.discovery_protocol
            ldp_config_spec.operation = self.discovery_operation
        return ldp_config_spec

    def create_product_spec(self, switch_version):
        """Create a product info spec carrying the requested switch version."""
        product_info_spec = vim.dvs.ProductSpec()
        product_info_spec.version = switch_version
        return product_info_spec

    @staticmethod
    def get_api_mc_filtering_mode(mode):
        """Translate the module's multicast mode name to the API's name.

        'basic' -> 'legacyFiltering'; anything else -> 'snooping'.
        """
        if mode == 'basic':
            return 'legacyFiltering'
        return 'snooping'

    def create_contact_spec(self):
        """Create the administrator contact info spec from module parameters."""
        contact_info_spec = vim.DistributedVirtualSwitch.ContactInfo()
        contact_info_spec.name = self.contact_name
        contact_info_spec.contact = self.contact_details
        return contact_info_spec

    def update_dvs_config(self, switch_object, spec):
        """Reconfigure the given switch with the given config spec and wait for it."""
        try:
            task = switch_object.ReconfigureDvs_Task(spec)
            wait_for_task(task)
        except TaskError as invalid_argument:
            self.module.fail_json(
                msg="Failed to update DVS : %s" % to_native(invalid_argument)
            )

    def check_health_check_config(self, health_check_config):
        """Compare the switch's Health Check config against the desired settings.

        Mutates the passed-in config objects in place to the desired values and
        returns a 10-tuple:
        (config, changed, changed_vlan, vlan_previous, changed_vlan_interval,
         vlan_interval_previous, changed_teaming, teaming_previous,
         changed_teaming_interval, teaming_interval_previous)
        """
        changed = changed_vlan = changed_vlan_interval = changed_teaming = changed_teaming_interval = False
        vlan_previous = teaming_previous = None
        vlan_interval_previous = teaming_interval_previous = 0
        for config in health_check_config:
            if isinstance(config, vim.dvs.VmwareDistributedVirtualSwitch.VlanMtuHealthCheckConfig):
                if config.enable != self.health_check_vlan:
                    changed = changed_vlan = True
                    vlan_previous = config.enable
                    config.enable = self.health_check_vlan
                # Interval only matters while the check is enabled.
                if config.enable and config.interval != self.health_check_vlan_interval:
                    changed = changed_vlan_interval = True
                    vlan_interval_previous = config.interval
                    config.interval = self.health_check_vlan_interval
            if isinstance(config, vim.dvs.VmwareDistributedVirtualSwitch.TeamingHealthCheckConfig):
                if config.enable != self.health_check_teaming:
                    changed = changed_teaming = True
                    teaming_previous = config.enable
                    config.enable = self.health_check_teaming
                if config.enable and config.interval != self.health_check_teaming_interval:
                    changed = changed_teaming_interval = True
                    teaming_interval_previous = config.interval
                    config.interval = self.health_check_teaming_interval
        return (health_check_config, changed, changed_vlan, vlan_previous, changed_vlan_interval, vlan_interval_previous,
                changed_teaming, teaming_previous, changed_teaming_interval, teaming_interval_previous)

    def update_health_check_config(self, switch_object, health_check_config):
        """Apply a new Health Check configuration to the switch and wait for the task."""
        try:
            task = switch_object.UpdateDVSHealthCheckConfig_Task(healthCheckConfig=health_check_config)
        except vim.fault.DvsFault as dvs_fault:
            self.module.fail_json(msg="Update failed due to DVS fault : %s" % to_native(dvs_fault))
        except vmodl.fault.NotSupported as not_supported:
            self.module.fail_json(msg="Health check not supported on the switch : %s" % to_native(not_supported))
        except TaskError as invalid_argument:
            self.module.fail_json(msg="Failed to configure health check : %s" % to_native(invalid_argument))
        try:
            wait_for_task(task)
        except TaskError as invalid_argument:
            self.module.fail_json(msg="Failed to update health check config : %s" % to_native(invalid_argument))

    def exit_unchanged(self):
        """Exit reporting that the switch is already absent (state=absent no-op)."""
        changed = False
        results = dict(changed=changed)
        results['dvswitch'] = self.switch_name
        results['result'] = "DVS not present"
        self.module.exit_json(**results)

    def destroy_dvswitch(self):
        """Delete the DVS (honoring check mode) and exit the module."""
        changed = True
        results = dict(changed=changed)
        results['dvswitch'] = self.switch_name
        if self.module.check_mode:
            results['result'] = "DVS would be deleted"
        else:
            try:
                task = self.dvs.Destroy_Task()
            except vim.fault.VimFault as vim_fault:
                # BUG FIX: error message typo ("Failed to deleted DVS").
                self.module.fail_json(msg="Failed to delete DVS : %s" % to_native(vim_fault))
            # Wrap the wait like every other task wait in this class, so a failed
            # delete task yields a clean module error instead of a raw traceback.
            try:
                wait_for_task(task)
            except TaskError as invalid_argument:
                self.module.fail_json(msg="Failed to delete DVS : %s" % to_native(invalid_argument))
            results['result'] = "DVS deleted"
        self.module.exit_json(**results)

    def update_dvswitch(self):
        """Compare every managed setting against the live switch, apply differences,
        and exit with a result message listing what changed (or would change)."""
        changed = changed_settings = changed_ldp = changed_version = changed_health_check = False
        results = dict(changed=changed)
        results['dvswitch'] = self.switch_name
        changed_list = []

        config_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
        # Use the same version in the new spec; the version will be increased
        # by one by the API automatically.
        config_spec.configVersion = self.dvs.config.configVersion

        # Check MTU
        results['mtu'] = self.mtu
        if self.dvs.config.maxMtu != self.mtu:
            changed = changed_settings = True
            changed_list.append("mtu")
            # BUG FIX: previously recorded config_spec.maxMtu (the not-yet-populated
            # new spec) instead of the switch's current value.
            results['mtu_previous'] = self.dvs.config.maxMtu
            config_spec.maxMtu = self.mtu

        # Check Discovery Protocol type and operation
        ldp_protocol = self.dvs.config.linkDiscoveryProtocolConfig.protocol
        ldp_operation = self.dvs.config.linkDiscoveryProtocolConfig.operation
        if self.discovery_protocol == 'disabled':
            results['discovery_protocol'] = self.discovery_protocol
            results['discovery_operation'] = 'n/a'
            # 'disabled' is represented by the API as protocol 'cdp' + operation 'none'.
            if ldp_protocol != 'cdp' or ldp_operation != 'none':
                changed_ldp = True
                results['discovery_protocol_previous'] = ldp_protocol
                results['discovery_operation_previous'] = ldp_operation
        else:
            results['discovery_protocol'] = self.discovery_protocol
            results['discovery_operation'] = self.discovery_operation
            if ldp_protocol != self.discovery_protocol or ldp_operation != self.discovery_operation:
                changed_ldp = True
                if ldp_protocol != self.discovery_protocol:
                    results['discovery_protocol_previous'] = ldp_protocol
                if ldp_operation != self.discovery_operation:
                    results['discovery_operation_previous'] = ldp_operation
        if changed_ldp:
            changed = changed_settings = True
            changed_list.append("discovery protocol")
            config_spec.linkDiscoveryProtocolConfig = self.create_ldp_spec()

        # Check Multicast filtering mode
        results['multicast_filtering_mode'] = self.multicast_filtering_mode
        multicast_filtering_mode = self.get_api_mc_filtering_mode(self.multicast_filtering_mode)
        if self.dvs.config.multicastFilteringMode != multicast_filtering_mode:
            changed = changed_settings = True
            changed_list.append("multicast filtering")
            results['multicast_filtering_mode_previous'] = self.dvs.config.multicastFilteringMode
            config_spec.multicastFilteringMode = multicast_filtering_mode

        # Check administrator contact
        results['contact'] = self.contact_name
        results['contact_details'] = self.contact_details
        if self.dvs.config.contact.name != self.contact_name or self.dvs.config.contact.contact != self.contact_details:
            changed = changed_settings = True
            changed_list.append("contact")
            results['contact_previous'] = self.dvs.config.contact.name
            results['contact_details_previous'] = self.dvs.config.contact.contact
            config_spec.contact = self.create_contact_spec()

        # Check description
        results['description'] = self.description
        if self.dvs.config.description != self.description:
            changed = changed_settings = True
            changed_list.append("description")
            results['description_previous'] = self.dvs.config.description
            if self.description is None:
                # need to use empty string; will be set to None by API
                config_spec.description = ''
            else:
                config_spec.description = self.description

        # Check uplinks
        results['uplink_quantity'] = self.uplink_quantity
        if len(self.dvs.config.uplinkPortPolicy.uplinkPortName) != self.uplink_quantity:
            changed = changed_settings = True
            changed_list.append("uplink quantity")
            results['uplink_quantity_previous'] = len(self.dvs.config.uplinkPortPolicy.uplinkPortName)
            config_spec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
            # Replace the whole uplink array. The original code had two separate,
            # byte-identical loops for the grow and shrink cases; one rebuild
            # covers both.
            for count in range(1, self.uplink_quantity + 1):
                config_spec.uplinkPortPolicy.uplinkPortName.append("%s%d" % (self.uplink_prefix, count))
            results['uplinks'] = config_spec.uplinkPortPolicy.uplinkPortName
            results['uplinks_previous'] = self.dvs.config.uplinkPortPolicy.uplinkPortName
        else:
            # No uplink name check; uplink names can't be changed easily if they are used by a portgroup
            results['uplinks'] = self.dvs.config.uplinkPortPolicy.uplinkPortName

        # Check Health Check
        results['health_check_vlan'] = self.health_check_vlan
        results['health_check_teaming'] = self.health_check_teaming
        results['health_check_vlan_interval'] = self.health_check_vlan_interval
        results['health_check_teaming_interval'] = self.health_check_teaming_interval
        (health_check_config, changed_health_check, changed_vlan, vlan_previous,
         changed_vlan_interval, vlan_interval_previous, changed_teaming, teaming_previous,
         changed_teaming_interval, teaming_interval_previous) = \
            self.check_health_check_config(self.dvs.config.healthCheckConfig)
        if changed_health_check:
            changed = True
            changed_list.append("health check")
            if changed_vlan:
                results['health_check_vlan_previous'] = vlan_previous
            if changed_vlan_interval:
                results['health_check_vlan_interval_previous'] = vlan_interval_previous
            if changed_teaming:
                results['health_check_teaming_previous'] = teaming_previous
            if changed_teaming_interval:
                results['health_check_teaming_interval_previous'] = teaming_interval_previous

        # Check switch version
        if self.switch_version:
            results['version'] = self.switch_version
            if self.dvs.config.productInfo.version != self.switch_version:
                changed_version = True
                spec_product = self.create_product_spec(self.switch_version)
        else:
            # No explicit version requested: converge on the vCenter's version.
            results['version'] = self.vcenter_switch_version
            if self.dvs.config.productInfo.version != self.vcenter_switch_version:
                changed_version = True
                spec_product = self.create_product_spec(self.vcenter_switch_version)
        if changed_version:
            changed = True
            changed_list.append("switch version")
            results['version_previous'] = self.dvs.config.productInfo.version

        if changed:
            if self.module.check_mode:
                changed_suffix = ' would be changed'
            else:
                changed_suffix = ' changed'
            # Build a human-readable "a, b, and c" list of what changed.
            if len(changed_list) > 2:
                message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
            elif len(changed_list) == 2:
                message = ' and '.join(changed_list)
            elif len(changed_list) == 1:
                message = changed_list[0]
            message += changed_suffix
            if not self.module.check_mode:
                if changed_settings:
                    self.update_dvs_config(self.dvs, config_spec)
                if changed_health_check:
                    self.update_health_check_config(self.dvs, health_check_config)
                if changed_version:
                    task = self.dvs.PerformDvsProductSpecOperation_Task("upgrade", spec_product)
                    try:
                        wait_for_task(task)
                    except TaskError as invalid_argument:
                        self.module.fail_json(msg="Failed to update DVS version : %s" % to_native(invalid_argument))
        else:
            message = "DVS already configured properly"
        results['changed'] = changed
        results['result'] = message

        self.module.exit_json(**results)
|
||||
|
||||
|
||||
def main():
    """Entry point: declare the module arguments, then create/update/delete the switch."""
    health_check_suboptions = dict(
        vlan_mtu=dict(type='bool', default=False),
        teaming_failover=dict(type='bool', default=False),
        vlan_mtu_interval=dict(type='int', default=0),
        teaming_failover_interval=dict(type='int', default=0),
    )
    health_check_defaults = dict(
        vlan_mtu=False,
        teaming_failover=False,
        vlan_mtu_interval=0,
        teaming_failover_interval=0,
    )
    contact_suboptions = dict(
        name=dict(type='str'),
        description=dict(type='str'),
    )

    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            datacenter_name=dict(aliases=['datacenter']),
            folder=dict(),
            switch_name=dict(required=True, aliases=['switch', 'dvswitch']),
            mtu=dict(type='int', default=1500),
            multicast_filtering_mode=dict(type='str', default='basic', choices=['basic', 'snooping']),
            switch_version=dict(
                choices=['5.0.0', '5.1.0', '5.5.0', '6.0.0', '6.5.0', '6.6.0'],
                aliases=['version'],
                default=None
            ),
            uplink_quantity=dict(type='int'),
            uplink_prefix=dict(type='str', default='Uplink '),
            discovery_proto=dict(
                type='str', choices=['cdp', 'lldp', 'disabled'], default='cdp', aliases=['discovery_protocol']
            ),
            discovery_operation=dict(type='str', choices=['both', 'advertise', 'listen'], default='listen'),
            health_check=dict(type='dict', options=health_check_suboptions, default=health_check_defaults),
            contact=dict(type='dict', options=contact_suboptions),
            description=dict(type='str'),
            state=dict(default='present', choices=['present', 'absent']),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        # uplink_quantity is only needed when creating/updating the switch.
        required_if=[
            ('state', 'present',
             ['uplink_quantity']),
        ],
        # The switch location must come from exactly one of these two.
        required_one_of=[
            ['folder', 'datacenter_name'],
        ],
        mutually_exclusive=[
            ['folder', 'datacenter_name'],
        ],
        supports_check_mode=True,
    )

    dvswitch_manager = VMwareDvSwitch(module)
    dvswitch_manager.process_state()


if __name__ == '__main__':
    main()
|
@ -1,404 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_dvswitch_lacp
|
||||
short_description: Manage LACP configuration on a Distributed Switch
|
||||
description:
|
||||
- This module can be used to configure Link Aggregation Control Protocol (LACP) support mode and Link Aggregation Groups (LAGs).
|
||||
version_added: 2.8
|
||||
author:
|
||||
- Christian Kotte (@ckotte)
|
||||
notes:
|
||||
- Tested on vSphere 6.7
|
||||
- You need to run the task two times if you want to remove all LAGs and change the support mode to 'basic'
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
switch:
|
||||
description:
|
||||
- The name of the Distributed Switch to manage.
|
||||
required: True
|
||||
aliases: ['dvswitch']
|
||||
type: str
|
||||
support_mode:
|
||||
description:
|
||||
- The LACP support mode.
|
||||
- 'C(basic): One Link Aggregation Control Protocol group in the switch (singleLag).'
|
||||
- 'C(enhanced): Multiple Link Aggregation Control Protocol groups in the switch (multipleLag).'
|
||||
type: str
|
||||
default: 'basic'
|
||||
choices: ['basic', 'enhanced']
|
||||
link_aggregation_groups:
|
||||
description:
|
||||
- Can only be used if C(support_mode) is set to C(enhanced).
|
||||
- 'The following parameters are required:'
|
||||
- '- C(name) (string): Name of the LAG.'
|
||||
- '- C(uplink_number) (int): Number of uplinks. Can be 1 to 30.'
|
||||
- '- C(mode) (string): The negotiating state of the uplinks/ports.'
|
||||
- ' - choices: [ active, passive ]'
|
||||
- '- C(load_balancing_mode) (string): Load balancing algorithm.'
|
||||
- ' - Valid attributes are:'
|
||||
- ' - srcTcpUdpPort: Source TCP/UDP port number.'
|
||||
- ' - srcDestIpTcpUdpPortVlan: Source and destination IP, source and destination TCP/UDP port number and VLAN.'
|
||||
- ' - srcIpVlan: Source IP and VLAN.'
|
||||
- ' - srcDestTcpUdpPort: Source and destination TCP/UDP port number.'
|
||||
- ' - srcMac: Source MAC address.'
|
||||
- ' - destIp: Destination IP.'
|
||||
- ' - destMac: Destination MAC address.'
|
||||
- ' - vlan: VLAN only.'
|
||||
- ' - srcDestIp: Source and Destination IP.'
|
||||
- ' - srcIpTcpUdpPortVlan: Source IP, TCP/UDP port number and VLAN.'
|
||||
- ' - srcDestIpTcpUdpPort: Source and destination IP and TCP/UDP port number.'
|
||||
- ' - srcDestMac: Source and destination MAC address.'
|
||||
- ' - destIpTcpUdpPort: Destination IP and TCP/UDP port number.'
|
||||
- ' - srcPortId: Source Virtual Port Id.'
|
||||
- ' - srcIp: Source IP.'
|
||||
- ' - srcIpTcpUdpPort: Source IP and TCP/UDP port number.'
|
||||
- ' - destIpTcpUdpPortVlan: Destination IP, TCP/UDP port number and VLAN.'
|
||||
- ' - destTcpUdpPort: Destination TCP/UDP port number.'
|
||||
- ' - destIpVlan: Destination IP and VLAN.'
|
||||
- ' - srcDestIpVlan: Source and destination IP and VLAN.'
|
||||
- ' - The default load balancing mode in the vSphere Client is srcDestIpTcpUdpPortVlan.'
|
||||
- Please see examples for more information.
|
||||
type: list
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Enable enhanced mode on a Distributed Switch
|
||||
vmware_dvswitch_lacp:
|
||||
hostname: '{{ inventory_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
switch: dvSwitch
|
||||
support_mode: enhanced
|
||||
validate_certs: "{{ validate_vcenter_certs }}"
|
||||
delegate_to: localhost
|
||||
loop_control:
|
||||
label: "{{ item.name }}"
|
||||
with_items: "{{ vcenter_distributed_switches }}"
|
||||
|
||||
- name: Enable enhanced mode and create two LAGs on a Distributed Switch
|
||||
vmware_dvswitch_lacp:
|
||||
hostname: '{{ inventory_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
switch: dvSwitch
|
||||
support_mode: enhanced
|
||||
link_aggregation_groups:
|
||||
- name: lag1
|
||||
uplink_number: 2
|
||||
mode: active
|
||||
load_balancing_mode: srcDestIpTcpUdpPortVlan
|
||||
- name: lag2
|
||||
uplink_number: 2
|
||||
mode: passive
|
||||
load_balancing_mode: srcDestIp
|
||||
validate_certs: "{{ validate_vcenter_certs }}"
|
||||
delegate_to: localhost
|
||||
loop_control:
|
||||
label: "{{ item.name }}"
|
||||
with_items: "{{ vcenter_distributed_switches }}"
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
result:
|
||||
description: information about performed operation
|
||||
returned: always
|
||||
type: str
|
||||
sample: {
|
||||
"changed": true,
|
||||
"dvswitch": "dvSwitch",
|
||||
"link_aggregation_groups": [
|
||||
{"load_balancing_mode": "srcDestIpTcpUdpPortVlan", "mode": "active", "name": "lag1", "uplink_number": 2},
|
||||
{"load_balancing_mode": "srcDestIp", "mode": "active", "name": "lag2", "uplink_number": 2}
|
||||
],
|
||||
"link_aggregation_groups_previous": [],
|
||||
"support_mode": "enhanced",
|
||||
"result": "lacp lags changed"
|
||||
}
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim, vmodl
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.vmware import (
|
||||
PyVmomi, TaskError, find_dvs_by_name, vmware_argument_spec, wait_for_task
|
||||
)
|
||||
|
||||
|
||||
class VMwareDvSwitchLacp(PyVmomi):
    """Manage LACP support mode and Link Aggregation Groups (LAGs) on a
    Distributed Virtual Switch.

    The module parameters 'switch', 'support_mode', and
    'link_aggregation_groups' drive the desired state; ensure() computes
    the diff against the live DVS config and applies it.
    """

    def __init__(self, module):
        """Read module params, validate mode/LAG combination, and locate the DVS.

        Fails the module early when LAGs are requested together with
        'basic' support mode, or when the switch cannot be found.
        """
        super(VMwareDvSwitchLacp, self).__init__(module)
        self.switch_name = self.module.params['switch']
        self.support_mode = self.module.params['support_mode']
        self.link_aggregation_groups = self.module.params['link_aggregation_groups']
        # LAGs are only valid with 'enhanced' (multipleLag) support mode.
        # A list containing a single empty string is treated as "no LAGs"
        # (this is what an empty YAML list can arrive as).
        if self.support_mode == 'basic' and (
                self.link_aggregation_groups and not (
                    len(self.link_aggregation_groups) == 1 and self.link_aggregation_groups[0] == '')):
            self.module.fail_json(
                msg="LAGs can only be configured if 'support_mode' is set to 'enhanced'!"
            )
        self.dvs = find_dvs_by_name(self.content, self.switch_name)
        if self.dvs is None:
            self.module.fail_json(msg="Failed to find DVS %s" % self.switch_name)

    def ensure(self):
        """Reconcile the desired LACP configuration with the live switch.

        Compares support mode and LAG definitions against the current DVS
        config, applies any required changes (honoring check mode), and
        exits the module with a result dict describing what changed.
        """
        changed = changed_support_mode = changed_lags = False
        results = dict(changed=changed)
        results['dvswitch'] = self.switch_name
        changed_list = []

        spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
        # configVersion guards against concurrent modification on reconfigure.
        spec.configVersion = self.dvs.config.configVersion

        # Check support mode: translate 'basic'/'enhanced' to the API's
        # 'singleLag'/'multipleLag' and compare with the switch.
        results['support_mode'] = self.support_mode
        lacp_support_mode = self.get_lacp_support_mode(self.support_mode)
        if self.dvs.config.lacpApiVersion != lacp_support_mode:
            changed = changed_support_mode = True
            changed_list.append("support mode")
            results['support_mode_previous'] = self.get_lacp_support_mode(self.dvs.config.lacpApiVersion)
            spec.lacpApiVersion = lacp_support_mode

        # Check LAGs
        results['link_aggregation_groups'] = self.link_aggregation_groups
        # Same "single empty string means no LAGs" convention as in __init__.
        if self.link_aggregation_groups and not (
                len(self.link_aggregation_groups) == 1 and self.link_aggregation_groups[0] == ''):
            if self.dvs.config.lacpGroupConfig:
                lacp_lag_list = []
                # Check if desired LAGs are configured: edit existing groups
                # whose settings differ, add groups that are missing.
                for lag in self.link_aggregation_groups:
                    lag_name, lag_mode, lag_uplink_number, lag_load_balancing_mode = self.get_lacp_lag_options(lag)
                    lag_found = False
                    for lacp_group in self.dvs.config.lacpGroupConfig:
                        if lacp_group.name == lag_name:
                            lag_found = True
                            if (lag_mode != lacp_group.mode or
                                    lag_uplink_number != lacp_group.uplinkNum or
                                    lag_load_balancing_mode != lacp_group.loadbalanceAlgorithm):
                                changed = changed_lags = True
                                lacp_lag_list.append(
                                    self.create_lacp_group_spec(
                                        'edit',
                                        lacp_group.key, lag_name, lag_uplink_number, lag_mode, lag_load_balancing_mode
                                    )
                                )
                            break
                    if lag_found is False:
                        changed = changed_lags = True
                        lacp_lag_list.append(
                            self.create_lacp_group_spec(
                                'add', None, lag_name, lag_uplink_number, lag_mode, lag_load_balancing_mode
                            )
                        )
                # Check if LAGs need to be removed: any group on the switch
                # that is not in the desired list gets a 'remove' spec.
                for lacp_group in self.dvs.config.lacpGroupConfig:
                    lag_found = False
                    for lag in self.link_aggregation_groups:
                        result = self.get_lacp_lag_options(lag)
                        if lacp_group.name == result[0]:
                            lag_found = True
                            break
                    if lag_found is False:
                        changed = changed_lags = True
                        lacp_lag_list.append(
                            self.create_lacp_group_spec('remove', lacp_group.key, lacp_group.name, None, None, None)
                        )
            else:
                # No LAGs exist yet: everything requested is an 'add'.
                changed = changed_lags = True
                lacp_lag_list = []
                for lag in self.link_aggregation_groups:
                    lag_name, lag_mode, lag_uplink_number, lag_load_balancing_mode = self.get_lacp_lag_options(lag)
                    lacp_lag_list.append(
                        self.create_lacp_group_spec(
                            'add', None, lag_name, lag_uplink_number, lag_mode, lag_load_balancing_mode
                        )
                    )
        else:
            # No LAGs desired: remove any that are currently configured.
            if self.dvs.config.lacpGroupConfig:
                changed = changed_lags = True
                lacp_lag_list = []
                for lacp_group in self.dvs.config.lacpGroupConfig:
                    lacp_lag_list.append(
                        self.create_lacp_group_spec('remove', lacp_group.key, lacp_group.name, None, None, None)
                    )
        if changed_lags:
            changed_list.append("link aggregation groups")
            # Report the previous LAG configuration for diff-style output.
            current_lags_list = []
            for lacp_group in self.dvs.config.lacpGroupConfig:
                temp_lag = dict()
                temp_lag['name'] = lacp_group.name
                temp_lag['uplink_number'] = lacp_group.uplinkNum
                temp_lag['mode'] = lacp_group.mode
                temp_lag['load_balancing_mode'] = lacp_group.loadbalanceAlgorithm
                current_lags_list.append(temp_lag)
            results['link_aggregation_groups_previous'] = current_lags_list

        if changed:
            # Build a human-readable "X, Y, and Z changed" message.
            if self.module.check_mode:
                changed_suffix = ' would be changed'
            else:
                changed_suffix = ' changed'
            if len(changed_list) > 2:
                message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
            elif len(changed_list) == 2:
                message = ' and '.join(changed_list)
            elif len(changed_list) == 1:
                message = changed_list[0]
            message += changed_suffix
            if not self.module.check_mode:
                if changed_support_mode and self.support_mode == 'basic' and changed_lags:
                    # Downgrading to 'basic' while LAGs change: only update the
                    # LAGs now; the mode change must happen on a later run.
                    self.update_lacp_group_config(self.dvs, lacp_lag_list)
                    # NOTE: You need to run the task again to change the support mode to 'basic' as well
                    # No matter how long you sleep, you will always get the following error in vCenter:
                    # 'Cannot complete operation due to concurrent modification by another operation.'
                    # self.update_dvs_config(self.dvs, spec)
                else:
                    if changed_support_mode:
                        self.update_dvs_config(self.dvs, spec)
                    if changed_lags:
                        self.update_lacp_group_config(self.dvs, lacp_lag_list)
        else:
            message = "LACP already configured properly"
        results['changed'] = changed
        results['result'] = message

        self.module.exit_json(**results)

    @staticmethod
    def get_lacp_support_mode(mode):
        """Translate between module-level and API-level LACP mode names.

        Maps 'basic' <-> 'singleLag' and 'enhanced' <-> 'multipleLag' in
        both directions; returns None for any other input.
        """
        return_mode = None
        if mode == 'basic':
            return_mode = 'singleLag'
        elif mode == 'enhanced':
            return_mode = 'multipleLag'
        elif mode == 'singleLag':
            return_mode = 'basic'
        elif mode == 'multipleLag':
            return_mode = 'enhanced'
        return return_mode

    def get_lacp_lag_options(self, lag):
        """Extract and validate one LAG definition dict.

        Requires 'name', 'mode', 'uplink_number' (max 30), and a supported
        'load_balancing_mode'; fails the module on any violation. Returns
        the tuple (name, mode, uplink_number, load_balancing_mode).
        """
        lag_name = lag.get('name', None)
        if lag_name is None:
            self.module.fail_json(msg="Please specify name in lag options as it's a required parameter")
        lag_mode = lag.get('mode', None)
        if lag_mode is None:
            self.module.fail_json(msg="Please specify mode in lag options as it's a required parameter")
        lag_uplink_number = lag.get('uplink_number', None)
        if lag_uplink_number is None:
            self.module.fail_json(msg="Please specify uplink_number in lag options as it's a required parameter")
        elif lag_uplink_number > 30:
            self.module.fail_json(msg="More than 30 uplinks are not supported in a single LAG!")
        lag_load_balancing_mode = lag.get('load_balancing_mode', None)
        # The valid load balancing algorithm identifiers accepted by the API.
        supported_lb_modes = ['srcTcpUdpPort', 'srcDestIpTcpUdpPortVlan', 'srcIpVlan', 'srcDestTcpUdpPort',
                              'srcMac', 'destIp', 'destMac', 'vlan', 'srcDestIp', 'srcIpTcpUdpPortVlan',
                              'srcDestIpTcpUdpPort', 'srcDestMac', 'destIpTcpUdpPort', 'srcPortId', 'srcIp',
                              'srcIpTcpUdpPort', 'destIpTcpUdpPortVlan', 'destTcpUdpPort', 'destIpVlan', 'srcDestIpVlan']
        if lag_load_balancing_mode is None:
            self.module.fail_json(msg="Please specify load_balancing_mode in lag options as it's a required parameter")
        elif lag_load_balancing_mode not in supported_lb_modes:
            self.module.fail_json(msg="The specified load balancing mode '%s' isn't supported!" % lag_load_balancing_mode)
        return lag_name, lag_mode, lag_uplink_number, lag_load_balancing_mode

    @staticmethod
    def create_lacp_group_spec(operation, key, name, uplink_number, mode, load_balancing_mode):
        """Build a LacpGroupSpec for the given operation.

        operation: 'add', 'edit', or 'remove'. 'key' identifies an existing
        group (required for edit/remove); the remaining settings are only
        applied when the group is not being removed.
        Returns: the populated LACP group spec.
        """
        lacp_spec = vim.dvs.VmwareDistributedVirtualSwitch.LacpGroupSpec()
        lacp_spec.operation = operation
        lacp_spec.lacpGroupConfig = vim.dvs.VmwareDistributedVirtualSwitch.LacpGroupConfig()
        lacp_spec.lacpGroupConfig.name = name
        if operation in ('edit', 'remove'):
            lacp_spec.lacpGroupConfig.key = key
        if not operation == 'remove':
            lacp_spec.lacpGroupConfig.uplinkNum = uplink_number
            lacp_spec.lacpGroupConfig.mode = mode
            lacp_spec.lacpGroupConfig.loadbalanceAlgorithm = load_balancing_mode
        # greyed out in vSphere Client!?
        # lacp_spec.vlan = vim.dvs.VmwareDistributedVirtualSwitch.LagVlanConfig()
        # lacp_spec.vlan.vlanId = [vim.NumericRange(...)]
        # lacp_spec.ipfix = vim.dvs.VmwareDistributedVirtualSwitch.LagIpfixConfig()
        # lacp_spec.ipfix.ipfixEnabled = True/False
        return lacp_spec

    def update_dvs_config(self, switch_object, spec):
        """Reconfigure the DVS with 'spec' and wait for task completion.

        Fails the module on TaskError; returns the wait_for_task() result.
        """
        try:
            task = switch_object.ReconfigureDvs_Task(spec)
            result = wait_for_task(task)
        except TaskError as invalid_argument:
            self.module.fail_json(
                msg="Failed to update DVS : %s" % to_native(invalid_argument)
            )
        return result

    def update_lacp_group_config(self, switch_object, lacp_group_spec):
        """Apply a list of LACP group specs to the switch and wait for the task.

        Translates the known API faults (DvsFault, NotSupported, TaskError)
        into module failures; returns the wait_for_task() result.
        """
        try:
            task = switch_object.UpdateDVSLacpGroupConfig_Task(lacpGroupSpec=lacp_group_spec)
            result = wait_for_task(task)
        except vim.fault.DvsFault as dvs_fault:
            self.module.fail_json(msg="Update failed due to DVS fault : %s" % to_native(dvs_fault))
        except vmodl.fault.NotSupported as not_supported:
            self.module.fail_json(
                msg="Multiple Link Aggregation Control Protocol groups not supported on the switch : %s" %
                to_native(not_supported)
            )
        except TaskError as invalid_argument:
            self.module.fail_json(
                msg="Failed to update Link Aggregation Group : %s" % to_native(invalid_argument)
            )
        return result
def main():
    """Module entry point: build the argument spec and apply the LACP config."""
    spec = vmware_argument_spec()
    # Module-specific parameters on top of the common VMware connection options.
    spec['switch'] = dict(required=True, aliases=['dvswitch'])
    spec['support_mode'] = dict(default='basic', choices=['basic', 'enhanced'])
    spec['link_aggregation_groups'] = dict(default=[], type='list')

    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)

    VMwareDvSwitchLacp(module).ensure()


if __name__ == '__main__':
    main()
|
@ -1,399 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2018, VMware, Inc.
|
||||
# Copyright: (c) 2019, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_dvswitch_nioc
|
||||
short_description: Manage distributed switch Network IO Control
|
||||
description:
|
||||
- This module can be used to manage distributed switch Network IO Control configurations.
|
||||
version_added: "2.9"
|
||||
author:
|
||||
- Joseph Andreatta (@vmwjoseph)
|
||||
notes:
|
||||
- Tested on vSphere 6.7
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
switch:
|
||||
description:
|
||||
- The name of the distributed switch.
|
||||
required: True
|
||||
aliases: ['dvswitch']
|
||||
type: str
|
||||
version:
|
||||
description:
|
||||
- Network IO control version.
|
||||
choices:
|
||||
- 'version2'
|
||||
- 'version3'
|
||||
required: False
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Enable or disable NIOC on the distributed switch.
|
||||
default: 'present'
|
||||
choices: ['present', 'absent']
|
||||
required: False
|
||||
type: str
|
||||
resources:
|
||||
description:
|
||||
- 'List of dicts containing
|
||||
{ name: Resource name is one of the following: "faultTolerance", "hbr", "iSCSI", "management", "nfs", "vdp",
|
||||
"virtualMachine", "vmotion", "vsan"
|
||||
limit: The maximum allowed usage for a traffic class belonging to this resource pool per host physical NIC.
|
||||
reservation: (Ignored if NIOC version is set to version2) Amount of bandwidth resource that is
|
||||
guaranteed available to the host infrastructure traffic class. If the utilization is less than the
|
||||
reservation, the extra bandwidth is used for other host infrastructure traffic class types.
|
||||
Reservation is not allowed to exceed the value of limit, if limit is set. Unit is Mbits/sec.
|
||||
shares_level: The allocation level ("low", "normal", "high", "custom"). The level is a simplified view
|
||||
of shares. Levels map to a pre-determined set of numeric values for shares.
|
||||
shares: Ignored unless shares_level is "custom". The number of shares allocated.
|
||||
reservation: Ignored unless version is "version3". Amount of bandwidth resource that is guaranteed
|
||||
available to the host infrastructure traffic class.
|
||||
}'
|
||||
required: False
|
||||
type: list
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
dvswitch_nioc_status:
|
||||
description:
|
||||
- result of the changes
|
||||
returned: success
|
||||
type: str
|
||||
resources_changed:
|
||||
description:
|
||||
- list of resources which were changed
|
||||
returned: success
|
||||
type: list
|
||||
sample: [ "vmotion", "vsan" ]
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Enable NIOC
|
||||
vmware_dvswitch_nioc:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
switch: dvSwitch
|
||||
version: version3
|
||||
resources:
|
||||
- name: vmotion
|
||||
limit: -1
|
||||
reservation: 128
|
||||
shares_level: normal
|
||||
- name: vsan
|
||||
limit: -1
|
||||
shares_level: custom
|
||||
shares: 99
|
||||
reservation: 256
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Disable NIOC
|
||||
vmware_dvswitch_nioc:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
switch: dvSwitch
|
||||
state: absent
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
try:
|
||||
from pyVmomi import vim, vmodl
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.vmware import PyVmomi, find_datacenter_by_name, find_dvs_by_name, vmware_argument_spec, wait_for_task
|
||||
|
||||
|
||||
class VMwareDVSwitchNIOC(PyVmomi):
    """Manage Network IO Control (NIOC) on a Distributed Virtual Switch.

    Supports enabling/disabling NIOC, switching between 'version2' and
    'version3', and updating per-resource (traffic class) allocations.
    """

    def __init__(self, module):
        """Read module params and initialize the result structure."""
        super(VMwareDVSwitchNIOC, self).__init__(module)
        # Resolved lazily by check_nioc_state()/check_resources().
        self.dvs = None
        # Resource dicts that differ from the live config; filled by check_resources().
        self.resource_changes = list()
        self.switch = module.params['switch']
        self.version = module.params.get('version')
        self.state = module.params['state']
        self.resources = module.params.get('resources')
        self.result = {
            'changed': False,
            'dvswitch_nioc_status': 'Unchanged',
            'resources_changed': list(),
        }

    def process_state(self):
        """Dispatch to the handler for (desired state, current state).

        check_nioc_state() returns one of 'absent', 'version', 'update',
        or 'present'; the nested dict maps that to the action to run.
        Always exits the module via state_exit().
        """
        nioc_states = {
            'absent': {
                'present': self.state_disable_nioc,
                'absent': self.state_exit,
            },
            'present': {
                'version': self.state_update_nioc_version,
                'update': self.state_update_nioc_resources,
                'present': self.state_exit,
                'absent': self.state_enable_nioc,
            }
        }
        nioc_states[self.state][self.check_nioc_state()]()
        self.state_exit()

    def state_exit(self):
        """Exit the module with the accumulated result."""
        self.module.exit_json(**self.result)

    def state_disable_nioc(self):
        """Disable NIOC on the switch (no-op under check mode)."""
        self.result['changed'] = True
        if not self.module.check_mode:
            self.set_nioc_enabled(False)
            self.result['dvswitch_nioc_status'] = 'Disabled NIOC'

    def state_enable_nioc(self):
        """Enable NIOC, set the requested version, and apply resource changes."""
        self.result['changed'] = True
        if not self.module.check_mode:
            self.set_nioc_enabled(True)
            self.set_nioc_version()
            self.result['dvswitch_nioc_status'] = "Enabled NIOC with version %s" % self.version

            # Check resource state and apply all required changes
            if self.check_resources() == 'update':
                self.set_nioc_resources(self.resource_changes)

    def state_update_nioc_version(self):
        """Switch the NIOC version and apply any pending resource changes."""
        self.result['changed'] = True
        if not self.module.check_mode:
            self.set_nioc_version()
            self.result['dvswitch_nioc_status'] = "Set NIOC to version %s" % self.version

            # Check resource state and apply all required changes
            if self.check_resources() == 'update':
                self.set_nioc_resources(self.resource_changes)

    def state_update_nioc_resources(self):
        """Apply the resource changes collected by check_resources()."""
        self.result['changed'] = True
        if not self.module.check_mode:
            self.result['dvswitch_nioc_status'] = "Resource configuration modified"
            self.set_nioc_resources(self.resource_changes)

    def set_nioc_enabled(self, state):
        """Enable (state=True) or disable (state=False) NIOC on the DVS.

        Translates the known API faults into module failures.
        """
        try:
            self.dvs.EnableNetworkResourceManagement(enable=state)
        except vim.fault.DvsFault as dvs_fault:
            self.module.fail_json(msg='DvsFault while setting NIOC enabled=%r: %s' % (state, to_native(dvs_fault.msg)))
        except vim.fault.DvsNotAuthorized as auth_fault:
            self.module.fail_json(msg='Not authorized to set NIOC enabled=%r: %s' % (state, to_native(auth_fault.msg)))
        except vmodl.fault.NotSupported as support_fault:
            self.module.fail_json(msg='NIOC not supported by DVS: %s' % to_native(support_fault.msg))
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg='RuntimeFault while setting NIOC enabled=%r: %s' % (state, to_native(runtime_fault.msg)))

    def set_nioc_version(self):
        """Reconfigure the DVS to the requested NIOC version.

        Defaults self.version to 'version2' when the user did not specify one.
        """
        upgrade_spec = vim.DistributedVirtualSwitch.ConfigSpec()
        # configVersion guards against concurrent modification on reconfigure.
        upgrade_spec.configVersion = self.dvs.config.configVersion
        if not self.version:
            self.version = 'version2'
        upgrade_spec.networkResourceControlVersion = self.version

        try:
            task = self.dvs.ReconfigureDvs_Task(spec=upgrade_spec)
            wait_for_task(task)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg="RuntimeFault when setting NIOC version: %s " % to_native(runtime_fault.msg))

    def check_nioc_state(self):
        """Determine the switch's current NIOC state relative to the request.

        Returns 'absent' (NIOC disabled), 'version' (wrong NIOC version),
        or delegates to check_resources() ('update'/'present').
        Fails the module when the DVS cannot be found.
        """
        self.dvs = find_dvs_by_name(self.content, self.switch)

        if self.dvs is None:
            self.module.fail_json(msg='DVS %s was not found.' % self.switch)
        else:
            if not self.dvs.config.networkResourceManagementEnabled:
                return 'absent'
            if self.version and self.dvs.config.networkResourceControlVersion != self.version:
                return 'version'

            # NIOC is enabled and the correct version, so return the state of the resources
            return self.check_resources()

    def check_resources(self):
        """Compare requested resource allocations with the live config.

        Collects resources that need changes into self.resource_changes and
        records their names in the result. Returns 'update' when at least
        one resource differs, 'present' otherwise.
        """
        self.dvs = find_dvs_by_name(self.content, self.switch)
        if self.dvs is None:
            self.module.fail_json(msg="DVS named '%s' was not found" % self.switch)

        for resource in self.resources:
            if self.check_resource_state(resource) == 'update':
                self.resource_changes.append(resource)
                self.result['resources_changed'].append(resource['name'])

        if len(self.resource_changes) > 0:
            return 'update'
        return 'present'

    def check_resource_state(self, resource):
        """Return 'update' when 'resource' differs from the live allocation.

        Only keys present in the user-supplied resource dict are compared;
        'shares' is only considered for custom shares level and
        'reservation' only under NIOC version3. Returns 'valid' otherwise.
        """
        resource_cfg = self.find_netioc_by_key(resource['name'])
        if resource_cfg is None:
            self.module.fail_json(msg="NetIOC resource named '%s' was not found" % resource['name'])

        rc = {
            "limit": resource_cfg.allocationInfo.limit,
            "shares_level": resource_cfg.allocationInfo.shares.level
        }
        if resource_cfg.allocationInfo.shares.level == 'custom':
            rc["shares"] = resource_cfg.allocationInfo.shares.shares
        if self.dvs.config.networkResourceControlVersion == "version3":
            rc["reservation"] = resource_cfg.allocationInfo.reservation

        for k, v in rc.items():
            if k in resource and v != resource[k]:
                return 'update'
        return 'valid'

    def set_nioc_resources(self, resources):
        """Apply resource allocations using the API matching the NIOC version."""
        if self.dvs.config.networkResourceControlVersion == 'version3':
            self._update_version3_resources(resources)
        elif self.dvs.config.networkResourceControlVersion == 'version2':
            self._update_version2_resources(resources)

    def _update_version3_resources(self, resources):
        """Apply version3 allocations via a DVS reconfigure task.

        Fails the module when shares_level is 'custom' but 'shares' is missing.
        """
        allocations = list()

        for resource in resources:
            allocation = vim.DistributedVirtualSwitch.HostInfrastructureTrafficResource()
            allocation.allocationInfo = vim.DistributedVirtualSwitch.HostInfrastructureTrafficResource.ResourceAllocation()
            allocation.key = resource['name']
            if 'limit' in resource:
                allocation.allocationInfo.limit = resource['limit']
            if 'reservation' in resource:
                allocation.allocationInfo.reservation = resource['reservation']
            if 'shares_level' in resource:
                allocation.allocationInfo.shares = vim.SharesInfo()
                allocation.allocationInfo.shares.level = resource['shares_level']
                if 'shares' in resource and resource['shares_level'] == 'custom':
                    allocation.allocationInfo.shares.shares = resource['shares']
                elif resource['shares_level'] == 'custom':
                    self.module.fail_json(
                        msg="Resource %s, shares_level set to custom but shares not specified" % resource['name']
                    )

            allocations.append(allocation)

        spec = vim.DistributedVirtualSwitch.ConfigSpec()
        spec.configVersion = self.dvs.config.configVersion
        spec.infrastructureTrafficResourceConfig = allocations

        task = self.dvs.ReconfigureDvs_Task(spec)
        wait_for_task(task)

    def _update_version2_resources(self, resources):
        """Apply version2 allocations via UpdateNetworkResourcePool."""
        allocations = list()

        for resource in resources:
            # Existing pool config supplies the per-pool configVersion.
            resource_cfg = self.find_netioc_by_key(resource['name'])
            allocation = vim.DVSNetworkResourcePoolConfigSpec()
            allocation.allocationInfo = vim.DVSNetworkResourcePoolAllocationInfo()
            allocation.key = resource['name']
            allocation.configVersion = resource_cfg.configVersion
            if 'limit' in resource:
                allocation.allocationInfo.limit = resource['limit']
            if 'shares_level' in resource:
                allocation.allocationInfo.shares = vim.SharesInfo()
                allocation.allocationInfo.shares.level = resource['shares_level']
                if 'shares' in resource and resource['shares_level'] == 'custom':
                    allocation.allocationInfo.shares.shares = resource['shares']

            allocations.append(allocation)

        self.dvs.UpdateNetworkResourcePool(allocations)

    def find_netioc_by_key(self, resource_name):
        """Look up a NIOC resource config by its key (resource name).

        Searches the config collection matching the switch's NIOC version;
        returns the config object or None when not found.
        """
        config = None
        if self.dvs.config.networkResourceControlVersion == "version3":
            config = self.dvs.config.infrastructureTrafficResourceConfig
        elif self.dvs.config.networkResourceControlVersion == "version2":
            config = self.dvs.networkResourcePool

        for obj in config:
            if obj.key == resource_name:
                return obj
        return None
def main():
    """Module entry point: parse arguments and drive the NIOC state machine."""
    # Per-entry schema for the 'resources' list, pulled out for readability.
    resource_options = dict(
        name=dict(
            type='str',
            required=True,
            choices=[
                'faultTolerance',
                'hbr',
                'iSCSI',
                'management',
                'nfs',
                'vdp',
                'virtualMachine',
                'vmotion',
                'vsan'
            ]
        ),
        limit=dict(type='int', default=-1),
        shares_level=dict(
            type='str',
            required=False,
            choices=['low', 'normal', 'high', 'custom']
        ),
        shares=dict(type='int', required=False),
        reservation=dict(type='int', default=0)
    )

    argument_spec = vmware_argument_spec()
    argument_spec.update(
        switch=dict(required=True, type='str', aliases=['dvswitch']),
        version=dict(type='str', choices=['version2', 'version3']),
        state=dict(default='present', choices=['present', 'absent'], type='str'),
        resources=dict(
            type='list',
            default=list(),
            elements='dict',
            options=resource_options,
        ),
    )

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # Surface pyVmomi faults as clean module failures instead of tracebacks.
    try:
        VMwareDVSwitchNIOC(module).process_state()
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=to_native(runtime_fault.msg))
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=to_native(method_fault.msg))
    except Exception as e:
        module.fail_json(msg=to_native(e))


if __name__ == '__main__':
    main()
|
@ -1,533 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_dvswitch_pvlans
|
||||
short_description: Manage Private VLAN configuration of a Distributed Switch
|
||||
description:
|
||||
- This module can be used to configure Private VLANs (PVLANs) on a Distributed Switch.
|
||||
version_added: 2.8
|
||||
author:
|
||||
- Christian Kotte (@ckotte)
|
||||
notes:
|
||||
- Tested on vSphere 6.5 and 6.7
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
switch:
|
||||
description:
|
||||
- The name of the Distributed Switch.
|
||||
type: str
|
||||
required: True
|
||||
aliases: ['dvswitch']
|
||||
primary_pvlans:
|
||||
description:
|
||||
- A list of VLAN IDs that should be configured as Primary PVLANs.
|
||||
- If C(primary_pvlans) isn't specified, all PVLANs will be deleted if present.
|
||||
- Each member of the list requires primary_pvlan_id (int) set.
|
||||
- The secondary promiscuous PVLAN will be created automatically.
|
||||
- If C(secondary_pvlans) isn't specified, the primary PVLANs and each secondary promiscuous PVLAN will be created.
|
||||
- Please see examples for more information.
|
||||
type: list
|
||||
default: []
|
||||
secondary_pvlans:
|
||||
description:
|
||||
- A list of VLAN IDs that should be configured as Secondary PVLANs.
|
||||
- 'C(primary_pvlans) need to be specified to create any Secondary PVLAN.'
|
||||
- If C(primary_pvlans) isn't specified, all PVLANs will be deleted if present.
|
||||
- Each member of the list requires primary_pvlan_id (int), secondary_pvlan_id (int), and pvlan_type (str) to be set.
|
||||
- The type of the secondary PVLAN can be isolated or community. The secondary promiscuous PVLAN will be created automatically.
|
||||
- Please see examples for more information.
|
||||
type: list
|
||||
default: []
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create PVLANs on a Distributed Switch
|
||||
vmware_dvswitch_pvlans:
|
||||
hostname: '{{ inventory_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
switch: dvSwitch
|
||||
primary_pvlans:
|
||||
- primary_pvlan_id: 1
|
||||
- primary_pvlan_id: 4
|
||||
secondary_pvlans:
|
||||
- primary_pvlan_id: 1
|
||||
secondary_pvlan_id: 2
|
||||
pvlan_type: isolated
|
||||
- primary_pvlan_id: 1
|
||||
secondary_pvlan_id: 3
|
||||
pvlan_type: community
|
||||
- primary_pvlan_id: 4
|
||||
secondary_pvlan_id: 5
|
||||
pvlan_type: community
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Create primary PVLAN and secondary promiscuous PVLAN on a Distributed Switch
|
||||
vmware_dvswitch_pvlans:
|
||||
hostname: '{{ inventory_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
switch: dvSwitch
|
||||
primary_pvlans:
|
||||
- primary_pvlan_id: 1
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Remove all PVLANs from a Distributed Switch
|
||||
vmware_dvswitch_pvlans:
|
||||
hostname: '{{ inventory_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
switch: dvSwitch
|
||||
primary_pvlans: []
|
||||
secondary_pvlans: []
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
result:
|
||||
description: information about performed operation
|
||||
returned: always
|
||||
type: str
|
||||
sample: {
|
||||
"changed": true,
|
||||
"dvswitch": "dvSwitch",
|
||||
"private_vlans": [
|
||||
{
|
||||
"primary_pvlan_id": 1,
|
||||
"pvlan_type": "promiscuous",
|
||||
"secondary_pvlan_id": 1
|
||||
},
|
||||
{
|
||||
"primary_pvlan_id": 1,
|
||||
"pvlan_type": "isolated",
|
||||
"secondary_pvlan_id": 2
|
||||
},
|
||||
{
|
||||
"primary_pvlan_id": 1,
|
||||
"pvlan_type": "community",
|
||||
"secondary_pvlan_id": 3
|
||||
}
|
||||
],
|
||||
"private_vlans_previous": [],
|
||||
"result": "All private VLANs added"
|
||||
}
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.vmware import (
|
||||
PyVmomi, TaskError, find_dvs_by_name, vmware_argument_spec, wait_for_task
|
||||
)
|
||||
|
||||
|
||||
class VMwareDvSwitchPvlans(PyVmomi):
    """Class to manage Private VLANs on a Distributed Virtual Switch."""

    def __init__(self, module):
        """Read module parameters, run sanity checks, and locate the DVS.

        Fails the module if the Distributed Switch cannot be found.
        """
        super(VMwareDvSwitchPvlans, self).__init__(module)
        self.switch_name = self.module.params['switch']
        if self.module.params['primary_pvlans']:
            self.primary_pvlans = self.module.params['primary_pvlans']
            if self.module.params['secondary_pvlans']:
                self.secondary_pvlans = self.module.params['secondary_pvlans']
            else:
                self.secondary_pvlans = None
            self.do_pvlan_sanity_checks()
        else:
            # No primary PVLANs requested: ensure() will remove any PVLAN
            # configuration that is currently present on the switch.
            self.primary_pvlans = None
            self.secondary_pvlans = None
        self.dvs = find_dvs_by_name(self.content, self.switch_name)
        if self.dvs is None:
            self.module.fail_json(msg="Failed to find DVS %s" % self.switch_name)

    def do_pvlan_sanity_checks(self):
        """Do sanity checks for primary and secondary PVLANs.

        Fails the module on duplicate primary IDs, duplicate secondary IDs,
        secondary IDs that are reused as primary IDs, or secondary PVLANs
        whose primary PVLAN isn't defined.
        """
        # Check if primary PVLANs are unique
        for primary_vlan in self.primary_pvlans:
            count = 0
            primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan)
            for primary_vlan_2 in self.primary_pvlans:
                primary_pvlan_id_2 = self.get_primary_pvlan_option(primary_vlan_2)
                if primary_pvlan_id == primary_pvlan_id_2:
                    count += 1
            if count > 1:
                self.module.fail_json(
                    msg="The primary PVLAN ID '%s' must be unique!" % primary_pvlan_id
                )
        if self.secondary_pvlans:
            # Check if secondary PVLANs are unique
            for secondary_pvlan in self.secondary_pvlans:
                count = 0
                result = self.get_secondary_pvlan_options(secondary_pvlan)
                for secondary_pvlan_2 in self.secondary_pvlans:
                    result_2 = self.get_secondary_pvlan_options(secondary_pvlan_2)
                    if result[0] == result_2[0]:
                        count += 1
                if count > 1:
                    self.module.fail_json(
                        msg="The secondary PVLAN ID '%s' must be unique!" % result[0]
                    )
            # Check if secondary PVLANs are already used as primary PVLANs
            for primary_vlan in self.primary_pvlans:
                primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan)
                for secondary_pvlan in self.secondary_pvlans:
                    result = self.get_secondary_pvlan_options(secondary_pvlan)
                    if primary_pvlan_id == result[0]:
                        self.module.fail_json(
                            msg="The secondary PVLAN ID '%s' is already used as a primary PVLAN!" %
                            result[0]
                        )
            # Check if a primary PVLAN is present for every secondary PVLAN
            for secondary_pvlan in self.secondary_pvlans:
                primary_pvlan_found = False
                result = self.get_secondary_pvlan_options(secondary_pvlan)
                for primary_vlan in self.primary_pvlans:
                    primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan)
                    if result[1] == primary_pvlan_id:
                        primary_pvlan_found = True
                        break
                if not primary_pvlan_found:
                    self.module.fail_json(
                        msg="The primary PVLAN ID '%s' isn't defined for the secondary PVLAN ID '%s'!" %
                        (result[1], result[0])
                    )

    def ensure(self):
        """Reconcile the desired PVLAN map with the switch configuration.

        Builds a list of PvlanConfigSpec add/remove operations, applies it via
        ReconfigureDvs_Task (unless in check mode), and exits the module with
        the result facts.
        """
        changed = False
        results = dict(changed=changed)
        results['dvswitch'] = self.switch_name
        changed_list_add = []
        changed_list_remove = []

        config_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
        # Use the same version in the new spec; The version will be increased by one by the API automatically
        config_spec.configVersion = self.dvs.config.configVersion

        # Check Private VLANs
        results['private_vlans'] = None
        if self.primary_pvlans:
            # Build the desired PVLAN list as returned in the results: every
            # primary PVLAN implies a promiscuous entry (primary == secondary).
            desired_pvlan_list = []
            for primary_vlan in self.primary_pvlans:
                primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan)
                temp_pvlan = dict()
                temp_pvlan['primary_pvlan_id'] = primary_pvlan_id
                temp_pvlan['secondary_pvlan_id'] = primary_pvlan_id
                temp_pvlan['pvlan_type'] = 'promiscuous'
                desired_pvlan_list.append(temp_pvlan)
            if self.secondary_pvlans:
                for secondary_pvlan in self.secondary_pvlans:
                    (secondary_pvlan_id,
                     secondary_vlan_primary_vlan_id,
                     pvlan_type) = self.get_secondary_pvlan_options(secondary_pvlan)
                    temp_pvlan = dict()
                    temp_pvlan['primary_pvlan_id'] = secondary_vlan_primary_vlan_id
                    temp_pvlan['secondary_pvlan_id'] = secondary_pvlan_id
                    temp_pvlan['pvlan_type'] = pvlan_type
                    desired_pvlan_list.append(temp_pvlan)
            results['private_vlans'] = desired_pvlan_list
            if self.dvs.config.pvlanConfig:
                pvlan_spec_list = []
                # Check if desired PVLANs are configured
                for primary_vlan in self.primary_pvlans:
                    primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan)
                    promiscuous_found = other_found = False
                    for pvlan_object in self.dvs.config.pvlanConfig:
                        if pvlan_object.primaryVlanId == primary_pvlan_id and pvlan_object.pvlanType == 'promiscuous':
                            promiscuous_found = True
                            break
                    if not promiscuous_found:
                        changed = True
                        changed_list_add.append('promiscuous (%s, %s)' % (primary_pvlan_id, primary_pvlan_id))
                        pvlan_spec_list.append(
                            self.create_pvlan_config_spec(
                                operation='add',
                                primary_pvlan_id=primary_pvlan_id,
                                secondary_pvlan_id=primary_pvlan_id,
                                pvlan_type='promiscuous'
                            )
                        )
                    if self.secondary_pvlans:
                        for secondary_pvlan in self.secondary_pvlans:
                            (secondary_pvlan_id,
                             secondary_vlan_primary_vlan_id,
                             pvlan_type) = self.get_secondary_pvlan_options(secondary_pvlan)
                            if primary_pvlan_id == secondary_vlan_primary_vlan_id:
                                # Bug fix: reset the flag for every secondary PVLAN.
                                # Previously a match for one secondary PVLAN left the
                                # flag True, so missing sibling secondary PVLANs of
                                # the same primary were never added.
                                other_found = False
                                for pvlan_object_2 in self.dvs.config.pvlanConfig:
                                    if (pvlan_object_2.primaryVlanId == secondary_vlan_primary_vlan_id
                                            and pvlan_object_2.secondaryVlanId == secondary_pvlan_id
                                            and pvlan_object_2.pvlanType == pvlan_type):
                                        other_found = True
                                        break
                                if not other_found:
                                    changed = True
                                    changed_list_add.append(
                                        '%s (%s, %s)' % (pvlan_type, primary_pvlan_id, secondary_pvlan_id)
                                    )
                                    pvlan_spec_list.append(
                                        self.create_pvlan_config_spec(
                                            operation='add',
                                            primary_pvlan_id=primary_pvlan_id,
                                            secondary_pvlan_id=secondary_pvlan_id,
                                            pvlan_type=pvlan_type
                                        )
                                    )
                # Check if a PVLAN needs to be removed
                for pvlan_object in self.dvs.config.pvlanConfig:
                    promiscuous_found = other_found = False
                    if (pvlan_object.primaryVlanId == pvlan_object.secondaryVlanId
                            and pvlan_object.pvlanType == 'promiscuous'):
                        # Existing promiscuous entry: keep only if its primary
                        # PVLAN is still desired.
                        for primary_vlan in self.primary_pvlans:
                            primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan)
                            if pvlan_object.primaryVlanId == primary_pvlan_id and pvlan_object.pvlanType == 'promiscuous':
                                promiscuous_found = True
                                break
                        if not promiscuous_found:
                            changed = True
                            changed_list_remove.append(
                                'promiscuous (%s, %s)' % (pvlan_object.primaryVlanId, pvlan_object.secondaryVlanId)
                            )
                            pvlan_spec_list.append(
                                self.create_pvlan_config_spec(
                                    operation='remove',
                                    primary_pvlan_id=pvlan_object.primaryVlanId,
                                    secondary_pvlan_id=pvlan_object.secondaryVlanId,
                                    pvlan_type='promiscuous'
                                )
                            )
                    elif self.secondary_pvlans:
                        # Existing isolated/community entry: keep only if still
                        # present in the desired secondary PVLANs.
                        for secondary_pvlan in self.secondary_pvlans:
                            (secondary_pvlan_id,
                             secondary_vlan_primary_vlan_id,
                             pvlan_type) = self.get_secondary_pvlan_options(secondary_pvlan)
                            if (pvlan_object.primaryVlanId == secondary_vlan_primary_vlan_id
                                    and pvlan_object.secondaryVlanId == secondary_pvlan_id
                                    and pvlan_object.pvlanType == pvlan_type):
                                other_found = True
                                break
                        if not other_found:
                            changed = True
                            changed_list_remove.append(
                                '%s (%s, %s)' % (
                                    pvlan_object.pvlanType, pvlan_object.primaryVlanId, pvlan_object.secondaryVlanId
                                )
                            )
                            pvlan_spec_list.append(
                                self.create_pvlan_config_spec(
                                    operation='remove',
                                    primary_pvlan_id=pvlan_object.primaryVlanId,
                                    secondary_pvlan_id=pvlan_object.secondaryVlanId,
                                    pvlan_type=pvlan_object.pvlanType
                                )
                            )
                    else:
                        # No secondary PVLANs desired at all: remove the entry.
                        changed = True
                        changed_list_remove.append(
                            '%s (%s, %s)' % (
                                pvlan_object.pvlanType, pvlan_object.primaryVlanId, pvlan_object.secondaryVlanId
                            )
                        )
                        pvlan_spec_list.append(
                            self.create_pvlan_config_spec(
                                operation='remove',
                                primary_pvlan_id=pvlan_object.primaryVlanId,
                                secondary_pvlan_id=pvlan_object.secondaryVlanId,
                                pvlan_type=pvlan_object.pvlanType
                            )
                        )
            else:
                # No PVLANs configured yet: add everything that is desired.
                changed = True
                changed_list_add.append('All private VLANs')
                pvlan_spec_list = []
                for primary_vlan in self.primary_pvlans:
                    # the first secondary VLAN's type is always promiscuous
                    primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan)
                    pvlan_spec_list.append(
                        self.create_pvlan_config_spec(
                            operation='add',
                            primary_pvlan_id=primary_pvlan_id,
                            secondary_pvlan_id=primary_pvlan_id,
                            pvlan_type='promiscuous'
                        )
                    )
                    if self.secondary_pvlans:
                        for secondary_pvlan in self.secondary_pvlans:
                            (secondary_pvlan_id,
                             secondary_vlan_primary_vlan_id,
                             pvlan_type) = self.get_secondary_pvlan_options(secondary_pvlan)
                            if primary_pvlan_id == secondary_vlan_primary_vlan_id:
                                pvlan_spec_list.append(
                                    self.create_pvlan_config_spec(
                                        operation='add',
                                        primary_pvlan_id=primary_pvlan_id,
                                        secondary_pvlan_id=secondary_pvlan_id,
                                        pvlan_type=pvlan_type
                                    )
                                )
        else:
            # Remove PVLAN configuration if present
            if self.dvs.config.pvlanConfig:
                changed = True
                changed_list_remove.append('All private VLANs')
                pvlan_spec_list = []
                for pvlan_object in self.dvs.config.pvlanConfig:
                    pvlan_spec_list.append(
                        self.create_pvlan_config_spec(
                            operation='remove',
                            primary_pvlan_id=pvlan_object.primaryVlanId,
                            secondary_pvlan_id=pvlan_object.secondaryVlanId,
                            pvlan_type=pvlan_object.pvlanType
                        )
                    )

        if changed:
            message_add = message_remove = None
            if changed_list_add:
                message_add = self.build_change_message('add', changed_list_add)
            if changed_list_remove:
                message_remove = self.build_change_message('remove', changed_list_remove)
            if message_add and message_remove:
                message = message_add + '. ' + message_remove + '.'
            elif message_add:
                message = message_add
            elif message_remove:
                message = message_remove
            # Report the previous configuration so the user can see what changed
            current_pvlan_list = []
            for pvlan_object in self.dvs.config.pvlanConfig:
                temp_pvlan = dict()
                temp_pvlan['primary_pvlan_id'] = pvlan_object.primaryVlanId
                temp_pvlan['secondary_pvlan_id'] = pvlan_object.secondaryVlanId
                temp_pvlan['pvlan_type'] = pvlan_object.pvlanType
                current_pvlan_list.append(temp_pvlan)
            results['private_vlans_previous'] = current_pvlan_list
            config_spec.pvlanConfigSpec = pvlan_spec_list
            if not self.module.check_mode:
                try:
                    task = self.dvs.ReconfigureDvs_Task(config_spec)
                    wait_for_task(task)
                except TaskError as invalid_argument:
                    self.module.fail_json(
                        msg="Failed to update DVS : %s" % to_native(invalid_argument)
                    )
        else:
            message = "PVLANs already configured properly"
        results['changed'] = changed
        results['result'] = message

        self.module.exit_json(**results)

    def get_primary_pvlan_option(self, primary_vlan):
        """Validate a primary_pvlans entry and return its primary_pvlan_id.

        Fails the module if the ID is missing or a reserved VLAN ID (0, 4095).
        """
        primary_pvlan_id = primary_vlan.get('primary_pvlan_id', None)
        if primary_pvlan_id is None:
            self.module.fail_json(
                msg="Please specify primary_pvlan_id in primary_pvlans options as it's a required parameter"
            )
        if primary_pvlan_id in (0, 4095):
            self.module.fail_json(msg="The VLAN IDs of 0 and 4095 are reserved and cannot be used as a primary PVLAN.")
        return primary_pvlan_id

    def get_secondary_pvlan_options(self, secondary_pvlan):
        """Validate a secondary_pvlans entry.

        Returns a tuple (secondary_pvlan_id, primary_pvlan_id, pvlan_type).
        Fails the module if any required key is missing, a reserved VLAN ID
        (0, 4095) is used, or pvlan_type isn't 'isolated' or 'community'.
        """
        secondary_pvlan_id = secondary_pvlan.get('secondary_pvlan_id', None)
        if secondary_pvlan_id is None:
            self.module.fail_json(
                msg="Please specify secondary_pvlan_id in secondary_pvlans options as it's a required parameter"
            )
        primary_pvlan_id = secondary_pvlan.get('primary_pvlan_id', None)
        if primary_pvlan_id is None:
            self.module.fail_json(
                msg="Please specify primary_pvlan_id in secondary_pvlans options as it's a required parameter"
            )
        if secondary_pvlan_id in (0, 4095) or primary_pvlan_id in (0, 4095):
            self.module.fail_json(
                msg="The VLAN IDs of 0 and 4095 are reserved and cannot be used as a primary or secondary PVLAN."
            )
        pvlan_type = secondary_pvlan.get('pvlan_type', None)
        supported_pvlan_types = ['isolated', 'community']
        if pvlan_type is None:
            self.module.fail_json(msg="Please specify pvlan_type in secondary_pvlans options as it's a required parameter")
        elif pvlan_type not in supported_pvlan_types:
            self.module.fail_json(msg="The specified PVLAN type '%s' isn't supported!" % pvlan_type)
        return secondary_pvlan_id, primary_pvlan_id, pvlan_type

    @staticmethod
    def create_pvlan_config_spec(operation, primary_pvlan_id, secondary_pvlan_id, pvlan_type):
        """
        Create PVLAN config spec
        operation: add, edit, or remove
        Returns: PVLAN config spec
        """
        pvlan_spec = vim.dvs.VmwareDistributedVirtualSwitch.PvlanConfigSpec()
        pvlan_spec.operation = operation
        pvlan_spec.pvlanEntry = vim.dvs.VmwareDistributedVirtualSwitch.PvlanMapEntry()
        pvlan_spec.pvlanEntry.primaryVlanId = primary_pvlan_id
        pvlan_spec.pvlanEntry.secondaryVlanId = secondary_pvlan_id
        pvlan_spec.pvlanEntry.pvlanType = pvlan_type
        return pvlan_spec

    def build_change_message(self, operation, changed_list):
        """Build the changed message.

        operation: 'add' or 'remove'; changed_list: non-empty list of
        human-readable PVLAN descriptions. Uses conditional wording in
        check mode ("would be added/removed").
        """
        if operation == 'add':
            changed_operation = 'added'
        elif operation == 'remove':
            changed_operation = 'removed'
        if self.module.check_mode:
            changed_suffix = ' would be %s' % changed_operation
        else:
            changed_suffix = ' %s' % changed_operation
        if len(changed_list) > 2:
            message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
        elif len(changed_list) == 2:
            message = ' and '.join(changed_list)
        elif len(changed_list) == 1:
            message = changed_list[0]
        message += changed_suffix
        return message
|
||||
|
||||
|
||||
def main():
    """Module entry point for vmware_dvswitch_pvlans.

    Builds the argument spec, instantiates VMwareDvSwitchPvlans, and runs
    the reconciliation. Error handling happens inside the class via
    module.fail_json.
    """
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            # type='str' and elements='dict' added for consistency with the
            # documented types and the sibling vmware_dvswitch_* modules; the
            # list entries are consumed as dicts via .get() in the class.
            switch=dict(required=True, type='str', aliases=['dvswitch']),
            primary_pvlans=dict(type='list', default=list(), required=False, elements='dict'),
            secondary_pvlans=dict(type='list', default=list(), required=False, elements='dict'),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    vmware_dvswitch_pvlans = VMwareDvSwitchPvlans(module)
    vmware_dvswitch_pvlans.ensure()


if __name__ == '__main__':
    main()
|
@ -1,480 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_dvswitch_uplink_pg
|
||||
short_description: Manage uplink portgroup configuration of a Distributed Switch
|
||||
description:
|
||||
- This module can be used to configure the uplink portgroup of a Distributed Switch.
|
||||
version_added: 2.8
|
||||
author:
|
||||
- Christian Kotte (@ckotte)
|
||||
notes:
|
||||
- Tested on vSphere 6.5 and 6.7
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
switch:
|
||||
description:
|
||||
- The name of the Distributed Switch.
|
||||
type: str
|
||||
required: True
|
||||
aliases: ['dvswitch']
|
||||
name:
|
||||
description:
|
||||
- The name of the uplink portgroup.
|
||||
- The current name will be used if not specified.
|
||||
type: str
|
||||
description:
|
||||
description:
|
||||
- The description of the uplink portgroup.
|
||||
type: str
|
||||
advanced:
|
||||
description:
|
||||
- Dictionary which configures the advanced policy settings for the uplink portgroup.
|
||||
- 'Valid attributes are:'
|
||||
- '- C(port_config_reset_at_disconnect) (bool): indicates if the configuration of a port is reset automatically after disconnect. (default: true)'
|
||||
- '- C(block_override) (bool): indicates if the block policy can be changed per port. (default: true)'
|
||||
- '- C(netflow_override) (bool): indicates if the NetFlow policy can be changed per port. (default: false)'
|
||||
- '- C(traffic_filter_override) (bool): indicates if the traffic filter can be changed per port. (default: false)'
|
||||
- '- C(vendor_config_override) (bool): indicates if the vendor config can be changed per port. (default: false)'
|
||||
- '- C(vlan_override) (bool): indicates if the vlan can be changed per port. (default: false)'
|
||||
required: False
|
||||
default: {
|
||||
port_config_reset_at_disconnect: True,
|
||||
block_override: True,
|
||||
vendor_config_override: False,
|
||||
vlan_override: False,
|
||||
netflow_override: False,
|
||||
traffic_filter_override: False,
|
||||
}
|
||||
aliases: ['port_policy']
|
||||
type: dict
|
||||
vlan_trunk_range:
|
||||
description:
|
||||
- The VLAN trunk range that should be configured with the uplink portgroup.
|
||||
- 'This can be a combination of multiple ranges and numbers, example: [ 2-3967, 4049-4092 ].'
|
||||
type: list
|
||||
default: [ '0-4094' ]
|
||||
lacp:
|
||||
description:
|
||||
- Dictionary which configures the LACP settings for the uplink portgroup.
|
||||
- The options are only used if the LACP support mode is set to 'basic'.
|
||||
- 'The following parameters are required:'
|
||||
- '- C(status) (str): Indicates if LACP is enabled. (default: disabled)'
|
||||
- '- C(mode) (str): The negotiating state of the uplinks/ports. (default: passive)'
|
||||
required: False
|
||||
default: {
|
||||
status: 'disabled',
|
||||
mode: 'passive',
|
||||
}
|
||||
type: dict
|
||||
netflow_enabled:
|
||||
description:
|
||||
- Indicates if NetFlow is enabled on the uplink portgroup.
|
||||
type: bool
|
||||
default: False
|
||||
block_all_ports:
|
||||
description:
|
||||
- Indicates if all ports are blocked on the uplink portgroup.
|
||||
type: bool
|
||||
default: False
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Configure Uplink portgroup
|
||||
vmware_dvswitch_uplink_pg:
|
||||
hostname: '{{ inventory_hostname }}'
|
||||
username: '{{ vcsa_username }}'
|
||||
password: '{{ vcsa_password }}'
|
||||
switch: dvSwitch
|
||||
name: dvSwitch-DVUplinks
|
||||
advanced:
|
||||
port_config_reset_at_disconnect: True
|
||||
block_override: True
|
||||
vendor_config_override: False
|
||||
vlan_override: False
|
||||
netflow_override: False
|
||||
traffic_filter_override: False
|
||||
vlan_trunk_range:
|
||||
- '0-4094'
|
||||
netflow_enabled: False
|
||||
block_all_ports: False
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Enabled LACP on Uplink portgroup
|
||||
vmware_dvswitch_uplink_pg:
|
||||
hostname: '{{ inventory_hostname }}'
|
||||
username: '{{ vcsa_username }}'
|
||||
password: '{{ vcsa_password }}'
|
||||
switch: dvSwitch
|
||||
lacp:
|
||||
status: enabled
|
||||
mode: active
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
result:
|
||||
description: information about performed operation
|
||||
returned: always
|
||||
type: str
|
||||
sample: {
|
||||
"adv_block_ports": true,
|
||||
"adv_netflow": false,
|
||||
"adv_reset_at_disconnect": true,
|
||||
"adv_traffic_filtering": false,
|
||||
"adv_vendor_conf": false,
|
||||
"adv_vlan": false,
|
||||
"block_all_ports": false,
|
||||
"changed": false,
|
||||
"description": null,
|
||||
"dvswitch": "dvSwitch",
|
||||
"lacp_status": "disabled",
|
||||
"lacp_status_previous": "enabled",
|
||||
"name": "dvSwitch-DVUplinks",
|
||||
"netflow_enabled": false,
|
||||
"result": "Uplink portgroup already configured properly",
|
||||
"vlan_trunk_range": [
|
||||
"2-3967",
|
||||
"4049-4092"
|
||||
]
|
||||
}
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.vmware import (
|
||||
PyVmomi, TaskError, find_dvs_by_name, vmware_argument_spec, wait_for_task
|
||||
)
|
||||
|
||||
|
||||
class VMwareDvSwitchUplinkPortgroup(PyVmomi):
    """Class to manage a uplink portgroup on a Distributed Virtual Switch"""

    def __init__(self, module):
        """Read module parameters and look up the target DVS.

        Fails the module immediately when the named switch cannot be found.
        """
        super(VMwareDvSwitchUplinkPortgroup, self).__init__(module)
        self.switch_name = self.module.params['switch']
        self.uplink_pg_name = self.params['name']
        self.uplink_pg_description = self.params['description']
        # 'advanced' is a dict suboption block; .get() returns None for a
        # missing key, but argument-spec defaults normally fill every key.
        self.uplink_pg_reset = self.params['advanced'].get('port_config_reset_at_disconnect')
        self.uplink_pg_block_ports = self.params['advanced'].get('block_override')
        self.uplink_pg_vendor_conf = self.params['advanced'].get('vendor_config_override')
        self.uplink_pg_vlan = self.params['advanced'].get('vlan_override')
        self.uplink_pg_netflow = self.params['advanced'].get('netflow_override')
        self.uplink_pg_tf = self.params['advanced'].get('traffic_filter_override')
        self.uplink_pg_vlan_trunk_range = self.params['vlan_trunk_range']
        self.uplink_pg_netflow_enabled = self.params['netflow_enabled']
        self.uplink_pg_block_all_ports = self.params['block_all_ports']
        self.lacp_status = self.params['lacp'].get('status')
        self.lacp_mode = self.params['lacp'].get('mode')
        self.dvs = find_dvs_by_name(self.content, self.switch_name)
        if self.dvs is None:
            self.module.fail_json(msg="Failed to find DVS %s" % self.switch_name)
        # lacpApiVersion is 'singleLag' or 'multipleLag'; translated to
        # 'basic'/'enhanced' by get_lacp_support_mode() below.
        self.support_mode = self.dvs.config.lacpApiVersion

    def ensure(self):
        """Manage uplink portgroup.

        Builds a ConfigSpec that only contains the settings that differ from
        the current configuration, reports every changed aspect in the result,
        and (outside check mode) reconfigures the first uplink portgroup of
        the switch. Always exits the module via exit_json()/fail_json().
        """
        changed = changed_uplink_pg_policy = changed_vlan_trunk_range = changed_lacp = False
        results = dict(changed=changed)
        results['dvswitch'] = self.switch_name
        changed_list = []

        uplink_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
        # Use the same version in the new spec; The version will be increased by one by the API automatically
        uplink_pg_spec.configVersion = self.dvs.config.uplinkPortgroup[0].config.configVersion
        # NOTE(review): only the first uplink portgroup is managed here; a DVS
        # with several uplink portgroups ignores the others.
        uplink_pg_config = self.dvs.config.uplinkPortgroup[0].config

        # Check name
        if self.uplink_pg_name:
            results['name'] = self.uplink_pg_name
            if uplink_pg_config.name != self.uplink_pg_name:
                changed = True
                changed_list.append("name")
                results['name_previous'] = uplink_pg_config.name
                uplink_pg_spec.name = self.uplink_pg_name
        else:
            results['name'] = uplink_pg_config.name

        # Check description
        results['description'] = self.uplink_pg_description
        if uplink_pg_config.description != self.uplink_pg_description:
            changed = True
            changed_list.append("description")
            results['description_previous'] = uplink_pg_config.description
            uplink_pg_spec.description = self.uplink_pg_description

        # Check port policies
        results['adv_reset_at_disconnect'] = self.uplink_pg_reset
        results['adv_block_ports'] = self.uplink_pg_block_ports
        results['adv_vendor_conf'] = self.uplink_pg_vendor_conf
        results['adv_vlan'] = self.uplink_pg_vlan
        results['adv_netflow'] = self.uplink_pg_netflow
        results['adv_traffic_filtering'] = self.uplink_pg_tf
        uplink_pg_policy_spec = vim.dvs.VmwareDistributedVirtualSwitch.VMwarePortgroupPolicy()
        uplink_pg_policy_spec.portConfigResetAtDisconnect = self.uplink_pg_reset
        uplink_pg_policy_spec.blockOverrideAllowed = self.uplink_pg_block_ports
        uplink_pg_policy_spec.vendorConfigOverrideAllowed = self.uplink_pg_vendor_conf
        uplink_pg_policy_spec.vlanOverrideAllowed = self.uplink_pg_vlan
        uplink_pg_policy_spec.ipfixOverrideAllowed = self.uplink_pg_netflow
        uplink_pg_policy_spec.trafficFilterOverrideAllowed = self.uplink_pg_tf
        # There's no information available if the following option are deprecated, but
        # they aren't visible in the vSphere Client
        uplink_pg_policy_spec.shapingOverrideAllowed = False
        uplink_pg_policy_spec.livePortMovingAllowed = False
        uplink_pg_policy_spec.uplinkTeamingOverrideAllowed = False
        uplink_pg_policy_spec.securityPolicyOverrideAllowed = False
        uplink_pg_policy_spec.networkResourcePoolOverrideAllowed = False
        # Check policies
        if uplink_pg_config.policy.portConfigResetAtDisconnect != self.uplink_pg_reset:
            changed_uplink_pg_policy = True
            results['adv_reset_at_disconnect_previous'] = uplink_pg_config.policy.portConfigResetAtDisconnect
        if uplink_pg_config.policy.blockOverrideAllowed != self.uplink_pg_block_ports:
            changed_uplink_pg_policy = True
            results['adv_block_ports_previous'] = uplink_pg_config.policy.blockOverrideAllowed
        if uplink_pg_config.policy.vendorConfigOverrideAllowed != self.uplink_pg_vendor_conf:
            changed_uplink_pg_policy = True
            results['adv_vendor_conf_previous'] = uplink_pg_config.policy.vendorConfigOverrideAllowed
        if uplink_pg_config.policy.vlanOverrideAllowed != self.uplink_pg_vlan:
            changed_uplink_pg_policy = True
            results['adv_vlan_previous'] = uplink_pg_config.policy.vlanOverrideAllowed
        if uplink_pg_config.policy.ipfixOverrideAllowed != self.uplink_pg_netflow:
            changed_uplink_pg_policy = True
            results['adv_netflow_previous'] = uplink_pg_config.policy.ipfixOverrideAllowed
        if uplink_pg_config.policy.trafficFilterOverrideAllowed != self.uplink_pg_tf:
            changed_uplink_pg_policy = True
            results['adv_traffic_filtering_previous'] = uplink_pg_config.policy.trafficFilterOverrideAllowed
        if changed_uplink_pg_policy:
            changed = True
            changed_list.append("advanced")
            uplink_pg_spec.policy = uplink_pg_policy_spec

        uplink_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()

        # Check VLAN trunk
        results['vlan_trunk_range'] = self.uplink_pg_vlan_trunk_range
        vlan_id_ranges = self.uplink_pg_vlan_trunk_range
        trunk_vlan_spec = vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec()
        vlan_id_list = []
        # First pass: build the full desired NumericRange list and detect
        # desired ranges that are not configured yet.
        for vlan_id_range in vlan_id_ranges:
            vlan_id_range_found = False
            vlan_id_start, vlan_id_end = self.get_vlan_ids_from_range(vlan_id_range)
            # Check if range is already configured
            for current_vlan_id_range in uplink_pg_config.defaultPortConfig.vlan.vlanId:
                if current_vlan_id_range.start == int(vlan_id_start) and current_vlan_id_range.end == int(vlan_id_end):
                    vlan_id_range_found = True
                    break
            if vlan_id_range_found is False:
                changed_vlan_trunk_range = True
            vlan_id_list.append(
                vim.NumericRange(start=int(vlan_id_start), end=int(vlan_id_end))
            )
        # Check if range needs to be removed
        for current_vlan_id_range in uplink_pg_config.defaultPortConfig.vlan.vlanId:
            vlan_id_range_found = False
            for vlan_id_range in vlan_id_ranges:
                vlan_id_start, vlan_id_end = self.get_vlan_ids_from_range(vlan_id_range)
                if (current_vlan_id_range.start == int(vlan_id_start)
                        and current_vlan_id_range.end == int(vlan_id_end)):
                    vlan_id_range_found = True
                    break
            if vlan_id_range_found is False:
                changed_vlan_trunk_range = True
        trunk_vlan_spec.vlanId = vlan_id_list
        if changed_vlan_trunk_range:
            changed = True
            changed_list.append("vlan trunk range")
            # Render the previous configuration as 'start-end' strings (or the
            # bare ID for single-VLAN ranges) for the result payload.
            current_vlan_id_list = []
            for current_vlan_id_range in uplink_pg_config.defaultPortConfig.vlan.vlanId:
                if current_vlan_id_range.start == current_vlan_id_range.end:
                    current_vlan_id_range_string = current_vlan_id_range.start
                else:
                    current_vlan_id_range_string = '-'.join(
                        [str(current_vlan_id_range.start), str(current_vlan_id_range.end)]
                    )
                current_vlan_id_list.append(current_vlan_id_range_string)
            results['vlan_trunk_range_previous'] = current_vlan_id_list
            uplink_pg_spec.defaultPortConfig.vlan = trunk_vlan_spec

        # Check LACP
        # LACP on the uplink portgroup is only configurable when the switch
        # runs the basic ('singleLag') LACP API version.
        lacp_support_mode = self.get_lacp_support_mode(self.support_mode)
        if lacp_support_mode == 'basic':
            results['lacp_status'] = self.lacp_status
            lacp_spec = vim.dvs.VmwareDistributedVirtualSwitch.UplinkLacpPolicy()
            lacp_enabled = False
            if self.lacp_status == 'enabled':
                lacp_enabled = True
            if uplink_pg_config.defaultPortConfig.lacpPolicy.enable.value != lacp_enabled:
                changed_lacp = True
                changed_list.append("lacp status")
                if uplink_pg_config.defaultPortConfig.lacpPolicy.enable.value:
                    results['lacp_status_previous'] = 'enabled'
                else:
                    results['lacp_status_previous'] = 'disabled'
                lacp_spec.enable = vim.BoolPolicy()
                lacp_spec.enable.inherited = False
                lacp_spec.enable.value = lacp_enabled
            # The mode is only compared when LACP is (to be) enabled.
            if lacp_enabled and uplink_pg_config.defaultPortConfig.lacpPolicy.mode.value != self.lacp_mode:
                results['lacp_mode'] = self.lacp_mode
                changed_lacp = True
                changed_list.append("lacp mode")
                results['lacp_mode_previous'] = uplink_pg_config.defaultPortConfig.lacpPolicy.mode.value
                lacp_spec.mode = vim.StringPolicy()
                lacp_spec.mode.inherited = False
                lacp_spec.mode.value = self.lacp_mode
            if changed_lacp:
                changed = True
                uplink_pg_spec.defaultPortConfig.lacpPolicy = lacp_spec

        # Check NetFlow
        results['netflow_enabled'] = self.uplink_pg_netflow_enabled
        netflow_enabled_spec = vim.BoolPolicy()
        netflow_enabled_spec.inherited = False
        netflow_enabled_spec.value = self.uplink_pg_netflow_enabled
        if uplink_pg_config.defaultPortConfig.ipfixEnabled.value != self.uplink_pg_netflow_enabled:
            changed = True
            results['netflow_enabled_previous'] = uplink_pg_config.defaultPortConfig.ipfixEnabled.value
            changed_list.append("netflow")
            uplink_pg_spec.defaultPortConfig.ipfixEnabled = netflow_enabled_spec

        # TODO: Check Traffic filtering and marking

        # Check Block all ports
        results['block_all_ports'] = self.uplink_pg_block_all_ports
        block_all_ports_spec = vim.BoolPolicy()
        block_all_ports_spec.inherited = False
        block_all_ports_spec.value = self.uplink_pg_block_all_ports
        if uplink_pg_config.defaultPortConfig.blocked.value != self.uplink_pg_block_all_ports:
            changed = True
            changed_list.append("block all ports")
            results['block_all_ports_previous'] = uplink_pg_config.defaultPortConfig.blocked.value
            uplink_pg_spec.defaultPortConfig.blocked = block_all_ports_spec

        if changed:
            if self.module.check_mode:
                changed_suffix = ' would be changed'
            else:
                changed_suffix = ' changed'
            # Build a human-readable 'a, b, and c' style summary of what changed.
            # 'changed' is only ever set together with an append to changed_list,
            # so 'message' is always bound before the += below.
            if len(changed_list) > 2:
                message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
            elif len(changed_list) == 2:
                message = ' and '.join(changed_list)
            elif len(changed_list) == 1:
                message = changed_list[0]
            message += changed_suffix
            if not self.module.check_mode:
                try:
                    task = self.dvs.config.uplinkPortgroup[0].ReconfigureDVPortgroup_Task(uplink_pg_spec)
                    wait_for_task(task)
                except TaskError as invalid_argument:
                    self.module.fail_json(msg="Failed to update uplink portgroup : %s" % to_native(invalid_argument))
        else:
            message = "Uplink portgroup already configured properly"
        results['changed'] = changed
        results['result'] = message

        self.module.exit_json(**results)

    @staticmethod
    def get_vlan_ids_from_range(vlan_id_range):
        """Get start and end VLAN ID from VLAN ID range.

        Accepts 'start-end' strings, bare VLAN ID strings, and non-string
        values (ints); returns a (start, end) pair, equal for single IDs.
        """
        try:
            vlan_id_start, vlan_id_end = vlan_id_range.split('-')
        except (AttributeError, TypeError):
            # Not a string (e.g. an int VLAN ID) -> single-VLAN range.
            vlan_id_start = vlan_id_end = vlan_id_range
        except ValueError:
            # A string without '-' -> single-VLAN range.
            vlan_id_start = vlan_id_end = vlan_id_range.strip()
        return vlan_id_start, vlan_id_end

    @staticmethod
    def get_lacp_support_mode(mode):
        """Get LACP support mode.

        Translates between the API values ('singleLag'/'multipleLag') and the
        module-facing names ('basic'/'enhanced') in both directions; returns
        None for unknown values.
        """
        return_mode = None
        if mode == 'basic':
            return_mode = 'singleLag'
        elif mode == 'enhanced':
            return_mode = 'multipleLag'
        elif mode == 'singleLag':
            return_mode = 'basic'
        elif mode == 'multipleLag':
            return_mode = 'enhanced'
        return return_mode
|
||||
|
||||
|
||||
def main():
    """Module entry point: build the argument spec and configure the uplink
    portgroup of the given distributed virtual switch."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            switch=dict(type='str', required=True, aliases=['dvswitch']),
            name=dict(type='str'),
            description=dict(type='str'),
            advanced=dict(
                type='dict',
                options=dict(
                    port_config_reset_at_disconnect=dict(type='bool', default=True),
                    block_override=dict(type='bool', default=True),
                    vendor_config_override=dict(type='bool', default=False),
                    vlan_override=dict(type='bool', default=False),
                    netflow_override=dict(type='bool', default=False),
                    traffic_filter_override=dict(type='bool', default=False),
                ),
                default=dict(
                    port_config_reset_at_disconnect=True,
                    block_override=True,
                    vendor_config_override=False,
                    vlan_override=False,
                    netflow_override=False,
                    traffic_filter_override=False,
                ),
                aliases=['port_policy'],
            ),
            lacp=dict(
                type='dict',
                options=dict(
                    # BUGFIX: the suboption defaults were lists (['disabled'] /
                    # ['passive']) for type='str' options, which fails the
                    # 'choices' validation whenever the suboption is omitted.
                    # Defaults must be plain strings matching the choices.
                    status=dict(type='str', choices=['enabled', 'disabled'], default='disabled'),
                    mode=dict(type='str', choices=['active', 'passive'], default='passive'),
                ),
                default=dict(
                    status='disabled',
                    mode='passive',
                ),
            ),
            vlan_trunk_range=dict(type='list', default=['0-4094']),
            netflow_enabled=dict(type='bool', default=False),
            block_all_ports=dict(type='bool', default=False),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    vmware_dvswitch_uplink_pg = VMwareDvSwitchUplinkPortgroup(module)
    # ensure() always terminates the module via exit_json()/fail_json().
    vmware_dvswitch_uplink_pg.ensure()
|
||||
|
||||
|
||||
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
@ -1,232 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2019, Michael Tipton <mike () ibeta.org>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_evc_mode
|
||||
short_description: Enable/Disable EVC mode on vCenter
|
||||
description:
|
||||
- This module can be used to enable/disable EVC mode on vCenter.
|
||||
version_added: 2.9
|
||||
author:
|
||||
- Michael Tipton (@castawayegr)
|
||||
notes:
|
||||
- Tested on vSphere 6.7
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
datacenter_name:
|
||||
description:
|
||||
- The name of the datacenter the cluster belongs to that you want to enable or disable EVC mode on.
|
||||
required: True
|
||||
type: str
|
||||
cluster_name:
|
||||
description:
|
||||
- The name of the cluster to enable or disable EVC mode on.
|
||||
required: True
|
||||
type: str
|
||||
evc_mode:
|
||||
description:
|
||||
- Required for C(state=present).
|
||||
- The EVC mode to enable or disable on the cluster. (intel-broadwell, intel-nehalem, intel-merom, etc.).
|
||||
required: True
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Add or remove EVC mode.
|
||||
choices: [absent, present]
|
||||
default: present
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Enable EVC Mode
|
||||
vmware_evc_mode:
|
||||
hostname: "{{ groups['vcsa'][0] }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ site_password }}"
|
||||
datacenter_name: "{{ datacenter_name }}"
|
||||
cluster_name: "{{ cluster_name }}"
|
||||
evc_mode: "intel-broadwell"
|
||||
state: present
|
||||
delegate_to: localhost
|
||||
register: enable_evc
|
||||
|
||||
- name: Disable EVC Mode
|
||||
vmware_evc_mode:
|
||||
hostname: "{{ groups['vcsa'][0] }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ site_password }}"
|
||||
datacenter_name: "{{ datacenter_name }}"
|
||||
cluster_name: "{{ cluster_name }}"
|
||||
state: absent
|
||||
delegate_to: localhost
|
||||
register: disable_evc
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
result:
|
||||
description: information about performed operation
|
||||
returned: always
|
||||
type: str
|
||||
sample: "EVC Mode for 'intel-broadwell' has been enabled."
|
||||
"""
|
||||
|
||||
# pyVmomi is an optional third-party dependency; the ImportError is swallowed
# here so documentation tooling still works, and the PyVmomi base class
# reports the missing library at runtime.
try:
    from pyVmomi import vim
except ImportError:
    pass
|
||||
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.vmware import (PyVmomi, find_datacenter_by_name, find_cluster_by_name,
|
||||
vmware_argument_spec, wait_for_task, TaskError)
|
||||
|
||||
|
||||
class VMwareEVC(PyVmomi):
    """Manage EVC (Enhanced vMotion Compatibility) mode on a vSphere cluster."""

    def __init__(self, module):
        """Read module parameters; datacenter/cluster objects are resolved
        later by check_evc_configuration()."""
        super(VMwareEVC, self).__init__(module)
        self.cluster_name = module.params['cluster_name']
        self.evc_mode = module.params['evc_mode']
        self.datacenter_name = module.params['datacenter_name']
        self.desired_state = module.params['state']
        self.datacenter = None
        self.cluster = None

    def process_state(self):
        """
        Manage internal states of evc
        """
        # Dispatch table: evc_states[desired_state][current_state] -> handler.
        # Every handler terminates the module via exit_json()/fail_json().
        evc_states = {
            'absent': {
                'present': self.state_disable_evc,
                'absent': self.state_exit_unchanged,
            },
            'present': {
                'present': self.state_update_evc,
                'absent': self.state_enable_evc,
            }
        }
        current_state = self.check_evc_configuration()
        # Based on the desired_state and the current_state call
        # the appropriate method from the dictionary
        evc_states[self.desired_state][current_state]()

    def check_evc_configuration(self):
        """
        Check evc configuration
        Returns: 'Present' if evc enabled, else 'absent'
        """
        try:
            self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
            if self.datacenter is None:
                self.module.fail_json(msg="Datacenter '%s' does not exist." % self.datacenter_name)
            self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name, datacenter_name=self.datacenter)

            if self.cluster is None:
                self.module.fail_json(msg="Cluster '%s' does not exist." % self.cluster_name)
            self.evcm = self.cluster.EvcManager()

            if not self.evcm:
                self.module.fail_json(msg="Unable to get EVC manager for cluster '%s'." % self.cluster_name)
            self.evc_state = self.evcm.evcState
            # currentEVCModeKey is empty/None when EVC is disabled.
            self.current_evc_mode = self.evc_state.currentEVCModeKey

            if not self.current_evc_mode:
                return 'absent'

            return 'present'
        except Exception as generic_exc:
            # Broad catch-all is a deliberate module boundary: any unexpected
            # API error is surfaced as a module failure instead of a traceback.
            self.module.fail_json(msg="Failed to check configuration"
                                      " due to generic exception %s" % to_native(generic_exc))

    def state_exit_unchanged(self):
        """
        Exit without any change
        """
        self.module.exit_json(changed=False, msg="EVC Mode is already disabled on cluster '%s'." % self.cluster_name)

    def state_update_evc(self):
        """
        Update EVC Mode
        """
        changed, result = False, None
        try:
            # Only reconfigure when the mode actually differs; in check mode
            # just report whether a change would happen.
            if not self.module.check_mode and self.current_evc_mode != self.evc_mode:
                evc_task = self.evcm.ConfigureEvcMode_Task(self.evc_mode)
                changed, result = wait_for_task(evc_task)
            if self.module.check_mode and self.current_evc_mode != self.evc_mode:
                changed, result = True, None
            if self.current_evc_mode == self.evc_mode:
                self.module.exit_json(changed=changed, msg="EVC Mode is already set to '%(evc_mode)s' on '%(cluster_name)s'." % self.params)
            self.module.exit_json(changed=changed, msg="EVC Mode has been updated to '%(evc_mode)s' on '%(cluster_name)s'." % self.params)
        except TaskError as invalid_argument:
            self.module.fail_json(msg="Failed to update EVC mode: %s" % to_native(invalid_argument))

    def state_enable_evc(self):
        """
        Enable EVC Mode
        """
        changed, result = False, None
        try:
            if not self.module.check_mode:
                evc_task = self.evcm.ConfigureEvcMode_Task(self.evc_mode)
                changed, result = wait_for_task(evc_task)
            if self.module.check_mode:
                changed, result = True, None
            self.module.exit_json(changed=changed, msg="EVC Mode for '%(evc_mode)s' has been enabled on '%(cluster_name)s'." % self.params)
        except TaskError as invalid_argument:
            self.module.fail_json(msg="Failed to enable EVC mode: %s" % to_native(invalid_argument))

    def state_disable_evc(self):
        """
        Disable EVC Mode
        """
        changed, result = False, None
        try:
            if not self.module.check_mode:
                evc_task = self.evcm.DisableEvcMode_Task()
                changed, result = wait_for_task(evc_task)
            if self.module.check_mode:
                changed, result = True, None
            self.module.exit_json(changed=changed, msg="EVC Mode has been disabled on cluster '%s'." % self.cluster_name)
        except TaskError as invalid_argument:
            self.module.fail_json(msg="Failed to disable EVC mode: %s" % to_native(invalid_argument))
|
||||
|
||||
|
||||
def main():
    """Module entry point: build the argument spec and dispatch to VMwareEVC."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(
        cluster_name=dict(type='str', required=True),
        datacenter_name=dict(type='str', required=True),
        # Per DOCUMENTATION, evc_mode is only "Required for C(state=present)".
        # The previous unconditional required=True contradicted that contract
        # and rejected state=absent calls that omit evc_mode; requiredness is
        # enforced via required_if below instead.
        evc_mode=dict(type='str'),
        state=dict(type='str', default='present', choices=['absent', 'present']),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'present', ['cluster_name', 'datacenter_name', 'evc_mode']]
        ]
    )

    vmware_evc = VMwareEVC(module)
    # process_state() always terminates the module via exit_json()/fail_json().
    vmware_evc.process_state()
|
||||
|
||||
|
||||
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
@ -1,357 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# Copyright: (c) 2018, Diane Wang <dianew@vmware.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_export_ovf
|
||||
short_description: Exports a VMware virtual machine to an OVF file, device files and a manifest file
|
||||
description: >
|
||||
This module can be used to export a VMware virtual machine to OVF template from vCenter server or ESXi host.
|
||||
version_added: '2.8'
|
||||
author:
|
||||
- Diane Wang (@Tomorrow9) <dianew@vmware.com>
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
notes: []
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the virtual machine to export.
|
||||
- This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
|
||||
type: str
|
||||
uuid:
|
||||
description:
|
||||
- Uuid of the virtual machine to export.
|
||||
- This is a required parameter, if parameter C(name) or C(moid) is not supplied.
|
||||
type: str
|
||||
moid:
|
||||
description:
|
||||
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
|
||||
- This is required if C(name) or C(uuid) is not supplied.
|
||||
version_added: '2.9'
|
||||
type: str
|
||||
datacenter:
|
||||
default: ha-datacenter
|
||||
description:
|
||||
- Datacenter name of the virtual machine to export.
|
||||
- This parameter is case sensitive.
|
||||
type: str
|
||||
folder:
|
||||
description:
|
||||
- Destination folder, absolute path to find the specified guest.
|
||||
- The folder should include the datacenter. ESX's datacenter is ha-datacenter.
|
||||
- This parameter is case sensitive.
|
||||
- 'If multiple machines are found with same name, this parameter is used to identify
|
||||
uniqueness of the virtual machine. version_added 2.5'
|
||||
- 'Examples:'
|
||||
- ' folder: /ha-datacenter/vm'
|
||||
- ' folder: ha-datacenter/vm'
|
||||
- ' folder: /datacenter1/vm'
|
||||
- ' folder: datacenter1/vm'
|
||||
- ' folder: /datacenter1/vm/folder1'
|
||||
- ' folder: datacenter1/vm/folder1'
|
||||
- ' folder: /folder1/datacenter1/vm'
|
||||
- ' folder: folder1/datacenter1/vm'
|
||||
- ' folder: /folder1/datacenter1/vm/folder2'
|
||||
type: str
|
||||
export_dir:
|
||||
description:
|
||||
- Absolute path to place the exported files on the server running this task, must have write permission.
|
||||
    - If the directory does not exist, it will be created. A sub-directory named after the VM is also created under this path.
|
||||
required: yes
|
||||
type: path
|
||||
export_with_images:
|
||||
default: false
|
||||
description:
|
||||
- Export an ISO image of the media mounted on the CD/DVD Drive within the virtual machine.
|
||||
type: bool
|
||||
download_timeout:
|
||||
description:
|
||||
    - The user-defined timeout, in seconds, for exporting a file.
|
||||
- If the vmdk file is too large, you can increase the value.
|
||||
default: 30
|
||||
type: int
|
||||
version_added: '2.9'
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- vmware_export_ovf:
|
||||
validate_certs: false
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
name: '{{ vm_name }}'
|
||||
export_with_images: true
|
||||
export_dir: /path/to/ovf_template/
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
instance:
|
||||
description: list of the exported files, if exported from vCenter server, device file is not named with vm name
|
||||
returned: always
|
||||
type: dict
|
||||
sample: None
|
||||
'''
|
||||
|
||||
import os
|
||||
import hashlib
|
||||
from time import sleep
|
||||
from threading import Thread
|
||||
from ansible.module_utils.urls import open_url
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_text, to_bytes
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
|
||||
# pyVmomi/pyVim are optional third-party dependencies; the ImportError is
# swallowed so documentation tooling still works, and the PyVmomi base class
# reports the missing library at runtime.
try:
    from pyVmomi import vim
    from pyVim import connect
except ImportError:
    pass
|
||||
|
||||
|
||||
class LeaseProgressUpdater(Thread):
    """Background thread that keeps an HTTP NFC lease alive by periodically
    reporting the current download progress to vCenter/ESXi.

    The export code updates ``progressPercent`` from the outside and calls
    ``stop()`` once the transfer is finished; the thread also exits on its own
    when the lease reaches the done or error state, or on any API exception.
    """

    def __init__(self, http_nfc_lease, update_interval):
        Thread.__init__(self)
        self._running = True
        self.httpNfcLease = http_nfc_lease
        self.updateInterval = update_interval
        self.progressPercent = 0

    def set_progress_percent(self, progress_percent):
        """Record the latest progress percentage to be reported."""
        self.progressPercent = progress_percent

    def stop(self):
        """Request the updater thread to exit after the current cycle."""
        self._running = False

    def run(self):
        while self._running:
            try:
                if self.httpNfcLease.state == vim.HttpNfcLease.State.done:
                    return
                self.httpNfcLease.HttpNfcLeaseProgress(self.progressPercent)
                # Sleep one second at a time for updateInterval seconds so
                # lease completion or failure is noticed quickly.
                ticks = 0
                while True:
                    if self.httpNfcLease.state in (vim.HttpNfcLease.State.done, vim.HttpNfcLease.State.error):
                        return
                    ticks += 1
                    sleep(1)
                    if ticks == self.updateInterval:
                        break
            except Exception:
                # Any API failure simply ends the updater thread; the export
                # path handles lease errors itself.
                return
|
||||
|
||||
|
||||
class VMwareExportVmOvf(PyVmomi):
    """Export a powered-off virtual machine to OVF: descriptor, device files
    and a SHA256 manifest file, streamed via an HTTP NFC lease."""

    def __init__(self, module):
        super(VMwareExportVmOvf, self).__init__(module)
        # path of the manifest (.mf) file, set by create_export_dir()
        self.mf_file = ''
        # per-VM export directory, set by create_export_dir()
        self.ovf_dir = ''
        # set read device content chunk size to 2 MB
        self.chunk_size = 2 * 2 ** 20
        # set lease progress update interval to 15 seconds
        self.lease_interval = 15
        # returned to the caller as the 'instance' result
        self.facts = {'device_files': []}
        self.download_timeout = None

    def create_export_dir(self, vm_obj):
        """Create <export_dir>/<vm name> and derive the manifest file path."""
        self.ovf_dir = os.path.join(self.params['export_dir'], vm_obj.name)
        if not os.path.exists(self.ovf_dir):
            try:
                os.makedirs(self.ovf_dir)
            except OSError as err:
                self.module.fail_json(msg='Exception caught when create folder %s, with error %s'
                                          % (self.ovf_dir, to_text(err)))
        self.mf_file = os.path.join(self.ovf_dir, vm_obj.name + '.mf')

    def download_device_files(self, headers, temp_target_disk, device_url, lease_updater, total_bytes_written,
                              total_bytes_to_write):
        """Stream one device file from the lease URL to disk.

        Writes the file in chunks, hashes it with SHA256, appends the digest
        to the manifest file, updates the lease progress, and returns the
        number of bytes written. Any failure aborts the lease, stops the
        updater thread and fails the module.
        """
        mf_content = 'SHA256(' + os.path.basename(temp_target_disk) + ')= '
        sha256_hash = hashlib.sha256()
        response = None

        with open(self.mf_file, 'a') as mf_handle:
            with open(temp_target_disk, 'wb') as handle:
                try:
                    response = open_url(device_url, headers=headers, validate_certs=False, timeout=self.download_timeout)
                except Exception as err:
                    lease_updater.httpNfcLease.HttpNfcLeaseAbort()
                    lease_updater.stop()
                    self.module.fail_json(msg='Exception caught when getting %s, %s' % (device_url, to_text(err)))
                if not response:
                    lease_updater.httpNfcLease.HttpNfcLeaseAbort()
                    lease_updater.stop()
                    self.module.fail_json(msg='Getting %s failed' % device_url)
                if response.getcode() >= 400:
                    lease_updater.httpNfcLease.HttpNfcLeaseAbort()
                    lease_updater.stop()
                    self.module.fail_json(msg='Getting %s return code %d' % (device_url, response.getcode()))
                current_bytes_written = 0
                block = response.read(self.chunk_size)
                while block:
                    handle.write(block)
                    sha256_hash.update(block)
                    # flush + fsync per chunk keeps on-disk state consistent
                    # with the reported progress
                    handle.flush()
                    os.fsync(handle.fileno())
                    current_bytes_written += len(block)
                    block = response.read(self.chunk_size)
                written_percent = ((current_bytes_written + total_bytes_written) * 100) / total_bytes_to_write
                # the updater thread reports this value to the lease
                lease_updater.progressPercent = int(written_percent)
            mf_handle.write(mf_content + sha256_hash.hexdigest() + '\n')
        self.facts['device_files'].append(temp_target_disk)
        return current_bytes_written

    def export_to_ovf_files(self, vm_obj):
        """Drive the whole export: acquire the NFC lease, download device
        files, write the OVF descriptor and manifest, and return a result
        dict with 'changed'/'failed'/'instance' keys."""
        self.create_export_dir(vm_obj=vm_obj)
        export_with_iso = False
        if 'export_with_images' in self.params and self.params['export_with_images']:
            export_with_iso = True
        self.download_timeout = self.params['download_timeout']

        ovf_files = []
        # get http nfc lease firstly
        http_nfc_lease = vm_obj.ExportVm()
        # create a thread to track file download progress
        lease_updater = LeaseProgressUpdater(http_nfc_lease, self.lease_interval)
        total_bytes_written = 0
        # total storage space occupied by the virtual machine across all datastores
        total_bytes_to_write = vm_obj.summary.storage.unshared
        # new deployed VM with no OS installed
        if total_bytes_to_write == 0:
            total_bytes_to_write = vm_obj.summary.storage.committed
            if total_bytes_to_write == 0:
                http_nfc_lease.HttpNfcLeaseAbort()
                self.module.fail_json(msg='Total storage space occupied by the VM is 0.')
        headers = {'Accept': 'application/x-vnd.vmware-streamVmdk'}
        # reuse the authenticated session cookie for the file downloads
        cookies = connect.GetStub().cookie
        if cookies:
            headers['Cookie'] = cookies
        lease_updater.start()
        try:
            while True:
                if http_nfc_lease.state == vim.HttpNfcLease.State.ready:
                    for deviceUrl in http_nfc_lease.info.deviceUrl:
                        file_download = False
                        if deviceUrl.targetId and deviceUrl.disk:
                            file_download = True
                        elif deviceUrl.url.split('/')[-1].split('.')[-1] == 'iso':
                            if export_with_iso:
                                file_download = True
                        elif deviceUrl.url.split('/')[-1].split('.')[-1] == 'nvram':
                            # nvram files are only exportable from 6.7.0 hosts
                            if self.host_version_at_least(version=(6, 7, 0), vm_obj=vm_obj):
                                file_download = True
                        else:
                            continue
                        device_file_name = deviceUrl.url.split('/')[-1]
                        # device file named disk-0.iso, disk-1.vmdk, disk-2.vmdk, replace 'disk' with vm name
                        if device_file_name.split('.')[0][0:5] == "disk-":
                            device_file_name = device_file_name.replace('disk', vm_obj.name)
                        temp_target_disk = os.path.join(self.ovf_dir, device_file_name)
                        device_url = deviceUrl.url
                        # if export from ESXi host, replace * with hostname in url
                        # e.g., https://*/ha-nfc/5289bf27-da99-7c0e-3978-8853555deb8c/disk-1.vmdk
                        if '*' in device_url:
                            device_url = device_url.replace('*', self.params['hostname'])
                        if file_download:
                            current_bytes_written = self.download_device_files(headers=headers,
                                                                              temp_target_disk=temp_target_disk,
                                                                              device_url=device_url,
                                                                              lease_updater=lease_updater,
                                                                              total_bytes_written=total_bytes_written,
                                                                              total_bytes_to_write=total_bytes_to_write)
                            total_bytes_written += current_bytes_written
                            ovf_file = vim.OvfManager.OvfFile()
                            ovf_file.deviceId = deviceUrl.key
                            ovf_file.path = device_file_name
                            ovf_file.size = current_bytes_written
                            ovf_files.append(ovf_file)
                    break
                elif http_nfc_lease.state == vim.HttpNfcLease.State.initializing:
                    sleep(2)
                    continue
                elif http_nfc_lease.state == vim.HttpNfcLease.State.error:
                    lease_updater.stop()
                    self.module.fail_json(msg='Get HTTP NFC lease error %s.' % http_nfc_lease.state.error[0].fault)

            # generate ovf file
            ovf_manager = self.content.ovfManager
            ovf_descriptor_name = vm_obj.name
            ovf_parameters = vim.OvfManager.CreateDescriptorParams()
            ovf_parameters.name = ovf_descriptor_name
            ovf_parameters.ovfFiles = ovf_files
            vm_descriptor_result = ovf_manager.CreateDescriptor(obj=vm_obj, cdp=ovf_parameters)
            if vm_descriptor_result.error:
                http_nfc_lease.HttpNfcLeaseAbort()
                lease_updater.stop()
                self.module.fail_json(msg='Create VM descriptor file error %s.' % vm_descriptor_result.error)
            else:
                vm_descriptor = vm_descriptor_result.ovfDescriptor
                ovf_descriptor_path = os.path.join(self.ovf_dir, ovf_descriptor_name + '.ovf')
                sha256_hash = hashlib.sha256()
                with open(self.mf_file, 'a') as mf_handle:
                    with open(ovf_descriptor_path, 'w') as handle:
                        handle.write(vm_descriptor)
                        sha256_hash.update(to_bytes(vm_descriptor))
                    mf_handle.write('SHA256(' + os.path.basename(ovf_descriptor_path) + ')= ' + sha256_hash.hexdigest() + '\n')
                http_nfc_lease.HttpNfcLeaseProgress(100)
                # self.facts = http_nfc_lease.HttpNfcLeaseGetManifest()
                http_nfc_lease.HttpNfcLeaseComplete()
                lease_updater.stop()
                self.facts.update({'manifest': self.mf_file, 'ovf_file': ovf_descriptor_path})
        except Exception as err:
            # Module boundary: abort the lease and report the failure dict
            # instead of letting the exception propagate.
            kwargs = {
                'changed': False,
                'failed': True,
                'msg': "get exception: %s" % to_text(err),
            }
            http_nfc_lease.HttpNfcLeaseAbort()
            lease_updater.stop()
            return kwargs
        return {'changed': True, 'failed': False, 'instance': self.facts}
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = vmware_argument_spec()
|
||||
argument_spec.update(
|
||||
name=dict(type='str'),
|
||||
uuid=dict(type='str'),
|
||||
moid=dict(type='str'),
|
||||
folder=dict(type='str'),
|
||||
datacenter=dict(type='str', default='ha-datacenter'),
|
||||
export_dir=dict(type='path', required=True),
|
||||
export_with_images=dict(type='bool', default=False),
|
||||
download_timeout=dict(type='int', default=30),
|
||||
)
|
||||
|
||||
module = AnsibleModule(argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
required_one_of=[
|
||||
['name', 'uuid', 'moid'],
|
||||
],
|
||||
)
|
||||
pyv = VMwareExportVmOvf(module)
|
||||
vm = pyv.get_vm()
|
||||
if vm:
|
||||
vm_facts = pyv.gather_facts(vm)
|
||||
vm_power_state = vm_facts['hw_power_status'].lower()
|
||||
if vm_power_state != 'poweredoff':
|
||||
module.fail_json(msg='VM state should be poweredoff to export')
|
||||
results = pyv.export_to_ovf_files(vm_obj=vm)
|
||||
module.exit_json(**results)
|
||||
else:
|
||||
module.fail_json(msg='The specified virtual machine not found')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,164 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2019, David Hewitt <davidmhewitt@gmail.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_folder_info
|
||||
short_description: Provides information about folders in a datacenter
|
||||
description:
|
||||
- The module can be used to gather a hierarchical view of the folders that exist within a datacenter
|
||||
version_added: 2.9
|
||||
author:
|
||||
- David Hewitt (@davidmhewitt)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
datacenter:
|
||||
description:
|
||||
- Name of the datacenter.
|
||||
required: true
|
||||
type: str
|
||||
aliases: ['datacenter_name']
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Provide information about vCenter folders
|
||||
vmware_folder_info:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter: datacenter_name
|
||||
delegate_to: localhost
|
||||
register: vcenter_folder_info
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
folder_info:
|
||||
description:
|
||||
- dict about folders
|
||||
returned: success
|
||||
type: str
|
||||
sample:
|
||||
{
|
||||
"datastoreFolders": {
|
||||
"path": "/DC01/datastore",
|
||||
"subfolders": {
|
||||
"Local Datastores": {
|
||||
"path": "/DC01/datastore/Local Datastores",
|
||||
"subfolders": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
"hostFolders": {
|
||||
"path": "/DC01/host",
|
||||
"subfolders": {}
|
||||
},
|
||||
"networkFolders": {
|
||||
"path": "/DC01/network",
|
||||
"subfolders": {}
|
||||
},
|
||||
"vmFolders": {
|
||||
"path": "/DC01/vm",
|
||||
"subfolders": {
|
||||
"Core Infrastructure Servers": {
|
||||
"path": "/DC01/vm/Core Infrastructure Servers",
|
||||
"subfolders": {
|
||||
"Staging Network Services": {
|
||||
"path": "/DC01/vm/Core Infrastructure Servers/Staging Network Services",
|
||||
"subfolders": {}
|
||||
},
|
||||
"VMware": {
|
||||
"path": "/DC01/vm/Core Infrastructure Servers/VMware",
|
||||
"subfolders": {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError as import_err:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
|
||||
|
||||
|
||||
class VmwareFolderInfoManager(PyVmomi):
|
||||
def __init__(self, module):
|
||||
super(VmwareFolderInfoManager, self).__init__(module)
|
||||
self.dc_name = self.params['datacenter']
|
||||
|
||||
def gather_folder_info(self):
|
||||
datacenter = self.find_datacenter_by_name(self.dc_name)
|
||||
if datacenter is None:
|
||||
self.module.fail_json(msg="Failed to find the datacenter %s" % self.dc_name)
|
||||
|
||||
folder_trees = {}
|
||||
folder_trees['vmFolders'] = self.build_folder_tree(datacenter.vmFolder, "/%s/vm" % self.dc_name)
|
||||
folder_trees['hostFolders'] = self.build_folder_tree(datacenter.hostFolder, "/%s/host" % self.dc_name)
|
||||
folder_trees['networkFolders'] = self.build_folder_tree(datacenter.networkFolder, "/%s/network" % self.dc_name)
|
||||
folder_trees['datastoreFolders'] = self.build_folder_tree(datacenter.datastoreFolder, "/%s/datastore" % self.dc_name)
|
||||
|
||||
self.module.exit_json(
|
||||
changed=False,
|
||||
folder_info=folder_trees
|
||||
)
|
||||
|
||||
def build_folder_tree(self, folder, path):
|
||||
tree = {
|
||||
'path': path,
|
||||
'subfolders': {}
|
||||
}
|
||||
|
||||
children = None
|
||||
if hasattr(folder, 'childEntity'):
|
||||
children = folder.childEntity
|
||||
|
||||
if children:
|
||||
for child in children:
|
||||
if child == folder:
|
||||
continue
|
||||
if isinstance(child, vim.Folder):
|
||||
ctree = self.build_folder_tree(child, "%s/%s" % (path, child.name))
|
||||
tree['subfolders'][child.name] = dict.copy(ctree)
|
||||
return tree
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = vmware_argument_spec()
|
||||
argument_spec.update(
|
||||
datacenter=dict(type='str', required=True, aliases=['datacenter_name'])
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
vmware_folder_info_mgr = VmwareFolderInfoManager(module)
|
||||
vmware_folder_info_mgr.gather_folder_info()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
File diff suppressed because it is too large
Load Diff
@ -1,214 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_guest_boot_info
|
||||
short_description: Gather info about boot options for the given virtual machine
|
||||
description:
|
||||
- Gather information about boot options for the given virtual machine.
|
||||
version_added: '2.9'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the VM to work with.
|
||||
- This is required if C(uuid) or C(moid) parameter is not supplied.
|
||||
type: str
|
||||
uuid:
|
||||
description:
|
||||
- UUID of the instance to manage if known, this is VMware's BIOS UUID by default.
|
||||
- This is required if C(name) or C(moid) parameter is not supplied.
|
||||
type: str
|
||||
moid:
|
||||
description:
|
||||
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
|
||||
- This is required if C(name) or C(uuid) is not supplied.
|
||||
type: str
|
||||
use_instance_uuid:
|
||||
description:
|
||||
- Whether to use the VMware instance UUID rather than the BIOS UUID.
|
||||
default: no
|
||||
type: bool
|
||||
name_match:
|
||||
description:
|
||||
- If multiple virtual machines matching the name, use the first or last found.
|
||||
default: 'first'
|
||||
choices: ['first', 'last']
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Gather info about virtual machine's boot order and related parameters
|
||||
vmware_guest_boot_info:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
name: "{{ vm_name }}"
|
||||
register: vm_boot_order_info
|
||||
|
||||
- name: Gather information about virtual machine's boot order using MoID
|
||||
vmware_guest_boot_info:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
moid: "vm-42"
|
||||
register: vm_moid_boot_order_info
|
||||
'''
|
||||
|
||||
RETURN = r"""
|
||||
vm_boot_info:
|
||||
description: metadata about boot order of virtual machine
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"current_boot_order": [
|
||||
"floppy",
|
||||
"disk",
|
||||
"ethernet",
|
||||
"cdrom"
|
||||
],
|
||||
"current_boot_delay": 2000,
|
||||
"current_boot_retry_delay": 22300,
|
||||
"current_boot_retry_enabled": true,
|
||||
"current_enter_bios_setup": true,
|
||||
"current_boot_firmware": "bios",
|
||||
"current_secure_boot_enabled": false,
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, find_vm_by_id
|
||||
|
||||
try:
|
||||
from pyVmomi import vim, VmomiSupport
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
class VmBootInfoManager(PyVmomi):
|
||||
def __init__(self, module):
|
||||
super(VmBootInfoManager, self).__init__(module)
|
||||
self.name = self.params['name']
|
||||
self.uuid = self.params['uuid']
|
||||
self.moid = self.params['moid']
|
||||
self.use_instance_uuid = self.params['use_instance_uuid']
|
||||
self.vm = None
|
||||
|
||||
def _get_vm(self):
|
||||
vms = []
|
||||
|
||||
if self.uuid:
|
||||
if self.use_instance_uuid:
|
||||
vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="use_instance_uuid")
|
||||
else:
|
||||
vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="uuid")
|
||||
if vm_obj is None:
|
||||
self.module.fail_json(msg="Failed to find the virtual machine with UUID : %s" % self.uuid)
|
||||
vms = [vm_obj]
|
||||
|
||||
elif self.name:
|
||||
objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
|
||||
for temp_vm_object in objects:
|
||||
if temp_vm_object.obj.name == self.name:
|
||||
vms.append(temp_vm_object.obj)
|
||||
|
||||
elif self.moid:
|
||||
vm_obj = VmomiSupport.templateOf('VirtualMachine')(self.module.params['moid'], self.si._stub)
|
||||
if vm_obj:
|
||||
vms.append(vm_obj)
|
||||
|
||||
if vms:
|
||||
if self.params.get('name_match') == 'first':
|
||||
self.vm = vms[0]
|
||||
elif self.params.get('name_match') == 'last':
|
||||
self.vm = vms[-1]
|
||||
else:
|
||||
self.module.fail_json(msg="Failed to find virtual machine using %s" % (self.name or self.uuid or self.moid))
|
||||
|
||||
@staticmethod
|
||||
def humanize_boot_order(boot_order):
|
||||
results = []
|
||||
for device in boot_order:
|
||||
if isinstance(device, vim.vm.BootOptions.BootableCdromDevice):
|
||||
results.append('cdrom')
|
||||
elif isinstance(device, vim.vm.BootOptions.BootableDiskDevice):
|
||||
results.append('disk')
|
||||
elif isinstance(device, vim.vm.BootOptions.BootableEthernetDevice):
|
||||
results.append('ethernet')
|
||||
elif isinstance(device, vim.vm.BootOptions.BootableFloppyDevice):
|
||||
results.append('floppy')
|
||||
return results
|
||||
|
||||
def ensure(self):
|
||||
self._get_vm()
|
||||
|
||||
results = dict()
|
||||
if self.vm and self.vm.config:
|
||||
results = dict(
|
||||
current_boot_order=self.humanize_boot_order(self.vm.config.bootOptions.bootOrder),
|
||||
current_boot_delay=self.vm.config.bootOptions.bootDelay,
|
||||
current_enter_bios_setup=self.vm.config.bootOptions.enterBIOSSetup,
|
||||
current_boot_retry_enabled=self.vm.config.bootOptions.bootRetryEnabled,
|
||||
current_boot_retry_delay=self.vm.config.bootOptions.bootRetryDelay,
|
||||
current_boot_firmware=self.vm.config.firmware,
|
||||
current_secure_boot_enabled=self.vm.config.bootOptions.efiSecureBootEnabled
|
||||
)
|
||||
|
||||
self.module.exit_json(changed=False, vm_boot_info=results)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = vmware_argument_spec()
|
||||
argument_spec.update(
|
||||
name=dict(type='str'),
|
||||
uuid=dict(type='str'),
|
||||
moid=dict(type='str'),
|
||||
use_instance_uuid=dict(type='bool', default=False),
|
||||
name_match=dict(
|
||||
choices=['first', 'last'],
|
||||
default='first'
|
||||
),
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
required_one_of=[
|
||||
['name', 'uuid', 'moid']
|
||||
],
|
||||
mutually_exclusive=[
|
||||
['name', 'uuid', 'moid']
|
||||
],
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
pyv = VmBootInfoManager(module)
|
||||
pyv.ensure()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,417 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_guest_boot_manager
|
||||
short_description: Manage boot options for the given virtual machine
|
||||
description:
|
||||
- This module can be used to manage boot options for the given virtual machine.
|
||||
version_added: 2.7
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the VM to work with.
|
||||
- This is required if C(uuid) or C(moid) parameter is not supplied.
|
||||
type: str
|
||||
uuid:
|
||||
description:
|
||||
- UUID of the instance to manage if known, this is VMware's BIOS UUID by default.
|
||||
- This is required if C(name) or C(moid) parameter is not supplied.
|
||||
type: str
|
||||
moid:
|
||||
description:
|
||||
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
|
||||
- This is required if C(name) or C(uuid) is not supplied.
|
||||
version_added: '2.9'
|
||||
type: str
|
||||
use_instance_uuid:
|
||||
description:
|
||||
- Whether to use the VMware instance UUID rather than the BIOS UUID.
|
||||
default: no
|
||||
type: bool
|
||||
version_added: '2.8'
|
||||
boot_order:
|
||||
description:
|
||||
- List of the boot devices.
|
||||
default: []
|
||||
type: list
|
||||
name_match:
|
||||
description:
|
||||
- If multiple virtual machines matching the name, use the first or last found.
|
||||
default: 'first'
|
||||
choices: ['first', 'last']
|
||||
type: str
|
||||
boot_delay:
|
||||
description:
|
||||
- Delay in milliseconds before starting the boot sequence.
|
||||
default: 0
|
||||
type: int
|
||||
enter_bios_setup:
|
||||
description:
|
||||
- If set to C(True), the virtual machine automatically enters BIOS setup the next time it boots.
|
||||
- The virtual machine resets this flag, so that the machine boots proceeds normally.
|
||||
type: 'bool'
|
||||
default: False
|
||||
boot_retry_enabled:
|
||||
description:
|
||||
- If set to C(True), the virtual machine that fails to boot, will try to boot again after C(boot_retry_delay) is expired.
|
||||
- If set to C(False), the virtual machine waits indefinitely for user intervention.
|
||||
type: 'bool'
|
||||
default: False
|
||||
boot_retry_delay:
|
||||
description:
|
||||
- Specify the time in milliseconds between virtual machine boot failure and subsequent attempt to boot again.
|
||||
- If set, will automatically set C(boot_retry_enabled) to C(True) as this parameter is required.
|
||||
default: 0
|
||||
type: int
|
||||
boot_firmware:
|
||||
description:
|
||||
- Choose which firmware should be used to boot the virtual machine.
|
||||
choices: ["bios", "efi"]
|
||||
type: str
|
||||
secure_boot_enabled:
|
||||
description:
|
||||
- Choose if EFI secure boot should be enabled. EFI secure boot can only be enabled with boot_firmware = efi
|
||||
type: 'bool'
|
||||
default: False
|
||||
version_added: '2.8'
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Change virtual machine's boot order and related parameters
|
||||
vmware_guest_boot_manager:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
name: testvm
|
||||
boot_delay: 2000
|
||||
enter_bios_setup: True
|
||||
boot_retry_enabled: True
|
||||
boot_retry_delay: 22300
|
||||
boot_firmware: bios
|
||||
secure_boot_enabled: False
|
||||
boot_order:
|
||||
- floppy
|
||||
- cdrom
|
||||
- ethernet
|
||||
- disk
|
||||
delegate_to: localhost
|
||||
register: vm_boot_order
|
||||
|
||||
- name: Change virtual machine's boot order using Virtual Machine MoID
|
||||
vmware_guest_boot_manager:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
moid: vm-42
|
||||
boot_delay: 2000
|
||||
enter_bios_setup: True
|
||||
boot_retry_enabled: True
|
||||
boot_retry_delay: 22300
|
||||
boot_firmware: bios
|
||||
secure_boot_enabled: False
|
||||
boot_order:
|
||||
- floppy
|
||||
- cdrom
|
||||
- ethernet
|
||||
- disk
|
||||
delegate_to: localhost
|
||||
register: vm_boot_order
|
||||
'''
|
||||
|
||||
RETURN = r"""
|
||||
vm_boot_status:
|
||||
description: metadata about boot order of virtual machine
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"current_boot_order": [
|
||||
"floppy",
|
||||
"disk",
|
||||
"ethernet",
|
||||
"cdrom"
|
||||
],
|
||||
"current_boot_delay": 2000,
|
||||
"current_boot_retry_delay": 22300,
|
||||
"current_boot_retry_enabled": true,
|
||||
"current_enter_bios_setup": true,
|
||||
"current_boot_firmware": "bios",
|
||||
"current_secure_boot_enabled": false,
|
||||
"previous_boot_delay": 10,
|
||||
"previous_boot_retry_delay": 10000,
|
||||
"previous_boot_retry_enabled": true,
|
||||
"previous_enter_bios_setup": false,
|
||||
"previous_boot_firmware": "efi",
|
||||
"previous_secure_boot_enabled": true,
|
||||
"previous_boot_order": [
|
||||
"ethernet",
|
||||
"cdrom",
|
||||
"floppy",
|
||||
"disk"
|
||||
],
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, find_vm_by_id, wait_for_task, TaskError
|
||||
|
||||
try:
|
||||
from pyVmomi import vim, VmomiSupport
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
class VmBootManager(PyVmomi):
|
||||
def __init__(self, module):
|
||||
super(VmBootManager, self).__init__(module)
|
||||
self.name = self.params['name']
|
||||
self.uuid = self.params['uuid']
|
||||
self.moid = self.params['moid']
|
||||
self.use_instance_uuid = self.params['use_instance_uuid']
|
||||
self.vm = None
|
||||
|
||||
def _get_vm(self):
|
||||
vms = []
|
||||
|
||||
if self.uuid:
|
||||
if self.use_instance_uuid:
|
||||
vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="instance_uuid")
|
||||
else:
|
||||
vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="uuid")
|
||||
if vm_obj is None:
|
||||
self.module.fail_json(msg="Failed to find the virtual machine with UUID : %s" % self.uuid)
|
||||
vms = [vm_obj]
|
||||
|
||||
elif self.name:
|
||||
objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
|
||||
for temp_vm_object in objects:
|
||||
if temp_vm_object.obj.name == self.name:
|
||||
vms.append(temp_vm_object.obj)
|
||||
|
||||
elif self.moid:
|
||||
vm_obj = VmomiSupport.templateOf('VirtualMachine')(self.module.params['moid'], self.si._stub)
|
||||
if vm_obj:
|
||||
vms.append(vm_obj)
|
||||
|
||||
if vms:
|
||||
if self.params.get('name_match') == 'first':
|
||||
self.vm = vms[0]
|
||||
elif self.params.get('name_match') == 'last':
|
||||
self.vm = vms[-1]
|
||||
else:
|
||||
self.module.fail_json(msg="Failed to find virtual machine using %s" % (self.name or self.uuid))
|
||||
|
||||
@staticmethod
|
||||
def humanize_boot_order(boot_order):
|
||||
results = []
|
||||
for device in boot_order:
|
||||
if isinstance(device, vim.vm.BootOptions.BootableCdromDevice):
|
||||
results.append('cdrom')
|
||||
elif isinstance(device, vim.vm.BootOptions.BootableDiskDevice):
|
||||
results.append('disk')
|
||||
elif isinstance(device, vim.vm.BootOptions.BootableEthernetDevice):
|
||||
results.append('ethernet')
|
||||
elif isinstance(device, vim.vm.BootOptions.BootableFloppyDevice):
|
||||
results.append('floppy')
|
||||
return results
|
||||
|
||||
def ensure(self):
|
||||
self._get_vm()
|
||||
|
||||
valid_device_strings = ['cdrom', 'disk', 'ethernet', 'floppy']
|
||||
|
||||
boot_order_list = []
|
||||
for device_order in self.params.get('boot_order'):
|
||||
if device_order not in valid_device_strings:
|
||||
self.module.fail_json(msg="Invalid device found [%s], please specify device from ['%s']" % (device_order,
|
||||
"', '".join(valid_device_strings)))
|
||||
if device_order == 'cdrom':
|
||||
first_cdrom = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualCdrom)]
|
||||
if first_cdrom:
|
||||
boot_order_list.append(vim.vm.BootOptions.BootableCdromDevice())
|
||||
elif device_order == 'disk':
|
||||
first_hdd = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualDisk)]
|
||||
if first_hdd:
|
||||
boot_order_list.append(vim.vm.BootOptions.BootableDiskDevice(deviceKey=first_hdd[0].key))
|
||||
elif device_order == 'ethernet':
|
||||
first_ether = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualEthernetCard)]
|
||||
if first_ether:
|
||||
boot_order_list.append(vim.vm.BootOptions.BootableEthernetDevice(deviceKey=first_ether[0].key))
|
||||
elif device_order == 'floppy':
|
||||
first_floppy = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualFloppy)]
|
||||
if first_floppy:
|
||||
boot_order_list.append(vim.vm.BootOptions.BootableFloppyDevice())
|
||||
|
||||
change_needed = False
|
||||
kwargs = dict()
|
||||
if len(boot_order_list) != len(self.vm.config.bootOptions.bootOrder):
|
||||
kwargs.update({'bootOrder': boot_order_list})
|
||||
change_needed = True
|
||||
else:
|
||||
for i in range(0, len(boot_order_list)):
|
||||
boot_device_type = type(boot_order_list[i])
|
||||
vm_boot_device_type = type(self.vm.config.bootOptions.bootOrder[i])
|
||||
if boot_device_type != vm_boot_device_type:
|
||||
kwargs.update({'bootOrder': boot_order_list})
|
||||
change_needed = True
|
||||
|
||||
if self.vm.config.bootOptions.bootDelay != self.params.get('boot_delay'):
|
||||
kwargs.update({'bootDelay': self.params.get('boot_delay')})
|
||||
change_needed = True
|
||||
|
||||
if self.vm.config.bootOptions.enterBIOSSetup != self.params.get('enter_bios_setup'):
|
||||
kwargs.update({'enterBIOSSetup': self.params.get('enter_bios_setup')})
|
||||
change_needed = True
|
||||
|
||||
if self.vm.config.bootOptions.bootRetryEnabled != self.params.get('boot_retry_enabled'):
|
||||
kwargs.update({'bootRetryEnabled': self.params.get('boot_retry_enabled')})
|
||||
change_needed = True
|
||||
|
||||
if self.vm.config.bootOptions.bootRetryDelay != self.params.get('boot_retry_delay'):
|
||||
if not self.vm.config.bootOptions.bootRetryEnabled:
|
||||
kwargs.update({'bootRetryEnabled': True})
|
||||
kwargs.update({'bootRetryDelay': self.params.get('boot_retry_delay')})
|
||||
change_needed = True
|
||||
|
||||
boot_firmware_required = False
|
||||
if self.vm.config.firmware != self.params.get('boot_firmware'):
|
||||
change_needed = True
|
||||
boot_firmware_required = True
|
||||
|
||||
if self.vm.config.bootOptions.efiSecureBootEnabled != self.params.get('secure_boot_enabled'):
|
||||
if self.params.get('secure_boot_enabled') and self.params.get('boot_firmware') == "bios":
|
||||
self.module.fail_json(msg="EFI secure boot cannot be enabled when boot_firmware = bios, but both are specified")
|
||||
|
||||
# If the user is not specifying boot_firmware, make sure they aren't trying to enable it on a
|
||||
# system with boot_firmware already set to 'bios'
|
||||
if self.params.get('secure_boot_enabled') and \
|
||||
self.params.get('boot_firmware') is None and \
|
||||
self.vm.config.firmware == 'bios':
|
||||
self.module.fail_json(msg="EFI secure boot cannot be enabled when boot_firmware = bios. VM's boot_firmware currently set to bios")
|
||||
|
||||
kwargs.update({'efiSecureBootEnabled': self.params.get('secure_boot_enabled')})
|
||||
change_needed = True
|
||||
|
||||
changed = False
|
||||
results = dict(
|
||||
previous_boot_order=self.humanize_boot_order(self.vm.config.bootOptions.bootOrder),
|
||||
previous_boot_delay=self.vm.config.bootOptions.bootDelay,
|
||||
previous_enter_bios_setup=self.vm.config.bootOptions.enterBIOSSetup,
|
||||
previous_boot_retry_enabled=self.vm.config.bootOptions.bootRetryEnabled,
|
||||
previous_boot_retry_delay=self.vm.config.bootOptions.bootRetryDelay,
|
||||
previous_boot_firmware=self.vm.config.firmware,
|
||||
previous_secure_boot_enabled=self.vm.config.bootOptions.efiSecureBootEnabled,
|
||||
current_boot_order=[],
|
||||
)
|
||||
|
||||
if change_needed:
|
||||
vm_conf = vim.vm.ConfigSpec()
|
||||
vm_conf.bootOptions = vim.vm.BootOptions(**kwargs)
|
||||
if boot_firmware_required:
|
||||
vm_conf.firmware = self.params.get('boot_firmware')
|
||||
task = self.vm.ReconfigVM_Task(vm_conf)
|
||||
|
||||
try:
|
||||
changed, result = wait_for_task(task)
|
||||
except TaskError as e:
|
||||
self.module.fail_json(msg="Failed to perform reconfigure virtual"
|
||||
" machine %s for boot order due to: %s" % (self.name or self.uuid,
|
||||
to_native(e)))
|
||||
|
||||
results.update(
|
||||
{
|
||||
'current_boot_order': self.humanize_boot_order(self.vm.config.bootOptions.bootOrder),
|
||||
'current_boot_delay': self.vm.config.bootOptions.bootDelay,
|
||||
'current_enter_bios_setup': self.vm.config.bootOptions.enterBIOSSetup,
|
||||
'current_boot_retry_enabled': self.vm.config.bootOptions.bootRetryEnabled,
|
||||
'current_boot_retry_delay': self.vm.config.bootOptions.bootRetryDelay,
|
||||
'current_boot_firmware': self.vm.config.firmware,
|
||||
'current_secure_boot_enabled': self.vm.config.bootOptions.efiSecureBootEnabled,
|
||||
}
|
||||
)
|
||||
|
||||
self.module.exit_json(changed=changed, vm_boot_status=results)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = vmware_argument_spec()
|
||||
argument_spec.update(
|
||||
name=dict(type='str'),
|
||||
uuid=dict(type='str'),
|
||||
moid=dict(type='str'),
|
||||
use_instance_uuid=dict(type='bool', default=False),
|
||||
boot_order=dict(
|
||||
type='list',
|
||||
default=[],
|
||||
),
|
||||
name_match=dict(
|
||||
choices=['first', 'last'],
|
||||
default='first'
|
||||
),
|
||||
boot_delay=dict(
|
||||
type='int',
|
||||
default=0,
|
||||
),
|
||||
enter_bios_setup=dict(
|
||||
type='bool',
|
||||
default=False,
|
||||
),
|
||||
boot_retry_enabled=dict(
|
||||
type='bool',
|
||||
default=False,
|
||||
),
|
||||
boot_retry_delay=dict(
|
||||
type='int',
|
||||
default=0,
|
||||
),
|
||||
secure_boot_enabled=dict(
|
||||
type='bool',
|
||||
default=False,
|
||||
),
|
||||
boot_firmware=dict(
|
||||
type='str',
|
||||
choices=['efi', 'bios'],
|
||||
)
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
required_one_of=[
|
||||
['name', 'uuid', 'moid']
|
||||
],
|
||||
mutually_exclusive=[
|
||||
['name', 'uuid', 'moid']
|
||||
],
|
||||
)
|
||||
|
||||
pyv = VmBootManager(module)
|
||||
pyv.ensure()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,553 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2019, Ansible Project
|
||||
# Copyright: (c) 2019, VMware, Inc. All Rights Reserved.
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_guest_controller
|
||||
short_description: Manage disk or USB controllers related to virtual machine in given vCenter infrastructure
|
||||
description:
|
||||
- This module can be used to add, remove disk controllers or USB controllers belonging to given virtual machine.
|
||||
- All parameters and VMware object names are case sensitive.
|
||||
version_added: '2.10'
|
||||
author:
|
||||
- Diane Wang (@Tomorrow9) <dianew@vmware.com>
|
||||
notes:
|
||||
- Tested on vSphere 6.0, 6.5 and 6.7
|
||||
requirements:
|
||||
- "python >= 2.7"
|
||||
- PyVmomi
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the virtual machine.
|
||||
- This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
|
||||
type: str
|
||||
uuid:
|
||||
description:
|
||||
- UUID of the instance to gather facts if known, this is VMware's unique identifier.
|
||||
- This is a required parameter, if parameter C(name) or C(moid) is not supplied.
|
||||
type: str
|
||||
moid:
|
||||
description:
|
||||
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
|
||||
- This is required if C(name) or C(uuid) is not supplied.
|
||||
type: str
|
||||
folder:
|
||||
description:
|
||||
- Destination folder, absolute or relative path to find an existing guest.
|
||||
- This is a required parameter, only if multiple VMs are found with same name.
|
||||
- The folder should include the datacenter. ESX's datacenter is ha-datacenter
|
||||
- 'Examples:'
|
||||
- ' folder: /ha-datacenter/vm'
|
||||
- ' folder: ha-datacenter/vm'
|
||||
- ' folder: /datacenter1/vm'
|
||||
- ' folder: datacenter1/vm'
|
||||
- ' folder: /datacenter1/vm/folder1'
|
||||
- ' folder: datacenter1/vm/folder1'
|
||||
- ' folder: /folder1/datacenter1/vm'
|
||||
- ' folder: folder1/datacenter1/vm'
|
||||
- ' folder: /folder1/datacenter1/vm/folder2'
|
||||
type: str
|
||||
datacenter:
|
||||
description:
|
||||
- The datacenter name to which virtual machine belongs to.
|
||||
default: ha-datacenter
|
||||
type: str
|
||||
use_instance_uuid:
|
||||
description:
|
||||
- Whether to use the VMware instance UUID rather than the BIOS UUID.
|
||||
default: no
|
||||
type: bool
|
||||
controllers:
|
||||
description:
|
||||
- A list of disk or USB controllers to add or remove.
|
||||
- Total 4 disk controllers with the same type are allowed per VM.
|
||||
- Total 2 USB controllers are allowed per VM, 1 USB 2.0 and 1 USB 3.0 or 3.1.
|
||||
- For specific guest OS, supported controller types please refer to VMware Compatibility Guide.
|
||||
suboptions:
|
||||
controller_number:
|
||||
description:
|
||||
- Disk controller bus number. When C(state) is set to C(absent), this parameter is required.
|
||||
- When C(type) set to C(usb2) or C(usb3), this parameter is not required.
|
||||
type: int
|
||||
choices:
|
||||
- 0
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
type:
|
||||
description:
|
||||
- Type of disk or USB controller.
|
||||
- From vSphere 6.5 and virtual machine with hardware version 13, C(nvme) controller starts to be supported.
|
||||
required: true
|
||||
type: str
|
||||
choices:
|
||||
- buslogic
|
||||
- lsilogic
|
||||
- lsilogicsas
|
||||
- paravirtual
|
||||
- sata
|
||||
- nvme
|
||||
- usb2
|
||||
- usb3
|
||||
state:
|
||||
description:
|
||||
- Add new controller or remove specified existing controller.
|
||||
- If C(state) is set to C(absent), the specified controller will be removed from virtual machine when there is no disk or device attaching to it.
|
||||
- If specified controller is removed or not exist, no action will be taken only warning message.
|
||||
- If C(state) is set to C(present), new controller with specified type will be added.
|
||||
- If the number of controller with specified controller type reaches it's maximum, no action will be taken only warning message.
|
||||
required: true
|
||||
type: str
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
type: list
|
||||
gather_disk_controller_facts:
|
||||
description:
|
||||
- Whether to collect existing disk and USB controllers facts only.
|
||||
- When this parameter is set to C(True), C(controllers) parameter will be ignored.
|
||||
type: bool
|
||||
sleep_time:
|
||||
description:
|
||||
- 'The sleep time in seconds after VM reconfigure task completes, used when not get the updated VM controller
|
||||
facts after VM reconfiguration.'
|
||||
- This parameter is not required. Maximum value is 600.
|
||||
default: 10
|
||||
type: int
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Add disk and USB 3.0 controllers for virtual machine located by name
|
||||
vmware_guest_controller:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
validate_certs: no
|
||||
name: test_VM
|
||||
controllers:
|
||||
- state: present
|
||||
type: sata
|
||||
- state: present
|
||||
type: nvme
|
||||
- state: present
|
||||
type: usb3
|
||||
delegate_to: localhost
|
||||
register: disk_controller_facts
|
||||
|
||||
- name: Remove disk controllers and USB 2.0 from virtual machine located by moid
|
||||
vmware_guest_controller:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
validate_certs: no
|
||||
moid: vm-33
|
||||
controllers:
|
||||
- state: absent
|
||||
controller_number: 1
|
||||
type: sata
|
||||
- state: absent
|
||||
controller_number: 0
|
||||
type: nvme
|
||||
- state: absent
|
||||
type: usb2
|
||||
delegate_to: localhost
|
||||
register: disk_controller_facts
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
disk_controller_status:
|
||||
description: metadata about the virtual machine's existing disk controllers or after adding or removing operation
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"nvme": {
|
||||
"0": {
|
||||
"controller_busnumber": 0,
|
||||
"controller_controllerkey": 100,
|
||||
"controller_devicekey": 31000,
|
||||
"controller_disks_devicekey": [],
|
||||
"controller_label": "NVME controller 0",
|
||||
"controller_summary": "NVME controller 0",
|
||||
"controller_unitnumber": 30
|
||||
}
|
||||
},
|
||||
"sata": {
|
||||
"0": {
|
||||
"controller_busnumber": 0,
|
||||
"controller_controllerkey": 100,
|
||||
"controller_devicekey": 15000,
|
||||
"controller_disks_devicekey": [
|
||||
16000,
|
||||
16001
|
||||
],
|
||||
"controller_label": "SATA controller 0",
|
||||
"controller_summary": "AHCI",
|
||||
"controller_unitnumber": 24
|
||||
}
|
||||
},
|
||||
"scsi": {
|
||||
"0": {
|
||||
"controller_busnumber": 0,
|
||||
"controller_controllerkey": 100,
|
||||
"controller_devicekey": 1000,
|
||||
"controller_disks_devicekey": [
|
||||
2000
|
||||
],
|
||||
"controller_label": "SCSI controller 0",
|
||||
"controller_summary": "LSI Logic SAS",
|
||||
"controller_unitnumber": 3
|
||||
},
|
||||
"1": {
|
||||
"controller_busnumber": 1,
|
||||
"controller_controllerkey": 100,
|
||||
"controller_devicekey": 1001,
|
||||
"controller_disks_devicekey": [],
|
||||
"controller_label": "SCSI controller 1",
|
||||
"controller_summary": "VMware paravirtual SCSI",
|
||||
"controller_unitnumber": 4
|
||||
}
|
||||
},
|
||||
"usb2": {
|
||||
"0": {
|
||||
"controller_busnumber": 0,
|
||||
"controller_controllerkey": 100,
|
||||
"controller_devicekey": 7000,
|
||||
"controller_disks_devicekey": [],
|
||||
"controller_label": "USB Controller",
|
||||
"controller_summary": "Auto connect Disabled",
|
||||
"controller_unitnumber": 22
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from random import randint
|
||||
import time
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task
|
||||
|
||||
|
||||
class PyVmomiHelper(PyVmomi):
|
||||
def __init__(self, module):
|
||||
super(PyVmomiHelper, self).__init__(module)
|
||||
self.sleep_time = 10
|
||||
self.scsi_device_type = dict(lsilogic=vim.vm.device.VirtualLsiLogicController,
|
||||
paravirtual=vim.vm.device.ParaVirtualSCSIController,
|
||||
buslogic=vim.vm.device.VirtualBusLogicController,
|
||||
lsilogicsas=vim.vm.device.VirtualLsiLogicSASController)
|
||||
self.sata_device_type = vim.vm.device.VirtualAHCIController
|
||||
self.nvme_device_type = vim.vm.device.VirtualNVMEController
|
||||
self.usb_device_type = dict(usb2=vim.vm.device.VirtualUSBController,
|
||||
usb3=vim.vm.device.VirtualUSBXHCIController)
|
||||
self.controller_types = dict(self.scsi_device_type, sata=self.sata_device_type, nvme=self.nvme_device_type)
|
||||
self.controller_types.update(self.usb_device_type)
|
||||
self.config_spec = vim.vm.ConfigSpec()
|
||||
self.config_spec.deviceChange = []
|
||||
self.change_detected = False
|
||||
self.disk_ctl_bus_num_list = dict(sata=list(range(0, 4)),
|
||||
nvme=list(range(0, 4)),
|
||||
scsi=list(range(0, 4)))
|
||||
|
||||
def get_unused_ctl_bus_number(self):
|
||||
"""
|
||||
Get gid of occupied bus numbers of each type of disk controller, update the available bus number list
|
||||
"""
|
||||
for device in self.current_vm_obj.config.hardware.device:
|
||||
if isinstance(device, self.sata_device_type):
|
||||
if len(self.disk_ctl_bus_num_list['sata']) != 0:
|
||||
self.disk_ctl_bus_num_list['sata'].remove(device.busNumber)
|
||||
if isinstance(device, self.nvme_device_type):
|
||||
if len(self.disk_ctl_bus_num_list['nvme']) != 0:
|
||||
self.disk_ctl_bus_num_list['nvme'].remove(device.busNumber)
|
||||
if isinstance(device, tuple(self.scsi_device_type.values())):
|
||||
if len(self.disk_ctl_bus_num_list['scsi']) != 0:
|
||||
self.disk_ctl_bus_num_list['scsi'].remove(device.busNumber)
|
||||
|
||||
def check_ctl_disk_exist(self, ctl_type=None, bus_number=None):
|
||||
"""
|
||||
Check if controller of specified type exists and if there is disk attaching to it
|
||||
Return: Specified controller device, True or False of attaching disks
|
||||
"""
|
||||
ctl_specified = None
|
||||
disks_attached_exist = False
|
||||
if ctl_type is None:
|
||||
return ctl_specified, disks_attached_exist
|
||||
|
||||
for device in self.current_vm_obj.config.hardware.device:
|
||||
if isinstance(device, self.controller_types.get(ctl_type)):
|
||||
if bus_number is not None and device.busNumber != bus_number:
|
||||
continue
|
||||
ctl_specified = device
|
||||
if len(device.device) != 0:
|
||||
disks_attached_exist = True
|
||||
break
|
||||
|
||||
return ctl_specified, disks_attached_exist
|
||||
|
||||
def create_controller(self, ctl_type, bus_number=0):
|
||||
"""
|
||||
Create new disk or USB controller with specified type
|
||||
Args:
|
||||
ctl_type: controller type
|
||||
bus_number: disk controller bus number
|
||||
|
||||
Return: Virtual device spec for virtual controller
|
||||
"""
|
||||
disk_ctl = vim.vm.device.VirtualDeviceSpec()
|
||||
disk_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
|
||||
if ctl_type == 'sata':
|
||||
disk_ctl.device = self.sata_device_type()
|
||||
disk_ctl.device.key = -randint(15000, 19999)
|
||||
elif ctl_type == 'nvme':
|
||||
disk_ctl.device = self.nvme_device_type()
|
||||
disk_ctl.device.key = -randint(31000, 39999)
|
||||
elif ctl_type in self.scsi_device_type.keys():
|
||||
disk_ctl.device = self.scsi_device_type.get(ctl_type)()
|
||||
disk_ctl.device.key = -randint(1000, 6999)
|
||||
disk_ctl.device.hotAddRemove = True
|
||||
disk_ctl.device.sharedBus = 'noSharing'
|
||||
disk_ctl.device.scsiCtlrUnitNumber = 7
|
||||
elif ctl_type in self.usb_device_type.keys():
|
||||
disk_ctl.device = self.usb_device_type.get(ctl_type)()
|
||||
if ctl_type == 'usb2':
|
||||
disk_ctl.device.key = 7000
|
||||
elif ctl_type == 'usb3':
|
||||
disk_ctl.device.key = 14000
|
||||
|
||||
disk_ctl.device.deviceInfo = vim.Description()
|
||||
disk_ctl.device.busNumber = bus_number
|
||||
|
||||
return disk_ctl
|
||||
|
||||
def gather_disk_controller_facts(self):
|
||||
"""
|
||||
Gather existing controller facts
|
||||
|
||||
Return: A dictionary of each type controller facts
|
||||
"""
|
||||
disk_ctl_facts = dict(
|
||||
scsi=dict(),
|
||||
sata=dict(),
|
||||
nvme=dict(),
|
||||
usb2=dict(),
|
||||
usb3=dict()
|
||||
)
|
||||
for device in self.current_vm_obj.config.hardware.device:
|
||||
ctl_facts_dict = dict()
|
||||
if isinstance(device, tuple(self.controller_types.values())):
|
||||
ctl_facts_dict[device.busNumber] = dict(
|
||||
controller_summary=device.deviceInfo.summary,
|
||||
controller_label=device.deviceInfo.label,
|
||||
controller_busnumber=device.busNumber,
|
||||
controller_controllerkey=device.controllerKey,
|
||||
controller_devicekey=device.key,
|
||||
controller_unitnumber=device.unitNumber,
|
||||
controller_disks_devicekey=device.device,
|
||||
)
|
||||
if isinstance(device, tuple(self.scsi_device_type.values())):
|
||||
disk_ctl_facts['scsi'].update(ctl_facts_dict)
|
||||
if isinstance(device, self.nvme_device_type):
|
||||
disk_ctl_facts['nvme'].update(ctl_facts_dict)
|
||||
if isinstance(device, self.sata_device_type):
|
||||
disk_ctl_facts['sata'].update(ctl_facts_dict)
|
||||
if isinstance(device, self.usb_device_type.get('usb2')):
|
||||
disk_ctl_facts['usb2'].update(ctl_facts_dict)
|
||||
if isinstance(device, self.usb_device_type.get('usb3')):
|
||||
disk_ctl_facts['usb3'].update(ctl_facts_dict)
|
||||
|
||||
return disk_ctl_facts
|
||||
|
||||
def sanitize_disk_controller_config(self):
|
||||
"""
|
||||
Check correctness of controller configuration provided by user
|
||||
|
||||
Return: A list of dictionary with checked controller configured
|
||||
"""
|
||||
if not self.params.get('controllers'):
|
||||
self.module.exit_json(changed=False, msg="No controller provided for virtual"
|
||||
" machine '%s' for management." % self.current_vm_obj.name)
|
||||
if 10 != self.params.get('sleep_time') <= 300:
|
||||
self.sleep_time = self.params.get('sleep_time')
|
||||
exec_get_unused_ctl_bus_number = False
|
||||
controller_config = self.params.get('controllers')
|
||||
for ctl_config in controller_config:
|
||||
if ctl_config:
|
||||
if ctl_config['type'] not in self.usb_device_type.keys():
|
||||
if ctl_config['state'] == 'absent' and ctl_config.get('controller_number') is None:
|
||||
self.module.fail_json(msg="Disk controller number is required when removing it.")
|
||||
if ctl_config['state'] == 'present' and not exec_get_unused_ctl_bus_number:
|
||||
self.get_unused_ctl_bus_number()
|
||||
exec_get_unused_ctl_bus_number = True
|
||||
# starts from hardware version 13 nvme controller supported
|
||||
if ctl_config['state'] == 'present' and ctl_config['type'] == 'nvme':
|
||||
vm_hwv = int(self.current_vm_obj.config.version.split('-')[1])
|
||||
if vm_hwv < 13:
|
||||
self.module.fail_json(msg="Can not create new NVMe disk controller due to VM hardware version"
|
||||
" is '%s', not >= 13." % vm_hwv)
|
||||
if exec_get_unused_ctl_bus_number:
|
||||
for ctl_config in controller_config:
|
||||
if ctl_config and ctl_config['state'] == 'present' and ctl_config['type'] not in self.usb_device_type.keys():
|
||||
if ctl_config['type'] in self.scsi_device_type.keys():
|
||||
if len(self.disk_ctl_bus_num_list['scsi']) != 0:
|
||||
ctl_config['controller_number'] = self.disk_ctl_bus_num_list['scsi'].pop(0)
|
||||
else:
|
||||
ctl_config['controller_number'] = None
|
||||
elif ctl_config['type'] == 'sata' or ctl_config['type'] == 'nvme':
|
||||
if len(self.disk_ctl_bus_num_list.get(ctl_config['type'])) != 0:
|
||||
ctl_config['controller_number'] = self.disk_ctl_bus_num_list.get(ctl_config['type']).pop(0)
|
||||
else:
|
||||
ctl_config['controller_number'] = None
|
||||
|
||||
return controller_config
|
||||
|
||||
def configure_disk_controllers(self):
|
||||
"""
|
||||
Do disk controller management, add or remove
|
||||
|
||||
Return: Operation result
|
||||
"""
|
||||
if self.params['gather_disk_controller_facts']:
|
||||
results = {'changed': False, 'failed': False, 'disk_controller_data': self.gather_disk_controller_facts()}
|
||||
return results
|
||||
|
||||
controller_config = self.sanitize_disk_controller_config()
|
||||
for disk_ctl_config in controller_config:
|
||||
if disk_ctl_config and disk_ctl_config['state'] == 'present':
|
||||
# create new USB controller, bus number is 0
|
||||
if disk_ctl_config['type'] in self.usb_device_type.keys():
|
||||
usb_exists, has_disks_attached = self.check_ctl_disk_exist(disk_ctl_config['type'])
|
||||
if usb_exists:
|
||||
self.module.warn("'%s' USB controller already exists, can not add more." % disk_ctl_config['type'])
|
||||
else:
|
||||
disk_controller_new = self.create_controller(disk_ctl_config['type'])
|
||||
self.config_spec.deviceChange.append(disk_controller_new)
|
||||
self.change_detected = True
|
||||
# create other disk controller
|
||||
else:
|
||||
if disk_ctl_config.get('controller_number') is not None:
|
||||
disk_controller_new = self.create_controller(disk_ctl_config['type'], disk_ctl_config.get('controller_number'))
|
||||
self.config_spec.deviceChange.append(disk_controller_new)
|
||||
self.change_detected = True
|
||||
else:
|
||||
if disk_ctl_config['type'] in self.scsi_device_type.keys():
|
||||
self.module.warn("Already 4 SCSI controllers, can not add new '%s' controller." % disk_ctl_config['type'])
|
||||
else:
|
||||
self.module.warn("Already 4 '%s' controllers, can not add new one." % disk_ctl_config['type'])
|
||||
elif disk_ctl_config and disk_ctl_config['state'] == 'absent':
|
||||
existing_ctl, has_disks_attached = self.check_ctl_disk_exist(disk_ctl_config['type'], disk_ctl_config.get('controller_number'))
|
||||
if existing_ctl is not None:
|
||||
if not has_disks_attached:
|
||||
ctl_spec = vim.vm.device.VirtualDeviceSpec()
|
||||
ctl_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
|
||||
ctl_spec.device = existing_ctl
|
||||
self.config_spec.deviceChange.append(ctl_spec)
|
||||
self.change_detected = True
|
||||
else:
|
||||
self.module.warn("Can not remove specified controller, type '%s', bus number '%s',"
|
||||
" there are disks attaching to it." % (disk_ctl_config['type'], disk_ctl_config.get('controller_number')))
|
||||
else:
|
||||
self.module.warn("Not find specified controller to remove, type '%s', bus number '%s'."
|
||||
% (disk_ctl_config['type'], disk_ctl_config.get('controller_number')))
|
||||
|
||||
try:
|
||||
task = self.current_vm_obj.ReconfigVM_Task(spec=self.config_spec)
|
||||
wait_for_task(task)
|
||||
except vim.fault.InvalidDeviceSpec as e:
|
||||
self.module.fail_json(msg="Failed to configure controller on given virtual machine due to invalid"
|
||||
" device spec : %s" % to_native(e.msg),
|
||||
details="Please check ESXi server logs for more details.")
|
||||
except vim.fault.RestrictedVersion as e:
|
||||
self.module.fail_json(msg="Failed to reconfigure virtual machine due to"
|
||||
" product versioning restrictions: %s" % to_native(e.msg))
|
||||
if task.info.state == 'error':
|
||||
results = {'changed': self.change_detected, 'failed': True, 'msg': task.info.error.msg}
|
||||
else:
|
||||
if self.change_detected:
|
||||
time.sleep(self.sleep_time)
|
||||
results = {'changed': self.change_detected, 'failed': False, 'disk_controller_data': self.gather_disk_controller_facts()}
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = vmware_argument_spec()
|
||||
argument_spec.update(
|
||||
name=dict(type='str'),
|
||||
uuid=dict(type='str'),
|
||||
moid=dict(type='str'),
|
||||
folder=dict(type='str'),
|
||||
datacenter=dict(type='str', default='ha-datacenter'),
|
||||
controllers=dict(
|
||||
type='list',
|
||||
elements='dict',
|
||||
required=False,
|
||||
options=dict(
|
||||
state=dict(type='str', choices=['present', 'absent'], required=True),
|
||||
controller_number=dict(type='int', choices=[0, 1, 2, 3], required=False),
|
||||
type=dict(
|
||||
type='str',
|
||||
choices=['sata', 'nvme', 'lsilogic', 'buslogic', 'lsilogicsas', 'paravirtual', 'usb2', 'usb3'],
|
||||
required=True,
|
||||
),
|
||||
),
|
||||
),
|
||||
use_instance_uuid=dict(type='bool', default=False),
|
||||
gather_disk_controller_facts=dict(type='bool', default=False),
|
||||
sleep_time=dict(type='int', default=10),
|
||||
)
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
required_one_of=[
|
||||
['name', 'uuid', 'moid']
|
||||
]
|
||||
)
|
||||
|
||||
if module.params['folder']:
|
||||
# FindByInventoryPath() does not require an absolute path
|
||||
# so we should leave the input folder path unmodified
|
||||
module.params['folder'] = module.params['folder'].rstrip('/')
|
||||
|
||||
pyv = PyVmomiHelper(module)
|
||||
# Check if the VM exists before continuing
|
||||
vm = pyv.get_vm()
|
||||
|
||||
if not vm:
|
||||
# We unable to find the virtual machine user specified
|
||||
# Bail out
|
||||
vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid'))
|
||||
module.fail_json(msg="Unable to manage disk or USB controllers for non-existing virtual machine '%s'." % vm_id)
|
||||
|
||||
# VM exists
|
||||
result = pyv.configure_disk_controllers()
|
||||
if result['failed']:
|
||||
module.fail_json(**result)
|
||||
else:
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,429 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2020, Anusha Hegde <anushah@vmware.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: vmware_guest_cross_vc_clone
|
||||
|
||||
short_description: Cross-vCenter VM/template clone
|
||||
|
||||
version_added: '2.10'
|
||||
|
||||
description:
|
||||
- 'This module can be used for Cross-vCenter vm/template clone'
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the virtual machine or template.
|
||||
- This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
|
||||
type: str
|
||||
uuid:
|
||||
description:
|
||||
- UUID of the vm/template instance to clone from, this is VMware's unique identifier.
|
||||
- This is a required parameter, if parameter C(name) or C(moid) is not supplied.
|
||||
type: str
|
||||
moid:
|
||||
description:
|
||||
- Managed Object ID of the vm/template instance to manage if known, this is a unique identifier only within a single vCenter instance.
|
||||
- This is required if C(name) or C(uuid) is not supplied.
|
||||
type: str
|
||||
use_instance_uuid:
|
||||
description:
|
||||
- Whether to use the VMware instance UUID rather than the BIOS UUID.
|
||||
default: no
|
||||
type: bool
|
||||
destination_vm_name:
|
||||
description:
|
||||
- The name of the cloned VM.
|
||||
type: str
|
||||
required: True
|
||||
destination_vcenter:
|
||||
description:
|
||||
- The hostname or IP address of the destination VCenter.
|
||||
type: str
|
||||
required: True
|
||||
destination_vcenter_username:
|
||||
description:
|
||||
- The username of the destination VCenter.
|
||||
type: str
|
||||
required: True
|
||||
destination_vcenter_password:
|
||||
description:
|
||||
- The password of the destination VCenter.
|
||||
type: str
|
||||
required: True
|
||||
destination_vcenter_port:
|
||||
description:
|
||||
- The port to establish connection in the destination VCenter.
|
||||
type: int
|
||||
default: 443
|
||||
destination_vcenter_validate_certs:
|
||||
description:
|
||||
- Parameter to indicate if certification validation needs to be done on destination VCenter.
|
||||
type: bool
|
||||
default: False
|
||||
destination_host:
|
||||
description:
|
||||
- The name of the destination host.
|
||||
type: str
|
||||
required: True
|
||||
destination_datastore:
|
||||
description:
|
||||
- The name of the destination datastore or the datastore cluster.
|
||||
- If datastore cluster name is specified, we will find the Storage DRS recommended datastore in that cluster.
|
||||
type: str
|
||||
required: True
|
||||
destination_vm_folder:
|
||||
description:
|
||||
- Destination folder, absolute path to deploy the cloned vm.
|
||||
- This parameter is case sensitive.
|
||||
- 'Examples:'
|
||||
- ' folder: vm'
|
||||
- ' folder: ha-datacenter/vm'
|
||||
- ' folder: /datacenter1/vm'
|
||||
type: str
|
||||
required: True
|
||||
destination_resource_pool:
|
||||
description:
|
||||
- Destination resource pool.
|
||||
- If not provided, the destination host's parent's resource pool will be used.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- The state of Virtual Machine deployed.
|
||||
- If set to C(present) and VM does not exists, then VM is created.
|
||||
- If set to C(present) and VM exists, no action is taken.
|
||||
- If set to C(poweredon) and VM does not exists, then VM is created with powered on state.
|
||||
- If set to C(poweredon) and VM exists, no action is taken.
|
||||
type: str
|
||||
required: False
|
||||
default: 'present'
|
||||
choices: [ 'present', 'poweredon' ]
|
||||
|
||||
extends_documentation_fragment:
|
||||
- vmware.documentation
|
||||
|
||||
author:
|
||||
- Anusha Hegde (@anusha94)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Clone template
|
||||
- name: clone a template across VC
|
||||
vmware_guest_cross_vc_clone:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
validate_certs: no
|
||||
name: "test_vm1"
|
||||
destination_vm_name: "cloned_vm_from_template"
|
||||
destination_vcenter: '{{ destination_vcenter_hostname }}'
|
||||
destination_vcenter_username: '{{ destination_vcenter_username }}'
|
||||
destination_vcenter_password: '{{ destination_vcenter_password }}'
|
||||
destination_vcenter_port: '{{ destination_vcenter_port }}'
|
||||
destination_vcenter_validate_certs: '{{ destination_vcenter_validate_certs }}'
|
||||
destination_host: '{{ destination_esxi }}'
|
||||
destination_datastore: '{{ destination_datastore }}'
|
||||
destination_vm_folder: '{{ destination_vm_folder }}'
|
||||
state: present
|
||||
register: cross_vc_clone_from_template
|
||||
|
||||
- name: clone a VM across VC
|
||||
vmware_guest_cross_vc_clone:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
name: "test_vm1"
|
||||
destination_vm_name: "cloned_vm_from_vm"
|
||||
destination_vcenter: '{{ destination_vcenter_hostname }}'
|
||||
destination_vcenter_username: '{{ destination_vcenter_username }}'
|
||||
destination_vcenter_password: '{{ destination_vcenter_password }}'
|
||||
destination_host: '{{ destination_esxi }}'
|
||||
destination_datastore: '{{ destination_datastore }}'
|
||||
destination_vm_folder: '{{ destination_vm_folder }}'
|
||||
state: poweredon
|
||||
register: cross_vc_clone_from_vm
|
||||
|
||||
- name: check_mode support
|
||||
vmware_guest_cross_vc_clone:
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
name: "test_vm1"
|
||||
destination_vm_name: "cloned_vm_from_vm"
|
||||
destination_vcenter: '{{ destination_vcenter_hostname }}'
|
||||
destination_vcenter_username: '{{ destination_vcenter_username }}'
|
||||
destination_vcenter_password: '{{ destination_vcenter_password }}'
|
||||
destination_host: '{{ destination_esxi }}'
|
||||
destination_datastore: '{{ destination_datastore }}'
|
||||
destination_vm_folder: '{{ destination_vm_folder }}'
|
||||
check_mode: yes
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
vm_info:
|
||||
description: metadata about the virtual machine
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"vm_name": "",
|
||||
"vcenter": "",
|
||||
"host": "",
|
||||
"datastore": "",
|
||||
"vm_folder": "",
|
||||
"power_on": ""
|
||||
}
|
||||
'''
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import (PyVmomi, find_hostsystem_by_name,
|
||||
find_datastore_by_name,
|
||||
find_folder_by_name, find_vm_by_name,
|
||||
connect_to_api, vmware_argument_spec,
|
||||
gather_vm_facts, find_obj, find_resource_pool_by_name,
|
||||
wait_for_task, TaskError)
|
||||
from ansible.module_utils._text import to_native
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
class CrossVCCloneManager(PyVmomi):
|
||||
def __init__(self, module):
|
||||
super(CrossVCCloneManager, self).__init__(module)
|
||||
self.config_spec = vim.vm.ConfigSpec()
|
||||
self.clone_spec = vim.vm.CloneSpec()
|
||||
self.relocate_spec = vim.vm.RelocateSpec()
|
||||
self.service_locator = vim.ServiceLocator()
|
||||
self.destination_vcenter = self.params['destination_vcenter']
|
||||
self.destination_vcenter_username = self.params['destination_vcenter_username']
|
||||
self.destination_vcenter_password = self.params['destination_vcenter_password']
|
||||
self.destination_vcenter_port = self.params.get('port', 443)
|
||||
self.destination_vcenter_validate_certs = self.params.get('destination_vcenter_validate_certs', None)
|
||||
|
||||
def get_new_vm_info(self, vm):
|
||||
# to check if vm has been cloned in the destination vc
|
||||
# query for the vm in destination vc
|
||||
# get the host and datastore info
|
||||
# get the power status of the newly cloned vm
|
||||
info = {}
|
||||
vm_obj = find_vm_by_name(content=self.destination_content, vm_name=vm)
|
||||
if vm_obj is None:
|
||||
self.module.fail_json(msg="Newly cloned VM is not found in the destination VCenter")
|
||||
else:
|
||||
vm_facts = gather_vm_facts(self.destination_content, vm_obj)
|
||||
info['vm_name'] = vm
|
||||
info['vcenter'] = self.destination_vcenter
|
||||
info['host'] = vm_facts['hw_esxi_host']
|
||||
info['datastore'] = vm_facts['hw_datastores']
|
||||
info['vm_folder'] = vm_facts['hw_folder']
|
||||
info['power_on'] = vm_facts['hw_power_status']
|
||||
return info
|
||||
|
||||
def clone(self):
|
||||
# clone the vm/template on destination VC
|
||||
vm_folder = find_folder_by_name(content=self.destination_content, folder_name=self.params['destination_vm_folder'])
|
||||
if not vm_folder:
|
||||
self.module.fail_json(msg="Destination folder does not exist. Please refer to the documentation to correctly specify the folder.")
|
||||
vm_name = self.params['destination_vm_name']
|
||||
task = self.vm_obj.Clone(folder=vm_folder, name=vm_name, spec=self.clone_spec)
|
||||
wait_for_task(task)
|
||||
if task.info.state == 'error':
|
||||
result = {'changed': False, 'failed': True, 'msg': task.info.error.msg}
|
||||
else:
|
||||
vm_info = self.get_new_vm_info(vm_name)
|
||||
result = {'changed': True, 'failed': False, 'vm_info': vm_info}
|
||||
return result
|
||||
|
||||
    def sanitize_params(self):
        '''
        Validate user-provided parameters and resolve them into live
        destination-vCenter objects.

        Side effects (attributes set on self):
          - vm_obj: source VM/template managed object (module fails if absent)
          - destination_content: service content of the destination vCenter
          - destination_datastore, destination_host, destination_resource_pool

        Exits the module early (changed=False) when a VM with the requested
        destination name already exists on the destination vCenter.
        '''
        self.vm_obj = self.get_vm()
        if self.vm_obj is None:
            vm_id = self.vm_uuid or self.vm_name or self.moid
            self.module.fail_json(msg="Failed to find the VM/template with %s" % vm_id)

        # connect to destination VC
        self.destination_content = connect_to_api(
            self.module,
            hostname=self.destination_vcenter,
            username=self.destination_vcenter_username,
            password=self.destination_vcenter_password,
            port=self.destination_vcenter_port,
            validate_certs=self.destination_vcenter_validate_certs)

        # Check if vm name already exists in the destination VC
        vm = find_vm_by_name(content=self.destination_content, vm_name=self.params['destination_vm_name'])
        if vm:
            self.module.exit_json(changed=False, msg="A VM with the given name already exists")

        # destination_datastore may name either a plain datastore or a
        # datastore cluster (StoragePod); for a cluster, pick a member.
        datastore_name = self.params['destination_datastore']
        datastore_cluster = find_obj(self.destination_content, [vim.StoragePod], datastore_name)
        if datastore_cluster:
            # If user specified datastore cluster so get recommended datastore
            datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
        # Check if get_recommended_datastore or user specified datastore exists or not
        self.destination_datastore = find_datastore_by_name(content=self.destination_content, datastore_name=datastore_name)
        if self.destination_datastore is None:
            self.module.fail_json(msg="Destination datastore not found.")

        self.destination_host = find_hostsystem_by_name(content=self.destination_content, hostname=self.params['destination_host'])
        if self.destination_host is None:
            self.module.fail_json(msg="Destination host not found.")

        # When no resource pool is requested, fall back to the root resource
        # pool of the destination host's parent compute resource.
        if self.params['destination_resource_pool']:
            self.destination_resource_pool = find_resource_pool_by_name(
                content=self.destination_content,
                resource_pool_name=self.params['destination_resource_pool'])
        else:
            self.destination_resource_pool = self.destination_host.parent.resourcePool
|
||||
|
||||
def populate_specs(self):
|
||||
# populate service locator
|
||||
self.service_locator.instanceUuid = self.destination_content.about.instanceUuid
|
||||
self.service_locator.url = "https://" + self.destination_vcenter + ":" + str(self.params['port']) + "/sdk"
|
||||
creds = vim.ServiceLocatorNamePassword()
|
||||
creds.username = self.destination_vcenter_username
|
||||
creds.password = self.destination_vcenter_password
|
||||
self.service_locator.credential = creds
|
||||
|
||||
# populate relocate spec
|
||||
self.relocate_spec.datastore = self.destination_datastore
|
||||
self.relocate_spec.pool = self.destination_resource_pool
|
||||
self.relocate_spec.service = self.service_locator
|
||||
self.relocate_spec.host = self.destination_host
|
||||
|
||||
# populate clone spec
|
||||
self.clone_spec.config = self.config_spec
|
||||
self.clone_spec.powerOn = True if self.params['state'].lower() == 'poweredon' else False
|
||||
self.clone_spec.location = self.relocate_spec
|
||||
|
||||
    def get_recommended_datastore(self, datastore_cluster_obj=None):
        """
        Function to return Storage DRS recommended datastore from datastore cluster

        Args:
            datastore_cluster_obj: datastore cluster (StoragePod) managed object

        Returns: Name of the SDRS-recommended datastore, the member datastore
                 with the most free space when SDRS is unavailable, or None
        """
        if datastore_cluster_obj is None:
            return None
        # Check if Datastore Cluster provided by user is SDRS ready
        sdrs_status = datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled
        if sdrs_status:
            # We can get storage recommendation only if SDRS is enabled on given datastore cluster
            pod_sel_spec = vim.storageDrs.PodSelectionSpec()
            pod_sel_spec.storagePod = datastore_cluster_obj
            storage_spec = vim.storageDrs.StoragePlacementSpec()
            storage_spec.podSelectionSpec = pod_sel_spec
            storage_spec.type = 'create'

            try:
                # NOTE(review): this queries self.content (the *source*
                # vCenter's storageResourceManager) although the cluster was
                # looked up on the destination vCenter — confirm whether
                # self.destination_content should be used here instead.
                rec = self.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec)
                rec_action = rec.recommendations[0].action[0]
                return rec_action.destination.name
            except Exception:
                # There is some error so we fall back to general workflow
                pass
        # Fallback: pick the member datastore with the most free space.
        datastore = None
        datastore_freespace = 0
        for ds in datastore_cluster_obj.childEntity:
            if isinstance(ds, vim.Datastore) and ds.summary.freeSpace > datastore_freespace:
                # If datastore field is provided, filter destination datastores
                if not self.is_datastore_valid(datastore_obj=ds):
                    continue

                datastore = ds
                datastore_freespace = ds.summary.freeSpace
        if datastore:
            return datastore.name
        return None
|
||||
|
||||
|
||||
def main():
    """
    Module entry point: clone a VM/template to another vCenter.
    """
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        destination_vm_name=dict(type='str', required=True),
        destination_datastore=dict(type='str', required=True),
        destination_host=dict(type='str', required=True),
        destination_vcenter=dict(type='str', required=True),
        destination_vcenter_username=dict(type='str', required=True),
        destination_vcenter_password=dict(type='str', required=True, no_log=True),
        destination_vcenter_port=dict(type='int', default=443),
        destination_vcenter_validate_certs=dict(type='bool', default=False),
        destination_vm_folder=dict(type='str', required=True),
        destination_resource_pool=dict(type='str', default=None),
        state=dict(type='str', default='present',
                   choices=['present', 'poweredon'])
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[
            ['uuid', 'name', 'moid'],
        ],
        mutually_exclusive=[
            ['uuid', 'name', 'moid'],
        ],
    )
    result = {'failed': False, 'changed': False}

    if module.check_mode:
        # Check mode: report the intended clone without touching vCenter.
        state = module.params['state']
        if state in ('present', 'poweredon'):
            operation = ('Create VM with PowerON State' if state == 'poweredon'
                         else 'Create VM with PowerOff State')
            result.update(
                vm_name=module.params['destination_vm_name'],
                vcenter=module.params['destination_vcenter'],
                host=module.params['destination_host'],
                datastore=module.params['destination_datastore'],
                vm_folder=module.params['destination_vm_folder'],
                state=state,
                changed=True,
                desired_operation=operation,
            )
        module.exit_json(**result)

    # Real run: validate, build the specs, then clone.
    clone_manager = CrossVCCloneManager(module)
    clone_manager.sanitize_params()
    clone_manager.populate_specs()
    result = clone_manager.clone()

    if result['failed']:
        module.fail_json(**result)
    else:
        module.exit_json(**result)
|
||||
|
||||
|
||||
# Standard Ansible module entry point when executed as a script.
if __name__ == '__main__':
    main()
|
@ -1,152 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_guest_custom_attribute_defs
|
||||
short_description: Manage custom attributes definitions for virtual machine from VMware
|
||||
description:
|
||||
- This module can be used to add and remove custom attributes definitions for the given virtual machine from VMware.
|
||||
version_added: 2.7
|
||||
author:
|
||||
- Jimmy Conner (@cigamit)
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
attribute_key:
|
||||
description:
|
||||
- Name of the custom attribute definition.
|
||||
- This is a required parameter if C(state) is set to C(present) or C(absent).
|
||||
required: False
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Manage definition of custom attributes.
|
||||
- If set to C(present) and definition not present, then custom attribute definition is created.
|
||||
- If set to C(present) and definition is present, then no action taken.
|
||||
- If set to C(absent) and definition is present, then custom attribute definition is removed.
|
||||
- If set to C(absent) and definition is absent, then no action taken.
|
||||
default: 'present'
|
||||
choices: ['present', 'absent']
|
||||
required: True
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Add VMware Attribute Definition
|
||||
vmware_guest_custom_attribute_defs:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
state: present
|
||||
attribute_key: custom_attr_def_1
|
||||
delegate_to: localhost
|
||||
register: defs
|
||||
|
||||
- name: Remove VMware Attribute Definition
|
||||
vmware_guest_custom_attribute_defs:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
state: absent
|
||||
attribute_key: custom_attr_def_1
|
||||
delegate_to: localhost
|
||||
register: defs
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
custom_attribute_defs:
|
||||
description: list of all current attribute definitions
|
||||
returned: always
|
||||
type: list
|
||||
sample: ["sample_5", "sample_4"]
|
||||
"""
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
class VmAttributeDefManager(PyVmomi):
    """Manage vSphere custom attribute definitions scoped to virtual machines."""

    def __init__(self, module):
        super(VmAttributeDefManager, self).__init__(module)

    def remove_custom_def(self, field):
        """Remove the VM-scoped custom attribute definition named ``field``.

        Returns a result dict with ``changed``, ``failed`` and
        ``custom_attribute_defs`` (names of the definitions iterated before
        the match — NOTE(review): definitions after the removed one are
        omitted because the loop breaks; confirm this truncation is intended).
        """
        changed = False
        f = dict()
        for x in self.custom_field_mgr:
            # Only definitions whose managed object type is VirtualMachine
            # are candidates for removal.
            if x.name == field and x.managedObjectType == vim.VirtualMachine:
                changed = True
                if not self.module.check_mode:
                    self.content.customFieldsManager.RemoveCustomFieldDef(key=x.key)
                break
            f[x.name] = (x.key, x.managedObjectType)
        return {'changed': changed, 'failed': False, 'custom_attribute_defs': list(f.keys())}

    def add_custom_def(self, field):
        """Create the custom attribute definition named ``field`` if absent.

        NOTE(review): the existence check matches on name only, without
        checking managedObjectType — a same-named definition for a different
        object type suppresses creation; confirm this is intended.
        """
        changed = False
        found = False
        f = dict()
        for x in self.custom_field_mgr:
            if x.name == field:
                found = True
            f[x.name] = (x.key, x.managedObjectType)

        if not found:
            changed = True
            if not self.module.check_mode:
                new_field = self.content.customFieldsManager.AddFieldDefinition(name=field, moType=vim.VirtualMachine)
                f[new_field.name] = (new_field.key, new_field.type)
        return {'changed': changed, 'failed': False, 'custom_attribute_defs': list(f.keys())}
|
||||
|
||||
|
||||
def main():
    """Module entry point: add or remove a VM custom attribute definition."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        attribute_key=dict(type='str'),
        state=dict(type='str', default='present', choices=['absent', 'present']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'present', ['attribute_key']],
            ['state', 'absent', ['attribute_key']],
        ]
    )

    manager = VmAttributeDefManager(module)
    requested_state = module.params['state']
    attribute_key = module.params['attribute_key']

    # Dispatch on the requested state; choices guarantee one of the two.
    if requested_state == "present":
        results = manager.add_custom_def(attribute_key)
    elif requested_state == "absent":
        results = manager.remove_custom_def(attribute_key)
    else:
        results = dict(changed=False, custom_attribute_defs=list())

    module.exit_json(**results)
|
||||
|
||||
|
||||
# Standard Ansible module entry point when executed as a script.
if __name__ == '__main__':
    main()
|
@ -1,259 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright, (c) 2018, Ansible Project
|
||||
# Copyright, (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
#
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_guest_custom_attributes
|
||||
short_description: Manage custom attributes from VMware for the given virtual machine
|
||||
description:
|
||||
- This module can be used to add, remove and update custom attributes for the given virtual machine.
|
||||
version_added: 2.7
|
||||
author:
|
||||
- Jimmy Conner (@cigamit)
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the virtual machine to work with.
|
||||
- This is required parameter, if C(uuid) or C(moid) is not supplied.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- The action to take.
|
||||
- If set to C(present), then custom attribute is added or updated.
|
||||
- If set to C(absent), then custom attribute is removed.
|
||||
default: 'present'
|
||||
choices: ['present', 'absent']
|
||||
type: str
|
||||
uuid:
|
||||
description:
|
||||
- UUID of the virtual machine to manage if known. This is VMware's unique identifier.
|
||||
- This is required parameter, if C(name) or C(moid) is not supplied.
|
||||
type: str
|
||||
moid:
|
||||
description:
|
||||
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
|
||||
- This is required if C(name) or C(uuid) is not supplied.
|
||||
version_added: '2.9'
|
||||
type: str
|
||||
use_instance_uuid:
|
||||
description:
|
||||
- Whether to use the VMware instance UUID rather than the BIOS UUID.
|
||||
default: no
|
||||
type: bool
|
||||
version_added: '2.8'
|
||||
folder:
|
||||
description:
|
||||
- Absolute path to find an existing guest.
|
||||
- This is required parameter, if C(name) is supplied and multiple virtual machines with same name are found.
|
||||
type: str
|
||||
datacenter:
|
||||
description:
|
||||
- Datacenter name where the virtual machine is located in.
|
||||
required: True
|
||||
type: str
|
||||
attributes:
|
||||
description:
|
||||
- A list of names and values of custom attributes that need to be managed.
|
||||
- Value of custom attribute is not required and will be ignored, if C(state) is set to C(absent).
|
||||
default: []
|
||||
type: list
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Add virtual machine custom attributes
|
||||
vmware_guest_custom_attributes:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
uuid: 421e4592-c069-924d-ce20-7e7533fab926
|
||||
state: present
|
||||
attributes:
|
||||
- name: MyAttribute
|
||||
value: MyValue
|
||||
delegate_to: localhost
|
||||
register: attributes
|
||||
|
||||
- name: Add multiple virtual machine custom attributes
|
||||
vmware_guest_custom_attributes:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
uuid: 421e4592-c069-924d-ce20-7e7533fab926
|
||||
state: present
|
||||
attributes:
|
||||
- name: MyAttribute
|
||||
value: MyValue
|
||||
- name: MyAttribute2
|
||||
value: MyValue2
|
||||
delegate_to: localhost
|
||||
register: attributes
|
||||
|
||||
- name: Remove virtual machine Attribute
|
||||
vmware_guest_custom_attributes:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
uuid: 421e4592-c069-924d-ce20-7e7533fab926
|
||||
state: absent
|
||||
attributes:
|
||||
- name: MyAttribute
|
||||
delegate_to: localhost
|
||||
register: attributes
|
||||
|
||||
- name: Remove virtual machine Attribute using Virtual Machine MoID
|
||||
vmware_guest_custom_attributes:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
moid: vm-42
|
||||
state: absent
|
||||
attributes:
|
||||
- name: MyAttribute
|
||||
delegate_to: localhost
|
||||
register: attributes
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
custom_attributes:
|
||||
description: metadata about the virtual machine attributes
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"mycustom": "my_custom_value",
|
||||
"mycustom_2": "my_custom_value_2",
|
||||
"sample_1": "sample_1_value",
|
||||
"sample_2": "sample_2_value",
|
||||
"sample_3": "sample_3_value"
|
||||
}
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
|
||||
|
||||
|
||||
class VmAttributeManager(PyVmomi):
    """Apply (or blank out) custom attribute values on a virtual machine."""

    def __init__(self, module):
        super(VmAttributeManager, self).__init__(module)

    def set_custom_field(self, vm, user_fields):
        """Set each attribute in ``user_fields`` on ``vm``.

        ``user_fields`` is a list of dicts with key 'name' and optional
        'value'. Missing definitions are created on the fly (outside check
        mode). Returns a dict with 'changed', 'failed' and the applied
        values under 'custom_attributes'.
        """
        result_fields = dict()
        change_list = list()
        changed = False

        for field in user_fields:
            # Existing definition object, or False when it does not exist yet.
            field_key = self.check_exists(field['name'])
            found = False
            # For state=absent the caller passes no 'value', so the field is
            # reset to an empty string rather than removed.
            field_value = field.get('value', '')

            # Pair each definition name with this VM's current value for it.
            for k, v in [(x.name, v.value) for x in self.custom_field_mgr for v in vm.customValue if x.key == v.key]:
                if k == field['name']:
                    found = True
                    # Only update when the stored value actually differs.
                    if v != field_value:
                        if not self.module.check_mode:
                            self.content.customFieldsManager.SetField(entity=vm, key=field_key.key, value=field_value)
                        result_fields[k] = field_value
                        change_list.append(True)
            if not found and field_value != "":
                # Definition missing entirely: create it, then set the value.
                if not field_key and not self.module.check_mode:
                    field_key = self.content.customFieldsManager.AddFieldDefinition(name=field['name'], moType=vim.VirtualMachine)
                change_list.append(True)
                if not self.module.check_mode:
                    self.content.customFieldsManager.SetField(entity=vm, key=field_key.key, value=field_value)
                result_fields[field['name']] = field_value

        if any(change_list):
            changed = True

        return {'changed': changed, 'failed': False, 'custom_attributes': result_fields}

    def check_exists(self, field):
        """Return the custom field definition named ``field``, else False."""
        for x in self.custom_field_mgr:
            if x.name == field:
                return x
        return False
|
||||
|
||||
|
||||
def main():
    """Module entry point: manage custom attribute values on one VM."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datacenter=dict(type='str'),
        name=dict(type='str'),
        folder=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        state=dict(type='str', default='present',
                   choices=['absent', 'present']),
        attributes=dict(
            type='list',
            default=[],
            options=dict(
                name=dict(type='str', required=True),
                value=dict(type='str'),
            )
        ),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[
            ['name', 'uuid', 'moid']
        ],
    )

    folder_path = module.params.get('folder')
    if folder_path:
        # FindByInventoryPath() does not require an absolute path, so the
        # user-supplied folder path is kept as-is apart from a trailing '/'.
        module.params['folder'] = folder_path.rstrip('/')

    manager = VmAttributeManager(module)

    # The virtual machine must exist before attributes can be managed.
    vm = manager.get_vm()
    if not vm:
        vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid'))
        module.fail_json(msg="Unable to manage custom attributes for non-existing"
                             " virtual machine %s" % vm_id)

    # Both states funnel through set_custom_field(); for 'absent' the
    # attribute values default to empty strings inside that method.
    if module.params['state'] in ("present", "absent"):
        results = manager.set_custom_field(vm, module.params['attributes'])
    else:
        results = {'changed': False, 'failed': False, 'instance': dict()}
    module.exit_json(**results)
|
||||
|
||||
|
||||
# Standard Ansible module entry point when executed as a script.
if __name__ == '__main__':
    main()
|
@ -1,193 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_guest_customization_info
|
||||
short_description: Gather info about VM customization specifications
|
||||
description:
|
||||
- This module can be used to gather information about customization specifications.
|
||||
- All parameters and VMware object names are case sensitive.
|
||||
version_added: '2.9'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde)
|
||||
notes:
|
||||
- Tested on vSphere 6.0 and 6.5
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
spec_name:
|
||||
description:
|
||||
- Name of customization specification to find.
|
||||
required: False
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather info about all customization specification
|
||||
vmware_guest_customization_info:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
delegate_to: localhost
|
||||
register: all_custom_spec_info
|
||||
|
||||
- name: Gather info about customization specification with the given name
|
||||
vmware_guest_customization_info:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
spec_name: custom_linux_spec
|
||||
delegate_to: localhost
|
||||
register: custom_spec_info
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
custom_spec_info:
|
||||
description: metadata about the customization specification
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"assignip-eee0d684-44b7-457c-8c55-2585590b0d99": {
|
||||
"change_version": "1523438001",
|
||||
"description": "sample description",
|
||||
"dns_server_list": [],
|
||||
"dns_suffix_list": [],
|
||||
"domain": "None",
|
||||
"hostname": "sample1",
|
||||
"hw_clock_utc": null,
|
||||
"last_updated_time": "2018-04-11T09:13:21+00:00",
|
||||
"name": "sample",
|
||||
"nic_setting_map": [
|
||||
{
|
||||
"dns_domain": null,
|
||||
"gateway": [],
|
||||
"ip_address": "192.168.10.10",
|
||||
"net_bios": null,
|
||||
"nic_dns_server_list": [],
|
||||
"primary_wins": null,
|
||||
"secondry_wins": null,
|
||||
"subnet_mask": "255.255.255.0"
|
||||
}
|
||||
],
|
||||
"time_zone": null,
|
||||
"type": "Linux"
|
||||
},
|
||||
}
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_text
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
|
||||
|
||||
|
||||
class VmwareCustomSpecManger(PyVmomi):
    """Read-only access to vCenter guest customization specifications."""

    def __init__(self, module):
        super(VmwareCustomSpecManger, self).__init__(module)
        # Customization spec manager lives on the vCenter service content.
        self.cc_mgr = self.content.customizationSpecManager
        if self.cc_mgr is None:
            self.module.fail_json(msg="Failed to get customization spec manager.")

    def gather_custom_spec_info(self):
        """
        Gather information about customization specifications.

        Returns a dict keyed by spec name. When 'spec_name' was supplied
        only that spec is returned (module fails if it does not exist);
        otherwise every spec known to the manager is included.
        """
        spec_name = self.params.get('spec_name', None)
        specs_list = []
        if spec_name:
            if self.cc_mgr.DoesCustomizationSpecExist(name=spec_name):
                specs_list.append(spec_name)
            else:
                self.module.fail_json(msg="Unable to find customization specification named '%s'" % spec_name)
        else:
            available_specs = self.cc_mgr.info
            for spec_info in available_specs:
                specs_list.append(spec_info.name)

        spec_info = dict()
        for spec in specs_list:
            current_spec = self.cc_mgr.GetCustomizationSpec(name=spec)
            adapter_mapping_list = []
            for nic in current_spec.spec.nicSettingMap:
                temp_data = dict(
                    mac_address=nic.macAddress,
                    ip_address=nic.adapter.ip.ipAddress,
                    subnet_mask=nic.adapter.subnetMask,
                    gateway=[gw for gw in nic.adapter.gateway],
                    nic_dns_server_list=[ndsl for ndsl in nic.adapter.dnsServerList],
                    dns_domain=nic.adapter.dnsDomain,
                    primary_wins=nic.adapter.primaryWINS,
                    # NOTE: 'secondry_wins' misspelling is part of the
                    # module's documented return contract; do not "fix" it.
                    secondry_wins=nic.adapter.secondaryWINS,
                    net_bios=nic.adapter.netBIOS,
                )
                adapter_mapping_list.append(temp_data)

            # hostName may be a fixed name or a prefix-based generator.
            current_hostname = None
            if isinstance(current_spec.spec.identity.hostName, vim.vm.customization.PrefixNameGenerator):
                current_hostname = current_spec.spec.identity.hostName.base
            elif isinstance(current_spec.spec.identity.hostName, vim.vm.customization.FixedName):
                current_hostname = current_spec.spec.identity.hostName.name

            # NOTE(review): the identity accessors below (.domain, .timeZone,
            # .hwClockUTC) assume a LinuxPrep identity — Windows Sysprep
            # identities lack these attributes; confirm behavior for
            # Windows customization specs.
            spec_info[spec] = dict(
                # Spec
                name=current_spec.info.name,
                description=current_spec.info.description,
                type=current_spec.info.type,
                last_updated_time=current_spec.info.lastUpdateTime,
                change_version=current_spec.info.changeVersion,
                # Identity
                hostname=current_hostname,
                domain=current_spec.spec.identity.domain,
                time_zone=current_spec.spec.identity.timeZone,
                hw_clock_utc=current_spec.spec.identity.hwClockUTC,
                # global IP Settings
                dns_suffix_list=[i for i in current_spec.spec.globalIPSettings.dnsSuffixList],
                dns_server_list=[i for i in current_spec.spec.globalIPSettings.dnsServerList],
                # NIC setting map
                nic_setting_map=adapter_mapping_list,
            )
        return spec_info
|
||||
|
||||
|
||||
def main():
    """Module entry point: gather customization specification info."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        spec_name=dict(type='str'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    spec_manager = VmwareCustomSpecManger(module)
    # Any pyVmomi/API failure during gathering becomes a module failure;
    # exit_json raises SystemExit, which is not an Exception, so calling it
    # outside the try is equivalent to the original inline call.
    try:
        info = spec_manager.gather_custom_spec_info()
    except Exception as exc:
        module.fail_json(msg="Failed to gather information with exception : %s" % to_text(exc))
    module.exit_json(custom_spec_info=info)
|
||||
|
||||
|
||||
# Standard Ansible module entry point when executed as a script.
if __name__ == '__main__':
    main()
|
@ -1,886 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_guest_disk
|
||||
short_description: Manage disks related to virtual machine in given vCenter infrastructure
|
||||
description:
|
||||
- This module can be used to add, remove and update disks belonging to given virtual machine.
|
||||
- All parameters and VMware object names are case sensitive.
|
||||
- This module is destructive in nature, please read documentation carefully before proceeding.
|
||||
- Be careful while removing disk specified as this may lead to data loss.
|
||||
version_added: 2.8
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
|
||||
notes:
|
||||
- Tested on vSphere 6.0 and 6.5
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the virtual machine.
|
||||
- This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
|
||||
type: str
|
||||
uuid:
|
||||
description:
|
||||
- UUID of the instance to gather facts if known, this is VMware's unique identifier.
|
||||
- This is a required parameter, if parameter C(name) or C(moid) is not supplied.
|
||||
type: str
|
||||
moid:
|
||||
description:
|
||||
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
|
||||
- This is required if C(name) or C(uuid) is not supplied.
|
||||
version_added: '2.9'
|
||||
type: str
|
||||
folder:
|
||||
description:
|
||||
- Destination folder, absolute or relative path to find an existing guest.
|
||||
- This is a required parameter, only if multiple VMs are found with same name.
|
||||
- The folder should include the datacenter. ESX's datacenter is ha-datacenter
|
||||
- 'Examples:'
|
||||
- ' folder: /ha-datacenter/vm'
|
||||
- ' folder: ha-datacenter/vm'
|
||||
- ' folder: /datacenter1/vm'
|
||||
- ' folder: datacenter1/vm'
|
||||
- ' folder: /datacenter1/vm/folder1'
|
||||
- ' folder: datacenter1/vm/folder1'
|
||||
- ' folder: /folder1/datacenter1/vm'
|
||||
- ' folder: folder1/datacenter1/vm'
|
||||
- ' folder: /folder1/datacenter1/vm/folder2'
|
||||
type: str
|
||||
datacenter:
|
||||
description:
|
||||
- The name of the datacenter to which the virtual machine belongs.
|
||||
required: True
|
||||
type: str
|
||||
use_instance_uuid:
|
||||
description:
|
||||
- Whether to use the VMware instance UUID rather than the BIOS UUID.
|
||||
default: no
|
||||
type: bool
|
||||
version_added: '2.8'
|
||||
disk:
|
||||
description:
|
||||
- A list of disks to add.
|
||||
- The virtual disk related information is provided using this list.
|
||||
- All values and parameters are case sensitive.
|
||||
- 'Valid attributes are:'
|
||||
- ' - C(size[_tb,_gb,_mb,_kb]) (integer): Disk storage size in specified unit.'
|
||||
- ' If C(size) specified then unit must be specified. There is no space allowed in between size number and unit.'
|
||||
- ' Only first occurrence in disk element will be considered, even if there are multiple size* parameters available.'
|
||||
- ' - C(type) (string): Valid values are:'
|
||||
- ' - C(thin) thin disk'
|
||||
- ' - C(eagerzeroedthick) eagerzeroedthick disk'
|
||||
- ' - C(thick) thick disk'
|
||||
- ' Default: C(thick) thick disk, no eagerzero.'
|
||||
- ' - C(disk_mode) (string): Type of disk mode. Valid values are:'
|
||||
- ' - C(persistent) Changes are immediately and permanently written to the virtual disk. This is default.'
|
||||
- ' - C(independent_persistent) Same as persistent, but not affected by snapshots.'
|
||||
- ' - C(independent_nonpersistent) Changes to virtual disk are made to a redo log and discarded at power off, but not affected by snapshots.'
|
||||
- ' - C(datastore) (string): Name of datastore or datastore cluster to be used for the disk.'
|
||||
- ' - C(autoselect_datastore) (bool): Select the less used datastore. Specify only if C(datastore) is not specified.'
|
||||
- ' - C(scsi_controller) (integer): SCSI controller number. Valid value range from 0 to 3.'
|
||||
- ' Only 4 SCSI controllers are allowed per VM.'
|
||||
- ' Care should be taken while specifying C(scsi_controller) is 0 and C(unit_number) as 0 as this disk may contain OS.'
|
||||
- ' - C(unit_number) (integer): Disk Unit Number. Valid value range from 0 to 15. Only 15 disks are allowed per SCSI Controller.'
|
||||
- ' - C(scsi_type) (string): Type of SCSI controller. This value is required only for the first occurrence of SCSI Controller.'
|
||||
- ' This value is ignored, if SCSI Controller is already present or C(state) is C(absent).'
|
||||
- ' Valid values are C(buslogic), C(lsilogic), C(lsilogicsas) and C(paravirtual).'
|
||||
- ' C(paravirtual) is default value for this parameter.'
|
||||
- ' - C(destroy) (bool): If C(state) is C(absent), make sure the disk file is deleted from the datastore (default C(yes)).'
|
||||
- ' Added in version 2.10.'
|
||||
- ' - C(filename) (string): Existing disk image to be used. Filename must already exist on the datastore.'
|
||||
- ' Specify filename string in C([datastore_name] path/to/file.vmdk) format. Added in version 2.10.'
|
||||
- ' - C(state) (string): State of disk. This is either "absent" or "present".'
|
||||
- ' If C(state) is set to C(absent), disk will be removed permanently from virtual machine configuration and from VMware storage.'
|
||||
- ' If C(state) is set to C(present), disk will be added if not present at given SCSI Controller and Unit Number.'
|
||||
- ' If C(state) is set to C(present) and disk exists with different size, disk size is increased.'
|
||||
- ' Reducing disk size is not allowed.'
|
||||
suboptions:
|
||||
iolimit:
|
||||
description:
|
||||
- Section specifies the shares and limit for storage I/O resource.
|
||||
suboptions:
|
||||
limit:
|
||||
description:
|
||||
- Section specifies values for limit where the utilization of a virtual machine will not exceed, even if there are available resources.
|
||||
shares:
|
||||
description:
|
||||
- Specifies different types of shares user can add for the given disk.
|
||||
suboptions:
|
||||
level:
|
||||
description:
|
||||
- Specifies different level for the shares section.
|
||||
- Valid values are low, normal, high, custom.
|
||||
level_value:
|
||||
description:
|
||||
- Custom value when C(level) is set as C(custom).
|
||||
type: int
|
||||
type: list
|
||||
elements: dict
|
||||
shares:
|
||||
description:
|
||||
- Specifies the different types of shares user can add for the given disk.
|
||||
suboptions:
|
||||
level:
|
||||
description:
|
||||
- Specifies different level for the shares section, valid values are low, normal, high, custom.
|
||||
type: str
|
||||
level_value:
|
||||
description:
|
||||
- Custom value when C(level) is set as C(custom).
|
||||
type: int
|
||||
type: list
|
||||
elements: dict
|
||||
default: []
|
||||
type: list
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Add disks to virtual machine using UUID
|
||||
vmware_guest_disk:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
validate_certs: no
|
||||
uuid: 421e4592-c069-924d-ce20-7e7533fab926
|
||||
disk:
|
||||
- size_mb: 10
|
||||
type: thin
|
||||
datastore: datacluster0
|
||||
state: present
|
||||
scsi_controller: 1
|
||||
unit_number: 1
|
||||
scsi_type: 'paravirtual'
|
||||
disk_mode: 'persistent'
|
||||
- size_gb: 10
|
||||
type: eagerzeroedthick
|
||||
state: present
|
||||
autoselect_datastore: True
|
||||
scsi_controller: 2
|
||||
scsi_type: 'buslogic'
|
||||
unit_number: 12
|
||||
disk_mode: 'independent_persistent'
|
||||
- size: 10Gb
|
||||
type: eagerzeroedthick
|
||||
state: present
|
||||
autoselect_datastore: True
|
||||
scsi_controller: 2
|
||||
scsi_type: 'buslogic'
|
||||
unit_number: 1
|
||||
disk_mode: 'independent_nonpersistent'
|
||||
- filename: "[datastore1] path/to/existing/disk.vmdk"
|
||||
delegate_to: localhost
|
||||
register: disk_facts
|
||||
|
||||
- name: Add disks with specified shares to the virtual machine
|
||||
vmware_guest_disk:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
validate_certs: no
|
||||
disk:
|
||||
- size_gb: 1
|
||||
type: thin
|
||||
datastore: datacluster0
|
||||
state: present
|
||||
scsi_controller: 1
|
||||
unit_number: 1
|
||||
disk_mode: 'independent_persistent'
|
||||
shares:
|
||||
level: custom
|
||||
level_value: 1300
|
||||
delegate_to: localhost
|
||||
register: test_custom_shares
|
||||
|
||||
- name: create new disk with custom IO limits and shares in IO Limits
|
||||
vmware_guest_disk:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
validate_certs: no
|
||||
disk:
|
||||
- size_gb: 1
|
||||
type: thin
|
||||
datastore: datacluster0
|
||||
state: present
|
||||
scsi_controller: 1
|
||||
unit_number: 1
|
||||
disk_mode: 'independent_persistent'
|
||||
iolimit:
|
||||
limit: 1506
|
||||
shares:
|
||||
level: custom
|
||||
level_value: 1305
|
||||
delegate_to: localhost
|
||||
register: test_custom_IoLimit_shares
|
||||
|
||||
- name: Remove disks from virtual machine using name
|
||||
vmware_guest_disk:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
validate_certs: no
|
||||
name: VM_225
|
||||
disk:
|
||||
- state: absent
|
||||
scsi_controller: 1
|
||||
unit_number: 1
|
||||
delegate_to: localhost
|
||||
register: disk_facts
|
||||
|
||||
- name: Remove disk from virtual machine using moid
|
||||
vmware_guest_disk:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
validate_certs: no
|
||||
moid: vm-42
|
||||
disk:
|
||||
- state: absent
|
||||
scsi_controller: 1
|
||||
unit_number: 1
|
||||
delegate_to: localhost
|
||||
register: disk_facts
|
||||
|
||||
- name: Remove disk from virtual machine but keep the VMDK file on the datastore
|
||||
vmware_guest_disk:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
validate_certs: no
|
||||
name: VM_225
|
||||
disk:
|
||||
- state: absent
|
||||
scsi_controller: 1
|
||||
unit_number: 2
|
||||
destroy: no
|
||||
delegate_to: localhost
|
||||
register: disk_facts
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
disk_status:
|
||||
description: metadata about the virtual machine's disks after managing them
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"0": {
|
||||
"backing_datastore": "datastore2",
|
||||
"backing_disk_mode": "persistent",
|
||||
"backing_eagerlyscrub": false,
|
||||
"backing_filename": "[datastore2] VM_225/VM_225.vmdk",
|
||||
"backing_thinprovisioned": false,
|
||||
"backing_writethrough": false,
|
||||
"capacity_in_bytes": 10485760,
|
||||
"capacity_in_kb": 10240,
|
||||
"controller_key": 1000,
|
||||
"key": 2000,
|
||||
"label": "Hard disk 1",
|
||||
"summary": "10,240 KB",
|
||||
"unit_number": 0
|
||||
},
|
||||
}
|
||||
"""
|
||||
|
||||
import re
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task, find_obj, get_all_objs
|
||||
|
||||
|
||||
class PyVmomiHelper(PyVmomi):
|
||||
def __init__(self, module):
    """Cache module parameters and prepare an empty reconfigure spec."""
    super(PyVmomiHelper, self).__init__(module)
    # Disk list mirrors the vmware_guest module's 'disk' parameter shape.
    self.desired_disks = self.params['disk']
    self.vm = None
    # Map the user-facing scsi_type values to their pyVmomi controller classes.
    self.scsi_device_type = {
        'lsilogic': vim.vm.device.VirtualLsiLogicController,
        'paravirtual': vim.vm.device.ParaVirtualSCSIController,
        'buslogic': vim.vm.device.VirtualBusLogicController,
        'lsilogicsas': vim.vm.device.VirtualLsiLogicSASController,
    }
    self.config_spec = vim.vm.ConfigSpec()
    self.config_spec.deviceChange = []
def create_scsi_controller(self, scsi_type, scsi_bus_number):
    """
    Build an 'add' virtual device spec for a new SCSI controller.

    Args:
        scsi_type: SCSI controller flavour key (lsilogic/paravirtual/buslogic/lsilogicsas)
        scsi_bus_number: SCSI bus number to assign

    Returns: Virtual device spec for SCSI Controller

    """
    ctl_spec = vim.vm.device.VirtualDeviceSpec()
    ctl_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add

    device = self.scsi_device_type[scsi_type]()
    device.unitNumber = 3
    device.busNumber = scsi_bus_number
    device.hotAddRemove = True
    device.sharedBus = 'noSharing'
    # Unit number 7 on the bus is reserved for the controller itself.
    device.scsiCtlrUnitNumber = 7
    ctl_spec.device = device

    return ctl_spec
@staticmethod
def create_scsi_disk(scsi_ctl_key, disk_index, disk_mode, disk_filename):
    """
    Build an 'add' virtual device spec for a virtual disk.

    Args:
        scsi_ctl_key: Unique SCSI Controller key the disk attaches to
        disk_index: Disk unit number at which disk needs to be attached
        disk_mode: Disk backing mode
        disk_filename: Existing VMDK path on the datastore, or None to create a new file

    Returns: Virtual Device Spec for virtual disk

    """
    spec = vim.vm.device.VirtualDeviceSpec()
    spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add

    backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
    backing.diskMode = disk_mode

    spec.device = vim.vm.device.VirtualDisk()
    spec.device.backing = backing
    spec.device.controllerKey = scsi_ctl_key
    spec.device.unitNumber = disk_index

    if disk_filename is None:
        # No existing VMDK supplied: ask vSphere to create a fresh disk file.
        spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
    else:
        spec.device.backing.fileName = disk_filename

    return spec
def reconfigure_vm(self, config_spec, device_type):
    """
    Apply the given config spec to the managed VM and wait for the task.

    Args:
        config_spec: Config Spec to apply
        device_type: Human-readable device kind, used only in error messages

    Returns: Boolean status 'changed' and actual task result

    """
    changed = False
    results = ''
    try:
        # Perform actual VM reconfiguration
        reconfig_task = self.vm.ReconfigVM_Task(spec=config_spec)
        changed, results = wait_for_task(reconfig_task)
    except vim.fault.InvalidDeviceSpec as invalid_device_spec:
        self.module.fail_json(msg="Failed to manage %s on given virtual machine due to invalid"
                                  " device spec : %s" % (device_type, to_native(invalid_device_spec.msg)),
                              details="Please check ESXi server logs for more details.")
    except vim.fault.RestrictedVersion as e:
        self.module.fail_json(msg="Failed to reconfigure virtual machine due to"
                                  " product versioning restrictions: %s" % to_native(e.msg))
    return changed, results
def get_ioandshares_diskconfig(self, disk_spec, disk):
    """Attach storage I/O limit and shares settings from `disk` onto `disk_spec`."""
    allocation = vim.StorageResourceManager.IOAllocationInfo()
    if 'iolimit' in disk:
        allocation.limit = disk['iolimit']['limit']
        if 'shares' in disk['iolimit']:
            share_info = vim.SharesInfo()
            share_info.level = disk['iolimit']['shares']['level']
            if share_info.level == 'custom':
                share_info.shares = disk['iolimit']['shares']['level_value']
            allocation.shares = share_info
        disk_spec.device.storageIOAllocation = allocation
    if 'shares' in disk:
        share_info = vim.SharesInfo()
        share_info.level = disk['shares']['level']
        if share_info.level == 'custom':
            share_info.shares = disk['shares']['level_value']
        allocation.shares = share_info
        disk_spec.device.storageIOAllocation = allocation
    return disk_spec
def ensure_disks(self, vm_obj=None):
    """
    Manage internal state of virtual machine disks.

    Args:
        vm_obj: Managed object of virtual machine

    """
    # Set vm object
    self.vm = vm_obj
    # Sanitize user input
    disk_data = self.sanitize_disk_inputs()
    # Stateful map of SCSI controllers and their attached disks,
    # keyed by controller key (VMware assigns 1000 + bus number).
    current_scsi_info = dict()
    results = dict(changed=False, disk_data=None, disk_changes=dict())

    # Record SCSI controllers already present on the VM.
    for device in vm_obj.config.hardware.device:
        if isinstance(device, tuple(self.scsi_device_type.values())):
            if device.busNumber not in current_scsi_info:
                current_scsi_info[1000 + device.busNumber] = dict(disks=dict())

    # Create any missing SCSI controllers first, in their own reconfigure pass.
    scsi_changed = False
    for disk in disk_data:
        scsi_controller = disk['scsi_controller'] + 1000
        if scsi_controller not in current_scsi_info and disk['state'] == 'present':
            scsi_ctl = self.create_scsi_controller(disk['scsi_type'], disk['scsi_controller'])
            current_scsi_info[scsi_controller] = dict(disks=dict())
            self.config_spec.deviceChange.append(scsi_ctl)
            scsi_changed = True
    if scsi_changed:
        self.reconfigure_vm(self.config_spec, 'SCSI Controller')
        self.config_spec = vim.vm.ConfigSpec()
        self.config_spec.deviceChange = []

    # Index existing virtual disks by controller key and unit number.
    for device in vm_obj.config.hardware.device:
        if isinstance(device, vim.vm.device.VirtualDisk):
            if device.controllerKey not in current_scsi_info:
                current_scsi_info[device.controllerKey] = dict(disks=dict())
            current_scsi_info[device.controllerKey]['disks'][device.unitNumber] = device

    vm_name = self.vm.name
    disk_change_list = []
    for disk in disk_data:
        disk_change = False
        scsi_controller = disk['scsi_controller'] + 1000  # VMware auto assign 1000 + SCSI Controller
        if disk['disk_unit_number'] not in current_scsi_info[scsi_controller]['disks'] and disk['state'] == 'present':
            # Add new disk
            disk_spec = self.create_scsi_disk(scsi_controller, disk['disk_unit_number'], disk['disk_mode'], disk['filename'])
            if disk['filename'] is None:
                disk_spec.device.capacityInKB = disk['size']
            if disk['disk_type'] == 'thin':
                disk_spec.device.backing.thinProvisioned = True
            elif disk['disk_type'] == 'eagerzeroedthick':
                disk_spec.device.backing.eagerlyScrub = True
            # get Storage DRS recommended datastore from the datastore cluster
            if disk['datastore_cluster'] is not None:
                datastore_name = self.get_recommended_datastore(datastore_cluster_obj=disk['datastore_cluster'], disk_spec_obj=disk_spec)
                disk['datastore'] = find_obj(self.content, [vim.Datastore], datastore_name)
            if disk['filename'] is None:
                disk_spec.device.backing.fileName = "[%s] %s/%s_%s_%s.vmdk" % (
                    disk['datastore'].name,
                    vm_name, vm_name,
                    str(scsi_controller),
                    str(disk['disk_unit_number']))
            else:
                disk_spec.device.backing.fileName = disk['filename']
            disk_spec.device.backing.datastore = disk['datastore']
            disk_spec = self.get_ioandshares_diskconfig(disk_spec, disk)
            self.config_spec.deviceChange.append(disk_spec)
            disk_change = True
            current_scsi_info[scsi_controller]['disks'][disk['disk_unit_number']] = disk_spec.device
            results['disk_changes'][disk['disk_index']] = "Disk created."
        elif disk['disk_unit_number'] in current_scsi_info[scsi_controller]['disks']:
            if disk['state'] == 'present':
                disk_spec = vim.vm.device.VirtualDeviceSpec()
                # set the operation to edit so that it knows to keep other settings
                disk_spec.device = current_scsi_info[scsi_controller]['disks'][disk['disk_unit_number']]
                # Edit and no resizing allowed
                if disk['size'] < disk_spec.device.capacityInKB:
                    self.module.fail_json(msg="Given disk size at disk index [%s] is smaller than found (%d < %d)."
                                              "Reducing disks is not allowed." % (disk['disk_index'],
                                                                                  disk['size'],
                                                                                  disk_spec.device.capacityInKB))
                if disk['size'] != disk_spec.device.capacityInKB:
                    disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
                    disk_spec = self.get_ioandshares_diskconfig(disk_spec, disk)
                    disk_spec.device.capacityInKB = disk['size']
                    self.config_spec.deviceChange.append(disk_spec)
                    disk_change = True
                    results['disk_changes'][disk['disk_index']] = "Disk size increased."
                else:
                    results['disk_changes'][disk['disk_index']] = "Disk already exists."

            elif disk['state'] == 'absent':
                # Disk already exists, deleting
                disk_spec = vim.vm.device.VirtualDeviceSpec()
                disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
                if disk['destroy'] is True:
                    disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.destroy
                disk_spec.device = current_scsi_info[scsi_controller]['disks'][disk['disk_unit_number']]
                self.config_spec.deviceChange.append(disk_spec)
                disk_change = True
                results['disk_changes'][disk['disk_index']] = "Disk deleted."

        if disk_change:
            # Adding multiple disks in a single attempt raises weird errors
            # So adding single disk at a time.
            self.reconfigure_vm(self.config_spec, 'disks')
            self.config_spec = vim.vm.ConfigSpec()
            self.config_spec.deviceChange = []
        disk_change_list.append(disk_change)

    if any(disk_change_list):
        results['changed'] = True
    results['disk_data'] = self.gather_disk_facts(vm_obj=self.vm)
    self.module.exit_json(**results)
def sanitize_disk_inputs(self):
    """
    Check correctness of disk input provided by user.

    Returns: A list of dictionaries containing normalized disk information

    """
    disks_data = list()
    if not self.desired_disks:
        self.module.exit_json(changed=False, msg="No disks provided for virtual"
                                                 " machine '%s' for management." % self.vm.name)

    for disk_index, disk in enumerate(self.desired_disks):
        # Initialize default value for disk
        current_disk = dict(disk_index=disk_index,
                            state='present',
                            destroy=True,
                            filename=None,
                            datastore_cluster=None,
                            datastore=None,
                            autoselect_datastore=True,
                            disk_unit_number=0,
                            scsi_controller=0,
                            disk_mode='persistent')
        # Check state
        if 'state' in disk:
            if disk['state'] not in ['absent', 'present']:
                self.module.fail_json(msg="Invalid state provided '%s' for disk index [%s]."
                                          " State can be either - 'absent', 'present'" % (disk['state'],
                                                                                          disk_index))
            else:
                current_disk['state'] = disk['state']

        if current_disk['state'] == 'absent':
            # 'destroy' is documented to default to yes, so it must not be a
            # required key (accessing disk['destroy'] directly would raise
            # KeyError when the user omits it, e.g. "- state: absent").
            current_disk['destroy'] = disk.get('destroy', True)
        elif current_disk['state'] == 'present':
            # Select datastore or datastore cluster
            if 'datastore' in disk:
                if 'autoselect_datastore' in disk:
                    self.module.fail_json(msg="Please specify either 'datastore' "
                                              "or 'autoselect_datastore' for disk index [%s]" % disk_index)

                # Check if given value is datastore or datastore cluster
                datastore_name = disk['datastore']
                datastore_cluster = find_obj(self.content, [vim.StoragePod], datastore_name)
                datastore = find_obj(self.content, [vim.Datastore], datastore_name)

                if datastore is None and datastore_cluster is None:
                    self.module.fail_json(msg="Failed to find datastore or datastore cluster named '%s' "
                                              "in given configuration." % disk['datastore'])
                if datastore_cluster:
                    # If user specified datastore cluster, keep track of that for determining datastore later
                    current_disk['datastore_cluster'] = datastore_cluster
                elif datastore:
                    current_disk['datastore'] = datastore
                    current_disk['autoselect_datastore'] = False
            elif 'autoselect_datastore' in disk:
                # Find datastore which fits requirement
                datastores = get_all_objs(self.content, [vim.Datastore])
                if not datastores:
                    self.module.fail_json(msg="Failed to gather information about"
                                              " available datastores in given datacenter.")
                datastore = None
                datastore_freespace = 0
                for ds in datastores:
                    if ds.summary.freeSpace > datastore_freespace:
                        # If datastore field is provided, filter destination datastores
                        datastore = ds
                        datastore_freespace = ds.summary.freeSpace
                current_disk['datastore'] = datastore

            if 'datastore' not in disk and 'autoselect_datastore' not in disk and 'filename' not in disk:
                self.module.fail_json(msg="Either 'datastore' or 'autoselect_datastore' is"
                                          " required parameter while creating disk for "
                                          "disk index [%s]." % disk_index)

            if 'filename' in disk:
                current_disk['filename'] = disk['filename']

            if [x for x in disk.keys() if x.startswith('size_') or x == 'size']:
                # size, size_tb, size_gb, size_mb, size_kb
                disk_size_parse_failed = False
                if 'size' in disk:
                    size_regex = re.compile(r'(\d+(?:\.\d+)?)([tgmkTGMK][bB])')
                    disk_size_m = size_regex.match(disk['size'])
                    if disk_size_m:
                        expected = disk_size_m.group(1)
                        unit = disk_size_m.group(2)
                    else:
                        disk_size_parse_failed = True
                    try:
                        # NameError on 'expected' (regex match failed above) is
                        # deliberately caught and folded into the parse-failure flag.
                        if re.match(r'\d+\.\d+', expected):
                            # We found float value in string, let's typecast it
                            expected = float(expected)
                        else:
                            # We found int value in string, let's typecast it
                            expected = int(expected)
                    except (TypeError, ValueError, NameError):
                        disk_size_parse_failed = True
                else:
                    # Even multiple size_ parameter provided by user,
                    # consider first value only
                    param = [x for x in disk.keys() if x.startswith('size_')][0]
                    unit = param.split('_')[-1]
                    disk_size = disk[param]
                    if isinstance(disk_size, (float, int)):
                        disk_size = str(disk_size)

                    try:
                        if re.match(r'\d+\.\d+', disk_size):
                            # We found float value in string, let's typecast it
                            expected = float(disk_size)
                        else:
                            # We found int value in string, let's typecast it
                            expected = int(disk_size)
                    except (TypeError, ValueError, NameError):
                        disk_size_parse_failed = True

                if disk_size_parse_failed:
                    # Common failure
                    self.module.fail_json(msg="Failed to parse disk size for disk index [%s],"
                                              " please review value provided"
                                              " using documentation." % disk_index)

                disk_units = dict(tb=3, gb=2, mb=1, kb=0)
                unit = unit.lower()
                if unit in disk_units:
                    # Normalize all sizes to KB for the vSphere API.
                    current_disk['size'] = expected * (1024 ** disk_units[unit])
                else:
                    self.module.fail_json(msg="%s is not a supported unit for disk size for disk index [%s]."
                                              " Supported units are ['%s']." % (unit,
                                                                                disk_index,
                                                                                "', '".join(disk_units.keys())))

            elif current_disk['filename'] is None:
                # No size found but disk, fail
                self.module.fail_json(msg="No size, size_kb, size_mb, size_gb or size_tb"
                                          " attribute found into disk index [%s] configuration." % disk_index)
        # Check SCSI controller key
        if 'scsi_controller' in disk:
            try:
                temp_disk_controller = int(disk['scsi_controller'])
            except ValueError:
                self.module.fail_json(msg="Invalid SCSI controller ID '%s' specified"
                                          " at index [%s]" % (disk['scsi_controller'], disk_index))
            if temp_disk_controller not in range(0, 4):
                # Only 4 SCSI controllers are allowed per VM
                self.module.fail_json(msg="Invalid SCSI controller ID specified [%s],"
                                          " please specify value between 0 to 3 only." % temp_disk_controller)
            current_disk['scsi_controller'] = temp_disk_controller
        else:
            self.module.fail_json(msg="Please specify 'scsi_controller' under disk parameter"
                                      " at index [%s], which is required while creating disk." % disk_index)
        # Check for disk unit number
        if 'unit_number' in disk:
            try:
                temp_disk_unit_number = int(disk['unit_number'])
            except ValueError:
                self.module.fail_json(msg="Invalid Disk unit number ID '%s'"
                                          " specified at index [%s]" % (disk['unit_number'], disk_index))
            if temp_disk_unit_number not in range(0, 16):
                self.module.fail_json(msg="Invalid Disk unit number ID specified for disk [%s] at index [%s],"
                                          " please specify value between 0 to 15"
                                          " only (excluding 7)." % (temp_disk_unit_number, disk_index))

            if temp_disk_unit_number == 7:
                self.module.fail_json(msg="Invalid Disk unit number ID specified for disk at index [%s],"
                                          " please specify value other than 7 as it is reserved"
                                          "for SCSI Controller" % disk_index)
            current_disk['disk_unit_number'] = temp_disk_unit_number

        else:
            self.module.fail_json(msg="Please specify 'unit_number' under disk parameter"
                                      " at index [%s], which is required while creating disk." % disk_index)

        # Type of Disk
        disk_type = disk.get('type', 'thick').lower()
        if disk_type not in ['thin', 'thick', 'eagerzeroedthick']:
            self.module.fail_json(msg="Invalid 'disk_type' specified for disk index [%s]. Please specify"
                                      " 'disk_type' value from ['thin', 'thick', 'eagerzeroedthick']." % disk_index)
        current_disk['disk_type'] = disk_type

        # Mode of Disk
        temp_disk_mode = disk.get('disk_mode', 'persistent').lower()
        if temp_disk_mode not in ['persistent', 'independent_persistent', 'independent_nonpersistent']:
            self.module.fail_json(msg="Invalid 'disk_mode' specified for disk index [%s]. Please specify"
                                      " 'disk_mode' value from ['persistent', 'independent_persistent', 'independent_nonpersistent']." % disk_index)
        current_disk['disk_mode'] = temp_disk_mode

        # SCSI Controller Type
        scsi_contrl_type = disk.get('scsi_type', 'paravirtual').lower()
        if scsi_contrl_type not in self.scsi_device_type.keys():
            self.module.fail_json(msg="Invalid 'scsi_type' specified for disk index [%s]. Please specify"
                                      " 'scsi_type' value from ['%s']" % (disk_index,
                                                                          "', '".join(self.scsi_device_type.keys())))
        current_disk['scsi_type'] = scsi_contrl_type
        if 'shares' in disk:
            current_disk['shares'] = disk['shares']
        if 'iolimit' in disk:
            current_disk['iolimit'] = disk['iolimit']
        disks_data.append(current_disk)
    return disks_data
def get_recommended_datastore(self, datastore_cluster_obj, disk_spec_obj):
|
||||
"""
|
||||
Return Storage DRS recommended datastore from datastore cluster
|
||||
Args:
|
||||
datastore_cluster_obj: datastore cluster managed object
|
||||
|
||||
Returns: Name of recommended datastore from the given datastore cluster,
|
||||
Returns None if no datastore recommendation found.
|
||||
|
||||
"""
|
||||
# Check if Datastore Cluster provided by user is SDRS ready
|
||||
sdrs_status = datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled
|
||||
if sdrs_status:
|
||||
# We can get storage recommendation only if SDRS is enabled on given datastorage cluster
|
||||
disk_loc = vim.storageDrs.PodSelectionSpec.DiskLocator()
|
||||
pod_config = vim.storageDrs.PodSelectionSpec.VmPodConfig()
|
||||
pod_config.storagePod = datastore_cluster_obj
|
||||
pod_config.disk = [disk_loc]
|
||||
pod_sel_spec = vim.storageDrs.PodSelectionSpec()
|
||||
pod_sel_spec.initialVmConfig = [pod_config]
|
||||
storage_spec = vim.storageDrs.StoragePlacementSpec()
|
||||
storage_spec.configSpec = vim.vm.ConfigSpec()
|
||||
storage_spec.configSpec.deviceChange.append(disk_spec_obj)
|
||||
storage_spec.resourcePool = self.vm.resourcePool
|
||||
storage_spec.podSelectionSpec = pod_sel_spec
|
||||
storage_spec.vm = self.vm
|
||||
storage_spec.type = 'reconfigure'
|
||||
|
||||
try:
|
||||
rec = self.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec)
|
||||
rec_action = rec.recommendations[0].action[0]
|
||||
return rec_action.destination.name
|
||||
except Exception:
|
||||
# There is some error so we fall back to general workflow
|
||||
pass
|
||||
datastore = None
|
||||
datastore_freespace = 0
|
||||
for ds in datastore_cluster_obj.childEntity:
|
||||
if ds.summary.freeSpace > datastore_freespace:
|
||||
# If datastore field is provided, filter destination datastores
|
||||
datastore = ds
|
||||
datastore_freespace = ds.summary.freeSpace
|
||||
if datastore:
|
||||
return datastore.name
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def gather_disk_facts(vm_obj):
|
||||
"""
|
||||
Gather facts about VM's disks
|
||||
Args:
|
||||
vm_obj: Managed object of virtual machine
|
||||
|
||||
Returns: A list of dict containing disks information
|
||||
|
||||
"""
|
||||
disks_facts = dict()
|
||||
if vm_obj is None:
|
||||
return disks_facts
|
||||
|
||||
disk_index = 0
|
||||
for disk in vm_obj.config.hardware.device:
|
||||
if isinstance(disk, vim.vm.device.VirtualDisk):
|
||||
if disk.storageIOAllocation is None:
|
||||
disk.storageIOAllocation = vim.StorageResourceManager.IOAllocationInfo()
|
||||
disk.storageIOAllocation.shares = vim.SharesInfo()
|
||||
if disk.shares is None:
|
||||
disk.shares = vim.SharesInfo()
|
||||
disks_facts[disk_index] = dict(
|
||||
key=disk.key,
|
||||
label=disk.deviceInfo.label,
|
||||
summary=disk.deviceInfo.summary,
|
||||
backing_filename=disk.backing.fileName,
|
||||
backing_datastore=disk.backing.datastore.name,
|
||||
backing_disk_mode=disk.backing.diskMode,
|
||||
backing_writethrough=disk.backing.writeThrough,
|
||||
backing_thinprovisioned=disk.backing.thinProvisioned,
|
||||
backing_eagerlyscrub=bool(disk.backing.eagerlyScrub),
|
||||
controller_key=disk.controllerKey,
|
||||
unit_number=disk.unitNumber,
|
||||
iolimit_limit=disk.storageIOAllocation.limit,
|
||||
iolimit_shares_level=disk.storageIOAllocation.shares.level,
|
||||
iolimit_shares_limit=disk.storageIOAllocation.shares.shares,
|
||||
shares_level=disk.shares.level,
|
||||
shares_limit=disk.shares.shares,
|
||||
capacity_in_kb=disk.capacityInKB,
|
||||
capacity_in_bytes=disk.capacityInBytes,
|
||||
)
|
||||
disk_index += 1
|
||||
return disks_facts
|
||||
|
||||
|
||||
def main():
    """Module entry point: resolve the target VM and reconcile its disks."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        folder=dict(type='str'),
        datacenter=dict(type='str', required=True),
        disk=dict(type='list', default=[]),
        use_instance_uuid=dict(type='bool', default=False),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['name', 'uuid', 'moid']
        ]
    )

    folder_param = module.params['folder']
    if folder_param:
        # FindByInventoryPath() does not require an absolute path, so the
        # folder value is kept as-is apart from a trailing slash.
        module.params['folder'] = folder_param.rstrip('/')

    helper = PyVmomiHelper(module)
    # Resolve the VM before attempting any disk work.
    vm = helper.get_vm()

    if not vm:
        # The requested virtual machine could not be found — report which
        # identifier the user supplied and bail out.
        vm_id = module.params.get('name') or module.params.get('uuid') or module.params.get('moid')
        module.fail_json(msg="Unable to manage disks for non-existing"
                             " virtual machine '%s'." % vm_id)

    # VM exists; apply the requested disk configuration.
    try:
        helper.ensure_disks(vm_obj=vm)
    except Exception as exc:
        module.fail_json(msg="Failed to manage disks for virtual machine"
                             " '%s' with exception : %s" % (vm.name,
                                                            to_native(exc)))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,332 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2019, NAER William Leemans (@bushvin) <willie@elaba.net>
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_guest_disk_info
|
||||
short_description: Gather info about disks of given virtual machine
|
||||
description:
|
||||
- This module can be used to gather information about disks belonging to given virtual machine.
|
||||
- All parameters and VMware object names are case sensitive.
|
||||
version_added: '2.9'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
|
||||
notes:
|
||||
- Tested on vSphere 6.0 and 6.5.
|
||||
- Disk UUID information is added in version 2.8.
|
||||
- Additional information about guest disk backings added in version 2.8.
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the virtual machine.
|
||||
- This is required parameter, if parameter C(uuid) or C(moid) is not supplied.
|
||||
type: str
|
||||
uuid:
|
||||
description:
|
||||
- UUID of the instance to gather information if known, this is VMware's unique identifier.
|
||||
- This is required parameter, if parameter C(name) or C(moid) is not supplied.
|
||||
type: str
|
||||
moid:
|
||||
description:
|
||||
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
|
||||
- This is required if C(name) or C(uuid) is not supplied.
|
||||
type: str
|
||||
use_instance_uuid:
|
||||
description:
|
||||
- Whether to use the VMware instance UUID rather than the BIOS UUID.
|
||||
default: no
|
||||
type: bool
|
||||
folder:
|
||||
description:
|
||||
- Destination folder, absolute or relative path to find an existing guest.
|
||||
- This is required parameter, only if multiple VMs are found with same name.
|
||||
- The folder should include the datacenter. ESX's datacenter is ha-datacenter
|
||||
- 'Examples:'
|
||||
- ' folder: /ha-datacenter/vm'
|
||||
- ' folder: ha-datacenter/vm'
|
||||
- ' folder: /datacenter1/vm'
|
||||
- ' folder: datacenter1/vm'
|
||||
- ' folder: /datacenter1/vm/folder1'
|
||||
- ' folder: datacenter1/vm/folder1'
|
||||
- ' folder: /folder1/datacenter1/vm'
|
||||
- ' folder: folder1/datacenter1/vm'
|
||||
- ' folder: /folder1/datacenter1/vm/folder2'
|
||||
type: str
|
||||
datacenter:
|
||||
description:
|
||||
- The datacenter name to which virtual machine belongs to.
|
||||
required: True
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather disk info from virtual machine using UUID
|
||||
vmware_guest_disk_info:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: ha-datacenter
|
||||
validate_certs: no
|
||||
uuid: 421e4592-c069-924d-ce20-7e7533fab926
|
||||
delegate_to: localhost
|
||||
register: disk_info
|
||||
|
||||
- name: Gather disk info from virtual machine using name
|
||||
vmware_guest_disk_info:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: ha-datacenter
|
||||
validate_certs: no
|
||||
name: VM_225
|
||||
delegate_to: localhost
|
||||
register: disk_info
|
||||
|
||||
- name: Gather disk info from virtual machine using moid
|
||||
vmware_guest_disk_info:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: ha-datacenter
|
||||
validate_certs: no
|
||||
moid: vm-42
|
||||
delegate_to: localhost
|
||||
register: disk_info
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
guest_disk_info:
|
||||
description: metadata about the virtual machine's disks
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"0": {
|
||||
"backing_datastore": "datastore2",
|
||||
"backing_disk_mode": "persistent",
|
||||
"backing_diskmode": "persistent",
|
||||
"backing_eagerlyscrub": false,
|
||||
"backing_filename": "[datastore2] VM_225/VM_225.vmdk",
|
||||
"backing_thinprovisioned": false,
|
||||
"backing_type": "FlatVer2",
|
||||
"backing_writethrough": false,
|
||||
"backing_uuid": "200C3A00-f82a-97af-02ff-62a595f0020a",
|
||||
"capacity_in_bytes": 10485760,
|
||||
"capacity_in_kb": 10240,
|
||||
"controller_bus_number": 0,
|
||||
"controller_key": 1000,
|
||||
"controller_type": "paravirtual",
|
||||
"key": 2000,
|
||||
"label": "Hard disk 1",
|
||||
"summary": "10,240 KB",
|
||||
"unit_number": 0
|
||||
},
|
||||
"1": {
|
||||
"backing_datastore": "datastore3",
|
||||
"backing_devicename": "vml.012345678901234567890123456789012345678901234567890123",
|
||||
"backing_disk_mode": "independent_persistent",
|
||||
"backing_diskmode": "independent_persistent",
|
||||
"backing_filename": "[datastore3] VM_226/VM_226.vmdk",
|
||||
"backing_lunuuid": "012345678901234567890123456789012345678901234567890123",
|
||||
"backing_type": "RawDiskMappingVer1",
|
||||
"backing_uuid": null,
|
||||
"capacity_in_bytes": 15728640,
|
||||
"capacity_in_kb": 15360,
|
||||
"controller_bus_number": 0,
|
||||
"controller_key": 1000,
|
||||
"controller_type": "paravirtual",
|
||||
"key": 2001,
|
||||
"label": "Hard disk 3",
|
||||
"summary": "15,360 KB",
|
||||
"unit_number": 1
|
||||
},
|
||||
}
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_text
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
|
||||
|
||||
|
||||
class PyVmomiHelper(PyVmomi):
    """Helper that gathers disk information from a VM via the vSphere API."""

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)

    def gather_disk_info(self, vm_obj):
        """
        Gather information about VM's disks.

        Args:
            vm_obj: Managed object of virtual machine (may be None).

        Returns: A dict keyed by zero-based disk index, each value a dict of
            disk information; empty dict when vm_obj is None.
        """
        controller_info = dict()
        disks_info = dict()
        if vm_obj is None:
            return disks_info

        # Map pyVmomi controller classes to the short names reported to users.
        controller_types = {
            vim.vm.device.VirtualLsiLogicController: 'lsilogic',
            vim.vm.device.ParaVirtualSCSIController: 'paravirtual',
            vim.vm.device.VirtualBusLogicController: 'buslogic',
            vim.vm.device.VirtualLsiLogicSASController: 'lsilogicsas',
            vim.vm.device.VirtualIDEController: 'ide'
        }

        # First pass: index every disk controller so disks can later be
        # matched to their controller via controllerKey.
        controller_index = 0
        for controller in vm_obj.config.hardware.device:
            if isinstance(controller, tuple(controller_types.keys())):
                controller_info[controller_index] = dict(
                    key=controller.key,
                    controller_type=controller_types[type(controller)],
                    bus_number=controller.busNumber,
                    devices=controller.device
                )
                controller_index += 1

        # Second pass: collect facts for each VirtualDisk device.
        disk_index = 0
        for disk in vm_obj.config.hardware.device:
            if isinstance(disk, vim.vm.device.VirtualDisk):
                disks_info[disk_index] = dict(
                    key=disk.key,
                    label=disk.deviceInfo.label,
                    summary=disk.deviceInfo.summary,
                    backing_filename=disk.backing.fileName,
                    backing_datastore=disk.backing.datastore.name,
                    controller_key=disk.controllerKey,
                    unit_number=disk.unitNumber,
                    capacity_in_kb=disk.capacityInKB,
                    capacity_in_bytes=disk.capacityInBytes,
                )
                # Backing-specific attributes: each backing class exposes a
                # different set of fields, hence the per-type branches below.
                if isinstance(disk.backing, vim.vm.device.VirtualDisk.FlatVer1BackingInfo):
                    disks_info[disk_index]['backing_type'] = 'FlatVer1'
                    disks_info[disk_index]['backing_writethrough'] = disk.backing.writeThrough

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.FlatVer2BackingInfo):
                    disks_info[disk_index]['backing_type'] = 'FlatVer2'
                    disks_info[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough)
                    disks_info[disk_index]['backing_thinprovisioned'] = bool(disk.backing.thinProvisioned)
                    disks_info[disk_index]['backing_eagerlyscrub'] = bool(disk.backing.eagerlyScrub)
                    disks_info[disk_index]['backing_uuid'] = disk.backing.uuid

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.LocalPMemBackingInfo):
                    disks_info[disk_index]['backing_type'] = 'LocalPMem'
                    disks_info[disk_index]['backing_volumeuuid'] = disk.backing.volumeUUID
                    disks_info[disk_index]['backing_uuid'] = disk.backing.uuid

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.PartitionedRawDiskVer2BackingInfo):
                    disks_info[disk_index]['backing_type'] = 'PartitionedRawDiskVer2'
                    disks_info[disk_index]['backing_descriptorfilename'] = disk.backing.descriptorFileName
                    disks_info[disk_index]['backing_uuid'] = disk.backing.uuid

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.RawDiskMappingVer1BackingInfo):
                    disks_info[disk_index]['backing_type'] = 'RawDiskMappingVer1'
                    disks_info[disk_index]['backing_devicename'] = disk.backing.deviceName
                    # 'backing_diskmode' kept alongside 'backing_disk_mode'
                    # for backward compatibility of the returned facts.
                    disks_info[disk_index]['backing_diskmode'] = disk.backing.diskMode
                    disks_info[disk_index]['backing_disk_mode'] = disk.backing.diskMode
                    disks_info[disk_index]['backing_lunuuid'] = disk.backing.lunUuid
                    disks_info[disk_index]['backing_uuid'] = disk.backing.uuid

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.RawDiskVer2BackingInfo):
                    disks_info[disk_index]['backing_type'] = 'RawDiskVer2'
                    disks_info[disk_index]['backing_descriptorfilename'] = disk.backing.descriptorFileName
                    disks_info[disk_index]['backing_uuid'] = disk.backing.uuid

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SeSparseBackingInfo):
                    disks_info[disk_index]['backing_type'] = 'SeSparse'
                    disks_info[disk_index]['backing_diskmode'] = disk.backing.diskMode
                    disks_info[disk_index]['backing_disk_mode'] = disk.backing.diskMode
                    disks_info[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough)
                    disks_info[disk_index]['backing_uuid'] = disk.backing.uuid

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SparseVer1BackingInfo):
                    disks_info[disk_index]['backing_type'] = 'SparseVer1'
                    disks_info[disk_index]['backing_diskmode'] = disk.backing.diskMode
                    disks_info[disk_index]['backing_disk_mode'] = disk.backing.diskMode
                    disks_info[disk_index]['backing_spaceusedinkb'] = disk.backing.spaceUsedInKB
                    disks_info[disk_index]['backing_split'] = bool(disk.backing.split)
                    disks_info[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough)

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SparseVer2BackingInfo):
                    disks_info[disk_index]['backing_type'] = 'SparseVer2'
                    disks_info[disk_index]['backing_diskmode'] = disk.backing.diskMode
                    disks_info[disk_index]['backing_disk_mode'] = disk.backing.diskMode
                    disks_info[disk_index]['backing_spaceusedinkb'] = disk.backing.spaceUsedInKB
                    disks_info[disk_index]['backing_split'] = bool(disk.backing.split)
                    disks_info[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough)
                    disks_info[disk_index]['backing_uuid'] = disk.backing.uuid

                # Attach the owning controller's bus number and type by
                # matching the disk's controllerKey against the first pass.
                for controller_index in range(len(controller_info)):
                    if controller_info[controller_index]['key'] == disks_info[disk_index]['controller_key']:
                        disks_info[disk_index]['controller_bus_number'] = controller_info[controller_index]['bus_number']
                        disks_info[disk_index]['controller_type'] = controller_info[controller_index]['controller_type']

                disk_index += 1
        return disks_info
|
||||
|
||||
|
||||
def main():
    """Module entry point: gather disk info for the requested virtual machine."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        folder=dict(type='str'),
        datacenter=dict(type='str', required=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['name', 'uuid', 'moid']
        ],
        supports_check_mode=True,
    )

    folder_param = module.params['folder']
    if folder_param:
        # FindByInventoryPath() does not require an absolute path, so the
        # folder value is kept as-is apart from a trailing slash.
        module.params['folder'] = folder_param.rstrip('/')

    helper = PyVmomiHelper(module)
    # Resolve the VM before attempting to gather anything.
    vm = helper.get_vm()

    if not vm:
        # The requested virtual machine could not be found — report which
        # identifier the user supplied and bail out.
        vm_id = module.params.get('uuid') or module.params.get('moid') or module.params.get('name')
        module.fail_json(msg="Unable to gather disk information for non-existing VM %s" % vm_id)

    # fail_json() exits the process, so reaching this point means the VM exists.
    try:
        module.exit_json(guest_disk_info=helper.gather_disk_info(vm))
    except Exception as exc:
        module.fail_json(msg="Failed to gather information with exception : %s" % to_text(exc))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -1,460 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2017, Stéphane Travassac <stravassac@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_guest_file_operation
|
||||
short_description: Files operation in a VMware guest operating system without network
|
||||
description:
|
||||
- Module to copy a file to a VM, fetch a file from a VM and create or delete a directory in the guest OS.
|
||||
version_added: "2.5"
|
||||
author:
|
||||
- Stéphane Travassac (@stravassac)
|
||||
notes:
|
||||
- Tested on vSphere 6
|
||||
- Only the first match against vm_id is used, even if there are multiple matches
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
- requests
|
||||
options:
|
||||
datacenter:
|
||||
description:
|
||||
- The datacenter hosting the virtual machine.
|
||||
- If set, it will help to speed up virtual machine search.
|
||||
type: str
|
||||
cluster:
|
||||
description:
|
||||
- The cluster hosting the virtual machine.
|
||||
- If set, it will help to speed up virtual machine search.
|
||||
type: str
|
||||
folder:
|
||||
description:
|
||||
- Destination folder, absolute path to find an existing guest or create the new guest.
|
||||
- The folder should include the datacenter. ESX's datacenter is ha-datacenter
|
||||
- Used only if C(vm_id_type) is C(inventory_path).
|
||||
- 'Examples:'
|
||||
- ' folder: /ha-datacenter/vm'
|
||||
- ' folder: ha-datacenter/vm'
|
||||
- ' folder: /datacenter1/vm'
|
||||
- ' folder: datacenter1/vm'
|
||||
- ' folder: /datacenter1/vm/folder1'
|
||||
- ' folder: datacenter1/vm/folder1'
|
||||
- ' folder: /folder1/datacenter1/vm'
|
||||
- ' folder: folder1/datacenter1/vm'
|
||||
- ' folder: /folder1/datacenter1/vm/folder2'
|
||||
- ' folder: vm/folder2'
|
||||
- ' folder: folder2'
|
||||
type: str
|
||||
vm_id:
|
||||
description:
|
||||
- Name of the virtual machine to work with.
|
||||
required: True
|
||||
type: str
|
||||
vm_id_type:
|
||||
description:
|
||||
- The VMware identification method by which the virtual machine will be identified.
|
||||
default: vm_name
|
||||
choices:
|
||||
- 'uuid'
|
||||
- 'instance_uuid'
|
||||
- 'dns_name'
|
||||
- 'inventory_path'
|
||||
- 'vm_name'
|
||||
type: str
|
||||
vm_username:
|
||||
description:
|
||||
- The user to login in to the virtual machine.
|
||||
required: True
|
||||
type: str
|
||||
vm_password:
|
||||
description:
|
||||
- The password used to log in to the virtual machine.
|
||||
required: True
|
||||
type: str
|
||||
directory:
|
||||
description:
|
||||
- Create or delete a directory.
|
||||
- Can be used to create temp directory inside guest using mktemp operation.
|
||||
- mktemp sets variable C(dir) in the result with the name of the new directory.
|
||||
- mktemp operation option is added in version 2.8
|
||||
- 'Valid attributes are:'
|
||||
- ' operation (str): Valid values are: create, delete, mktemp'
|
||||
- ' path (str): directory path (required for create or remove)'
|
||||
- ' prefix (str): temporary directory prefix (required for mktemp)'
|
||||
- ' suffix (str): temporary directory suffix (required for mktemp)'
|
||||
- ' recurse (boolean): Not required, default (false)'
|
||||
required: False
|
||||
type: dict
|
||||
copy:
|
||||
description:
|
||||
- Copy file to vm without requiring network.
|
||||
- 'Valid attributes are:'
|
||||
- ' src: file source absolute or relative'
|
||||
- ' dest: file destination; the destination path must exist'
|
||||
- ' overwrite: False or True (not required, default False)'
|
||||
required: False
|
||||
type: dict
|
||||
fetch:
|
||||
description:
|
||||
- Get file from virtual machine without requiring network.
|
||||
- 'Valid attributes are:'
|
||||
- ' src: The file on the remote system to fetch. This I(must) be a file, not a directory'
|
||||
- ' dest: file destination on localhost; the destination path must exist'
|
||||
required: False
|
||||
type: dict
|
||||
version_added: 2.5
|
||||
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create directory inside a vm
|
||||
vmware_guest_file_operation:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
validate_certs: no
|
||||
vm_id: "{{ guest_name }}"
|
||||
vm_username: "{{ guest_username }}"
|
||||
vm_password: "{{ guest_userpassword }}"
|
||||
directory:
|
||||
path: "/test"
|
||||
operation: create
|
||||
recurse: no
|
||||
delegate_to: localhost
|
||||
|
||||
- name: copy file to vm
|
||||
vmware_guest_file_operation:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
vm_id: "{{ guest_name }}"
|
||||
vm_username: "{{ guest_username }}"
|
||||
vm_password: "{{ guest_userpassword }}"
|
||||
copy:
|
||||
src: "files/test.zip"
|
||||
dest: "/root/test.zip"
|
||||
overwrite: False
|
||||
delegate_to: localhost
|
||||
|
||||
- name: fetch file from vm
|
||||
vmware_guest_file_operation:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
vm_id: "{{ guest_name }}"
|
||||
vm_username: "{{ guest_username }}"
|
||||
vm_password: "{{ guest_userpassword }}"
|
||||
fetch:
|
||||
src: "/root/test.zip"
|
||||
dest: "files/test.zip"
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
'''
|
||||
|
||||
try:
|
||||
from pyVmomi import vim, vmodl
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
import os
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils import urls
|
||||
from ansible.module_utils._text import to_bytes, to_native
|
||||
from ansible.module_utils.vmware import (PyVmomi, find_cluster_by_name, find_datacenter_by_name,
|
||||
find_vm_by_id, vmware_argument_spec)
|
||||
|
||||
|
||||
class VmwareGuestFileManager(PyVmomi):
|
||||
def __init__(self, module):
    """
    Locate the target VM and run the requested guest-file operation(s).

    NOTE(review): this constructor performs the entire module run — it ends
    with module.exit_json()/fail_json(), so in the normal flow control never
    returns to the caller after instantiation.
    """
    super(VmwareGuestFileManager, self).__init__(module)
    datacenter_name = module.params['datacenter']
    cluster_name = module.params['cluster']
    folder = module.params['folder']

    # Optional datacenter scoping — only resolved when the user supplied it.
    datacenter = None
    if datacenter_name:
        datacenter = find_datacenter_by_name(self.content, datacenter_name)
        if not datacenter:
            module.fail_json(msg="Unable to find %(datacenter)s datacenter" % module.params)

    # Optional cluster scoping, resolved inside the (possibly None) datacenter.
    cluster = None
    if cluster_name:
        cluster = find_cluster_by_name(self.content, cluster_name, datacenter)
        if not cluster:
            module.fail_json(msg="Unable to find %(cluster)s cluster" % module.params)

    # inventory_path lookups need the folder; every other vm_id_type uses the
    # datacenter/cluster scoping instead.
    if module.params['vm_id_type'] == 'inventory_path':
        vm = find_vm_by_id(self.content, vm_id=module.params['vm_id'], vm_id_type="inventory_path", folder=folder)
    else:
        vm = find_vm_by_id(self.content,
                           vm_id=module.params['vm_id'],
                           vm_id_type=module.params['vm_id_type'],
                           datacenter=datacenter,
                           cluster=cluster)

    if not vm:
        module.fail_json(msg='Unable to find virtual machine.')

    self.vm = vm
    try:
        # Run every requested operation in order (directory, copy, fetch);
        # the result reported to Ansible is that of the LAST one executed.
        result = dict(changed=False)
        if module.params['directory']:
            result = self.directory()
        if module.params['copy']:
            result = self.copy()
        if module.params['fetch']:
            result = self.fetch()
        module.exit_json(**result)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=to_native(runtime_fault.msg))
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=to_native(method_fault.msg))
    except Exception as e:
        module.fail_json(msg=to_native(e))
|
||||
|
||||
def directory(self):
    """
    Create, delete, or mktemp a directory inside the guest OS.

    Reads the 'directory' sub-parameters (operation, path, prefix, suffix,
    recurse) and dispatches to the guest operations FileManager.

    Returns: result dict with 'changed', the VM 'uuid', optionally 'dir'
        (mktemp) or 'msg' (already-exists / not-found outcomes). Hard
        failures call module.fail_json() and do not return.
    """
    result = dict(changed=True, uuid=self.vm.summary.config.uuid)
    vm_username = self.module.params['vm_username']
    vm_password = self.module.params['vm_password']

    recurse = bool(self.module.params['directory']['recurse'])
    operation = self.module.params['directory']['operation']
    path = self.module.params['directory']['path']
    prefix = self.module.params['directory']['prefix']
    suffix = self.module.params['directory']['suffix']
    # Guest-OS credentials (not vCenter credentials).
    creds = vim.vm.guest.NamePasswordAuthentication(username=vm_username, password=vm_password)
    file_manager = self.content.guestOperationsManager.fileManager
    if operation in ("create", "mktemp"):
        try:
            if operation == "create":
                file_manager.MakeDirectoryInGuest(vm=self.vm,
                                                  auth=creds,
                                                  directoryPath=path,
                                                  createParentDirectories=recurse)
            else:
                # mktemp: the guest generates the directory name; report it
                # back to the caller via result['dir'].
                newdir = file_manager.CreateTemporaryDirectoryInGuest(vm=self.vm, auth=creds,
                                                                      prefix=prefix, suffix=suffix)
                result['dir'] = newdir
        except vim.fault.FileAlreadyExists as file_already_exists:
            # Idempotent outcome: directory is already there, not a failure.
            result['changed'] = False
            result['msg'] = "Guest directory %s already exist: %s" % (path,
                                                                      to_native(file_already_exists.msg))
        except vim.fault.GuestPermissionDenied as permission_denied:
            self.module.fail_json(msg="Permission denied for path %s : %s" % (path,
                                                                              to_native(permission_denied.msg)),
                                  uuid=self.vm.summary.config.uuid)
        except vim.fault.InvalidGuestLogin as invalid_guest_login:
            self.module.fail_json(msg="Invalid guest login for user %s : %s" % (vm_username,
                                                                                to_native(invalid_guest_login.msg)),
                                  uuid=self.vm.summary.config.uuid)
        # other exceptions
        except Exception as e:
            self.module.fail_json(msg="Failed to Create directory into VM VMware exception : %s" % to_native(e),
                                  uuid=self.vm.summary.config.uuid)

    if operation == "delete":
        try:
            file_manager.DeleteDirectoryInGuest(vm=self.vm, auth=creds, directoryPath=path,
                                                recursive=recurse)
        except vim.fault.FileNotFound as file_not_found:
            # Idempotent outcome: directory is already gone, not a failure.
            result['changed'] = False
            result['msg'] = "Guest directory %s not exists %s" % (path,
                                                                  to_native(file_not_found.msg))
        except vim.fault.FileFault as e:
            self.module.fail_json(msg="FileFault : %s" % e.msg,
                                  uuid=self.vm.summary.config.uuid)
        except vim.fault.GuestPermissionDenied as permission_denied:
            self.module.fail_json(msg="Permission denied for path %s : %s" % (path,
                                                                              to_native(permission_denied.msg)),
                                  uuid=self.vm.summary.config.uuid)
        except vim.fault.InvalidGuestLogin as invalid_guest_login:
            self.module.fail_json(msg="Invalid guest login for user %s : %s" % (vm_username,
                                                                                to_native(invalid_guest_login.msg)),
                                  uuid=self.vm.summary.config.uuid)
        # other exceptions
        except Exception as e:
            self.module.fail_json(msg="Failed to Delete directory into Vm VMware exception : %s" % to_native(e),
                                  uuid=self.vm.summary.config.uuid)

    return result
|
||||
|
||||
def fetch(self):
    """
    Fetch a file from the guest OS to the local (controller) filesystem.

    Reads the 'fetch' sub-parameters (src: guest path, dest: local path),
    asks the guest operations FileManager for a transfer URL, downloads it
    and writes the content to dest.

    Returns: result dict with 'changed' and the VM 'uuid'. Hard failures
        call module.fail_json() and do not return.
    """
    result = dict(changed=True, uuid=self.vm.summary.config.uuid)
    vm_username = self.module.params['vm_username']
    vm_password = self.module.params['vm_password']
    hostname = self.module.params['hostname']
    dest = self.module.params["fetch"]['dest']
    src = self.module.params['fetch']['src']
    # Guest-OS credentials (not vCenter credentials).
    creds = vim.vm.guest.NamePasswordAuthentication(username=vm_username, password=vm_password)
    file_manager = self.content.guestOperationsManager.fileManager

    try:
        fileTransferInfo = file_manager.InitiateFileTransferFromGuest(vm=self.vm, auth=creds,
                                                                      guestFilePath=src)
        url = fileTransferInfo.url
        # The returned URL contains '*' as a placeholder for the server host.
        url = url.replace("*", hostname)
        resp, info = urls.fetch_url(self.module, url, method="GET")
        # fetch_url() returns resp=None on transport/HTTP errors; check the
        # status explicitly (mirroring copy()) instead of crashing with an
        # opaque AttributeError on resp.read().
        if resp is None or info["status"] != 200:
            self.module.fail_json(msg='problem during file transfer, http message:%s' % info,
                                  uuid=self.vm.summary.config.uuid)
        try:
            with open(dest, "wb") as local_file:
                local_file.write(resp.read())
        except Exception as e:
            self.module.fail_json(msg="local file write exception : %s" % to_native(e),
                                  uuid=self.vm.summary.config.uuid)
    except vim.fault.FileNotFound as file_not_found:
        self.module.fail_json(msg="Guest file %s does not exist : %s" % (src, to_native(file_not_found.msg)),
                              uuid=self.vm.summary.config.uuid)
    except vim.fault.FileFault as e:
        self.module.fail_json(msg="FileFault : %s" % to_native(e.msg),
                              uuid=self.vm.summary.config.uuid)
    except vim.fault.GuestPermissionDenied:
        self.module.fail_json(msg="Permission denied to fetch file %s" % src,
                              uuid=self.vm.summary.config.uuid)
    except vim.fault.InvalidGuestLogin:
        self.module.fail_json(msg="Invalid guest login for user %s" % vm_username,
                              uuid=self.vm.summary.config.uuid)
    # other exceptions
    except Exception as e:
        self.module.fail_json(msg="Failed to Fetch file from Vm VMware exception : %s" % to_native(e),
                              uuid=self.vm.summary.config.uuid)

    return result
|
||||
|
||||
def copy(self):
    """Upload a local file into the guest OS via guest operations.

    Validates the local source file, asks the vSphere API for an upload
    URL and PUTs the file content to it.

    Returns:
        dict: result with ``changed`` and the VM's BIOS ``uuid``; when the
        destination already exists (and overwrite is off) ``changed`` is
        False and a ``msg`` is included.
    """
    params = self.module.params
    copy_spec = params["copy"]
    vm_username = params['vm_username']
    vm_password = params['vm_password']
    hostname = params['hostname']
    overwrite = copy_spec["overwrite"]
    dest = copy_spec['dest']
    src = copy_spec['src']
    result = dict(changed=True, uuid=self.vm.summary.config.uuid)

    # Validate the local source before talking to the API.
    b_src = to_bytes(src, errors='surrogate_or_strict')
    if not os.path.exists(b_src):
        self.module.fail_json(msg="Source %s not found" % src)
    if not os.access(b_src, os.R_OK):
        self.module.fail_json(msg="Source %s not readable" % src)
    if os.path.isdir(b_src):
        self.module.fail_json(msg="copy does not support copy of directory: %s" % src)

    with open(b_src, "rb") as local_file:
        data = local_file.read()
    file_size = os.path.getsize(b_src)

    creds = vim.vm.guest.NamePasswordAuthentication(username=vm_username, password=vm_password)
    file_attributes = vim.vm.guest.FileManager.FileAttributes()
    file_manager = self.content.guestOperationsManager.fileManager
    try:
        url = file_manager.InitiateFileTransferToGuest(vm=self.vm, auth=creds, guestFilePath=dest,
                                                       fileAttributes=file_attributes, overwrite=overwrite,
                                                       fileSize=file_size)
        # '*' in the returned URL is a placeholder for the server hostname.
        url = url.replace("*", hostname)
        resp, info = urls.fetch_url(self.module, url, data=data, method="PUT")

        if info["status"] != 200:
            self.module.fail_json(msg='problem during file transfer, http message:%s' % info,
                                  uuid=self.vm.summary.config.uuid)
    except vim.fault.FileAlreadyExists:
        # Not an error: report "no change" instead of failing.
        result['changed'] = False
        result['msg'] = "Guest file %s already exists" % dest
        return result
    except vim.fault.FileFault as e:
        self.module.fail_json(msg="FileFault:%s" % to_native(e.msg),
                              uuid=self.vm.summary.config.uuid)
    except vim.fault.GuestPermissionDenied as permission_denied:
        self.module.fail_json(msg="Permission denied to copy file into "
                                  "destination %s : %s" % (dest, to_native(permission_denied.msg)),
                              uuid=self.vm.summary.config.uuid)
    except vim.fault.InvalidGuestLogin as invalid_guest_login:
        self.module.fail_json(msg="Invalid guest login for user"
                                  " %s : %s" % (vm_username, to_native(invalid_guest_login.msg)))
    # other exceptions
    except Exception as e:
        self.module.fail_json(msg="Failed to Copy file to Vm VMware exception : %s" % to_native(e),
                              uuid=self.vm.summary.config.uuid)
    return result
|
||||
|
||||
|
||||
def main():
    """Entry point: build the argument spec, validate the parameters and
    hand control to VmwareGuestFileManager (which performs the requested
    directory/copy/fetch operation)."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(
        datacenter=dict(type='str'),
        cluster=dict(type='str'),
        folder=dict(type='str'),
        vm_id=dict(type='str', required=True),
        vm_id_type=dict(
            default='vm_name',
            type='str',
            choices=['inventory_path', 'uuid', 'instance_uuid', 'dns_name', 'vm_name']),
        vm_username=dict(type='str', required=True),
        vm_password=dict(type='str', no_log=True, required=True),
        directory=dict(
            type='dict',
            default=None,
            options=dict(
                operation=dict(required=True, type='str', choices=['create', 'delete', 'mktemp']),
                path=dict(required=False, type='str'),
                prefix=dict(required=False, type='str'),
                suffix=dict(required=False, type='str'),
                recurse=dict(required=False, type='bool', default=False)
            )
        ),
        copy=dict(
            type='dict',
            default=None,
            options=dict(src=dict(required=True, type='str'),
                         dest=dict(required=True, type='str'),
                         overwrite=dict(required=False, type='bool', default=False)
                         )
        ),
        fetch=dict(
            type='dict',
            default=None,
            options=dict(
                src=dict(required=True, type='str'),
                dest=dict(required=True, type='str'),
            )
        )
    ))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
                           required_if=[['vm_id_type', 'inventory_path', ['folder']]],
                           mutually_exclusive=[['directory', 'copy', 'fetch']],
                           required_one_of=[['directory', 'copy', 'fetch']],
                           )

    # Cross-field checks the argument spec alone cannot express.
    directory_args = module.params['directory']
    if directory_args:
        operation = directory_args['operation']
        if operation in ('create', 'delete') and not directory_args['path']:
            module.fail_json(msg='directory.path is required when operation is "create" or "delete"')
        if operation == 'mktemp' and not (directory_args['prefix'] and directory_args['suffix']):
            module.fail_json(msg='directory.prefix and directory.suffix are required when operation is "mktemp"')

    if module.params['vm_id_type'] == 'inventory_path' and not module.params['folder']:
        module.fail_json(msg='Folder is required parameter when vm_id_type is inventory_path')

    # The manager performs the operation and exits the module itself.
    vmware_guest_file_manager = VmwareGuestFileManager(module)


if __name__ == '__main__':
    main()
|
@ -1,154 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2017, Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_guest_find
|
||||
short_description: Find the folder path(s) for a virtual machine by name or UUID
|
||||
description:
|
||||
- Find the folder path(s) for a virtual machine by name or UUID
|
||||
version_added: 2.4
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
|
||||
notes:
|
||||
- Tested on vSphere 6.5
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the VM to work with.
|
||||
- This is required if C(uuid) parameter is not supplied.
|
||||
type: str
|
||||
uuid:
|
||||
description:
|
||||
- UUID of the instance to manage if known, this is VMware's BIOS UUID by default.
|
||||
- This is required if C(name) parameter is not supplied.
|
||||
type: str
|
||||
use_instance_uuid:
|
||||
description:
|
||||
- Whether to use the VMware instance UUID rather than the BIOS UUID.
|
||||
default: no
|
||||
type: bool
|
||||
version_added: '2.8'
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Find Guest's Folder using name
|
||||
vmware_guest_find:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
name: testvm
|
||||
delegate_to: localhost
|
||||
register: vm_folder
|
||||
|
||||
- name: Find Guest's Folder using UUID
|
||||
vmware_guest_find:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
uuid: 38c4c89c-b3d7-4ae6-ae4e-43c5118eae49
|
||||
delegate_to: localhost
|
||||
register: vm_folder
|
||||
'''
|
||||
|
||||
RETURN = r"""
|
||||
folders:
|
||||
description: List of folders for user specified virtual machine
|
||||
returned: on success
|
||||
type: list
|
||||
sample: [
|
||||
'/DC0/vm',
|
||||
]
|
||||
"""
|
||||
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, find_vm_by_id
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
class PyVmomiHelper(PyVmomi):
    """Helper that resolves the inventory folder path(s) of a virtual machine."""

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)
        # Lookup keys supplied by the user; exactly one of name/uuid is set
        # (enforced by the AnsibleModule argument constraints in main()).
        self.name = self.params['name']
        self.uuid = self.params['uuid']
        self.use_instance_uuid = self.params['use_instance_uuid']

    def getvm_folder_paths(self):
        """Return a list of folder paths for the VM(s) matching name/uuid."""
        results = []
        matched_vms = []

        if self.uuid:
            id_type = "instance_uuid" if self.use_instance_uuid else "uuid"
            vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type=id_type)
            if vm_obj is None:
                self.module.fail_json(msg="Failed to find the virtual machine with UUID : %s" % self.uuid)
            matched_vms = [vm_obj]
        elif self.name:
            # Scan every VirtualMachine object and keep exact name matches;
            # several VMs may share the same name across folders.
            objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
            matched_vms = [candidate.obj for candidate in objects if candidate.obj.name == self.name]

        for vm in matched_vms:
            results.append(self.get_vm_path(self.content, vm))

        return results
|
||||
|
||||
|
||||
def main():
    """Entry point for the vmware_guest_find module: look up a VM by name
    or UUID and report the folder path(s) that contain it."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=[['name', 'uuid']],
                           mutually_exclusive=[['name', 'uuid']],
                           )

    pyv = PyVmomiHelper(module)
    # Guard the enumeration itself. Previously the try/except wrapped only
    # module.exit_json(), which raises SystemExit (not an Exception
    # subclass), so that handler was dead code and a failure inside
    # getvm_folder_paths() escaped as a raw traceback.
    try:
        folders = pyv.getvm_folder_paths()
    except Exception as exc:
        module.fail_json(msg="Folder enumeration failed with exception %s" % to_native(exc))

    if folders:
        module.exit_json(folders=folders)
    else:
        module.fail_json(msg="Unable to find folders for virtual machine %s" % (module.params.get('name') or
                                                                                module.params.get('uuid')))


if __name__ == '__main__':
    main()
|
@ -1,316 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# This module is also sponsored by E.T.A.I. (www.etai.fr)
|
||||
# Copyright (C) 2018 James E. King III (@jeking3) <jking@apache.org>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_guest_info
|
||||
short_description: Gather info about a single VM
|
||||
description:
|
||||
- Gather information about a single VM on a VMware ESX cluster.
|
||||
- This module was called C(vmware_guest_facts) before Ansible 2.9. The usage did not change.
|
||||
version_added: 2.3
|
||||
author:
|
||||
- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
|
||||
notes:
|
||||
- Tested on vSphere 5.5, 6.7
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the VM to work with
|
||||
- This is required if C(uuid) or C(moid) is not supplied.
|
||||
type: str
|
||||
name_match:
|
||||
description:
|
||||
- If multiple VMs matching the name, use the first or last found
|
||||
default: 'first'
|
||||
choices: ['first', 'last']
|
||||
type: str
|
||||
uuid:
|
||||
description:
|
||||
- UUID of the instance to manage if known, this is VMware's unique identifier.
|
||||
- This is required if C(name) or C(moid) is not supplied.
|
||||
type: str
|
||||
use_instance_uuid:
|
||||
description:
|
||||
- Whether to use the VMware instance UUID rather than the BIOS UUID.
|
||||
default: no
|
||||
type: bool
|
||||
version_added: '2.8'
|
||||
moid:
|
||||
description:
|
||||
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
|
||||
- This is required if C(name) or C(uuid) is not supplied.
|
||||
version_added: '2.9'
|
||||
type: str
|
||||
folder:
|
||||
description:
|
||||
- Destination folder, absolute or relative path to find an existing guest.
|
||||
- This is required if name is supplied.
|
||||
- The folder should include the datacenter. ESX's datacenter is ha-datacenter
|
||||
- 'Examples:'
|
||||
- ' folder: /ha-datacenter/vm'
|
||||
- ' folder: ha-datacenter/vm'
|
||||
- ' folder: /datacenter1/vm'
|
||||
- ' folder: datacenter1/vm'
|
||||
- ' folder: /datacenter1/vm/folder1'
|
||||
- ' folder: datacenter1/vm/folder1'
|
||||
- ' folder: /folder1/datacenter1/vm'
|
||||
- ' folder: folder1/datacenter1/vm'
|
||||
- ' folder: /folder1/datacenter1/vm/folder2'
|
||||
type: str
|
||||
datacenter:
|
||||
description:
|
||||
- Destination datacenter for the deploy operation
|
||||
required: True
|
||||
type: str
|
||||
tags:
|
||||
description:
|
||||
- Whether to show tags or not.
|
||||
- If set C(True), shows tag information.
|
||||
- If set C(False), hides tags information.
|
||||
- vSphere Automation SDK and vCloud Suite SDK is required.
|
||||
default: 'no'
|
||||
type: bool
|
||||
version_added: '2.8'
|
||||
schema:
|
||||
description:
|
||||
- Specify the output schema desired.
|
||||
- The 'summary' output schema is the legacy output from the module
|
||||
- The 'vsphere' output schema is the vSphere API class definition
|
||||
which requires pyvmomi>6.7.1
|
||||
choices: ['summary', 'vsphere']
|
||||
default: 'summary'
|
||||
type: str
|
||||
version_added: '2.8'
|
||||
properties:
|
||||
description:
|
||||
- Specify the properties to retrieve.
|
||||
- If not specified, all properties are retrieved (deeply).
|
||||
- Results are returned in a structure identical to the vsphere API.
|
||||
- 'Example:'
|
||||
- ' properties: ['
|
||||
- ' "config.hardware.memoryMB",'
|
||||
- ' "config.hardware.numCPU",'
|
||||
- ' "guest.disk",'
|
||||
- ' "overallStatus"'
|
||||
- ' ]'
|
||||
- Only valid when C(schema) is C(vsphere).
|
||||
type: list
|
||||
required: False
|
||||
version_added: '2.8'
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather info from standalone ESXi server having datacenter as 'ha-datacenter'
|
||||
vmware_guest_info:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: ha-datacenter
|
||||
validate_certs: no
|
||||
uuid: 421e4592-c069-924d-ce20-7e7533fab926
|
||||
delegate_to: localhost
|
||||
register: info
|
||||
|
||||
- name: Gather some info from a guest using the vSphere API output schema
|
||||
vmware_guest_info:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
name: "{{ vm_name }}"
|
||||
schema: "vsphere"
|
||||
properties: ["config.hardware.memoryMB", "guest.disk", "overallStatus"]
|
||||
delegate_to: localhost
|
||||
register: info
|
||||
|
||||
- name: Gather some information about a guest using MoID
|
||||
vmware_guest_info:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
moid: vm-42
|
||||
schema: "vsphere"
|
||||
properties: ["config.hardware.memoryMB", "guest.disk", "overallStatus"]
|
||||
delegate_to: localhost
|
||||
register: vm_moid_info
|
||||
|
||||
- name: Gather Managed object ID (moid) from a guest using the vSphere API output schema for REST Calls
|
||||
vmware_guest_info:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
name: "{{ vm_name }}"
|
||||
schema: "vsphere"
|
||||
properties:
|
||||
- _moId
|
||||
delegate_to: localhost
|
||||
register: moid_info
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
instance:
|
||||
description: metadata about the virtual machine
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"annotation": "",
|
||||
"current_snapshot": null,
|
||||
"customvalues": {},
|
||||
"guest_consolidation_needed": false,
|
||||
"guest_question": null,
|
||||
"guest_tools_status": "guestToolsNotRunning",
|
||||
"guest_tools_version": "10247",
|
||||
"hw_cores_per_socket": 1,
|
||||
"hw_datastores": [
|
||||
"ds_226_3"
|
||||
],
|
||||
"hw_esxi_host": "10.76.33.226",
|
||||
"hw_eth0": {
|
||||
"addresstype": "assigned",
|
||||
"ipaddresses": null,
|
||||
"label": "Network adapter 1",
|
||||
"macaddress": "00:50:56:87:a5:9a",
|
||||
"macaddress_dash": "00-50-56-87-a5-9a",
|
||||
"portgroup_key": null,
|
||||
"portgroup_portkey": null,
|
||||
"summary": "VM Network"
|
||||
},
|
||||
"hw_files": [
|
||||
"[ds_226_3] ubuntu_t/ubuntu_t.vmx",
|
||||
"[ds_226_3] ubuntu_t/ubuntu_t.nvram",
|
||||
"[ds_226_3] ubuntu_t/ubuntu_t.vmsd",
|
||||
"[ds_226_3] ubuntu_t/vmware.log",
|
||||
"[ds_226_3] u0001/u0001.vmdk"
|
||||
],
|
||||
"hw_folder": "/DC0/vm/Discovered virtual machine",
|
||||
"hw_guest_full_name": null,
|
||||
"hw_guest_ha_state": null,
|
||||
"hw_guest_id": null,
|
||||
"hw_interfaces": [
|
||||
"eth0"
|
||||
],
|
||||
"hw_is_template": false,
|
||||
"hw_memtotal_mb": 1024,
|
||||
"hw_name": "ubuntu_t",
|
||||
"hw_power_status": "poweredOff",
|
||||
"hw_processor_count": 1,
|
||||
"hw_product_uuid": "4207072c-edd8-3bd5-64dc-903fd3a0db04",
|
||||
"hw_version": "vmx-13",
|
||||
"instance_uuid": "5007769d-add3-1e12-f1fe-225ae2a07caf",
|
||||
"ipv4": null,
|
||||
"ipv6": null,
|
||||
"module_hw": true,
|
||||
"snapshots": [],
|
||||
"tags": [
|
||||
"backup"
|
||||
],
|
||||
"vnc": {},
|
||||
"moid": "vm-42",
|
||||
"vimref": "vim.VirtualMachine:vm-42"
|
||||
}
|
||||
"""
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_text
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
|
||||
from ansible.module_utils.vmware_rest_client import VmwareRestClient
|
||||
try:
|
||||
from com.vmware.vapi.std_client import DynamicID
|
||||
HAS_VSPHERE = True
|
||||
except ImportError:
|
||||
HAS_VSPHERE = False
|
||||
|
||||
|
||||
class VmwareTag(VmwareRestClient):
    """Thin REST-client wrapper exposing the vSphere tagging services
    needed to read the tags attached to a VM."""

    def __init__(self, module):
        super(VmwareTag, self).__init__(module)
        # Handles to the tagging endpoints used by main() via get_vm_tags().
        self.tag_association_svc = self.api_client.tagging.TagAssociation
        self.tag_service = self.api_client.tagging.Tag
|
||||
|
||||
|
||||
def main():
    """Entry point for vmware_guest_info (formerly vmware_guest_facts):
    gather information about a single VM and exit with it."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        name_match=dict(type='str', choices=['first', 'last'], default='first'),
        uuid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        moid=dict(type='str'),
        folder=dict(type='str'),
        datacenter=dict(type='str', required=True),
        tags=dict(type='bool', default=False),
        schema=dict(type='str', choices=['summary', 'vsphere'], default='summary'),
        properties=dict(type='list')
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=[['name', 'uuid', 'moid']],
                           supports_check_mode=True)
    if module._name == 'vmware_guest_facts':
        module.deprecate("The 'vmware_guest_facts' module has been renamed to 'vmware_guest_info'", version='2.13')

    folder_param = module.params.get('folder')
    if folder_param:
        # FindByInventoryPath() does not require an absolute path
        # so we should leave the input folder path unmodified
        module.params['folder'] = folder_param.rstrip('/')

    if module.params['schema'] != 'vsphere' and module.params.get('properties'):
        module.fail_json(msg="The option 'properties' is only valid when the schema is 'vsphere'")

    pyv = PyVmomi(module)
    # Check if the VM exists before continuing.
    vm = pyv.get_vm()
    if not vm:
        vm_id = (module.params.get('uuid') or module.params.get('name') or module.params.get('moid'))
        module.fail_json(msg="Unable to gather information for non-existing VM %s" % vm_id)

    try:
        if module.params['schema'] == 'summary':
            instance = pyv.gather_facts(vm)
        else:
            instance = pyv.to_json(vm, module.params['properties'])
        if module.params.get('tags'):
            if not HAS_VSPHERE:
                module.fail_json(msg="Unable to find 'vCloud Suite SDK' Python library which is required."
                                     " Please refer this URL for installation steps"
                                     " - https://code.vmware.com/web/sdk/60/vcloudsuite-python")
            # Tag lookup goes through the REST API, not pyVmomi.
            vm_rest_client = VmwareTag(module)
            instance.update(
                tags=vm_rest_client.get_vm_tags(vm_rest_client.tag_service,
                                                vm_rest_client.tag_association_svc,
                                                vm_mid=vm._moId)
            )
        module.exit_json(instance=instance)
    except Exception as exc:
        module.fail_json(msg="Information gathering failed with exception %s" % to_text(exc))


if __name__ == '__main__':
    main()
|
@ -1,260 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2018, Jose Angel Munoz <josea.munoz () gmail.com>
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_guest_move
|
||||
short_description: Moves virtual machines in vCenter
|
||||
description:
|
||||
- This module can be used to move virtual machines between folders.
|
||||
version_added: '2.7'
|
||||
author:
|
||||
- Jose Angel Munoz (@imjoseangel)
|
||||
notes:
|
||||
- Tested on vSphere 5.5 and vSphere 6.5
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the existing virtual machine to move.
|
||||
- This is required if C(uuid) or C(moid) is not supplied.
|
||||
type: str
|
||||
uuid:
|
||||
description:
|
||||
- UUID of the virtual machine to manage if known, this is VMware's unique identifier.
|
||||
- This is required if C(name) or C(moid) is not supplied.
|
||||
type: str
|
||||
moid:
|
||||
description:
|
||||
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
|
||||
- This is required if C(name) or C(uuid) is not supplied.
|
||||
version_added: '2.9'
|
||||
type: str
|
||||
use_instance_uuid:
|
||||
description:
|
||||
- Whether to use the VMware instance UUID rather than the BIOS UUID.
|
||||
default: no
|
||||
type: bool
|
||||
version_added: '2.8'
|
||||
name_match:
|
||||
description:
|
||||
- If multiple virtual machines matching the name, use the first or last found.
|
||||
default: 'first'
|
||||
choices: [ first, last ]
|
||||
type: str
|
||||
dest_folder:
|
||||
description:
|
||||
- Absolute path to move an existing guest
|
||||
- The dest_folder should include the datacenter. ESX's datacenter is ha-datacenter.
|
||||
- This parameter is case sensitive.
|
||||
- 'Examples:'
|
||||
- ' dest_folder: /ha-datacenter/vm'
|
||||
- ' dest_folder: ha-datacenter/vm'
|
||||
- ' dest_folder: /datacenter1/vm'
|
||||
- ' dest_folder: datacenter1/vm'
|
||||
- ' dest_folder: /datacenter1/vm/folder1'
|
||||
- ' dest_folder: datacenter1/vm/folder1'
|
||||
- ' dest_folder: /folder1/datacenter1/vm'
|
||||
- ' dest_folder: folder1/datacenter1/vm'
|
||||
- ' dest_folder: /folder1/datacenter1/vm/folder2'
|
||||
required: True
|
||||
type: str
|
||||
datacenter:
|
||||
description:
|
||||
- Destination datacenter for the move operation
|
||||
required: True
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Move Virtual Machine
|
||||
vmware_guest_move:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: datacenter
|
||||
validate_certs: no
|
||||
name: testvm-1
|
||||
dest_folder: "/{{ datacenter }}/vm"
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Move Virtual Machine using MoID
|
||||
vmware_guest_move:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: datacenter
|
||||
validate_certs: no
|
||||
moid: vm-42
|
||||
dest_folder: "/{{ datacenter }}/vm"
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Get VM UUID
|
||||
vmware_guest_facts:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
datacenter: "{{ datacenter }}"
|
||||
folder: "/{{datacenter}}/vm"
|
||||
name: "{{ vm_name }}"
|
||||
delegate_to: localhost
|
||||
register: vm_facts
|
||||
|
||||
- name: Get UUID from previous task and pass it to this task
|
||||
vmware_guest_move:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
datacenter: "{{ datacenter }}"
|
||||
uuid: "{{ vm_facts.instance.hw_product_uuid }}"
|
||||
dest_folder: "/DataCenter/vm/path/to/new/folder/where/we/want"
|
||||
delegate_to: localhost
|
||||
register: facts
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
instance:
|
||||
description: metadata about the virtual machine
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"annotation": null,
|
||||
"current_snapshot": null,
|
||||
"customvalues": {},
|
||||
"guest_consolidation_needed": false,
|
||||
"guest_question": null,
|
||||
"guest_tools_status": null,
|
||||
"guest_tools_version": "0",
|
||||
"hw_cores_per_socket": 1,
|
||||
"hw_datastores": [
|
||||
"LocalDS_0"
|
||||
],
|
||||
"hw_esxi_host": "DC0_H0",
|
||||
"hw_eth0": {
|
||||
"addresstype": "generated",
|
||||
"ipaddresses": null,
|
||||
"label": "ethernet-0",
|
||||
"macaddress": "00:0c:29:6b:34:2c",
|
||||
"macaddress_dash": "00-0c-29-6b-34-2c",
|
||||
"summary": "DVSwitch: 43cdd1db-1ef7-4016-9bbe-d96395616199"
|
||||
},
|
||||
"hw_files": [
|
||||
"[LocalDS_0] DC0_H0_VM0/DC0_H0_VM0.vmx"
|
||||
],
|
||||
"hw_folder": "/F0/DC0/vm/F0",
|
||||
"hw_guest_full_name": null,
|
||||
"hw_guest_ha_state": null,
|
||||
"hw_guest_id": "otherGuest",
|
||||
"hw_interfaces": [
|
||||
"eth0"
|
||||
],
|
||||
"hw_is_template": false,
|
||||
"hw_memtotal_mb": 32,
|
||||
"hw_name": "DC0_H0_VM0",
|
||||
"hw_power_status": "poweredOn",
|
||||
"hw_processor_count": 1,
|
||||
"hw_product_uuid": "581c2808-64fb-45ee-871f-6a745525cb29",
|
||||
"instance_uuid": "8bcb0b6e-3a7d-4513-bf6a-051d15344352",
|
||||
"ipv4": null,
|
||||
"ipv6": null,
|
||||
"module_hw": true,
|
||||
"snapshots": []
|
||||
}
|
||||
"""
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task
|
||||
|
||||
|
||||
class PyVmomiHelper(PyVmomi):
    """Thin PyVmomi wrapper; all VM lookup logic lives in the base class."""

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)
|
||||
|
||||
|
||||
def main():
    """Entry point for vmware_guest_move: relocate a VM into another
    inventory folder within the same vCenter."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        name_match=dict(
            type='str', choices=['first', 'last'], default='first'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        dest_folder=dict(type='str', required=True),
        datacenter=dict(type='str', required=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['name', 'uuid', 'moid']
        ],
        mutually_exclusive=[
            ['name', 'uuid', 'moid']
        ],
        supports_check_mode=True
    )

    # FindByInventoryPath() does not require an absolute path
    # so we should leave the input folder path unmodified
    module.params['dest_folder'] = module.params['dest_folder'].rstrip('/')
    pyv = PyVmomiHelper(module)
    search_index = pyv.content.searchIndex

    # Check if the VM exists before continuing.
    vm = pyv.get_vm()
    if not vm:
        if module.check_mode:
            module.exit_json(changed=False)
        vm_id = (module.params.get('uuid') or module.params.get('name') or module.params.get('moid'))
        module.fail_json(msg="Unable to find VM %s to move to %s" % (vm_id, module.params.get('dest_folder')))

    try:
        vm_path = pyv.get_vm_path(pyv.content, vm).lstrip('/')
        # Prefer the user-supplied name; otherwise use the inventory name.
        vm_name = module.params['name'] or vm.name
        vm_full = vm_path + '/' + vm_name

        target_folder = search_index.FindByInventoryPath(module.params['dest_folder'])
        if target_folder is None:
            module.fail_json(msg="Folder name and/or path does not exist")
        vm_to_move = search_index.FindByInventoryPath(vm_full)

        if module.check_mode:
            module.exit_json(changed=True, instance=pyv.gather_facts(vm))

        if vm_path == module.params['dest_folder'].lstrip('/'):
            # Already in the destination folder: nothing to do.
            module.exit_json(instance=pyv.gather_facts(vm))

        move_task = target_folder.MoveInto([vm_to_move])
        changed, err = wait_for_task(move_task)
        if changed:
            module.exit_json(
                changed=True, instance=pyv.gather_facts(vm))
        # NOTE(review): when wait_for_task() reports no change the module
        # falls through without exiting (same as the original code) —
        # confirm wait_for_task() cannot return falsy without raising.
    except Exception as exc:
        module.fail_json(msg="Failed to move VM with exception %s" %
                             to_native(exc))


if __name__ == '__main__':
    main()
|
@ -1,594 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2019, Ansible Project
|
||||
# Copyright: (c) 2019, Diane Wang <dianew@vmware.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_guest_network
|
||||
short_description: Manage network adapters of specified virtual machine in given vCenter infrastructure
|
||||
description:
|
||||
- This module is used to add, reconfigure, remove network adapter of given virtual machine.
|
||||
- All parameters and VMware object names are case sensitive.
|
||||
version_added: '2.9'
|
||||
author:
|
||||
- Diane Wang (@Tomorrow9) <dianew@vmware.com>
|
||||
notes:
|
||||
- Tested on vSphere 6.0, 6.5 and 6.7
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the virtual machine.
|
||||
- This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
|
||||
type: str
|
||||
uuid:
|
||||
description:
|
||||
- UUID of the instance to gather info if known, this is VMware's unique identifier.
|
||||
- This is a required parameter, if parameter C(name) or C(moid) is not supplied.
|
||||
type: str
|
||||
use_instance_uuid:
|
||||
description:
|
||||
- Whether to use the VMware instance UUID rather than the BIOS UUID.
|
||||
default: False
|
||||
type: bool
|
||||
version_added: '2.10'
|
||||
moid:
|
||||
description:
|
||||
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
|
||||
- This is required if C(name) or C(uuid) is not supplied.
|
||||
type: str
|
||||
folder:
|
||||
description:
|
||||
- Destination folder, absolute or relative path to find an existing guest.
|
||||
- This is a required parameter, only if multiple VMs are found with same name.
|
||||
- The folder should include the datacenter. ESXi server's datacenter is ha-datacenter.
|
||||
- 'Examples:'
|
||||
- ' folder: /ha-datacenter/vm'
|
||||
- ' folder: ha-datacenter/vm'
|
||||
- ' folder: /datacenter1/vm'
|
||||
- ' folder: datacenter1/vm'
|
||||
- ' folder: /datacenter1/vm/folder1'
|
||||
- ' folder: datacenter1/vm/folder1'
|
||||
- ' folder: /folder1/datacenter1/vm'
|
||||
- ' folder: folder1/datacenter1/vm'
|
||||
- ' folder: /folder1/datacenter1/vm/folder2'
|
||||
type: str
|
||||
cluster:
|
||||
description:
|
||||
- The name of cluster where the virtual machine will run.
|
||||
- This is a required parameter, if C(esxi_hostname) is not set.
|
||||
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
|
||||
type: str
|
||||
esxi_hostname:
|
||||
description:
|
||||
- The ESXi hostname where the virtual machine will run.
|
||||
- This is a required parameter, if C(cluster) is not set.
|
||||
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
|
||||
type: str
|
||||
datacenter:
|
||||
default: ha-datacenter
|
||||
description:
|
||||
- The datacenter name to which virtual machine belongs to.
|
||||
type: str
|
||||
gather_network_info:
|
||||
description:
|
||||
- If set to C(True), return settings of all network adapters, other parameters are ignored.
|
||||
- If set to C(False), will add, reconfigure or remove network adapters according to the parameters in C(networks).
|
||||
type: bool
|
||||
default: False
|
||||
aliases: [ gather_network_facts ]
|
||||
networks:
|
||||
type: list
|
||||
description:
|
||||
- A list of network adapters.
|
||||
- C(mac) or C(label) or C(device_type) is required to reconfigure or remove an existing network adapter.
|
||||
- 'If there are multiple network adapters with the same C(device_type), you should set C(label) or C(mac) to match
|
||||
one of them, or will apply changes on all network adapters with the C(device_type) specified.'
|
||||
- 'C(mac), C(label), C(device_type) is the order of precedence from greatest to least if all set.'
|
||||
- 'Valid attributes are:'
|
||||
- ' - C(mac) (string): MAC address of the existing network adapter to be reconfigured or removed.'
|
||||
- ' - C(label) (string): Label of the existing network adapter to be reconfigured or removed, e.g., "Network adapter 1".'
|
||||
- ' - C(device_type) (string): Valid virtual network device types are:
|
||||
C(e1000), C(e1000e), C(pcnet32), C(vmxnet2), C(vmxnet3) (default), C(sriov).
|
||||
Used to add new network adapter, reconfigure or remove the existing network adapter with this type.
|
||||
If C(mac) and C(label) are not specified, or no network adapter is found by C(mac) or C(label), this parameter is used.'
|
||||
- ' - C(name) (string): Name of the portgroup or distributed virtual portgroup for this interface.
|
||||
When specifying distributed virtual portgroup make sure given C(esxi_hostname) or C(cluster) is associated with it.'
|
||||
- ' - C(vlan) (integer): VLAN number for this interface.'
|
||||
- ' - C(dvswitch_name) (string): Name of the distributed vSwitch.
|
||||
This value is required if multiple distributed portgroups exists with the same name.'
|
||||
- ' - C(state) (string): State of the network adapter.'
|
||||
- ' If set to C(present), then will do reconfiguration for the specified network adapter.'
|
||||
- ' If set to C(new), then will add the specified network adapter.'
|
||||
- ' If set to C(absent), then will remove this network adapter.'
|
||||
- ' - C(manual_mac) (string): Manual specified MAC address of the network adapter when creating, or reconfiguring.
|
||||
If not specified when creating new network adapter, mac address will be generated automatically.
|
||||
When reconfigure MAC address, VM should be in powered off state.'
|
||||
- ' - C(connected) (bool): Indicates that virtual network adapter connects to the associated virtual machine.'
|
||||
- ' - C(start_connected) (bool): Indicates that virtual network adapter starts with associated virtual machine powers on.'
|
||||
- ' - C(directpath_io) (bool): If set, Universal Pass-Through (UPT or DirectPath I/O) will be enabled on the network adapter.
|
||||
UPT is only compatible for Vmxnet3 adapter.'
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Change network adapter settings of virtual machine
|
||||
vmware_guest_network:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
validate_certs: no
|
||||
name: test-vm
|
||||
gather_network_info: false
|
||||
networks:
|
||||
- name: "VM Network"
|
||||
state: new
|
||||
manual_mac: "00:50:56:11:22:33"
|
||||
- state: present
|
||||
device_type: e1000e
|
||||
manual_mac: "00:50:56:44:55:66"
|
||||
- state: present
|
||||
label: "Network adapter 3"
|
||||
connected: false
|
||||
- state: absent
|
||||
mac: "00:50:56:44:55:77"
|
||||
delegate_to: localhost
|
||||
register: network_info
|
||||
|
||||
- name: Change network adapter settings of virtual machine using MoID
|
||||
vmware_guest_network:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
validate_certs: no
|
||||
moid: vm-42
|
||||
gather_network_info: false
|
||||
networks:
|
||||
- state: absent
|
||||
mac: "00:50:56:44:55:77"
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Change network adapter settings of virtual machine using instance UUID
|
||||
vmware_guest_network:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
validate_certs: no
|
||||
uuid: 5003b4f5-c705-2f37-ccf6-dfc0b40afeb7
|
||||
use_instance_uuid: True
|
||||
gather_network_info: false
|
||||
networks:
|
||||
- state: absent
|
||||
mac: "00:50:56:44:55:77"
|
||||
delegate_to: localhost
|
||||
|
||||
- name: Enable DirectPath I/O on a Vmxnet3 adapter
|
||||
vmware_guest_network:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
validate_certs: no
|
||||
name: test-vm
|
||||
gather_network_info: false
|
||||
networks:
|
||||
- state: present
|
||||
mac: "aa:50:56:58:59:61"
|
||||
directpath_io: True
|
||||
delegate_to: localhost
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
network_data:
|
||||
description: metadata about the virtual machine's network adapter after managing them
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"0": {
|
||||
"label": "Network Adapter 1",
|
||||
"name": "VM Network",
|
||||
"device_type": "E1000E",
|
||||
"directpath_io": "N/A",
|
||||
"mac_addr": "00:50:56:89:dc:05",
|
||||
"unit_number": 7,
|
||||
"wake_onlan": false,
|
||||
"allow_guest_ctl": true,
|
||||
"connected": true,
|
||||
"start_connected": true,
|
||||
},
|
||||
"1": {
|
||||
"label": "Network Adapter 2",
|
||||
"name": "VM Network",
|
||||
"device_type": "VMXNET3",
|
||||
"directpath_io": true,
|
||||
"mac_addr": "00:50:56:8d:93:8c",
|
||||
"unit_number": 8,
|
||||
"start_connected": true,
|
||||
"wake_on_lan": true,
|
||||
"connected": true,
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
try:
|
||||
from pyVmomi import vim
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.common.network import is_mac
|
||||
from ansible.module_utils._text import to_native, to_text
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task, get_all_objs, get_parent_datacenter
|
||||
|
||||
|
||||
class PyVmomiHelper(PyVmomi):
    """Helper for adding, reconfiguring and removing a VM's virtual network adapters."""

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)
        # Set to True as soon as any device change is queued in config_spec.
        self.change_detected = False
        self.config_spec = vim.vm.ConfigSpec()
        self.config_spec.deviceChange = []
        # Maps the user-facing 'device_type' value to its pyVmomi device class.
        self.nic_device_type = dict(
            pcnet32=vim.vm.device.VirtualPCNet32,
            vmxnet2=vim.vm.device.VirtualVmxnet2,
            vmxnet3=vim.vm.device.VirtualVmxnet3,
            e1000=vim.vm.device.VirtualE1000,
            e1000e=vim.vm.device.VirtualE1000e,
            sriov=vim.vm.device.VirtualSriovEthernetCard,
        )

    def get_device_type(self, device_type=None):
        """Return a new network adapter device instance of the given type.

        Fails the module when device_type is not one of the supported types.
        """
        if device_type and device_type in list(self.nic_device_type.keys()):
            return self.nic_device_type[device_type]()
        else:
            self.module.fail_json(msg='Invalid network device_type %s' % device_type)

    def get_network_device(self, vm=None, mac=None, device_type=None, device_label=None):
        """Find network adapter(s) on vm by MAC address, device type, or label.

        Returns a list when searching by device_type; otherwise a single
        device, or None when no adapter matches (or vm is None).
        """
        nic_devices = []
        nic_device = None
        if vm is None:
            # Preserve the return shape expected by the caller even without a VM.
            if device_type:
                return nic_devices
            else:
                return nic_device

        for device in vm.config.hardware.device:
            if mac:
                if isinstance(device, vim.vm.device.VirtualEthernetCard):
                    if device.macAddress == mac:
                        nic_device = device
                        break
            elif device_type:
                if isinstance(device, self.nic_device_type[device_type]):
                    nic_devices.append(device)
            elif device_label:
                if isinstance(device, vim.vm.device.VirtualEthernetCard):
                    if device.deviceInfo.label == device_label:
                        nic_device = device
                        break
        if device_type:
            return nic_devices
        else:
            return nic_device

    def get_network_device_by_mac(self, vm=None, mac=None):
        """Return the network adapter with the specified MAC address, or None."""
        return self.get_network_device(vm=vm, mac=mac)

    def get_network_devices_by_type(self, vm=None, device_type=None):
        """Return the list of network adapters of the given device type."""
        return self.get_network_device(vm=vm, device_type=device_type)

    def get_network_device_by_label(self, vm=None, device_label=None):
        """Return the network adapter with the specified label, or None."""
        return self.get_network_device(vm=vm, device_label=device_label)

    def create_network_adapter(self, device_info):
        """Build a VirtualDeviceSpec for a new network adapter described by device_info.

        The backing is chosen from the portgroup found by name: a distributed
        virtual portgroup, an NSX-T opaque network, or a standard vSwitch
        portgroup. Fails the module if directpath_io is requested on a
        non-VMXNET3 adapter.
        """
        nic = vim.vm.device.VirtualDeviceSpec()
        nic.device = self.get_device_type(device_type=device_info.get('device_type', 'vmxnet3'))
        nic.device.deviceInfo = vim.Description()
        network_object = self.find_network_by_name(network_name=device_info['name'])[0]
        if network_object:
            if hasattr(network_object, 'portKeys'):
                # DistributedVirtualPortGroup
                nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
                nic.device.backing.port = vim.dvs.PortConnection()
                nic.device.backing.port.switchUuid = network_object.config.distributedVirtualSwitch.uuid
                nic.device.backing.port.portgroupKey = network_object.key
            elif isinstance(network_object, vim.OpaqueNetwork):
                # NSX-T Logical Switch
                nic.device.backing = vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo()
                network_id = network_object.summary.opaqueNetworkId
                nic.device.backing.opaqueNetworkType = 'nsx.LogicalSwitch'
                nic.device.backing.opaqueNetworkId = network_id
                nic.device.deviceInfo.summary = 'nsx.LogicalSwitch: %s' % network_id
            else:
                # Standard vSwitch
                nic.device.deviceInfo.summary = device_info['name']
                nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
                nic.device.backing.deviceName = device_info['name']
                nic.device.backing.network = network_object
        nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
        nic.device.connectable.startConnected = device_info.get('start_connected', True)
        nic.device.connectable.allowGuestControl = True
        nic.device.connectable.connected = device_info.get('connected', True)
        if 'manual_mac' in device_info:
            nic.device.addressType = 'manual'
            nic.device.macAddress = device_info['manual_mac']
        else:
            nic.device.addressType = 'generated'
        if 'directpath_io' in device_info:
            # DirectPath I/O (UPT) is only settable on VMXNET3 adapters.
            if isinstance(nic.device, vim.vm.device.VirtualVmxnet3):
                nic.device.uptCompatibilityEnabled = device_info['directpath_io']
            else:
                self.module.fail_json(msg='UPT is only compatible for Vmxnet3 adapter.'
                                          + ' Clients can set this property enabled or disabled if ethernet virtual device is Vmxnet3.')

        return nic

    def get_network_info(self, vm_obj):
        """Return a dict, keyed by adapter index, describing each network adapter of vm_obj."""
        network_info = dict()
        if vm_obj is None:
            return network_info

        nic_index = 0
        for nic in vm_obj.config.hardware.device:
            nic_type = None
            # directpath_io is only meaningful for VMXNET3; 'N/A' otherwise.
            directpath_io = 'N/A'
            if isinstance(nic, vim.vm.device.VirtualPCNet32):
                nic_type = 'PCNet32'
            elif isinstance(nic, vim.vm.device.VirtualVmxnet2):
                nic_type = 'VMXNET2'
            elif isinstance(nic, vim.vm.device.VirtualVmxnet3):
                nic_type = 'VMXNET3'
                directpath_io = nic.uptCompatibilityEnabled
            elif isinstance(nic, vim.vm.device.VirtualE1000):
                nic_type = 'E1000'
            elif isinstance(nic, vim.vm.device.VirtualE1000e):
                nic_type = 'E1000E'
            elif isinstance(nic, vim.vm.device.VirtualSriovEthernetCard):
                nic_type = 'SriovEthernetCard'
            if nic_type is not None:
                network_info[nic_index] = dict(
                    device_type=nic_type,
                    label=nic.deviceInfo.label,
                    name=nic.deviceInfo.summary,
                    mac_addr=nic.macAddress,
                    unit_number=nic.unitNumber,
                    wake_onlan=nic.wakeOnLanEnabled,
                    allow_guest_ctl=nic.connectable.allowGuestControl,
                    connected=nic.connectable.connected,
                    start_connected=nic.connectable.startConnected,
                    directpath_io=directpath_io
                )
                nic_index += 1

        return network_info

    def sanitize_network_params(self):
        """Validate self.params['networks'] and return the normalized list.

        Fails the module on any invalid entry: unknown state, missing
        name/VLAN for a new adapter, malformed MAC, unknown device type,
        or a network/VLAN that does not exist in the datacenter.
        """
        network_list = []
        valid_state = ['new', 'present', 'absent']
        if len(self.params['networks']) != 0:
            for network in self.params['networks']:
                if 'state' not in network or network['state'].lower() not in valid_state:
                    self.module.fail_json(msg="Network adapter state not specified or invalid: '%s', valid values: "
                                              "%s" % (network.get('state', ''), valid_state))
                # add new network adapter but no name specified
                if network['state'].lower() == 'new' and 'name' not in network and 'vlan' not in network:
                    self.module.fail_json(msg="Please specify at least network name or VLAN name for adding new network adapter.")
                if network['state'].lower() == 'new' and 'mac' in network:
                    self.module.fail_json(msg="networks.mac is used for vNIC reconfigure, but networks.state is set to 'new'.")
                if network['state'].lower() == 'present' and 'mac' not in network and 'label' not in network and 'device_type' not in network:
                    self.module.fail_json(msg="Should specify 'mac', 'label' or 'device_type' parameter to reconfigure network adapter")
                if 'connected' in network:
                    if not isinstance(network['connected'], bool):
                        self.module.fail_json(msg="networks.connected parameter should be boolean.")
                    if network['state'].lower() == 'new' and not network['connected']:
                        network['start_connected'] = False
                if 'start_connected' in network:
                    if not isinstance(network['start_connected'], bool):
                        self.module.fail_json(msg="networks.start_connected parameter should be boolean.")
                    if network['state'].lower() == 'new' and not network['start_connected']:
                        network['connected'] = False
                # specified network does not exist
                if 'name' in network and not self.network_exists_by_name(network['name']):
                    self.module.fail_json(msg="Network '%(name)s' does not exist." % network)
                elif 'vlan' in network:
                    # Resolve the VLAN to a distributed portgroup name within
                    # the configured datacenter; the for/else fails only when
                    # no portgroup matched.
                    objects = get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup])
                    dvps = [x for x in objects if to_text(get_parent_datacenter(x).name) == to_text(self.params['datacenter'])]
                    for dvp in dvps:
                        if hasattr(dvp.config.defaultPortConfig, 'vlan') and \
                                isinstance(dvp.config.defaultPortConfig.vlan.vlanId, int) and \
                                str(dvp.config.defaultPortConfig.vlan.vlanId) == str(network['vlan']):
                            network['name'] = dvp.config.name
                            break
                        if 'dvswitch_name' in network and \
                                dvp.config.distributedVirtualSwitch.name == network['dvswitch_name'] and \
                                dvp.config.name == network['vlan']:
                            network['name'] = dvp.config.name
                            break
                        if dvp.config.name == network['vlan']:
                            network['name'] = dvp.config.name
                            break
                    else:
                        self.module.fail_json(msg="VLAN '%(vlan)s' does not exist." % network)

                if 'device_type' in network and network['device_type'] not in list(self.nic_device_type.keys()):
                    self.module.fail_json(msg="Device type specified '%s' is invalid. "
                                              "Valid types %s " % (network['device_type'], list(self.nic_device_type.keys())))

                # BUGFIX: the guarding condition is an OR, so only one of
                # 'mac'/'manual_mac' may be present; direct indexing here used
                # to raise KeyError while building the error message. Use
                # .get() so the module reports the invalid MAC instead.
                if ('mac' in network and not is_mac(network['mac'])) or \
                        ('manual_mac' in network and not is_mac(network['manual_mac'])):
                    self.module.fail_json(msg="Device MAC address '%s' or manual set MAC address %s is invalid. "
                                              "Please provide correct MAC address." % (network.get('mac'), network.get('manual_mac')))

                network_list.append(network)

        return network_list

    def get_network_config_spec(self, vm_obj, network_list):
        """Queue device-change specs on self.config_spec for every entry in network_list.

        'new' entries add an adapter; 'present' entries reconfigure an
        existing adapter found by mac/label/device_type; 'absent' entries
        remove it. Sets self.change_detected when any change is queued.
        """
        # create network adapter config spec for adding, editing, removing
        for network in network_list:
            # add new network adapter
            if network['state'].lower() == 'new':
                nic_spec = self.create_network_adapter(network)
                nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
                self.change_detected = True
                self.config_spec.deviceChange.append(nic_spec)
            # reconfigure network adapter or remove network adapter
            else:
                nic_devices = []
                # Lookup precedence: mac, then label, then device_type.
                if 'mac' in network:
                    nic = self.get_network_device_by_mac(vm_obj, mac=network['mac'])
                    if nic is not None:
                        nic_devices.append(nic)
                if 'label' in network and len(nic_devices) == 0:
                    nic = self.get_network_device_by_label(vm_obj, device_label=network['label'])
                    if nic is not None:
                        nic_devices.append(nic)
                if 'device_type' in network and len(nic_devices) == 0:
                    nic_devices = self.get_network_devices_by_type(vm_obj, device_type=network['device_type'])
                if len(nic_devices) != 0:
                    for nic_device in nic_devices:
                        nic_spec = vim.vm.device.VirtualDeviceSpec()
                        if network['state'].lower() == 'present':
                            nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
                            nic_spec.device = nic_device
                            if 'start_connected' in network and nic_device.connectable.startConnected != network['start_connected']:
                                nic_device.connectable.startConnected = network['start_connected']
                                self.change_detected = True
                            if 'connected' in network and nic_device.connectable.connected != network['connected']:
                                nic_device.connectable.connected = network['connected']
                                self.change_detected = True
                            if 'name' in network:
                                network_object = self.find_network_by_name(network_name=network['name'])[0]
                                if network_object and hasattr(network_object, 'portKeys') and hasattr(nic_spec.device.backing, 'port'):
                                    if network_object.config.distributedVirtualSwitch.uuid != nic_spec.device.backing.port.switchUuid:
                                        # DistributedVirtualPortGroup
                                        nic_spec.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
                                        nic_spec.device.backing.port = vim.dvs.PortConnection()
                                        nic_spec.device.backing.port.switchUuid = network_object.config.distributedVirtualSwitch.uuid
                                        nic_spec.device.backing.port.portgroupKey = network_object.key
                                        self.change_detected = True
                                elif network_object and isinstance(network_object, vim.OpaqueNetwork) and hasattr(nic_spec.device.backing, 'opaqueNetworkId'):
                                    if nic_spec.device.backing.opaqueNetworkId != network_object.summary.opaqueNetworkId:
                                        # NSX-T Logical Switch
                                        nic_spec.device.backing = vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo()
                                        network_id = network_object.summary.opaqueNetworkId
                                        nic_spec.device.backing.opaqueNetworkType = 'nsx.LogicalSwitch'
                                        nic_spec.device.backing.opaqueNetworkId = network_id
                                        nic_spec.device.deviceInfo.summary = 'nsx.LogicalSwitch: %s' % network_id
                                        self.change_detected = True
                                elif nic_device.deviceInfo.summary != network['name']:
                                    # Standard vSwitch
                                    nic_spec.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
                                    nic_spec.device.backing.deviceName = network['name']
                                    nic_spec.device.backing.network = network_object
                                    self.change_detected = True
                            if 'manual_mac' in network and nic_device.macAddress != network['manual_mac']:
                                # Changing the MAC requires the VM to be powered off.
                                if vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:
                                    self.module.fail_json(msg='Expected power state is poweredOff to reconfigure MAC address')
                                nic_device.addressType = 'manual'
                                nic_device.macAddress = network['manual_mac']
                                self.change_detected = True
                            if 'directpath_io' in network:
                                if isinstance(nic_device, vim.vm.device.VirtualVmxnet3):
                                    if nic_device.uptCompatibilityEnabled != network['directpath_io']:
                                        nic_device.uptCompatibilityEnabled = network['directpath_io']
                                        self.change_detected = True
                                else:
                                    self.module.fail_json(msg='UPT is only compatible for Vmxnet3 adapter.'
                                                              + ' Clients can set this property enabled or disabled if ethernet virtual device is Vmxnet3.')
                            if self.change_detected:
                                self.config_spec.deviceChange.append(nic_spec)
                        elif network['state'].lower() == 'absent':
                            nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
                            nic_spec.device = nic_device
                            self.change_detected = True
                            self.config_spec.deviceChange.append(nic_spec)
                else:
                    self.module.fail_json(msg='Unable to find the specified network adapter: %s' % network)

    def reconfigure_vm_network(self, vm_obj):
        """Apply the requested network changes to vm_obj and return a result dict.

        When gather_network_info is set (or no networks were given), only
        collects adapter info; otherwise builds the config spec, runs the
        reconfigure task and returns its outcome.
        """
        network_list = self.sanitize_network_params()
        # gather network adapter info only
        if (self.params['gather_network_info'] is not None and self.params['gather_network_info']) or len(network_list) == 0:
            results = {'changed': False, 'failed': False, 'network_data': self.get_network_info(vm_obj)}
        # do reconfigure then gather info
        else:
            self.get_network_config_spec(vm_obj, network_list)
            try:
                task = vm_obj.ReconfigVM_Task(spec=self.config_spec)
                wait_for_task(task)
            except vim.fault.InvalidDeviceSpec as e:
                self.module.fail_json(msg="Failed to configure network adapter on given virtual machine due to invalid"
                                          " device spec : %s" % to_native(e.msg),
                                      details="Please check ESXi server logs for more details.")
            except vim.fault.RestrictedVersion as e:
                self.module.fail_json(msg="Failed to reconfigure virtual machine due to"
                                          " product versioning restrictions: %s" % to_native(e.msg))
            if task.info.state == 'error':
                results = {'changed': self.change_detected, 'failed': True, 'msg': task.info.error.msg}
            else:
                network_info = self.get_network_info(vm_obj)
                results = {'changed': self.change_detected, 'failed': False, 'network_data': network_info}

        return results
|
||||
|
||||
|
||||
def main():
    """Entry point: locate the target VM and apply its network adapter changes."""
    spec = vmware_argument_spec()
    extra_options = dict(
        name=dict(type='str'),
        uuid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        moid=dict(type='str'),
        folder=dict(type='str'),
        datacenter=dict(type='str', default='ha-datacenter'),
        esxi_hostname=dict(type='str'),
        cluster=dict(type='str'),
        gather_network_info=dict(type='bool', default=False, aliases=['gather_network_facts']),
        networks=dict(type='list', default=[])
    )
    spec.update(extra_options)

    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[
            ['name', 'uuid', 'moid']
        ]
    )

    helper = PyVmomiHelper(module)
    vm = helper.get_vm()

    # Guard clause: bail out early when the requested VM cannot be located.
    if not vm:
        identifier = module.params.get('uuid') or module.params.get('name') or module.params.get('moid')
        module.fail_json(msg='Unable to find the specified virtual machine using %s' % identifier)

    outcome = helper.reconfigure_vm_network(vm)
    reporter = module.fail_json if outcome['failed'] else module.exit_json
    reporter(**outcome)


if __name__ == '__main__':
    main()
|
@ -1,284 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: vmware_guest_powerstate
|
||||
short_description: Manages power states of virtual machines in vCenter
|
||||
description:
|
||||
- Power on / Power off / Restart a virtual machine.
|
||||
version_added: '2.5'
|
||||
author:
|
||||
- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
|
||||
requirements:
|
||||
- python >= 2.6
|
||||
- PyVmomi
|
||||
options:
|
||||
state:
|
||||
description:
|
||||
- Set the state of the virtual machine.
|
||||
choices: [ powered-off, powered-on, reboot-guest, restarted, shutdown-guest, suspended, present]
|
||||
default: present
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Name of the virtual machine to work with.
|
||||
- Virtual machine names in vCenter are not necessarily unique, which may be problematic, see C(name_match).
|
||||
type: str
|
||||
name_match:
|
||||
description:
|
||||
- If multiple virtual machines matching the name, use the first or last found.
|
||||
default: first
|
||||
choices: [ first, last ]
|
||||
type: str
|
||||
uuid:
|
||||
description:
|
||||
- UUID of the instance to manage if known, this is VMware's unique identifier.
|
||||
- This is required if C(name) or C(moid) is not supplied.
|
||||
type: str
|
||||
moid:
|
||||
description:
|
||||
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
|
||||
- This is required if C(name) or C(uuid) is not supplied.
|
||||
version_added: '2.9'
|
||||
type: str
|
||||
use_instance_uuid:
|
||||
description:
|
||||
- Whether to use the VMware instance UUID rather than the BIOS UUID.
|
||||
default: no
|
||||
type: bool
|
||||
version_added: '2.8'
|
||||
folder:
|
||||
description:
|
||||
- Destination folder, absolute or relative path to find an existing guest.
|
||||
- The folder should include the datacenter. ESX's datacenter is ha-datacenter
|
||||
- 'Examples:'
|
||||
- ' folder: /ha-datacenter/vm'
|
||||
- ' folder: ha-datacenter/vm'
|
||||
- ' folder: /datacenter1/vm'
|
||||
- ' folder: datacenter1/vm'
|
||||
- ' folder: /datacenter1/vm/folder1'
|
||||
- ' folder: datacenter1/vm/folder1'
|
||||
- ' folder: /folder1/datacenter1/vm'
|
||||
- ' folder: folder1/datacenter1/vm'
|
||||
- ' folder: /folder1/datacenter1/vm/folder2'
|
||||
type: str
|
||||
scheduled_at:
|
||||
description:
|
||||
- Date and time in string format at which specified task needs to be performed.
|
||||
- "The required format for date and time - 'dd/mm/yyyy hh:mm'."
|
||||
- Scheduling task requires vCenter server. A standalone ESXi server does not support this option.
|
||||
type: str
|
||||
schedule_task_name:
|
||||
description:
|
||||
- Name of schedule task.
|
||||
- Valid only if C(scheduled_at) is specified.
|
||||
type: str
|
||||
required: False
|
||||
version_added: '2.9'
|
||||
schedule_task_description:
|
||||
description:
|
||||
- Description of schedule task.
|
||||
- Valid only if C(scheduled_at) is specified.
|
||||
type: str
|
||||
required: False
|
||||
version_added: '2.9'
|
||||
schedule_task_enabled:
|
||||
description:
|
||||
- Flag to indicate whether the scheduled task is enabled or disabled.
|
||||
type: bool
|
||||
required: False
|
||||
default: True
|
||||
version_added: '2.9'
|
||||
force:
|
||||
description:
|
||||
- Ignore warnings and complete the actions.
|
||||
- This parameter is useful while forcing virtual machine state.
|
||||
default: False
|
||||
type: bool
|
||||
version_added: 2.5
|
||||
state_change_timeout:
|
||||
description:
|
||||
- If the C(state) is set to C(shutdown-guest), by default the module will return immediately after sending the shutdown signal.
|
||||
- If this argument is set to a positive integer, the module will instead wait for the VM to reach the poweredoff state.
|
||||
- The value sets a timeout in seconds for the module to wait for the state change.
|
||||
default: 0
|
||||
version_added: '2.6'
|
||||
type: int
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Set the state of a virtual machine to poweroff
|
||||
vmware_guest_powerstate:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
folder: "/{{ datacenter_name }}/vm/my_folder"
|
||||
name: "{{ guest_name }}"
|
||||
state: powered-off
|
||||
delegate_to: localhost
|
||||
register: deploy
|
||||
|
||||
- name: Set the state of a virtual machine to poweron using MoID
|
||||
vmware_guest_powerstate:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
folder: "/{{ datacenter_name }}/vm/my_folder"
|
||||
moid: vm-42
|
||||
state: powered-on
|
||||
delegate_to: localhost
|
||||
register: deploy
|
||||
|
||||
- name: Set the state of a virtual machine to poweroff at given scheduled time
|
||||
vmware_guest_powerstate:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
folder: "/{{ datacenter_name }}/vm/my_folder"
|
||||
name: "{{ guest_name }}"
|
||||
state: powered-off
|
||||
scheduled_at: "09/01/2018 10:18"
|
||||
schedule_task_name: "task_00001"
|
||||
schedule_task_description: "Sample task to poweroff VM"
|
||||
schedule_task_enabled: True
|
||||
delegate_to: localhost
|
||||
register: deploy_at_schedule_datetime
|
||||
|
||||
- name: Wait for the virtual machine to shutdown
|
||||
vmware_guest_powerstate:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
name: "{{ guest_name }}"
|
||||
state: shutdown-guest
|
||||
state_change_timeout: 200
|
||||
delegate_to: localhost
|
||||
register: deploy
|
||||
'''
|
||||
|
||||
RETURN = r''' # '''
|
||||
|
||||
# pyVmomi is a third-party dependency.  The ImportError is deliberately
# swallowed here: the PyVmomi helper class from module_utils.vmware performs
# its own availability check and fails the module with a clean error message.
try:
    from pyVmomi import vim, vmodl
except ImportError:
    pass
|
||||
|
||||
from random import randint
|
||||
from datetime import datetime
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.vmware import PyVmomi, set_vm_power_state, vmware_argument_spec
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
|
||||
def main():
    """Entry point for vmware_guest_powerstate.

    Looks up the target VM by name/uuid/moid and either applies the requested
    power state immediately, or (when ``scheduled_at`` is given) creates a
    vCenter scheduled task that applies it at the requested time.  Always
    exits via ``module.exit_json``/``fail_json``.
    """
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        state=dict(type='str', default='present',
                   choices=['present', 'powered-off', 'powered-on', 'reboot-guest', 'restarted', 'shutdown-guest', 'suspended']),
        name=dict(type='str'),
        name_match=dict(type='str', choices=['first', 'last'], default='first'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        folder=dict(type='str'),
        force=dict(type='bool', default=False),
        scheduled_at=dict(type='str'),
        schedule_task_name=dict(),
        schedule_task_description=dict(),
        schedule_task_enabled=dict(type='bool', default=True),
        state_change_timeout=dict(type='int', default=0),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False,
        mutually_exclusive=[
            ['name', 'uuid', 'moid'],
        ],
    )

    result = dict(changed=False,)

    pyv = PyVmomi(module)

    # Check if the VM exists before continuing
    vm = pyv.get_vm()

    if vm:
        # VM already exists, so set power state
        scheduled_at = module.params.get('scheduled_at', None)
        if scheduled_at:
            # Scheduled tasks are a vCenter-only feature; ESXi hosts reject them.
            if not pyv.is_vcenter():
                module.fail_json(msg="Scheduling task requires vCenter, hostname %s "
                                     "is an ESXi server." % module.params.get('hostname'))
            # Map the module's 'state' choices onto the VirtualMachine method
            # that the scheduled task will invoke.
            powerstate = {
                'present': vim.VirtualMachine.PowerOn,
                'powered-off': vim.VirtualMachine.PowerOff,
                'powered-on': vim.VirtualMachine.PowerOn,
                'reboot-guest': vim.VirtualMachine.RebootGuest,
                'restarted': vim.VirtualMachine.Reset,
                'shutdown-guest': vim.VirtualMachine.ShutdownGuest,
                'suspended': vim.VirtualMachine.Suspend,
            }
            dt = ''
            try:
                dt = datetime.strptime(scheduled_at, '%d/%m/%Y %H:%M')
            except ValueError as e:
                # BUGFIX: the two concatenated message fragments previously ran
                # together without a space ("...object,please specify...").
                module.fail_json(msg="Failed to convert given date and time string to Python datetime object, "
                                     "please specify string in 'dd/mm/yyyy hh:mm' format: %s" % to_native(e))
            schedule_task_spec = vim.scheduler.ScheduledTaskSpec()
            # Fall back to a random task name so repeated runs do not collide.
            schedule_task_name = module.params['schedule_task_name'] or 'task_%s' % str(randint(10000, 99999))
            schedule_task_desc = module.params['schedule_task_description']
            if schedule_task_desc is None:
                schedule_task_desc = 'Schedule task for vm %s for ' \
                                     'operation %s at %s' % (vm.name, module.params['state'], scheduled_at)
            schedule_task_spec.name = schedule_task_name
            schedule_task_spec.description = schedule_task_desc
            schedule_task_spec.scheduler = vim.scheduler.OnceTaskScheduler()
            schedule_task_spec.scheduler.runAt = dt
            schedule_task_spec.action = vim.action.MethodAction()
            schedule_task_spec.action.name = powerstate[module.params['state']]
            schedule_task_spec.enabled = module.params['schedule_task_enabled']

            try:
                pyv.content.scheduledTaskManager.CreateScheduledTask(vm, schedule_task_spec)
                # As this is async task, we create scheduled task and mark state to changed.
                module.exit_json(changed=True)
            except vim.fault.InvalidName as e:
                module.fail_json(msg="Failed to create scheduled task %s for %s : %s" % (module.params.get('state'),
                                                                                         vm.name,
                                                                                         to_native(e.msg)))
            except vim.fault.DuplicateName as e:
                # A task with this name already exists: treat as an idempotent no-op.
                module.exit_json(changed=False, details=to_native(e.msg))
            except vmodl.fault.InvalidArgument as e:
                module.fail_json(msg="Failed to create scheduled task %s as specifications "
                                     "given are invalid: %s" % (module.params.get('state'),
                                                                to_native(e.msg)))
        else:
            result = set_vm_power_state(pyv.content, vm, module.params['state'], module.params['force'], module.params['state_change_timeout'])
    else:
        # Renamed from 'id' to avoid shadowing the builtin id().
        vm_id = module.params.get('uuid') or module.params.get('moid') or module.params.get('name')
        module.fail_json(msg="Unable to set power state for non-existing virtual machine : '%s'" % vm_id)

    if result.get('failed') is True:
        module.fail_json(**result)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
@ -1,276 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2019, sky-joker
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: vmware_guest_register_operation
|
||||
short_description: VM inventory registration operation
|
||||
author:
|
||||
- sky-joker (@sky-joker)
|
||||
version_added: '2.10'
|
||||
description:
|
||||
- This module can register or unregister VMs to the inventory.
|
||||
requirements:
|
||||
- python >= 2.7
|
||||
- PyVmomi
|
||||
options:
|
||||
datacenter:
|
||||
description:
|
||||
- Destination datacenter for the register/unregister operation.
|
||||
- This parameter is case sensitive.
|
||||
type: str
|
||||
cluster:
|
||||
description:
|
||||
- Specify a cluster name to register VM.
|
||||
type: str
|
||||
folder:
|
||||
description:
|
||||
- Description folder, absolute path of the target folder.
|
||||
- The folder should include the datacenter. ESX's datacenter is ha-datacenter.
|
||||
- This parameter is case sensitive.
|
||||
- 'Examples:'
|
||||
- ' folder: /ha-datacenter/vm'
|
||||
- ' folder: ha-datacenter/vm'
|
||||
- ' folder: /datacenter1/vm'
|
||||
- ' folder: datacenter1/vm'
|
||||
- ' folder: /datacenter1/vm/folder1'
|
||||
- ' folder: datacenter1/vm/folder1'
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Specify VM name to be registered in the inventory.
|
||||
required: True
|
||||
type: str
|
||||
uuid:
|
||||
description:
|
||||
- UUID of the virtual machine to manage if known, this is VMware's unique identifier.
|
||||
- If virtual machine does not exists, then this parameter is ignored.
|
||||
type: str
|
||||
esxi_hostname:
|
||||
description:
|
||||
- The ESXi hostname where the virtual machine will run.
|
||||
- This parameter is case sensitive.
|
||||
type: str
|
||||
template:
|
||||
description:
|
||||
- Whether to register VM as a template.
|
||||
default: False
|
||||
type: bool
|
||||
path:
|
||||
description:
|
||||
- Specify the path of vmx file.
|
||||
- 'Examples:'
|
||||
- ' [datastore1] vm/vm.vmx'
|
||||
- ' [datastore1] vm/vm.vmtx'
|
||||
type: str
|
||||
resource_pool:
|
||||
description:
|
||||
- Specify a resource pool name to register VM.
|
||||
- This parameter is case sensitive.
|
||||
- Resource pool should be child of the selected host parent.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Specify the state the virtual machine should be in.
|
||||
- if set to C(present), register VM in inventory.
|
||||
- if set to C(absent), unregister VM from inventory.
|
||||
default: present
|
||||
choices: [ present, absent ]
|
||||
type: str
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Register VM to inventory
|
||||
vmware_guest_register_operation:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
datacenter: "{{ datacenter }}"
|
||||
folder: "/vm"
|
||||
esxi_hostname: "{{ esxi_hostname }}"
|
||||
name: "{{ vm_name }}"
|
||||
template: no
|
||||
path: "[datastore1] vm/vm.vmx"
|
||||
state: present
|
||||
|
||||
- name: Register VM in resource pool
|
||||
vmware_guest_register_operation:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
datacenter: "{{ datacenter }}"
|
||||
folder: "/vm"
|
||||
resource_pool: "{{ resource_pool }}"
|
||||
name: "{{ vm_name }}"
|
||||
template: no
|
||||
path: "[datastore1] vm/vm.vmx"
|
||||
state: present
|
||||
|
||||
- name: Register VM in Cluster
|
||||
vmware_guest_register_operation:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
datacenter: "{{ datacenter }}"
|
||||
folder: "/vm"
|
||||
cluster: "{{ cluster_name }}"
|
||||
name: "{{ vm_name }}"
|
||||
template: no
|
||||
path: "[datastore1] vm/vm.vmx"
|
||||
state: present
|
||||
|
||||
- name: UnRegister VM from inventory
|
||||
vmware_guest_register_operation:
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
validate_certs: no
|
||||
datacenter: "{{ datacenter }}"
|
||||
folder: "/vm"
|
||||
name: "{{ vm_name }}"
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
'''
|
||||
|
||||
# Record whether pyVmomi is importable.  The flag itself is not consulted in
# this file's visible code; the PyVmomi base class performs its own check and
# fails the module cleanly when the library is missing.
try:
    from pyVmomi import vim, vmodl
    HAS_PYVMOMI = True
except ImportError:
    HAS_PYVMOMI = False
|
||||
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, find_resource_pool_by_name, \
|
||||
wait_for_task, compile_folder_path_for_object, find_cluster_by_name
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
class VMwareGuestRegisterOperation(PyVmomi):
    """Register a VM (or template) into, or unregister it from, the inventory."""

    def __init__(self, module):
        super(VMwareGuestRegisterOperation, self).__init__(module)
        # Cache the module parameters that drive the register/unregister flow.
        self.datacenter = module.params["datacenter"]
        self.cluster = module.params["cluster"]
        self.folder = module.params["folder"]
        self.name = module.params["name"]
        self.esxi_hostname = module.params["esxi_hostname"]
        self.path = module.params["path"]
        self.template = module.params["template"]
        self.resource_pool = module.params["resource_pool"]
        self.state = module.params["state"]

    def execute(self):
        """Perform the requested inventory operation.

        Always exits the module via ``exit_json``/``fail_json`` and therefore
        never returns to the caller.
        """
        result = dict(changed=False)

        datacenter = self.find_datacenter_by_name(self.datacenter)
        if not datacenter:
            self.module.fail_json(msg="Cannot find the specified Datacenter: %s" % self.datacenter)

        dcpath = compile_folder_path_for_object(datacenter)
        if not dcpath.endswith("/"):
            dcpath += "/"

        # Build the absolute inventory path of the destination folder.
        if self.folder in [None, "", "/"]:
            self.module.fail_json(msg="Please specify folder path other than blank or '/'")
        elif self.folder.startswith("/vm"):
            fullpath = "%s%s%s" % (dcpath, self.datacenter, self.folder)
        else:
            fullpath = "%s%s" % (dcpath, self.folder)

        folder_obj = self.content.searchIndex.FindByInventoryPath(inventoryPath="%s" % fullpath)
        if not folder_obj:
            details = {
                'datacenter': datacenter.name,
                'datacenter_path': dcpath,
                'folder': self.folder,
                'full_search_path': fullpath,
            }
            self.module.fail_json(msg="No folder %s matched in the search path : %s" % (self.folder, fullpath),
                                  details=details)

        if self.state == "present":
            # Already registered: idempotent no-op.
            if self.get_vm():
                self.module.exit_json(**result)

            if self.esxi_hostname:
                host_obj = self.find_hostsystem_by_name(self.esxi_hostname)
                if not host_obj:
                    self.module.fail_json(msg="Cannot find the specified ESXi host: %s" % self.esxi_hostname)
            else:
                host_obj = None

            if self.cluster:
                cluster_obj = find_cluster_by_name(self.content, self.cluster, datacenter)
                if not cluster_obj:
                    self.module.fail_json(msg="Cannot find the specified cluster name: %s" % self.cluster)

                resource_pool_obj = cluster_obj.resourcePool
            elif self.resource_pool:
                resource_pool_obj = find_resource_pool_by_name(self.content, self.resource_pool)
                if not resource_pool_obj:
                    self.module.fail_json(msg="Cannot find the specified resource pool: %s" % self.resource_pool)
            else:
                # BUGFIX: previously host_obj.parent was dereferenced even when
                # no esxi_hostname was supplied, raising AttributeError on None
                # instead of producing a clean module failure.
                if not host_obj:
                    self.module.fail_json(msg="One of 'cluster', 'resource_pool' or 'esxi_hostname' is "
                                              "required to register a virtual machine")
                resource_pool_obj = host_obj.parent.resourcePool

            # BUGFIX: honor check mode (the module declares supports_check_mode=True);
            # previously the registration task ran even in check mode.
            if self.module.check_mode:
                result.update(changed=True)
                self.module.exit_json(**result)

            task = folder_obj.RegisterVM_Task(path=self.path, name=self.name, asTemplate=self.template,
                                              pool=resource_pool_obj, host=host_obj)

            changed = False
            try:
                changed, info = wait_for_task(task)
            except Exception as task_e:
                self.module.fail_json(msg=to_native(task_e))

            result.update(changed=changed)
            self.module.exit_json(**result)

        if self.state == "absent":
            vm_obj = self.get_vm()
            if vm_obj:
                # Honor check mode for the destructive unregister path as well.
                if self.module.check_mode:
                    result.update(changed=True)
                    self.module.exit_json(**result)
                try:
                    vm_obj.UnregisterVM()
                    result.update(changed=True)
                except Exception as exc:
                    self.module.fail_json(msg=to_native(exc))

            self.module.exit_json(**result)
|
||||
|
||||
|
||||
def main():
    """Module entry point: build the argument spec and run the operation."""
    spec = vmware_argument_spec()
    spec.update(
        datacenter=dict(type="str"),
        cluster=dict(type="str"),
        folder=dict(type="str"),
        name=dict(type="str", required=True),
        uuid=dict(type="str"),
        esxi_hostname=dict(type="str"),
        path=dict(type="str"),
        template=dict(type="bool", default=False),
        resource_pool=dict(type="str"),
        state=dict(type="str", default="present", choices=["present", "absent"]),
    )

    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)

    # All of the work (and the module exit) happens inside execute().
    VMwareGuestRegisterOperation(module).execute()


if __name__ == "__main__":
    main()
|
@ -1,287 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2019, Ansible Project
|
||||
# Copyright: (c) 2019, Diane Wang <dianew@vmware.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_guest_screenshot
|
||||
short_description: Create a screenshot of the Virtual Machine console.
|
||||
description:
|
||||
- This module is used to take screenshot of the given virtual machine when virtual machine is powered on.
|
||||
- All parameters and VMware object names are case sensitive.
|
||||
version_added: '2.9'
|
||||
author:
|
||||
- Diane Wang (@Tomorrow9) <dianew@vmware.com>
|
||||
notes:
|
||||
- Tested on vSphere 6.5 and 6.7
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the virtual machine.
|
||||
- This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
|
||||
type: str
|
||||
uuid:
|
||||
description:
|
||||
- UUID of the instance to gather facts if known, this is VMware's unique identifier.
|
||||
- This is a required parameter, if parameter C(name) or C(moid) is not supplied.
|
||||
type: str
|
||||
moid:
|
||||
description:
|
||||
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
|
||||
- This is required if C(name) or C(uuid) is not supplied.
|
||||
version_added: '2.9'
|
||||
type: str
|
||||
folder:
|
||||
description:
|
||||
- Destination folder, absolute or relative path to find an existing guest.
|
||||
- This is a required parameter, only if multiple VMs are found with same name.
|
||||
- The folder should include the datacenter. ESXi server's datacenter is ha-datacenter.
|
||||
- 'Examples:'
|
||||
- ' folder: /ha-datacenter/vm'
|
||||
- ' folder: ha-datacenter/vm'
|
||||
- ' folder: /datacenter1/vm'
|
||||
- ' folder: datacenter1/vm'
|
||||
- ' folder: /datacenter1/vm/folder1'
|
||||
- ' folder: datacenter1/vm/folder1'
|
||||
- ' folder: /folder1/datacenter1/vm'
|
||||
- ' folder: folder1/datacenter1/vm'
|
||||
- ' folder: /folder1/datacenter1/vm/folder2'
|
||||
type: str
|
||||
cluster:
|
||||
description:
|
||||
- The name of cluster where the virtual machine is running.
|
||||
- This is a required parameter, if C(esxi_hostname) is not set.
|
||||
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
|
||||
type: str
|
||||
esxi_hostname:
|
||||
description:
|
||||
- The ESXi hostname where the virtual machine is running.
|
||||
- This is a required parameter, if C(cluster) is not set.
|
||||
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
|
||||
type: str
|
||||
datacenter:
|
||||
description:
|
||||
- The datacenter name to which virtual machine belongs to.
|
||||
type: str
|
||||
local_path:
|
||||
description:
|
||||
- 'If C(local_path) is not set, the created screenshot file will be kept in the directory of the virtual machine
|
||||
on ESXi host. If C(local_path) is set to a valid path on local machine, then the screenshot file will be
|
||||
downloaded from ESXi host to the local directory.'
|
||||
- 'If not download screenshot file to local machine, you can open it through the returned file URL in screenshot
|
||||
facts manually.'
|
||||
type: path
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: take a screenshot of the virtual machine console
|
||||
vmware_guest_screenshot:
|
||||
validate_certs: no
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
folder: "{{ folder_name }}"
|
||||
name: "{{ vm_name }}"
|
||||
local_path: "/tmp/"
|
||||
delegate_to: localhost
|
||||
register: take_screenshot
|
||||
|
||||
- name: Take a screenshot of the virtual machine console using MoID
|
||||
vmware_guest_screenshot:
|
||||
validate_certs: no
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
folder: "{{ folder_name }}"
|
||||
moid: vm-42
|
||||
local_path: "/tmp/"
|
||||
delegate_to: localhost
|
||||
register: take_screenshot
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
screenshot_info:
|
||||
description: display the facts of captured virtual machine screenshot file
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"virtual_machine": "test_vm",
|
||||
"screenshot_file": "[datastore0] test_vm/test_vm-1.png",
|
||||
"task_start_time": "2019-05-25T10:35:04.215016Z",
|
||||
"task_complete_time": "2019-05-25T10:35:04.412622Z",
|
||||
"result": "success",
|
||||
"screenshot_file_url": "https://test_vcenter/folder/test_vm/test_vm-1.png?dcPath=test-dc&dsName=datastore0",
|
||||
"download_local_path": "/tmp/",
|
||||
"download_file_size": 2367,
|
||||
}
|
||||
"""
|
||||
|
||||
# pyVmomi is a third-party dependency.  The ImportError is deliberately
# swallowed here: the PyVmomi base class from module_utils.vmware performs
# its own availability check and fails the module with a clean error message.
try:
    from pyVmomi import vim, vmodl
except ImportError:
    pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlencode, quote
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.urls import open_url
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task, get_parent_datacenter
|
||||
import os
|
||||
|
||||
|
||||
class PyVmomiHelper(PyVmomi):
    """Helper around PyVmomi for taking and downloading VM console screenshots."""

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)
        # Set to True once a screenshot task has actually completed.
        self.change_detected = False

    def generate_http_access_url(self, file_path):
        """Build an HTTPS datastore-browser URL for a datastore file path.

        :param file_path: datastore path such as '[datastore0] test_vm/test_vm-1.png'
        :return: the URL string, or None when file_path is empty.
        """
        if not file_path:
            return None

        # BUGFIX: split on the closing bracket instead of whitespace so that
        # datastore or VM directory names containing spaces
        # ('[my ds] my vm/shot.png') are parsed correctly.
        datastore_name = file_path.split(']')[0].lstrip('[')
        relative_path = file_path.split(']', 1)[1].lstrip()
        path = "/folder/%s" % quote(relative_path)
        params = dict(dsName=datastore_name)
        if not self.is_vcenter():
            # Standalone ESXi exposes its datastores under 'ha-datacenter'.
            datacenter = 'ha-datacenter'
        else:
            # BUGFIX: do not pre-escape '&' manually -- urlencode() below
            # already percent-encodes it; doing both double-encoded the
            # datacenter name ('&' -> '%2526').
            datacenter = get_parent_datacenter(self.current_vm_obj).name
        params['dcPath'] = datacenter
        url_path = "https://%s%s?%s" % (self.params['hostname'], path, urlencode(params))

        return url_path

    def download_screenshot_file(self, file_url, local_file_path, file_name):
        """Download the screenshot from the vSphere file service.

        :param file_url: HTTPS URL of the screenshot file.
        :param local_file_path: local directory, or a full path ending in '.png'.
        :param file_name: file name used when local_file_path is a directory.
        :return: number of bytes written to the local file.
        """
        response = None
        download_size = 0
        # file is downloaded as local_file_name when specified, or use original file name
        if local_file_path.endswith('.png'):
            local_file_name = local_file_path.split('/')[-1]
            local_file_path = local_file_path.rsplit('/', 1)[0]
        else:
            local_file_name = file_name
        if not os.path.exists(local_file_path):
            try:
                os.makedirs(local_file_path)
            except OSError as err:
                self.module.fail_json(msg="Exception caught when create folder %s on local machine, with error %s"
                                          % (local_file_path, to_native(err)))
        local_file = os.path.join(local_file_path, local_file_name)
        with open(local_file, 'wb') as handle:
            try:
                # NOTE(review): certificate validation is unconditionally
                # disabled here; consider honoring the module's
                # validate_certs parameter instead.
                response = open_url(file_url, url_username=self.params.get('username'),
                                    url_password=self.params.get('password'), validate_certs=False)
            except Exception as err:
                self.module.fail_json(msg="Download screenshot file from URL %s, failed due to %s" % (file_url, to_native(err)))
            if not response or response.getcode() >= 400:
                self.module.fail_json(msg="Download screenshot file from URL %s, failed with response %s, response code %s"
                                          % (file_url, response, response.getcode()))
            # Stream in 1 MiB chunks to bound memory use for large files.
            bytes_read = response.read(2 ** 20)
            while bytes_read:
                handle.write(bytes_read)
                handle.flush()
                os.fsync(handle.fileno())
                download_size += len(bytes_read)
                bytes_read = response.read(2 ** 20)

        return download_size

    def get_screenshot_facts(self, task_info, file_url, file_size):
        """Assemble the 'screenshot_info' dict returned to the user.

        :param task_info: info object of the completed CreateScreenshot task.
        :param file_url: datastore-browser URL of the screenshot, or None.
        :param file_size: downloaded size in bytes, or None when not downloaded.
        """
        screenshot_facts = dict()
        if task_info is not None:
            screenshot_facts = dict(
                virtual_machine=task_info.entityName,
                screenshot_file=task_info.result,
                task_start_time=task_info.startTime,
                task_complete_time=task_info.completeTime,
                result=task_info.state,
                screenshot_file_url=file_url,
                download_local_path=self.params.get('local_path'),
                download_file_size=file_size,
            )

        return screenshot_facts

    def take_vm_screenshot(self):
        """Take a console screenshot of the current VM.

        The VM must be powered on.  Optionally downloads the resulting file
        when the 'local_path' parameter is set.

        :return: dict with 'changed', 'failed' and (on success) 'screenshot_info'.
        """
        if self.current_vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn:
            self.module.fail_json(msg="VM is %s, valid power state is poweredOn." % self.current_vm_obj.runtime.powerState)
        try:
            task = self.current_vm_obj.CreateScreenshot_Task()
            wait_for_task(task)
        except vim.fault.FileFault as e:
            self.module.fail_json(msg="Failed to create screenshot due to errors when creating or accessing one or more"
                                      " files needed for this operation, %s" % to_native(e.msg))
        except vim.fault.InvalidState as e:
            self.module.fail_json(msg="Failed to create screenshot due to VM is not ready to respond to such requests,"
                                      " %s" % to_native(e.msg))
        except vmodl.RuntimeFault as e:
            self.module.fail_json(msg="Failed to create screenshot due to runtime fault, %s," % to_native(e.msg))
        except vim.fault.TaskInProgress as e:
            self.module.fail_json(msg="Failed to create screenshot due to VM is busy, %s" % to_native(e.msg))

        if task.info.state == 'error':
            return {'changed': self.change_detected, 'failed': True, 'msg': task.info.error.msg}
        else:
            download_file_size = None
            self.change_detected = True
            # task.info.result holds the datastore path of the new screenshot.
            file_url = self.generate_http_access_url(task.info.result)
            if self.params.get('local_path'):
                if file_url:
                    download_file_size = self.download_screenshot_file(file_url=file_url,
                                                                       local_file_path=self.params['local_path'],
                                                                       file_name=task.info.result.split('/')[-1])
            screenshot_facts = self.get_screenshot_facts(task.info, file_url, download_file_size)
            return {'changed': self.change_detected, 'failed': False, 'screenshot_info': screenshot_facts}
|
||||
|
||||
|
||||
def main():
    """Module entry point: locate the VM and take a console screenshot."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        folder=dict(type='str'),
        datacenter=dict(type='str'),
        esxi_hostname=dict(type='str'),
        cluster=dict(type='str'),
        local_path=dict(type='path'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[['name', 'uuid', 'moid']],
    )

    pyv = PyVmomiHelper(module)
    vm = pyv.get_vm()
    if vm is None:
        # Report whichever identifier the user actually supplied.
        vm_id = module.params.get('uuid') or module.params.get('name') or module.params.get('moid')
        module.fail_json(msg='Unable to find the specified virtual machine : %s' % vm_id)

    result = pyv.take_vm_screenshot()
    if result['failed']:
        module.fail_json(**result)
    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
@ -1,388 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright: (c) 2018, Ansible Project
|
||||
# Copyright: (c) 2018, Diane Wang <dianew@vmware.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: vmware_guest_sendkey
|
||||
short_description: Send USB HID codes to the Virtual Machine's keyboard.
|
||||
description:
|
||||
- This module is used to send keystrokes to given virtual machine.
|
||||
- All parameters and VMware object names are case sensitive.
|
||||
version_added: '2.9'
|
||||
author:
|
||||
- Diane Wang (@Tomorrow9) <dianew@vmware.com>
|
||||
notes:
|
||||
- Tested on vSphere 6.5 and 6.7
|
||||
requirements:
|
||||
- "python >= 2.6"
|
||||
- PyVmomi
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the virtual machine.
|
||||
- This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
|
||||
type: str
|
||||
uuid:
|
||||
description:
|
||||
- UUID of the instance to gather facts if known, this is VMware's unique identifier.
|
||||
- This is a required parameter, if parameter C(name) or C(moid) is not supplied.
|
||||
type: str
|
||||
moid:
|
||||
description:
|
||||
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
|
||||
- This is required if C(name) or C(uuid) is not supplied.
|
||||
type: str
|
||||
folder:
|
||||
description:
|
||||
- Destination folder, absolute or relative path to find an existing guest.
|
||||
- This is a required parameter, only if multiple VMs are found with same name.
|
||||
- The folder should include the datacenter. ESXi server's datacenter is ha-datacenter.
|
||||
- 'Examples:'
|
||||
- ' folder: /ha-datacenter/vm'
|
||||
- ' folder: ha-datacenter/vm'
|
||||
- ' folder: /datacenter1/vm'
|
||||
- ' folder: datacenter1/vm'
|
||||
- ' folder: /datacenter1/vm/folder1'
|
||||
- ' folder: datacenter1/vm/folder1'
|
||||
- ' folder: /folder1/datacenter1/vm'
|
||||
- ' folder: folder1/datacenter1/vm'
|
||||
- ' folder: /folder1/datacenter1/vm/folder2'
|
||||
type: str
|
||||
cluster:
|
||||
description:
|
||||
- The name of cluster where the virtual machine is running.
|
||||
- This is a required parameter, if C(esxi_hostname) is not set.
|
||||
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
|
||||
type: str
|
||||
esxi_hostname:
|
||||
description:
|
||||
- The ESXi hostname where the virtual machine is running.
|
||||
- This is a required parameter, if C(cluster) is not set.
|
||||
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
|
||||
type: str
|
||||
datacenter:
|
||||
description:
|
||||
- The datacenter name to which virtual machine belongs to.
|
||||
type: str
|
||||
string_send:
|
||||
description:
|
||||
- The string will be sent to the virtual machine.
|
||||
- This string can contain valid special character, alphabet and digit on the keyboard.
|
||||
type: str
|
||||
keys_send:
|
||||
description:
|
||||
- The list of the keys will be sent to the virtual machine.
|
||||
- 'Valid values are C(ENTER), C(ESC), C(BACKSPACE), C(TAB), C(SPACE), C(CAPSLOCK), C(DELETE), C(CTRL_ALT_DEL),
|
||||
C(CTRL_C) and C(F1) to C(F12), C(RIGHTARROW), C(LEFTARROW), C(DOWNARROW), C(UPARROW).'
|
||||
- If both C(keys_send) and C(string_send) are specified, keys in C(keys_send) list will be sent in front of the C(string_send).
|
||||
type: list
|
||||
extends_documentation_fragment: vmware.documentation
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Send list of keys to virtual machine
|
||||
vmware_guest_sendkey:
|
||||
validate_certs: no
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
folder: "{{ folder_name }}"
|
||||
name: "{{ vm_name }}"
|
||||
keys_send:
|
||||
- TAB
|
||||
- TAB
|
||||
- ENTER
|
||||
delegate_to: localhost
|
||||
register: keys_num_sent
|
||||
|
||||
- name: Send list of keys to virtual machine using MoID
|
||||
vmware_guest_sendkey:
|
||||
validate_certs: no
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
folder: "{{ folder_name }}"
|
||||
moid: vm-42
|
||||
keys_send:
|
||||
- CTRL_ALT_DEL
|
||||
delegate_to: localhost
|
||||
register: ctrl_alt_del_sent
|
||||
|
||||
- name: Send a string to virtual machine
|
||||
vmware_guest_sendkey:
|
||||
validate_certs: no
|
||||
hostname: "{{ vcenter_hostname }}"
|
||||
username: "{{ vcenter_username }}"
|
||||
password: "{{ vcenter_password }}"
|
||||
datacenter: "{{ datacenter_name }}"
|
||||
folder: "{{ folder_name }}"
|
||||
name: "{{ vm_name }}"
|
||||
string_send: "user_logon"
|
||||
delegate_to: localhost
|
||||
register: keys_num_sent
|
||||
'''
|
||||
|
||||
RETURN = """
|
||||
sendkey_info:
|
||||
description: display the keys and the number of keys sent to the virtual machine
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"virtual_machine": "test_vm",
|
||||
"keys_send": [
|
||||
"SPACE",
|
||||
"DOWNARROW",
|
||||
"DOWNARROW",
|
||||
"ENTER"
|
||||
],
|
||||
"string_send": null,
|
||||
"keys_send_number": 4,
|
||||
"returned_keys_send_number": 4,
|
||||
}
|
||||
"""
|
||||
|
||||
# pyVmomi is a third-party dependency.  The ImportError is deliberately
# swallowed here: the PyVmomi base class from module_utils.vmware performs
# its own availability check and fails the module with a clean error message.
try:
    from pyVmomi import vim, vmodl
except ImportError:
    pass
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
|
||||
|
||||
|
||||
class PyVmomiHelper(PyVmomi):
    """Translate key names and characters into USB HID scan-code events and
    send them to a virtual machine's console via PutUsbScanCodes()."""

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)
        # Becomes True once key events have actually been sent to the VM.
        self.change_detected = False
        # Accumulates all key events for a single PutUsbScanCodes() call.
        self.usb_scan_code_spec = vim.UsbScanCodeSpec()
        # Running count of key events queued for sending.
        self.num_keys_send = 0
        # HID usage tables https://www.usb.org/sites/default/files/documents/hut1_12v2.pdf
        # define valid characters and keys value, hex_code, key value and key modifier
        # Each entry is (name(s), hid_usage_id, [(char, [modifiers]), ...]).
        # Tuple names are printable character pairs (unshifted, shifted);
        # plain string names are named keys such as 'ENTER' or 'F1'.
        self.keys_hid_code = [
            (('a', 'A'), '0x04', [('a', []), ('A', ['LEFTSHIFT'])]),
            (('b', 'B'), '0x05', [('b', []), ('B', ['LEFTSHIFT'])]),
            (('c', 'C'), '0x06', [('c', []), ('C', ['LEFTSHIFT'])]),
            (('d', 'D'), '0x07', [('d', []), ('D', ['LEFTSHIFT'])]),
            (('e', 'E'), '0x08', [('e', []), ('E', ['LEFTSHIFT'])]),
            (('f', 'F'), '0x09', [('f', []), ('F', ['LEFTSHIFT'])]),
            (('g', 'G'), '0x0a', [('g', []), ('G', ['LEFTSHIFT'])]),
            (('h', 'H'), '0x0b', [('h', []), ('H', ['LEFTSHIFT'])]),
            (('i', 'I'), '0x0c', [('i', []), ('I', ['LEFTSHIFT'])]),
            (('j', 'J'), '0x0d', [('j', []), ('J', ['LEFTSHIFT'])]),
            (('k', 'K'), '0x0e', [('k', []), ('K', ['LEFTSHIFT'])]),
            (('l', 'L'), '0x0f', [('l', []), ('L', ['LEFTSHIFT'])]),
            (('m', 'M'), '0x10', [('m', []), ('M', ['LEFTSHIFT'])]),
            (('n', 'N'), '0x11', [('n', []), ('N', ['LEFTSHIFT'])]),
            (('o', 'O'), '0x12', [('o', []), ('O', ['LEFTSHIFT'])]),
            (('p', 'P'), '0x13', [('p', []), ('P', ['LEFTSHIFT'])]),
            (('q', 'Q'), '0x14', [('q', []), ('Q', ['LEFTSHIFT'])]),
            (('r', 'R'), '0x15', [('r', []), ('R', ['LEFTSHIFT'])]),
            (('s', 'S'), '0x16', [('s', []), ('S', ['LEFTSHIFT'])]),
            (('t', 'T'), '0x17', [('t', []), ('T', ['LEFTSHIFT'])]),
            (('u', 'U'), '0x18', [('u', []), ('U', ['LEFTSHIFT'])]),
            (('v', 'V'), '0x19', [('v', []), ('V', ['LEFTSHIFT'])]),
            (('w', 'W'), '0x1a', [('w', []), ('W', ['LEFTSHIFT'])]),
            (('x', 'X'), '0x1b', [('x', []), ('X', ['LEFTSHIFT'])]),
            (('y', 'Y'), '0x1c', [('y', []), ('Y', ['LEFTSHIFT'])]),
            (('z', 'Z'), '0x1d', [('z', []), ('Z', ['LEFTSHIFT'])]),
            (('1', '!'), '0x1e', [('1', []), ('!', ['LEFTSHIFT'])]),
            (('2', '@'), '0x1f', [('2', []), ('@', ['LEFTSHIFT'])]),
            (('3', '#'), '0x20', [('3', []), ('#', ['LEFTSHIFT'])]),
            (('4', '$'), '0x21', [('4', []), ('$', ['LEFTSHIFT'])]),
            (('5', '%'), '0x22', [('5', []), ('%', ['LEFTSHIFT'])]),
            (('6', '^'), '0x23', [('6', []), ('^', ['LEFTSHIFT'])]),
            (('7', '&'), '0x24', [('7', []), ('&', ['LEFTSHIFT'])]),
            (('8', '*'), '0x25', [('8', []), ('*', ['LEFTSHIFT'])]),
            (('9', '('), '0x26', [('9', []), ('(', ['LEFTSHIFT'])]),
            (('0', ')'), '0x27', [('0', []), (')', ['LEFTSHIFT'])]),
            (('-', '_'), '0x2d', [('-', []), ('_', ['LEFTSHIFT'])]),
            (('=', '+'), '0x2e', [('=', []), ('+', ['LEFTSHIFT'])]),
            (('[', '{'), '0x2f', [('[', []), ('{', ['LEFTSHIFT'])]),
            ((']', '}'), '0x30', [(']', []), ('}', ['LEFTSHIFT'])]),
            (('\\', '|'), '0x31', [('\\', []), ('|', ['LEFTSHIFT'])]),
            ((';', ':'), '0x33', [(';', []), (':', ['LEFTSHIFT'])]),
            (('\'', '"'), '0x34', [('\'', []), ('"', ['LEFTSHIFT'])]),
            (('`', '~'), '0x35', [('`', []), ('~', ['LEFTSHIFT'])]),
            ((',', '<'), '0x36', [(',', []), ('<', ['LEFTSHIFT'])]),
            (('.', '>'), '0x37', [('.', []), ('>', ['LEFTSHIFT'])]),
            (('/', '?'), '0x38', [('/', []), ('?', ['LEFTSHIFT'])]),
            ('ENTER', '0x28', [('', [])]),
            ('ESC', '0x29', [('', [])]),
            ('BACKSPACE', '0x2a', [('', [])]),
            ('TAB', '0x2b', [('', [])]),
            ('SPACE', '0x2c', [(' ', [])]),
            ('CAPSLOCK', '0x39', [('', [])]),
            ('F1', '0x3a', [('', [])]),
            ('F2', '0x3b', [('', [])]),
            ('F3', '0x3c', [('', [])]),
            ('F4', '0x3d', [('', [])]),
            ('F5', '0x3e', [('', [])]),
            ('F6', '0x3f', [('', [])]),
            ('F7', '0x40', [('', [])]),
            ('F8', '0x41', [('', [])]),
            ('F9', '0x42', [('', [])]),
            ('F10', '0x43', [('', [])]),
            ('F11', '0x44', [('', [])]),
            ('F12', '0x45', [('', [])]),
            ('DELETE', '0x4c', [('', [])]),
            # CTRL_ALT_DEL reuses the DELETE usage id (0x4c) with modifiers.
            ('CTRL_ALT_DEL', '0x4c', [('', ['CTRL', 'ALT'])]),
            # CTRL_C reuses the 'c' usage id (0x06) with the CTRL modifier.
            ('CTRL_C', '0x06', [('', ['CTRL'])]),
            ('RIGHTARROW', '0x4f', [('', [])]),
            ('LEFTARROW', '0x50', [('', [])]),
            ('DOWNARROW', '0x51', [('', [])]),
            ('UPARROW', '0x52', [('', [])]),
        ]

    @staticmethod
    def hid_to_hex(hid_code):
        # Convert a HID usage id string (e.g. '0x04') into the 32-bit integer
        # the vSphere API expects: the usage id shifted into the upper 16 bits,
        # OR'ed with 7 (0o0007). NOTE(review): the low-bits constant appears to
        # be required by UsbScanCodeSpecKeyEvent -- confirm against vSphere docs.
        return int(hid_code, 16) << 16 | 0o0007

    def get_hid_from_key(self, key):
        """Return (hid_code, modifiers) for a key name or a single character.

        Returns None implicitly when the key is not in the lookup table;
        callers are expected to have validated the key first.
        """
        if key == ' ':
            # Space is special-cased: it is a printable character but is stored
            # in the table under the named key 'SPACE'.
            return '0x2c', []
        for keys_name, key_code, keys_value in self.keys_hid_code:
            if isinstance(keys_name, tuple):
                # Printable-character entry: match the exact character so the
                # shifted variant picks up its LEFTSHIFT modifier.
                for keys in keys_value:
                    if key == keys[0]:
                        return key_code, keys[1]
            else:
                # Named-key entry (e.g. 'ENTER', 'CTRL_ALT_DEL').
                if key == keys_name:
                    return key_code, keys_value[0][1]

    def get_key_event(self, hid_code, modifiers):
        """Build a vim.UsbScanCodeSpecKeyEvent for one key press.

        :param hid_code: HID usage id string, e.g. '0x04'.
        :param modifiers: list of modifier names ('LEFTSHIFT', 'CTRL', 'ALT').
        """
        key_event = vim.UsbScanCodeSpecKeyEvent()
        # All modifier flags default to False; only the ones requested below
        # are switched on.
        key_modifier = vim.UsbScanCodeSpecModifierType()
        key_modifier.leftAlt = False
        key_modifier.leftControl = False
        key_modifier.leftGui = False
        key_modifier.leftShift = False
        key_modifier.rightAlt = False
        key_modifier.rightControl = False
        key_modifier.rightGui = False
        key_modifier.rightShift = False
        # rightShift, rightControl, rightAlt, leftGui, rightGui are not used
        if "LEFTSHIFT" in modifiers:
            key_modifier.leftShift = True
        if "CTRL" in modifiers:
            key_modifier.leftControl = True
        if "ALT" in modifiers:
            key_modifier.leftAlt = True
        key_event.modifiers = key_modifier
        key_event.usbHidCode = self.hid_to_hex(hid_code)

        return key_event

    def get_sendkey_facts(self, vm_obj, returned_value=0):
        """Assemble the 'sendkey_info' result dict for the given VM.

        :param vm_obj: the target virtual machine object (may be None).
        :param returned_value: number of keys the API reported as sent.
        """
        sendkey_facts = dict()
        if vm_obj is not None:
            sendkey_facts = dict(
                virtual_machine=vm_obj.name,
                keys_send=self.params['keys_send'],
                string_send=self.params['string_send'],
                keys_send_number=self.num_keys_send,
                returned_keys_send_number=returned_value,
            )

        return sendkey_facts

    def send_key_to_vm(self, vm_obj):
        """Validate and queue all requested keys, then send them to the VM.

        Fails the module on any unsupported key/character. The result is
        marked failed when the API reports fewer keys sent than were queued.
        """
        key_event = None
        num_keys_returned = 0
        # Named keys / key combinations requested via 'keys_send'.
        if self.params['keys_send']:
            for specified_key in self.params['keys_send']:
                key_found = False
                for keys in self.keys_hid_code:
                    if (isinstance(keys[0], tuple) and specified_key in keys[0]) or \
                            (not isinstance(keys[0], tuple) and specified_key == keys[0]):
                        hid_code, modifiers = self.get_hid_from_key(specified_key)
                        key_event = self.get_key_event(hid_code, modifiers)
                        self.usb_scan_code_spec.keyEvents.append(key_event)
                        self.num_keys_send += 1
                        key_found = True
                        break
                if not key_found:
                    self.module.fail_json(msg="keys_send parameter: '%s' in %s not supported."
                                              % (specified_key, self.params['keys_send']))

        # Individual characters of 'string_send' are typed one key at a time.
        if self.params['string_send']:
            for char in self.params['string_send']:
                key_found = False
                for keys in self.keys_hid_code:
                    # Only printable-character entries (tuple names) match;
                    # space is accepted explicitly since its table name is 'SPACE'.
                    if (isinstance(keys[0], tuple) and char in keys[0]) or char == ' ':
                        hid_code, modifiers = self.get_hid_from_key(char)
                        key_event = self.get_key_event(hid_code, modifiers)
                        self.usb_scan_code_spec.keyEvents.append(key_event)
                        self.num_keys_send += 1
                        key_found = True
                        break
                if not key_found:
                    self.module.fail_json(msg="string_send parameter: '%s' contains char: '%s' not supported."
                                              % (self.params['string_send'], char))

        if self.usb_scan_code_spec.keyEvents:
            try:
                # Single API call delivers all queued key events to the console.
                num_keys_returned = vm_obj.PutUsbScanCodes(self.usb_scan_code_spec)
                self.change_detected = True
            except vmodl.RuntimeFault as e:
                self.module.fail_json(msg="Failed to send key %s to virtual machine due to %s" % (key_event, to_native(e.msg)))

        sendkey_facts = self.get_sendkey_facts(vm_obj, num_keys_returned)
        # A mismatch means the API silently dropped some keys -> report failure.
        if num_keys_returned != self.num_keys_send:
            results = {'changed': self.change_detected, 'failed': True, 'sendkey_info': sendkey_facts}
        else:
            results = {'changed': self.change_detected, 'failed': False, 'sendkey_info': sendkey_facts}

        return results
|
||||
|
||||
|
||||
def main():
    """Module entry point: parse arguments, locate the VM, send the keys.

    Fails the module when the VM cannot be found or when sending fails;
    otherwise exits with the 'sendkey_info' result produced by
    PyVmomiHelper.send_key_to_vm().
    """
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        folder=dict(type='str'),
        datacenter=dict(type='str'),
        esxi_hostname=dict(type='str'),
        cluster=dict(type='str'),
        # elements='str' makes Ansible validate and coerce every list item to
        # a string, instead of silently accepting arbitrary element types.
        keys_send=dict(type='list', default=[], elements='str'),
        string_send=dict(type='str')
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        # The target VM may be identified by name, BIOS UUID, or managed
        # object id -- exactly one of these is required.
        required_one_of=[
            ['name', 'uuid', 'moid']
        ]
    )

    pyv = PyVmomiHelper(module)
    vm = pyv.get_vm()
    if not vm:
        # Report whichever identifier the user actually supplied.
        vm_id = (module.params.get('uuid') or module.params.get('name') or module.params.get('moid'))
        module.fail_json(msg='Unable to find the specified virtual machine : %s ' % vm_id)

    result = pyv.send_key_to_vm(vm)
    if result['failed']:
        module.fail_json(**result)
    else:
        module.exit_json(**result)
|
||||
|
||||
|
||||
# Run the module only when executed directly (Ansible executes modules
# as scripts), never on import.
if __name__ == '__main__':
    main()
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue