@@ -87,102 +87,118 @@ except ImportError:
     HAS_PYVMOMI = False


-def find_host_by_cluster_datacenter(module):
-    datacenter_name = module.params['datacenter_name']
-    cluster_name = module.params['cluster_name']
-    content = module.params['content']
-    esxi_hostname = module.params['esxi_hostname']
-
-    dc = find_datacenter_by_name(content, datacenter_name)
-    cluster = find_cluster_by_name_datacenter(dc, cluster_name)
-
-    for host in cluster.host:
-        if host.name == esxi_hostname:
-            return host, cluster
-
-    return None, cluster
-
-
-def add_host_to_vcenter(module):
-    cluster = module.params['cluster']
-
-    host_connect_spec = vim.host.ConnectSpec()
-    host_connect_spec.hostName = module.params['esxi_hostname']
-    host_connect_spec.userName = module.params['esxi_username']
-    host_connect_spec.password = module.params['esxi_password']
-    host_connect_spec.force = True
-    host_connect_spec.sslThumbprint = ""
-    as_connected = True
-    esxi_license = None
-    resource_pool = None
-
-    try:
-        task = cluster.AddHost_Task(host_connect_spec, as_connected, resource_pool, esxi_license)
-        success, result = wait_for_task(task)
-        return success, result
-    except TaskError as add_task_error:
-        # This is almost certain to fail the first time.
-        # In order to get the sslThumbprint we first connect
-        # get the vim.fault.SSLVerifyFault then grab the sslThumbprint
-        # from that object.
-        #
-        # args is a tuple, selecting the first tuple
-        ssl_verify_fault = add_task_error.args[0]
-        host_connect_spec.sslThumbprint = ssl_verify_fault.thumbprint
-
-    task = cluster.AddHost_Task(host_connect_spec, as_connected, resource_pool, esxi_license)
-    success, result = wait_for_task(task)
-    return success, result
-
-
-def state_exit_unchanged(module):
-    module.exit_json(changed=False)
-
-
-def state_remove_host(module):
-    host = module.params['host']
-    changed = True
-    result = None
-    if not module.check_mode:
-        if not host.runtime.inMaintenanceMode:
-            maintenance_mode_task = host.EnterMaintenanceMode_Task(300, True, None)
-            changed, result = wait_for_task(maintenance_mode_task)
-
-        if changed:
-            task = host.Destroy_Task()
-            changed, result = wait_for_task(task)
-        else:
-            raise Exception(result)
-    module.exit_json(changed=changed, result=str(result))
-
-
-def state_update_host(module):
-    module.exit_json(changed=False, msg="Currently not implemented.")
-
-
-def state_add_host(module):
-    changed = True
-    result = None
-
-    if not module.check_mode:
-        changed, result = add_host_to_vcenter(module)
-    module.exit_json(changed=changed, result=str(result))
-
-
-def check_host_state(module):
-    content = connect_to_api(module)
-    module.params['content'] = content
-
-    host, cluster = find_host_by_cluster_datacenter(module)
-
-    module.params['cluster'] = cluster
-    if host is None:
-        return 'absent'
-    else:
-        module.params['host'] = host
-        return 'present'
+class VMwareHost(object):
+    def __init__(self, module):
+        self.module = module
+        self.datacenter_name = module.params['datacenter_name']
+        self.cluster_name = module.params['cluster_name']
+        self.esxi_hostname = module.params['esxi_hostname']
+        self.esxi_username = module.params['esxi_username']
+        self.esxi_password = module.params['esxi_password']
+        self.state = module.params['state']
+        self.dc = None
+        self.cluster = None
+        self.host = None
+        self.content = connect_to_api(module)
+
+    def process_state(self):
+        try:
+            # Currently state_update_dvs is not implemented.
+            host_states = {
+                'absent': {
+                    'present': self.state_remove_host,
+                    'absent': self.state_exit_unchanged,
+                },
+                'present': {
+                    'present': self.state_exit_unchanged,
+                    'absent': self.state_add_host,
+                }
+            }
+
+            host_states[self.state][self.check_host_state()]()
+
+        except vmodl.RuntimeFault as runtime_fault:
+            self.module.fail_json(msg=runtime_fault.msg)
+        except vmodl.MethodFault as method_fault:
+            self.module.fail_json(msg=method_fault.msg)
+        except Exception as e:
+            self.module.fail_json(msg=str(e))
+
+    def find_host_by_cluster_datacenter(self):
+        self.dc = find_datacenter_by_name(self.content, self.datacenter_name)
+        self.cluster = find_cluster_by_name_datacenter(self.dc, self.cluster_name)
+
+        for host in self.cluster.host:
+            if host.name == self.esxi_hostname:
+                return host, self.cluster
+
+        return None, self.cluster
+
+    def add_host_to_vcenter(self):
+        host_connect_spec = vim.host.ConnectSpec()
+        host_connect_spec.hostName = self.esxi_hostname
+        host_connect_spec.userName = self.esxi_username
+        host_connect_spec.password = self.esxi_password
+        host_connect_spec.force = True
+        host_connect_spec.sslThumbprint = ""
+        as_connected = True
+        esxi_license = None
+        resource_pool = None
+
+        try:
+            task = self.cluster.AddHost_Task(host_connect_spec, as_connected, resource_pool, esxi_license)
+            success, result = wait_for_task(task)
+            return success, result
+        except TaskError as add_task_error:
+            # This is almost certain to fail the first time.
+            # In order to get the sslThumbprint we first connect
+            # get the vim.fault.SSLVerifyFault then grab the sslThumbprint
+            # from that object.
+            #
+            # args is a tuple, selecting the first tuple
+            ssl_verify_fault = add_task_error.args[0]
+            host_connect_spec.sslThumbprint = ssl_verify_fault.thumbprint
+
+        task = self.cluster.AddHost_Task(host_connect_spec, as_connected, resource_pool, esxi_license)
+        success, result = wait_for_task(task)
+        return success, result
+
+    def state_exit_unchanged(self):
+        self.module.exit_json(changed=False)
+
+    def state_remove_host(self):
+        changed = True
+        result = None
+        if not self.module.check_mode:
+            if not self.host.runtime.inMaintenanceMode:
+                maintenance_mode_task = self.host.EnterMaintenanceMode_Task(300, True, None)
+                changed, result = wait_for_task(maintenance_mode_task)
+
+            if changed:
+                task = self.host.Destroy_Task()
+                changed, result = wait_for_task(task)
+            else:
+                raise Exception(result)
+        self.module.exit_json(changed=changed, result=str(result))
+
+    def state_update_host(self):
+        self.module.exit_json(changed=False, msg="Currently not implemented.")
+
+    def state_add_host(self):
+        changed = True
+        result = None
+
+        if not self.module.check_mode:
+            changed, result = self.add_host_to_vcenter()
+        self.module.exit_json(changed=changed, result=str(result))
+
+    def check_host_state(self):
+        self.host, self.cluster = self.find_host_by_cluster_datacenter()
+
+        if self.host is None:
+            return 'absent'
+        else:
+            return 'present'


 def main():
@@ -199,27 +215,8 @@ def main():
     if not HAS_PYVMOMI:
         module.fail_json(msg='pyvmomi is required for this module')

-    try:
-        # Currently state_update_dvs is not implemented.
-        host_states = {
-            'absent': {
-                'present': state_remove_host,
-                'absent': state_exit_unchanged,
-            },
-            'present': {
-                'present': state_exit_unchanged,
-                'absent': state_add_host,
-            }
-        }
-
-        host_states[module.params['state']][check_host_state(module)](module)
-
-    except vmodl.RuntimeFault as runtime_fault:
-        module.fail_json(msg=runtime_fault.msg)
-    except vmodl.MethodFault as method_fault:
-        module.fail_json(msg=method_fault.msg)
-    except Exception as e:
-        module.fail_json(msg=str(e))
+    vmware_host = VMwareHost(module)
+    vmware_host.process_state()

 from ansible.module_utils.vmware import *
 from ansible.module_utils.basic import *
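
The retry inside add_host_to_vcenter() is the one non-obvious piece of this change: the first AddHost_Task call deliberately sends an empty sslThumbprint, vCenter rejects it with an SSL verification fault that carries the host's real certificate thumbprint, and the call is repeated with that thumbprint filled in. A minimal standalone sketch of the same pattern follows; the function name add_host and its parameters are illustrative only, wait_for_task and TaskError are the ansible.module_utils.vmware helpers this diff already relies on, and cluster is assumed to be a connected vim.ClusterComputeResource.

    from pyVmomi import vim
    from ansible.module_utils.vmware import wait_for_task, TaskError

    def add_host(cluster, hostname, username, password):
        # Build the connect spec the same way the module does, with no thumbprint yet.
        spec = vim.host.ConnectSpec()
        spec.hostName = hostname
        spec.userName = username
        spec.password = password
        spec.force = True
        spec.sslThumbprint = ""

        try:
            # First attempt: expected to fail with an SSL verification fault.
            task = cluster.AddHost_Task(spec, True, None, None)
            return wait_for_task(task)
        except TaskError as err:
            # The fault object (vim.fault.SSLVerifyFault) carries the host's
            # actual certificate thumbprint; reuse it for the second attempt.
            spec.sslThumbprint = err.args[0].thumbprint

        task = cluster.AddHost_Task(spec, True, None, None)
        return wait_for_task(task)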