Resolves issue with vmware_dvs_host module for v2.0

When this module was written back in May 2015, we were using 1.9.x. Being lazy, I stashed the complex objects that the other functions would need in `module.params`. What I have noticed is that in 2.0, `exit_json` tries to jsonify those complex objects and fails. This PR resolves that issue for the vmware_dvs_host module.
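For context, here is a minimal sketch of the failure mode (`ManagedObject` is a hypothetical stand-in for a pyVmomi object such as `vim.DistributedVirtualSwitch`; this is not the module's literal code):

```python
import json

# Hypothetical stand-in for a pyVmomi managed object
# (e.g. vim.DistributedVirtualSwitch); any non-primitive object
# behaves the same way under JSON serialization.
class ManagedObject(object):
    pass

# The 1.9-era shortcut: stash the complex object in module.params
# so later functions can reach it.
params = {'switch_name': 'dvSwitch'}
params['dv_switch'] = ManagedObject()

# In 2.0, exit_json() serializes the invocation (including
# module.params) to JSON, which is where the failure surfaces:
try:
    json.dumps(params)
except TypeError as e:
    print("exit_json would fail: %s" % e)
```

The fix below keeps those objects as attributes on a `VMwareDvsHost` instance instead, so `module.params` only ever contains plain, serializable values.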

@kamsz reported this issue in https://github.com/ansible/ansible-modules-extras/pull/1568

Playbook
```
- name: Add Host to dVS
  local_action:
    module: vmware_dvs_host
    hostname: "{{ mgmt_ip_address }}"
    username: "{{ vcsa_user }}"
    password: "{{ vcsa_pass }}"
    esxi_hostname: "{{ hostvars[item].hostname }}"
    switch_name: dvSwitch
    vmnics: "{{ dvs_vmnic }}"
    state: present
  with_items: groups['foundation_esxi']
```
Module Testing
```
TASK [Add Host to dVS] *********************************************************
task path: /opt/autodeploy/projects/emmet/site_deploy.yml:234
ESTABLISH LOCAL CONNECTION FOR USER: root
localhost EXEC ( umask 22 && mkdir -p "$( echo $HOME/.ansible/tmp/ansible-tmp-1454694039.6-259977654985844 )" && echo "$( echo $HOME/.ansible/tmp/ansible-tmp-1454694039.6-259977654985844 )" )
localhost PUT /tmp/tmpGrHqbd TO /root/.ansible/tmp/ansible-tmp-1454694039.6-259977654985844/vmware_dvs_host
localhost EXEC LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 LC_MESSAGES=en_US.UTF-8 /usr/bin/python /root/.ansible/tmp/ansible-tmp-1454694039.6-259977654985844/vmware_dvs_host; rm -rf "/root/.ansible/tmp/ansible-tmp-1454694039.6-259977654985844/" > /dev/null 2>&1
localhost EXEC ( umask 22 && mkdir -p "$( echo $HOME/.ansible/tmp/ansible-tmp-1454694058.76-121920794239796 )" && echo "$( echo $HOME/.ansible/tmp/ansible-tmp-1454694058.76-121920794239796 )" )
localhost PUT /tmp/tmpkP7DPu TO /root/.ansible/tmp/ansible-tmp-1454694058.76-121920794239796/vmware_dvs_host
localhost EXEC LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 LC_MESSAGES=en_US.UTF-8 /usr/bin/python /root/.ansible/tmp/ansible-tmp-1454694058.76-121920794239796/vmware_dvs_host; rm -rf "/root/.ansible/tmp/ansible-tmp-1454694058.76-121920794239796/" > /dev/null 2>&1
localhost EXEC ( umask 22 && mkdir -p "$( echo $HOME/.ansible/tmp/ansible-tmp-1454694090.2-33641188152663 )" && echo "$( echo $HOME/.ansible/tmp/ansible-tmp-1454694090.2-33641188152663 )" )
localhost PUT /tmp/tmp216NwV TO /root/.ansible/tmp/ansible-tmp-1454694090.2-33641188152663/vmware_dvs_host
localhost EXEC LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 LC_MESSAGES=en_US.UTF-8 /usr/bin/python /root/.ansible/tmp/ansible-tmp-1454694090.2-33641188152663/vmware_dvs_host; rm -rf "/root/.ansible/tmp/ansible-tmp-1454694090.2-33641188152663/" > /dev/null 2>&1
changed: [foundation-vcsa -> localhost] => (item=foundation-esxi-01) => {"changed": true, "invocation": {"module_args": {"esxi_hostname": "cscesxtmp001", "hostname": "172.27.0.100", "password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", "state": "present", "switch_name": "dvSwitch", "username": "root", "vmnics": ["vmnic2"]}, "module_name": "vmware_dvs_host"}, "item": "foundation-esxi-01", "result": "None"}
changed: [foundation-vcsa -> localhost] => (item=foundation-esxi-02) => {"changed": true, "invocation": {"module_args": {"esxi_hostname": "cscesxtmp002", "hostname": "172.27.0.100", "password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", "state": "present", "switch_name": "dvSwitch", "username": "root", "vmnics": ["vmnic2"]}, "module_name": "vmware_dvs_host"}, "item": "foundation-esxi-02", "result": "None"}
changed: [foundation-vcsa -> localhost] => (item=foundation-esxi-03) => {"changed": true, "invocation": {"module_args": {"esxi_hostname": "cscesxtmp003", "hostname": "172.27.0.100", "password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", "state": "present", "switch_name": "dvSwitch", "username": "root", "vmnics": ["vmnic2"]}, "module_name": "vmware_dvs_host"}, "item": "foundation-esxi-03", "result": "None"}
```
Commit 0aa4f867de (parent fb4c299f13), Joseph Callen, committed by Matt Clay

The refactor replaces the module-level functions and the try/except dispatch in `main()` with a `VMwareDvsHost` class; the new code follows.

`@@ -76,154 +76,154 @@`
```python
except ImportError:
    HAS_PYVMOMI = False


class VMwareDvsHost(object):
    def __init__(self, module):
        self.module = module
        self.dv_switch = None
        self.uplink_portgroup = None
        self.host = None
        self.dv_switch = None
        self.nic = None
        self.content = connect_to_api(self.module)
        self.state = self.module.params['state']
        self.switch_name = self.module.params['switch_name']
        self.esxi_hostname = self.module.params['esxi_hostname']
        self.vmnics = self.module.params['vmnics']

    def process_state(self):
        try:
            dvs_host_states = {
                'absent': {
                    'present': self.state_destroy_dvs_host,
                    'absent': self.state_exit_unchanged,
                },
                'present': {
                    'update': self.state_update_dvs_host,
                    'present': self.state_exit_unchanged,
                    'absent': self.state_create_dvs_host,
                }
            }

            dvs_host_states[self.state][self.check_dvs_host_state()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            self.module.fail_json(msg=str(e))

    def find_dvspg_by_name(self):
        portgroups = self.dv_switch.portgroup

        for pg in portgroups:
            if pg.name == self.portgroup_name:
                return pg
        return None

    def find_dvs_uplink_pg(self):
        # There should only always be a single uplink port group on
        # a distributed virtual switch
        if len(self.dv_switch.config.uplinkPortgroup):
            return self.dv_switch.config.uplinkPortgroup[0]
        else:
            return None

    # operation should be edit, add and remove
    def modify_dvs_host(self, operation):
        spec = vim.DistributedVirtualSwitch.ConfigSpec()
        spec.configVersion = self.dv_switch.config.configVersion
        spec.host = [vim.dvs.HostMember.ConfigSpec()]
        spec.host[0].operation = operation
        spec.host[0].host = self.host

        if operation in ("edit", "add"):
            spec.host[0].backing = vim.dvs.HostMember.PnicBacking()
            count = 0

            for nic in self.vmnics:
                spec.host[0].backing.pnicSpec.append(vim.dvs.HostMember.PnicSpec())
                spec.host[0].backing.pnicSpec[count].pnicDevice = nic
                spec.host[0].backing.pnicSpec[count].uplinkPortgroupKey = self.uplink_portgroup.key
                count += 1

        task = self.dv_switch.ReconfigureDvs_Task(spec)
        changed, result = wait_for_task(task)
        return changed, result

    def state_destroy_dvs_host(self):
        operation = "remove"
        changed = True
        result = None

        if not self.module.check_mode:
            changed, result = self.modify_dvs_host(operation)
        self.module.exit_json(changed=changed, result=str(result))

    def state_exit_unchanged(self):
        self.module.exit_json(changed=False)

    def state_update_dvs_host(self):
        operation = "edit"
        changed = True
        result = None

        if not self.module.check_mode:
            changed, result = self.modify_dvs_host(operation)
        self.module.exit_json(changed=changed, result=str(result))

    def state_create_dvs_host(self):
        operation = "add"
        changed = True
        result = None

        if not self.module.check_mode:
            changed, result = self.modify_dvs_host(operation)
        self.module.exit_json(changed=changed, result=str(result))

    def find_host_attached_dvs(self):
        for dvs_host_member in self.dv_switch.config.host:
            if dvs_host_member.config.host.name == self.esxi_hostname:
                return dvs_host_member.config.host

        return None

    def check_uplinks(self):
        pnic_device = []

        for dvs_host_member in self.dv_switch.config.host:
            if dvs_host_member.config.host == self.host:
                for pnicSpec in dvs_host_member.config.backing.pnicSpec:
                    pnic_device.append(pnicSpec.pnicDevice)

        return collections.Counter(pnic_device) == collections.Counter(self.vmnics)

    def check_dvs_host_state(self):
        self.dv_switch = find_dvs_by_name(self.content, self.switch_name)

        if self.dv_switch is None:
            raise Exception("A distributed virtual switch %s does not exist" % self.switch_name)

        self.uplink_portgroup = self.find_dvs_uplink_pg()

        if self.uplink_portgroup is None:
            raise Exception("An uplink portgroup does not exist on the distributed virtual switch %s"
                            % self.switch_name)

        self.host = self.find_host_attached_dvs()

        if self.host is None:
            # We still need the HostSystem object to add the host
            # to the distributed vswitch
            self.host = find_hostsystem_by_name(self.content, self.esxi_hostname)
            if self.host is None:
                self.module.fail_json(msg="The esxi_hostname %s does not exist in vCenter" % self.esxi_hostname)
            return 'absent'
        else:
            if self.check_uplinks():
                return 'present'
            else:
                return 'update'


def main():
```

`@@ -239,27 +239,8 @@ def main():`
```python
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    vmware_dvs_host = VMwareDvsHost(module)
    vmware_dvs_host.process_state()

from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
```