From 35a04ff134645ca91caaaee49b4fb591979f3478 Mon Sep 17 00:00:00 2001 From: Joseph Callen Date: Fri, 5 Feb 2016 15:04:04 -0500 Subject: [PATCH] Resolves issue with vmware_migrate_vmk module for v2.0 When this module was written back in May 2015 we were using 1.9.x. Being lazy I added to param the objects that the other functions would need. What I have noticed is in 2.0 exit_json is trying to jsonify those complex objects and failing. This PR resolves that issue with the vmware_migrate_vmk module. @kamsz reported this issue in https://github.com/ansible/ansible-modules-extras/pull/1568 Playbook ``` - name: Migrate Management vmk local_action: module: vmware_migrate_vmk hostname: "{{ mgmt_ip_address }}" username: "{{ vcsa_user }}" password: "{{ vcsa_pass }}" esxi_hostname: "{{ hostvars[item].hostname }}" device: vmk1 current_switch_name: temp_vswitch current_portgroup_name: esx-mgmt migrate_switch_name: dvSwitch migrate_portgroup_name: Management with_items: groups['foundation_esxi'] ``` Module Testing ``` TASK [Migrate Management vmk] ************************************************** task path: /opt/autodeploy/projects/emmet/tasks/deploy/migrate_vmk.yml:3 ESTABLISH LOCAL CONNECTION FOR USER: root localhost EXEC ( umask 22 && mkdir -p "$( echo $HOME/.ansible/tmp/ansible-tmp-1454695485.85-245405603184252 )" && echo "$( echo $HOME/.ansible/tmp/ansible-tmp-1454695485.85-245405603184252 )" ) localhost PUT /tmp/tmpdlhr6t TO /root/.ansible/tmp/ansible-tmp-1454695485.85-245405603184252/vmware_migrate_vmk localhost EXEC LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 LC_MESSAGES=en_US.UTF-8 /usr/bin/python /root/.ansible/tmp/ansible-tmp-1454695485.85-245405603184252/vmware_migrate_vmk; rm -rf "/root/.ansible/tmp/ansible-tmp-1454695485.85-245405603184252/" > /dev/null 2>&1 localhost EXEC ( umask 22 && mkdir -p "$( echo $HOME/.ansible/tmp/ansible-tmp-1454695490.35-143738865490168 )" && echo "$( echo $HOME/.ansible/tmp/ansible-tmp-1454695490.35-143738865490168 )" ) localhost PUT 
/tmp/tmpqfZqh1 TO /root/.ansible/tmp/ansible-tmp-1454695490.35-143738865490168/vmware_migrate_vmk localhost EXEC LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 LC_MESSAGES=en_US.UTF-8 /usr/bin/python /root/.ansible/tmp/ansible-tmp-1454695490.35-143738865490168/vmware_migrate_vmk; rm -rf "/root/.ansible/tmp/ansible-tmp-1454695490.35-143738865490168/" > /dev/null 2>&1 localhost EXEC ( umask 22 && mkdir -p "$( echo $HOME/.ansible/tmp/ansible-tmp-1454695491.96-124154332968882 )" && echo "$( echo $HOME/.ansible/tmp/ansible-tmp-1454695491.96-124154332968882 )" ) localhost PUT /tmp/tmpf3rKZq TO /root/.ansible/tmp/ansible-tmp-1454695491.96-124154332968882/vmware_migrate_vmk localhost EXEC LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 LC_MESSAGES=en_US.UTF-8 /usr/bin/python /root/.ansible/tmp/ansible-tmp-1454695491.96-124154332968882/vmware_migrate_vmk; rm -rf "/root/.ansible/tmp/ansible-tmp-1454695491.96-124154332968882/" > /dev/null 2>&1 ok: [foundation-vcsa -> localhost] => (item=foundation-esxi-01) => {"changed": false, "invocation": {"module_args": {"current_portgroup_name": "esx-mgmt", "current_switch_name": "temp_vswitch", "device": "vmk1", "esxi_hostname": "cscesxtmp001", "hostname": "172.27.0.100", "migrate_portgroup_name": "Management", "migrate_switch_name": "dvSwitch", "password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", "username": "root"}, "module_name": "vmware_migrate_vmk"}, "item": "foundation-esxi-01"} ok: [foundation-vcsa -> localhost] => (item=foundation-esxi-02) => {"changed": false, "invocation": {"module_args": {"current_portgroup_name": "esx-mgmt", "current_switch_name": "temp_vswitch", "device": "vmk1", "esxi_hostname": "cscesxtmp002", "hostname": "172.27.0.100", "migrate_portgroup_name": "Management", "migrate_switch_name": "dvSwitch", "password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", "username": "root"}, "module_name": "vmware_migrate_vmk"}, "item": "foundation-esxi-02"} ok: [foundation-vcsa -> localhost] => (item=foundation-esxi-03) => {"changed": false, "invocation": 
{"module_args": {"current_portgroup_name": "esx-mgmt", "current_switch_name": "temp_vswitch", "device": "vmk1", "esxi_hostname": "cscesxtmp003", "hostname": "172.27.0.100", "migrate_portgroup_name": "Management", "migrate_switch_name": "dvSwitch", "password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", "username": "root"}, "module_name": "vmware_migrate_vmk"}, "item": "foundation-esxi-03"} ``` --- cloud/vmware/vmware_migrate_vmk.py | 192 ++++++++++++++--------------- 1 file changed, 91 insertions(+), 101 deletions(-) diff --git a/cloud/vmware/vmware_migrate_vmk.py b/cloud/vmware/vmware_migrate_vmk.py index a3f3db764ca..a18dcc4a883 100644 --- a/cloud/vmware/vmware_migrate_vmk.py +++ b/cloud/vmware/vmware_migrate_vmk.py @@ -75,8 +75,6 @@ Example from Ansible playbook migrate_switch_name: dvSwitch migrate_portgroup_name: Management ''' - - try: from pyVmomi import vim, vmodl HAS_PYVMOMI = True @@ -84,88 +82,93 @@ except ImportError: HAS_PYVMOMI = False -def state_exit_unchanged(module): - module.exit_json(changed=False) - - -def state_migrate_vds_vss(module): - module.exit_json(changed=False, msg="Currently Not Implemented") - - -def create_host_vnic_config(dv_switch_uuid, portgroup_key, device): - - host_vnic_config = vim.host.VirtualNic.Config() - host_vnic_config.spec = vim.host.VirtualNic.Specification() - host_vnic_config.changeOperation = "edit" - host_vnic_config.device = device - host_vnic_config.portgroup = "" - host_vnic_config.spec.distributedVirtualPort = vim.dvs.PortConnection() - host_vnic_config.spec.distributedVirtualPort.switchUuid = dv_switch_uuid - host_vnic_config.spec.distributedVirtualPort.portgroupKey = portgroup_key - - return host_vnic_config - - -def create_port_group_config(switch_name, portgroup_name): - port_group_config = vim.host.PortGroup.Config() - port_group_config.spec = vim.host.PortGroup.Specification() - - port_group_config.changeOperation = "remove" - port_group_config.spec.name = portgroup_name - port_group_config.spec.vlanId = -1 - 
port_group_config.spec.vswitchName = switch_name - port_group_config.spec.policy = vim.host.NetworkPolicy() - - return port_group_config - - -def state_migrate_vss_vds(module): - content = module.params['content'] - host_system = module.params['host_system'] - migrate_switch_name = module.params['migrate_switch_name'] - migrate_portgroup_name = module.params['migrate_portgroup_name'] - current_portgroup_name = module.params['current_portgroup_name'] - current_switch_name = module.params['current_switch_name'] - device = module.params['device'] - - host_network_system = host_system.configManager.networkSystem - - dv_switch = find_dvs_by_name(content, migrate_switch_name) - pg = find_dvspg_by_name(dv_switch, migrate_portgroup_name) - - config = vim.host.NetworkConfig() - config.portgroup = [create_port_group_config(current_switch_name, current_portgroup_name)] - config.vnic = [create_host_vnic_config(dv_switch.uuid, pg.key, device)] - host_network_system.UpdateNetworkConfig(config, "modify") - module.exit_json(changed=True) - - -def check_vmk_current_state(module): - - device = module.params['device'] - esxi_hostname = module.params['esxi_hostname'] - current_portgroup_name = module.params['current_portgroup_name'] - current_switch_name = module.params['current_switch_name'] - - content = connect_to_api(module) - - host_system = find_hostsystem_by_name(content, esxi_hostname) - - module.params['content'] = content - module.params['host_system'] = host_system - - for vnic in host_system.configManager.networkSystem.networkInfo.vnic: - if vnic.device == device: - module.params['vnic'] = vnic - if vnic.spec.distributedVirtualPort is None: - if vnic.portgroup == current_portgroup_name: - return "migrate_vss_vds" - else: - dvs = find_dvs_by_name(content, current_switch_name) - if dvs is None: - return "migrated" - if vnic.spec.distributedVirtualPort.switchUuid == dvs.uuid: - return "migrate_vds_vss" +class VMwareMigrateVmk(object): + def __init__(self, module): + 
self.module = module + self.host_system = None + self.migrate_switch_name = self.module.params['migrate_switch_name'] + self.migrate_portgroup_name = self.module.params['migrate_portgroup_name'] + self.device = self.module.params['device'] + self.esxi_hostname = self.module.params['esxi_hostname'] + self.current_portgroup_name = self.module.params['current_portgroup_name'] + self.current_switch_name = self.module.params['current_switch_name'] + self.content = connect_to_api(module) + + def process_state(self): + try: + vmk_migration_states = { + 'migrate_vss_vds': self.state_migrate_vss_vds, + 'migrate_vds_vss': self.state_migrate_vds_vss, + 'migrated': self.state_exit_unchanged + } + + vmk_migration_states[self.check_vmk_current_state()]() + + except vmodl.RuntimeFault as runtime_fault: + self.module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + self.module.fail_json(msg=method_fault.msg) + except Exception as e: + self.module.fail_json(msg=str(e)) + + def state_exit_unchanged(self): + self.module.exit_json(changed=False) + + def state_migrate_vds_vss(self): + self.module.exit_json(changed=False, msg="Currently Not Implemented") + + def create_host_vnic_config(self, dv_switch_uuid, portgroup_key): + host_vnic_config = vim.host.VirtualNic.Config() + host_vnic_config.spec = vim.host.VirtualNic.Specification() + + host_vnic_config.changeOperation = "edit" + host_vnic_config.device = self.device + host_vnic_config.portgroup = "" + host_vnic_config.spec.distributedVirtualPort = vim.dvs.PortConnection() + host_vnic_config.spec.distributedVirtualPort.switchUuid = dv_switch_uuid + host_vnic_config.spec.distributedVirtualPort.portgroupKey = portgroup_key + + return host_vnic_config + + def create_port_group_config(self): + port_group_config = vim.host.PortGroup.Config() + port_group_config.spec = vim.host.PortGroup.Specification() + + port_group_config.changeOperation = "remove" + port_group_config.spec.name = self.current_portgroup_name + 
+ port_group_config.spec.vlanId = -1 + port_group_config.spec.vswitchName = self.current_switch_name + port_group_config.spec.policy = vim.host.NetworkPolicy() + + return port_group_config + + def state_migrate_vss_vds(self): + host_network_system = self.host_system.configManager.networkSystem + + dv_switch = find_dvs_by_name(self.content, self.migrate_switch_name) + pg = find_dvspg_by_name(dv_switch, self.migrate_portgroup_name) + + config = vim.host.NetworkConfig() + config.portgroup = [self.create_port_group_config()] + config.vnic = [self.create_host_vnic_config(dv_switch.uuid, pg.key)] + host_network_system.UpdateNetworkConfig(config, "modify") + self.module.exit_json(changed=True) + + def check_vmk_current_state(self): + self.host_system = find_hostsystem_by_name(self.content, self.esxi_hostname) + + for vnic in self.host_system.configManager.networkSystem.networkInfo.vnic: + if vnic.device == self.device: + #self.vnic = vnic + if vnic.spec.distributedVirtualPort is None: + if vnic.portgroup == self.current_portgroup_name: + return "migrate_vss_vds" + else: + dvs = find_dvs_by_name(self.content, self.current_switch_name) + if dvs is None: + return "migrated" + if vnic.spec.distributedVirtualPort.switchUuid == dvs.uuid: + return "migrate_vds_vss" def main(): @@ -181,23 +184,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) if not HAS_PYVMOMI: - module.fail_json(msg='pyvmomi required for this module') - - try: - vmk_migration_states = { - 'migrate_vss_vds': state_migrate_vss_vds, - 'migrate_vds_vss': state_migrate_vds_vss, - 'migrated': state_exit_unchanged - } - - vmk_migration_states[check_vmk_current_state(module)](module) - - except vmodl.RuntimeFault as runtime_fault: - module.fail_json(msg=runtime_fault.msg) - except vmodl.MethodFault as method_fault: - module.fail_json(msg=method_fault.msg) - except Exception as e: - module.fail_json(msg=str(e)) + module.fail_json(msg='pyvmomi required for this module') + 
+ vmware_migrate_vmk = VMwareMigrateVmk(module) + vmware_migrate_vmk.process_state() from ansible.module_utils.vmware import * from ansible.module_utils.basic import *