From 249b7bf9695c8ef1950fa760ad2a115fdae15871 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sun, 23 Aug 2015 00:01:52 +0200 Subject: [PATCH 01/44] cloudstack: cs_instance: fix ip address may not be set on default nic --- cloud/cloudstack/cs_instance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 201449b870d..4ead1317b2f 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -779,7 +779,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): self.result['affinity_groups'] = affinity_groups if 'nic' in instance: for nic in instance['nic']: - if nic['isdefault']: + if nic['isdefault'] and 'ipaddress' in nic: self.result['default_ip'] = nic['ipaddress'] return self.result From a279207c7bb05c58ae1fcc2d682fad345e656dc0 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sun, 23 Aug 2015 00:04:51 +0200 Subject: [PATCH 02/44] cloudstack: cs_portforward: fix returns for int casting * missing self. * variable must be named portforwarding_rule, not resource --- cloud/cloudstack/cs_portforward.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index 2fc14aa5ed3..f2f87b660ef 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -361,9 +361,9 @@ class AnsibleCloudStackPortforwarding(AnsibleCloudStack): super(AnsibleCloudStackPortforwarding, self).get_result(portforwarding_rule) if portforwarding_rule: # Bad bad API does not always return int when it should. - for search_key, return_key in returns_to_int.iteritems(): - if search_key in resource: - self.result[return_key] = int(resource[search_key]) + for search_key, return_key in self.returns_to_int.iteritems(): + if search_key in portforwarding_rule: + self.result[return_key] = int(portforwarding_rule[search_key]) return self.result From 94614d0454e908fe5151451f1c3f23397f3ad747 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sun, 23 Aug 2015 00:06:37 +0200 Subject: [PATCH 03/44] cloudstack: cs_staticnat: fix wrong class name used --- cloud/cloudstack/cs_staticnat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudstack/cs_staticnat.py b/cloud/cloudstack/cs_staticnat.py index 500c533915b..4b73d86e32b 100644 --- a/cloud/cloudstack/cs_staticnat.py +++ b/cloud/cloudstack/cs_staticnat.py @@ -154,7 +154,7 @@ from ansible.module_utils.cloudstack import * class AnsibleCloudStackStaticNat(AnsibleCloudStack): def __init__(self, module): - super(AnsibleCloudStackPortforwarding, self).__init__(module) + super(AnsibleCloudStackStaticNat, self).__init__(module) self.returns = { 'virtualmachinedisplayname': 'vm_display_name', 'virtualmachinename': 'vm_name', From 6945519411f92820ed14055176b642438774877e Mon Sep 17 00:00:00 2001 From: Joseph Callen Date: Mon, 24 Aug 2015 13:38:13 -0400 Subject: [PATCH 04/44] New VMware Module to support adding distributed portgroups --- cloud/vmware/vmware_dvs_portgroup.py | 219 +++++++++++++++++++++++++++ 1 file changed, 219 insertions(+) create mode 100644 cloud/vmware/vmware_dvs_portgroup.py diff --git a/cloud/vmware/vmware_dvs_portgroup.py b/cloud/vmware/vmware_dvs_portgroup.py new file mode 100644 index 00000000000..265f9fd71ef --- /dev/null +++ b/cloud/vmware/vmware_dvs_portgroup.py @@ -0,0 +1,219 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Joseph Callen +# +# This file is part of Ansible +# +# Ansible is free software: you can 
redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: vmware_dvs_portgroup +short_description: Create or remove a Distributed vSwitch portgroup +description: + - Create or remove a Distributed vSwitch portgroup +version_added: 2.0 +author: "Joseph Callen (@jcpowermac)" +notes: + - Tested on vSphere 5.5 +requirements: + - "python >= 2.6" + - PyVmomi +options: + hostname: + description: + - The hostname or IP address of the vSphere vCenter API server + required: True + username: + description: + - The username of the vSphere vCenter + required: True + aliases: ['user', 'admin'] + password: + description: + - The password of the vSphere vCenter + required: True + aliases: ['pass', 'pwd'] + portgroup_name: + description: + - The name of the portgroup that is to be created or deleted + required: True + switch_name: + description: + - The name of the distributed vSwitch the port group should be created on. + required: True + vlan_id: + description: + - The VLAN ID that should be configured with the portgroup + required: True + num_ports: + description: + - The number of ports the portgroup should contain + required: True + portgroup_type: + description: + - See VMware KB 1022312 regarding portgroup types + required: True + choices: + - 'earlyBinding' + - 'lateBinding' + - 'ephemeral' +''' + +EXAMPLES = ''' + - name: Create Management portgroup + local_action: + module: vmware_dvs_portgroup + hostname: vcenter_ip_or_hostname + username: vcenter_username + password: vcenter_password + portgroup_name: Management + switch_name: dvSwitch + vlan_id: 123 + num_ports: 120 + portgroup_type: earlyBinding + state: present +''' + +try: + from pyVmomi import vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +def create_port_group(dv_switch, portgroup_name, vlan_id, num_ports, portgroup_type): + config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec() + + config.name = portgroup_name + config.numPorts = num_ports + + # vim.VMwareDVSPortSetting() does not exist in the pyvmomi documentation + # but this is the correct managed object type. 
+ + config.defaultPortConfig = vim.VMwareDVSPortSetting() + + # vim.VmwareDistributedVirtualSwitchVlanIdSpec() does not exist in the + # pyvmomi documentation but this is the correct managed object type + config.defaultPortConfig.vlan = vim.VmwareDistributedVirtualSwitchVlanIdSpec() + config.defaultPortConfig.vlan.inherited = False + config.defaultPortConfig.vlan.vlanId = vlan_id + config.type = portgroup_type + + spec = [config] + task = dv_switch.AddDVPortgroup_Task(spec) + changed, result = wait_for_task(task) + return changed, result + + +def state_destroy_dvspg(module): + dvs_portgroup = module.params['dvs_portgroup'] + changed = True + result = None + + if not module.check_mode: + task = dvs_portgroup.Destroy_Task() + changed, result = wait_for_task(task) + module.exit_json(changed=changed, result=str(result)) + + +def state_exit_unchanged(module): + module.exit_json(changed=False) + + +def state_update_dvspg(module): + module.exit_json(changed=False, msg="Currently not implemented.") + return + + +def state_create_dvspg(module): + + switch_name = module.params['switch_name'] + portgroup_name = module.params['portgroup_name'] + dv_switch = module.params['dv_switch'] + vlan_id = module.params['vlan_id'] + num_ports = module.params['num_ports'] + portgroup_type = module.params['portgroup_type'] + changed = True + result = None + + if not module.check_mode: + changed, result = create_port_group(dv_switch, portgroup_name, vlan_id, num_ports, portgroup_type) + module.exit_json(changed=changed, result=str(result)) + + +def check_dvspg_state(module): + + switch_name = module.params['switch_name'] + portgroup_name = module.params['portgroup_name'] + + content = connect_to_api(module) + module.params['content'] = content + + dv_switch = find_dvs_by_name(content, switch_name) + + if dv_switch is None: + raise Exception("A distributed virtual switch with name %s does not exist" % switch_name) + + module.params['dv_switch'] = dv_switch + dvs_portgroup = find_dvspg_by_name(dv_switch, portgroup_name) + + if dvs_portgroup is None: + return 'absent' + else: + module.params['dvs_portgroup'] = dvs_portgroup + return 'present' + + +def main(): + + argument_spec = vmware_argument_spec() + argument_spec.update(dict(portgroup_name=dict(required=True, type='str'), + switch_name=dict(required=True, type='str'), + vlan_id=dict(required=True, type='int'), + num_ports=dict(required=True, type='int'), + portgroup_type=dict(required=True, choices=['earlyBinding', 'lateBinding', 'ephemeral'], type='str'), + state=dict(default='present', choices=['present', 'absent'], type='str'))) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + if not HAS_PYVMOMI: + module.fail_json(msg='pyvmomi is required for this module') + + try: + dvspg_states = { + 'absent': { + 'present': state_destroy_dvspg, + 'absent': state_exit_unchanged, + }, + 'present': { + 'update': state_update_dvspg, + 'present': state_exit_unchanged, + 'absent': state_create_dvspg, + } + } + dvspg_states[module.params['state']][check_dvspg_state(module)](module) + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + except Exception as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.vmware import * +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() From 6fbadff17c9bc7481e5c67e2f0da690096deed06 Mon Sep 17 00:00:00 2001 From: Russell Teague Date: Mon, 24 Aug 2015 
13:39:02 -0400 Subject: [PATCH 05/44] Adding vmware_vmkernel_ip_config module --- cloud/vmware/vmware_vmkernel_ip_config.py | 136 ++++++++++++++++++++++ 1 file changed, 136 insertions(+) create mode 100644 cloud/vmware/vmware_vmkernel_ip_config.py diff --git a/cloud/vmware/vmware_vmkernel_ip_config.py b/cloud/vmware/vmware_vmkernel_ip_config.py new file mode 100644 index 00000000000..c07526f0aeb --- /dev/null +++ b/cloud/vmware/vmware_vmkernel_ip_config.py @@ -0,0 +1,136 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Joseph Callen +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: vmware_vmkernel_ip_config +short_description: Configure the VMkernel IP Address +description: + - Configure the VMkernel IP Address +version_added: 2.0 +author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)" +notes: + - Tested on vSphere 5.5 +requirements: + - "python >= 2.6" + - PyVmomi +options: + hostname: + description: + - The hostname or IP address of the ESXi server + required: True + username: + description: + - The username of the ESXi server + required: True + aliases: ['user', 'admin'] + password: + description: + - The password of the ESXi server + required: True + aliases: ['pass', 'pwd'] + vmk_name: + description: + - VMkernel interface name + required: True + ip_address: + description: + - IP address to assign to VMkernel interface + required: True + subnet_mask: + description: + - Subnet Mask to assign to VMkernel interface + required: True +''' + +EXAMPLES = ''' +# Example command from Ansible Playbook + +- name: Configure IP address on ESX host + local_action: + module: vmware_vmkernel_ip_config + hostname: esxi_hostname + username: esxi_username + password: esxi_password + vmk_name: vmk0 + ip_address: 10.0.0.10 + subnet_mask: 255.255.255.0 +''' + +try: + from pyVmomi import vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +def configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask): + + host_config_manager = host_system.configManager + host_network_system = host_config_manager.networkSystem + + for vnic in host_network_system.networkConfig.vnic: + if vnic.device == vmk_name: + spec = vnic.spec + if spec.ip.ipAddress != ip_address: + spec.ip.dhcp = False + spec.ip.ipAddress = ip_address + spec.ip.subnetMask = subnet_mask + host_network_system.UpdateVirtualNic(vmk_name, spec) + return True + return False + + +def main(): + + argument_spec = vmware_argument_spec() + argument_spec.update(dict(vmk_name=dict(required=True, type='str'), + ip_address=dict(required=True, type='str'), + subnet_mask=dict(required=True, type='str'))) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + + if not HAS_PYVMOMI: + module.fail_json(msg='pyvmomi is required for this module') + + vmk_name = module.params['vmk_name'] + ip_address = module.params['ip_address'] + subnet_mask = 
module.params['subnet_mask'] + + try: + content = connect_to_api(module, False) + host = get_all_objs(content, [vim.HostSystem]) + if not host: + module.fail_json(msg="Unable to locate Physical Host.") + host_system = host.keys()[0] + changed = configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask) + module.exit_json(changed=changed) + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + except Exception as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.vmware import * +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() From 7beea8a15234b903b7763c0dc1a82d20ab2b756c Mon Sep 17 00:00:00 2001 From: Joseph Callen Date: Mon, 24 Aug 2015 13:44:27 -0400 Subject: [PATCH 06/44] New VMware Module to support adding distribute vswitch --- cloud/vmware/vmware_dvswitch.py | 225 ++++++++++++++++++++++++++++++++ 1 file changed, 225 insertions(+) create mode 100644 cloud/vmware/vmware_dvswitch.py diff --git a/cloud/vmware/vmware_dvswitch.py b/cloud/vmware/vmware_dvswitch.py new file mode 100644 index 00000000000..26212a06c5f --- /dev/null +++ b/cloud/vmware/vmware_dvswitch.py @@ -0,0 +1,225 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Joseph Callen +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: vmware_dvswitch +short_description: Create or remove a distributed vSwitch +description: + - Create or remove a distributed vSwitch +version_added: 2.0 +author: "Joseph Callen (@jcpowermac)" +notes: + - Tested on vSphere 5.5 +requirements: + - "python >= 2.6" + - PyVmomi +options: + hostname: + description: + - The hostname or IP address of the vSphere vCenter API server + required: True + username: + description: + - The username of the vSphere vCenter + required: True + aliases: ['user', 'admin'] + password: + description: + - The password of the vSphere vCenter + required: True + aliases: ['pass', 'pwd'] + datacenter_name: + description: + - The name of the datacenter that will contain the dvSwitch + required: True + switch_name: + description: + - The name of the switch to create or remove + required: True + mtu: + description: + - The switch maximum transmission unit + required: True + uplink_quantity: + description: + - Quantity of uplink per ESXi host added to the switch + required: True + discovery_proto: + description: + - Link discovery protocol between Cisco and Link Layer discovery + choices: + - 'cdp' + - 'lldp' + required: True + discovery_operation: + description: + - Select the discovery operation + choices: + - 'both' + - 'none' + - 'advertise' + - 'listen' + state: + description: + - Create or remove dvSwitch + default: 'present' + choices: + - 'present' + - 'absent' + required: False +''' +EXAMPLES = ''' +- name: Create dvswitch + local_action: + module: vmware_dvswitch + hostname: vcenter_ip_or_hostname + username: vcenter_username + password: vcenter_password + datacenter_name: datacenter + switch_name: dvSwitch + mtu: 9000 + uplink_quantity: 2 + discovery_proto: lldp + discovery_operation: both + state: present +''' + +try: + from pyVmomi import vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +def create_dvswitch(network_folder, switch_name, mtu, uplink_quantity, discovery_proto, discovery_operation): + + result = None + changed = False + + spec = vim.DistributedVirtualSwitch.CreateSpec() + spec.configSpec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec() + spec.configSpec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy() + spec.configSpec.linkDiscoveryProtocolConfig = vim.host.LinkDiscoveryProtocolConfig() + + spec.configSpec.name = switch_name + spec.configSpec.maxMtu = mtu + spec.configSpec.linkDiscoveryProtocolConfig.protocol = discovery_proto + spec.configSpec.linkDiscoveryProtocolConfig.operation = discovery_operation + spec.productInfo = vim.dvs.ProductSpec() + spec.productInfo.name = "DVS" + spec.productInfo.vendor = "VMware" + + for count in range(1, uplink_quantity+1): + spec.configSpec.uplinkPortPolicy.uplinkPortName.append("uplink%d" % count) + + task = network_folder.CreateDVS_Task(spec) + changed, result = wait_for_task(task) + return changed, result + + +def state_exit_unchanged(module): + module.exit_json(changed=False) + + +def state_destroy_dvs(module): + dvs = module.params['dvs'] + task = dvs.Destroy_Task() + changed, result = wait_for_task(task) + module.exit_json(changed=changed, result=str(result)) + + +def state_update_dvs(module): + module.exit_json(changed=False, msg="Currently not implemented.") + + +def state_create_dvs(module): + switch_name = module.params['switch_name'] + datacenter_name = module.params['datacenter_name'] + content = module.params['content'] + mtu = module.params['mtu'] + uplink_quantity = module.params['uplink_quantity'] + 
discovery_proto = module.params['discovery_proto'] + discovery_operation = module.params['discovery_operation'] + + changed = True + result = None + + if not module.check_mode: + dc = find_datacenter_by_name(content, datacenter_name) + changed, result = create_dvswitch(dc.networkFolder, switch_name, + mtu, uplink_quantity, discovery_proto, + discovery_operation) + module.exit_json(changed=changed, result=str(result)) + + +def check_dvs_configuration(module): + switch_name = module.params['switch_name'] + content = connect_to_api(module) + module.params['content'] = content + dvs = find_dvs_by_name(content, switch_name) + if dvs is None: + return 'absent' + else: + module.params['dvs'] = dvs + return 'present' + + +def main(): + argument_spec = vmware_argument_spec() + argument_spec.update(dict(datacenter_name=dict(required=True, type='str'), + switch_name=dict(required=True, type='str'), + mtu=dict(required=True, type='int'), + uplink_quantity=dict(required=True, type='int'), + discovery_proto=dict(required=True, choices=['cdp', 'lldp'], type='str'), + discovery_operation=dict(required=True, choices=['both', 'none', 'advertise', 'listen'], type='str'), + state=dict(default='present', choices=['present', 'absent'], type='str'))) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + if not HAS_PYVMOMI: + module.fail_json(msg='pyvmomi is required for this module') + + try: + # Currently state_update_dvs is not implemented. + dvs_states = { + 'absent': { + 'present': state_destroy_dvs, + 'absent': state_exit_unchanged, + }, + 'present': { + 'update': state_update_dvs, + 'present': state_exit_unchanged, + 'absent': state_create_dvs, + } + } + dvs_states[module.params['state']][check_dvs_configuration(module)](module) + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + except Exception as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.vmware import * +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() From 72579ab3e11c02c4d7869fb4634dd08114a7dd3f Mon Sep 17 00:00:00 2001 From: Russell Teague Date: Mon, 24 Aug 2015 13:44:45 -0400 Subject: [PATCH 07/44] Adding vmware_vmkernel module --- cloud/vmware/vmware_vmkernel.py | 221 ++++++++++++++++++++++++++++++++ 1 file changed, 221 insertions(+) create mode 100644 cloud/vmware/vmware_vmkernel.py diff --git a/cloud/vmware/vmware_vmkernel.py b/cloud/vmware/vmware_vmkernel.py new file mode 100644 index 00000000000..0221f68ad2e --- /dev/null +++ b/cloud/vmware/vmware_vmkernel.py @@ -0,0 +1,221 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Joseph Callen +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: vmware_vmkernel +short_description: Create a VMware VMkernel Interface +description: + - Create a VMware VMkernel Interface +version_added: 2.0 +author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)" +notes: + - Tested on vSphere 5.5 +requirements: + - "python >= 2.6" + - PyVmomi +options: + hostname: + description: + - The hostname or IP address of the ESXi Server + required: True + username: + description: + - The username of the ESXi Server + required: True + aliases: ['user', 'admin'] + password: + description: + - The password of ESXi Server + required: True + aliases: ['pass', 'pwd'] + vswitch_name: + description: + - The name of the vswitch where to add the VMK interface + required: True + portgroup_name: + description: + - The name of the portgroup for the VMK interface + required: True + ip_address: + description: + - The IP Address for the VMK interface + required: True + subnet_mask: + description: + - The Subnet Mask for the VMK interface + required: True + vland_id: + description: + - The VLAN ID for the VMK interface + required: True + mtu: + description: + - The MTU for the VMK interface + required: False + enable_vsan: + description: + - Enable the VMK interface for VSAN traffic + required: False + enable_vmotion: + description: + - Enable the VMK interface for vMotion traffic + required: False + enable_mgmt: + description: + - Enable the VMK interface for Management traffic + required: False + enable_ft: + description: + - Enable the VMK interface for Fault Tolerance traffic + required: False +''' + +EXAMPLES = ''' +# Example command from Ansible Playbook + +- name: Add Management vmkernel port (vmk1) + local_action: + module: vmware_vmkernel + hostname: esxi_hostname + username: esxi_username + password: esxi_password + vswitch_name: vswitch_name + portgroup_name: portgroup_name + vlan_id: vlan_id + ip_address: ip_address + subnet_mask: subnet_mask + enable_mgmt: True +''' + +try: + from pyVmomi import vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +def create_vmkernel_adapter(host_system, port_group_name, + vlan_id, vswitch_name, + ip_address, subnet_mask, + mtu, enable_vsan, enable_vmotion, enable_mgmt, enable_ft): + + host_config_manager = host_system.configManager + host_network_system = host_config_manager.networkSystem + host_virtual_vic_manager = host_config_manager.virtualNicManager + config = vim.host.NetworkConfig() + + config.portgroup = [vim.host.PortGroup.Config()] + config.portgroup[0].changeOperation = "add" + config.portgroup[0].spec = vim.host.PortGroup.Specification() + config.portgroup[0].spec.name = port_group_name + config.portgroup[0].spec.vlanId = vlan_id + config.portgroup[0].spec.vswitchName = vswitch_name + config.portgroup[0].spec.policy = vim.host.NetworkPolicy() + + config.vnic = [vim.host.VirtualNic.Config()] + config.vnic[0].changeOperation = "add" + config.vnic[0].portgroup = port_group_name + config.vnic[0].spec = vim.host.VirtualNic.Specification() + config.vnic[0].spec.ip = vim.host.IpConfig() + config.vnic[0].spec.ip.dhcp = False + config.vnic[0].spec.ip.ipAddress = ip_address + config.vnic[0].spec.ip.subnetMask = subnet_mask + if mtu: + config.vnic[0].spec.mtu = mtu + + host_network_config_result = host_network_system.UpdateNetworkConfig(config, "modify") + + for vnic_device in host_network_config_result.vnicDevice: + if enable_vsan: + vsan_system = host_config_manager.vsanSystem + vsan_config = vim.vsan.host.ConfigInfo() + vsan_config.networkInfo = 
vim.vsan.host.ConfigInfo.NetworkInfo() + + vsan_config.networkInfo.port = [vim.vsan.host.ConfigInfo.NetworkInfo.PortConfig()] + + vsan_config.networkInfo.port[0].device = vnic_device + host_vsan_config_result = vsan_system.UpdateVsan_Task(vsan_config) + + if enable_vmotion: + host_virtual_vic_manager.SelectVnicForNicType("vmotion", vnic_device) + + if enable_mgmt: + host_virtual_vic_manager.SelectVnicForNicType("management", vnic_device) + + if enable_ft: + host_virtual_vic_manager.SelectVnicForNicType("faultToleranceLogging", vnic_device) + return True + + +def main(): + + argument_spec = vmware_argument_spec() + argument_spec.update(dict(portgroup_name=dict(required=True, type='str'), + ip_address=dict(required=True, type='str'), + subnet_mask=dict(required=True, type='str'), + mtu=dict(required=False, type='int'), + enable_vsan=dict(required=False, type='bool'), + enable_vmotion=dict(required=False, type='bool'), + enable_mgmt=dict(required=False, type='bool'), + enable_ft=dict(required=False, type='bool'), + vswitch_name=dict(required=True, type='str'), + vlan_id=dict(required=True, type='int'))) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + + if not HAS_PYVMOMI: + module.fail_json(msg='pyvmomi is required for this module') + + port_group_name = module.params['portgroup_name'] + ip_address = module.params['ip_address'] + subnet_mask = module.params['subnet_mask'] + mtu = module.params['mtu'] + enable_vsan = module.params['enable_vsan'] + enable_vmotion = module.params['enable_vmotion'] + enable_mgmt = module.params['enable_mgmt'] + enable_ft = module.params['enable_ft'] + vswitch_name = module.params['vswitch_name'] + vlan_id = module.params['vlan_id'] + + try: + content = connect_to_api(module) + host = get_all_objs(content, [vim.HostSystem]) + if not host: + module.fail_json(msg="Unable to locate Physical Host.") + host_system = host.keys()[0] + changed = create_vmkernel_adapter(host_system, port_group_name, + vlan_id, vswitch_name, + ip_address, subnet_mask, + mtu, enable_vsan, enable_vmotion, enable_mgmt, enable_ft) + module.exit_json(changed=changed) + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + except Exception as e: + module.fail_json(msg=str(e)) + + +from ansible.module_utils.vmware import * +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() From d5e3bd770a45d9d333de56571e0208db4c3dedff Mon Sep 17 00:00:00 2001 From: Russell Teague Date: Mon, 24 Aug 2015 13:48:16 -0400 Subject: [PATCH 08/44] Adding vmware_vm_vss_dvs_migrate module --- cloud/vmware/vmware_vm_vss_dvs_migrate.py | 176 ++++++++++++++++++++++ 1 file changed, 176 insertions(+) create mode 100644 cloud/vmware/vmware_vm_vss_dvs_migrate.py diff --git a/cloud/vmware/vmware_vm_vss_dvs_migrate.py b/cloud/vmware/vmware_vm_vss_dvs_migrate.py new file mode 100644 index 00000000000..ff51f86ed09 --- /dev/null +++ b/cloud/vmware/vmware_vm_vss_dvs_migrate.py @@ -0,0 +1,176 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Joseph Callen +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: vmware_vm_vss_dvs_migrate +short_description: Migrates a virtual machine from a standard vswitch to distributed +description: + - Migrates a virtual machine from a standard vswitch to distributed +version_added: 2.0 +author: "Joseph Callen (@jcpowermac)" +notes: + - Tested on vSphere 5.5 +requirements: + - "python >= 2.6" + - PyVmomi +options: + hostname: + description: + - The hostname or IP address of the vSphere vCenter API server + required: True + username: + description: + - The username of the vSphere vCenter + required: True + aliases: ['user', 'admin'] + password: + description: + - The password of the vSphere vCenter + required: True + aliases: ['pass', 'pwd'] + vm_name: + description: + - Name of the virtual machine to migrate to a dvSwitch + required: True + dvportgroup_name: + description: + - Name of the portgroup to migrate to the virtual machine to + required: True +''' + +EXAMPLES = ''' +- name: Migrate VCSA to vDS + local_action: + module: vmware_vm_vss_dvs_migrate + hostname: vcenter_ip_or_hostname + username: vcenter_username + password: vcenter_password + vm_name: virtual_machine_name + dvportgroup_name: distributed_portgroup_name +''' + +try: + from pyVmomi import vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +def _find_dvspg_by_name(content, pg_name): + + vmware_distributed_port_group = get_all_objs(content, [vim.dvs.DistributedVirtualPortgroup]) + for dvspg in vmware_distributed_port_group: + if dvspg.name == pg_name: + return dvspg + return None + + +def find_vm_by_name(content, vm_name): + + virtual_machines = get_all_objs(content, [vim.VirtualMachine]) + for vm in virtual_machines: + if vm.name == vm_name: + return vm + return None + + +def migrate_network_adapter_vds(module): + vm_name = module.params['vm_name'] + dvportgroup_name = module.params['dvportgroup_name'] + content = module.params['content'] + + vm_configspec = vim.vm.ConfigSpec() + nic = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo() + port = vim.dvs.PortConnection() + devicespec = vim.vm.device.VirtualDeviceSpec() + + pg = _find_dvspg_by_name(content, dvportgroup_name) + + if pg is None: + module.fail_json(msg="The standard portgroup was not found") + + vm = find_vm_by_name(content, vm_name) + if vm is None: + module.fail_json(msg="The virtual machine was not found") + + dvswitch = pg.config.distributedVirtualSwitch + port.switchUuid = dvswitch.uuid + port.portgroupKey = pg.key + nic.port = port + + for device in vm.config.hardware.device: + if isinstance(device, vim.vm.device.VirtualEthernetCard): + devicespec.device = device + devicespec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit + devicespec.device.backing = nic + vm_configspec.deviceChange.append(devicespec) + + task = vm.ReconfigVM_Task(vm_configspec) + changed, result = wait_for_task(task) + module.exit_json(changed=changed, result=result) + + +def state_exit_unchanged(module): + module.exit_json(changed=False) + + +def check_vm_network_state(module): + vm_name = module.params['vm_name'] + try: + content = connect_to_api(module) + module.params['content'] = content + vm = 
find_vm_by_name(content, vm_name) + module.params['vm'] = vm + if vm is None: + module.fail_json(msg="A virtual machine with name %s does not exist" % vm_name) + for device in vm.config.hardware.device: + if isinstance(device, vim.vm.device.VirtualEthernetCard): + if isinstance(device.backing, vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo): + return 'present' + return 'absent' + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + + +def main(): + + argument_spec = vmware_argument_spec() + argument_spec.update(dict(vm_name=dict(required=True, type='str'), + dvportgroup_name=dict(required=True, type='str'))) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + if not HAS_PYVMOMI: + module.fail_json(msg='pyvmomi is required for this module') + + vm_nic_states = { + 'absent': migrate_network_adapter_vds, + 'present': state_exit_unchanged, + } + + vm_nic_states[check_vm_network_state(module)](module) + +from ansible.module_utils.vmware import * +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() \ No newline at end of file From a2e15f07f863a33ed49d4709e96a2a9dad7d4d8c Mon Sep 17 00:00:00 2001 From: Joseph Callen Date: Mon, 24 Aug 2015 13:48:17 -0400 Subject: [PATCH 09/44] New VMware Module to support adding an ESXi host to vCenter --- cloud/vmware/vmware_host.py | 241 ++++++++++++++++++++++++++++++++++++ 1 file changed, 241 insertions(+) create mode 100644 cloud/vmware/vmware_host.py diff --git a/cloud/vmware/vmware_host.py b/cloud/vmware/vmware_host.py new file mode 100644 index 00000000000..162397a2190 --- /dev/null +++ b/cloud/vmware/vmware_host.py @@ -0,0 +1,241 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Joseph Callen +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: vmware_host +short_description: Add/remove ESXi host to/from vCenter +description: + - This module can be used to add/remove an ESXi host to/from vCenter +version_added: 2.0 +author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)" +notes: + - Tested on vSphere 5.5 +requirements: + - "python >= 2.6" + - PyVmomi +options: + hostname: + description: + - The hostname or IP address of the vSphere vCenter API server + required: True + username: + description: + - The username of the vSphere vCenter + required: True + aliases: ['user', 'admin'] + password: + description: + - The password of the vSphere vCenter + required: True + aliases: ['pass', 'pwd'] + datacenter_name: + description: + - Name of the datacenter to add the host + required: True + cluster_name: + description: + - Name of the cluster to add the host + required: True + esxi_hostname: + description: + - ESXi hostname to manage + required: True + esxi_username: + description: + - ESXi username + required: True + esxi_password: + description: + - ESXi password + required: True + state: + description: + - Add or remove the host + default: 'present' + choices: + - 'present' + - 'absent' + required: False +''' + +EXAMPLES = ''' +Example from Ansible playbook + + - name: Add ESXi Host to VCSA + local_action: + module: vmware_host + hostname: vcsa_host + username: vcsa_user + password: vcsa_pass + datacenter_name: datacenter_name + cluster_name: cluster_name + esxi_hostname: esxi_hostname + esxi_username: esxi_username + esxi_password: esxi_password + state: present +''' + +try: + from pyVmomi import vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +def find_host_by_cluster_datacenter(module): + datacenter_name = module.params['datacenter_name'] + cluster_name = module.params['cluster_name'] + content = module.params['content'] + esxi_hostname = module.params['esxi_hostname'] + + dc = find_datacenter_by_name(content, datacenter_name) + cluster = find_cluster_by_name_datacenter(dc, cluster_name) + + for host in cluster.host: + if host.name == esxi_hostname: + return host, cluster + + return None, cluster + + +def add_host_to_vcenter(module): + cluster = module.params['cluster'] + + host_connect_spec = vim.host.ConnectSpec() + host_connect_spec.hostName = module.params['esxi_hostname'] + host_connect_spec.userName = module.params['esxi_username'] + host_connect_spec.password = module.params['esxi_password'] + host_connect_spec.force = True + host_connect_spec.sslThumbprint = "" + as_connected = True + esxi_license = None + resource_pool = None + + try: + task = cluster.AddHost_Task(host_connect_spec, as_connected, resource_pool, esxi_license) + success, result = wait_for_task(task) + return success, result + except TaskError as add_task_error: + # This is almost certain to fail the first time. + # In order to get the sslThumbprint we first connect + # get the vim.fault.SSLVerifyFault then grab the sslThumbprint + # from that object. 
+ # + # args is a tuple, selecting the first tuple + ssl_verify_fault = add_task_error.args[0] + host_connect_spec.sslThumbprint = ssl_verify_fault.thumbprint + + task = cluster.AddHost_Task(host_connect_spec, as_connected, resource_pool, esxi_license) + success, result = wait_for_task(task) + return success, result + + +def state_exit_unchanged(module): + module.exit_json(changed=False) + + +def state_remove_host(module): + host = module.params['host'] + changed = True + result = None + if not module.check_mode: + if not host.runtime.inMaintenanceMode: + maintenance_mode_task = host.EnterMaintenanceMode_Task(300, True, None) + changed, result = wait_for_task(maintenance_mode_task) + + if changed: + task = host.Destroy_Task() + changed, result = wait_for_task(task) + else: + raise Exception(result) + module.exit_json(changed=changed, result=str(result)) + + +def state_update_host(module): + module.exit_json(changed=False, msg="Currently not implemented.") + + +def state_add_host(module): + + changed = True + result = None + + if not module.check_mode: + changed, result = add_host_to_vcenter(module) + module.exit_json(changed=changed, result=str(result)) + + +def check_host_state(module): + + content = connect_to_api(module) + module.params['content'] = content + + host, cluster = find_host_by_cluster_datacenter(module) + + module.params['cluster'] = cluster + if host is None: + return 'absent' + else: + module.params['host'] = host + return 'present' + + +def main(): + argument_spec = vmware_argument_spec() + argument_spec.update(dict(datacenter_name=dict(required=True, type='str'), + cluster_name=dict(required=True, type='str'), + esxi_hostname=dict(required=True, type='str'), + esxi_username=dict(required=True, type='str'), + esxi_password=dict(required=True, type='str', no_log=True), + state=dict(default='present', choices=['present', 'absent'], type='str'))) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + if not HAS_PYVMOMI: + module.fail_json(msg='pyvmomi is required for this module') + + try: + # Currently state_update_dvs is not implemented. 
+ host_states = { + 'absent': { + 'present': state_remove_host, + 'absent': state_exit_unchanged, + }, + 'present': { + 'present': state_exit_unchanged, + 'absent': state_add_host, + } + } + + host_states[module.params['state']][check_host_state(module)](module) + + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + except Exception as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.vmware import * +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() From 09a6760c51343b2fabe2c689851b16be8667c3cb Mon Sep 17 00:00:00 2001 From: Joseph Callen Date: Mon, 24 Aug 2015 13:54:09 -0400 Subject: [PATCH 10/44] New VMware Module to support migrating vmkernel adapter --- cloud/vmware/vmware_migrate_vmk.py | 219 +++++++++++++++++++++++++++++ 1 file changed, 219 insertions(+) create mode 100644 cloud/vmware/vmware_migrate_vmk.py diff --git a/cloud/vmware/vmware_migrate_vmk.py b/cloud/vmware/vmware_migrate_vmk.py new file mode 100644 index 00000000000..c658c71b682 --- /dev/null +++ b/cloud/vmware/vmware_migrate_vmk.py @@ -0,0 +1,219 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Joseph Callen +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: vmware_migrate_vmk +short_description: Migrate a VMK interface from VSS to VDS +description: + - Migrate a VMK interface from VSS to VDS +version_added: 2.0 +author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)" +notes: + - Tested on vSphere 5.5 +requirements: + - "python >= 2.6" + - PyVmomi +options: + hostname: + description: + - The hostname or IP address of the vSphere vCenter API server + required: True + username: + description: + - The username of the vSphere vCenter + required: True + aliases: ['user', 'admin'] + password: + description: + - The password of the vSphere vCenter + required: True + aliases: ['pass', 'pwd'] + esxi_hostname: + description: + - ESXi hostname to be managed + required: True + device: + description: + - VMK interface name + required: True + current_switch_name: + description: + - Switch VMK interface is currently on + required: True + current_portgroup_name: + description: + - Portgroup name VMK interface is currently on + required: True + migrate_switch_name: + description: + - Switch name to migrate VMK interface to + required: True + migrate_portgroup_name: + description: + - Portgroup name to migrate VMK interface to + required: True +''' + +EXAMPLES = ''' +Example from Ansible playbook + + - name: Migrate Management vmk + local_action: + module: vmware_migrate_vmk + hostname: vcsa_host + username: vcsa_user + password: vcsa_pass + esxi_hostname: esxi_hostname + device: vmk1 + current_switch_name: temp_vswitch + current_portgroup_name: esx-mgmt + migrate_switch_name: dvSwitch + migrate_portgroup_name: Management +''' + + +try: + from pyVmomi import vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +def state_exit_unchanged(module): + module.exit_json(changed=False) + + +def state_migrate_vds_vss(module): + module.exit_json(changed=False, msg="Currently Not Implemented") + + +def create_host_vnic_config(dv_switch_uuid, portgroup_key, device): + + host_vnic_config = vim.host.VirtualNic.Config() + host_vnic_config.spec = vim.host.VirtualNic.Specification() + host_vnic_config.changeOperation = "edit" + host_vnic_config.device = device + host_vnic_config.portgroup = "" + host_vnic_config.spec.distributedVirtualPort = vim.dvs.PortConnection() + host_vnic_config.spec.distributedVirtualPort.switchUuid = dv_switch_uuid + host_vnic_config.spec.distributedVirtualPort.portgroupKey = portgroup_key + + return host_vnic_config + + +def create_port_group_config(switch_name, portgroup_name): + port_group_config = vim.host.PortGroup.Config() + port_group_config.spec = vim.host.PortGroup.Specification() + + port_group_config.changeOperation = "remove" + port_group_config.spec.name = portgroup_name + port_group_config.spec.vlanId = -1 + port_group_config.spec.vswitchName = switch_name + port_group_config.spec.policy = vim.host.NetworkPolicy() + + return port_group_config + + +def state_migrate_vss_vds(module): + content = module.params['content'] + host_system = module.params['host_system'] + migrate_switch_name = module.params['migrate_switch_name'] + migrate_portgroup_name = module.params['migrate_portgroup_name'] + current_portgroup_name = module.params['current_portgroup_name'] + current_switch_name = module.params['current_switch_name'] + device = module.params['device'] + + host_network_system = host_system.configManager.networkSystem + + dv_switch = find_dvs_by_name(content, migrate_switch_name) + pg = find_dvspg_by_name(dv_switch, migrate_portgroup_name) + + config = 
vim.host.NetworkConfig() + config.portgroup = [create_port_group_config(current_switch_name, current_portgroup_name)] + config.vnic = [create_host_vnic_config(dv_switch.uuid, pg.key, device)] + host_network_system.UpdateNetworkConfig(config, "modify") + module.exit_json(changed=True) + + +def check_vmk_current_state(module): + + device = module.params['device'] + esxi_hostname = module.params['esxi_hostname'] + current_portgroup_name = module.params['current_portgroup_name'] + current_switch_name = module.params['current_switch_name'] + + content = connect_to_api(module) + + host_system = find_hostsystem_by_name(content, esxi_hostname) + + module.params['content'] = content + module.params['host_system'] = host_system + + for vnic in host_system.configManager.networkSystem.networkInfo.vnic: + if vnic.device == device: + module.params['vnic'] = vnic + if vnic.spec.distributedVirtualPort is None: + if vnic.portgroup == current_portgroup_name: + return "migrate_vss_vds" + else: + dvs = find_dvs_by_name(content, current_switch_name) + if dvs is None: + return "migrated" + if vnic.spec.distributedVirtualPort.switchUuid == dvs.uuid: + return "migrate_vds_vss" + + +def main(): + + argument_spec = vmware_argument_spec() + argument_spec.update(dict(esxi_hostname=dict(required=True, type='str'), + device=dict(required=True, type='str'), + current_switch_name=dict(required=True, type='str'), + current_portgroup_name=dict(required=True, type='str'), + migrate_switch_name=dict(required=True, type='str'), + migrate_portgroup_name=dict(required=True, type='str'))) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + + if not HAS_PYVMOMI: + module.fail_json(msg='pyvmomi required for this module') + + try: + vmk_migration_states = { + 'migrate_vss_vds': state_migrate_vss_vds, + 'migrate_vds_vss': state_migrate_vds_vss, + 'migrated': state_exit_unchanged + } + + vmk_migration_states[check_vmk_current_state(module)](module) + + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + except Exception as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.vmware import * +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() From c48945c10e3073fab2f4374e98055f210e8cb13d Mon Sep 17 00:00:00 2001 From: Russell Teague Date: Mon, 24 Aug 2015 13:55:47 -0400 Subject: [PATCH 11/44] Adding vmware_target_canonical_facts module --- cloud/vmware/vmware_target_canonical_facts.py | 108 ++++++++++++++++++ 1 file changed, 108 insertions(+) create mode 100644 cloud/vmware/vmware_target_canonical_facts.py diff --git a/cloud/vmware/vmware_target_canonical_facts.py b/cloud/vmware/vmware_target_canonical_facts.py new file mode 100644 index 00000000000..987b4a98753 --- /dev/null +++ b/cloud/vmware/vmware_target_canonical_facts.py @@ -0,0 +1,108 @@ +#!/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Joseph Callen +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: vmware_target_canonical_facts +short_description: Return canonical (NAA) from an ESXi host +description: + - Return canonical (NAA) from an ESXi host based on SCSI target ID +version_added: 2.0 +author: Joseph Callen +notes: +requirements: + - Tested on vSphere 5.5 + - PyVmomi installed +options: + hostname: + description: + - The hostname or IP address of the vSphere vCenter + required: True + username: + description: + - The username of the vSphere vCenter + required: True + aliases: ['user', 'admin'] + password: + description: + - The password of the vSphere vCenter + required: True + aliases: ['pass', 'pwd'] + target_id: + description: + - The target id based on order of scsi device + required: True +''' + +EXAMPLES = ''' +# Example vmware_target_canonical_facts command from Ansible Playbooks +- name: Get Canonical name + local_action: > + vmware_target_canonical_facts + hostname="{{ ansible_ssh_host }}" username=root password=vmware + target_id=7 +''' + +try: + from pyVmomi import vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +def find_hostsystem(content): + host_system = get_all_objs(content, [vim.HostSystem]) + for host in host_system: + return host + return None + + +def main(): + + argument_spec = vmware_argument_spec() + argument_spec.update(dict(target_id=dict(required=True, type='int'))) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + + if not HAS_PYVMOMI: + module.fail_json(msg='pyvmomi is required for this module') + + content = connect_to_api(module) + host = find_hostsystem(content) + + target_lun_uuid = {} + scsilun_canonical = {} + + # Associate the scsiLun key with the canonicalName (NAA) + for scsilun in host.config.storageDevice.scsiLun: + scsilun_canonical[scsilun.key] = scsilun.canonicalName + + # Associate target number with LUN uuid + for target in host.config.storageDevice.scsiTopology.adapter[0].target: + for lun in target.lun: + target_lun_uuid[target.target] = lun.scsiLun + + module.exit_json(changed=False, canonical=scsilun_canonical[target_lun_uuid[module.params['target_id']]]) + +from ansible.module_utils.basic import * +from ansible.module_utils.vmware import * + +if __name__ == '__main__': + main() + From 485670145729be8e2eb5f19bd06c7d4593ba3e84 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sun, 23 Aug 2015 00:09:38 +0200 Subject: [PATCH 12/44] cloudstack: cs_domain: rename argument cleanup to clean_up for consistency --- cloud/cloudstack/cs_domain.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud/cloudstack/cs_domain.py b/cloud/cloudstack/cs_domain.py index c9f345a00c2..27410040aec 100644 --- a/cloud/cloudstack/cs_domain.py +++ b/cloud/cloudstack/cs_domain.py @@ -37,7 +37,7 @@ options: - Network domain for networks in the domain. required: false default: null - cleanup: + clean_up: description: - Clean up all domain resources like child domains and accounts. - Considered on C(state=absent). 
@@ -225,7 +225,7 @@ class AnsibleCloudStackDomain(AnsibleCloudStack): if not self.module.check_mode: args = {} args['id'] = domain['id'] - args['cleanup'] = self.module.params.get('cleanup') + args['cleanup'] = self.module.params.get('clean_up') res = self.cs.deleteDomain(**args) if 'errortext' in res: @@ -244,7 +244,7 @@ def main(): path = dict(required=True), state = dict(choices=['present', 'absent'], default='present'), network_domain = dict(default=None), - cleanup = dict(choices=BOOLEANS, default=False), + clean_up = dict(choices=BOOLEANS, default=False), poll_async = dict(choices=BOOLEANS, default=True), api_key = dict(default=None), api_secret = dict(default=None, no_log=True), From 02e3adf40258bd157563bb3ccd950c673f7a70bd Mon Sep 17 00:00:00 2001 From: Chrrrles Paul Date: Wed, 26 Aug 2015 20:43:43 -0500 Subject: [PATCH 13/44] Revert "New VMware Module to support configuring a VMware vmkernel IP Address" --- cloud/vmware/vmware_vmkernel_ip_config.py | 136 ---------------------- 1 file changed, 136 deletions(-) delete mode 100644 cloud/vmware/vmware_vmkernel_ip_config.py diff --git a/cloud/vmware/vmware_vmkernel_ip_config.py b/cloud/vmware/vmware_vmkernel_ip_config.py deleted file mode 100644 index c07526f0aeb..00000000000 --- a/cloud/vmware/vmware_vmkernel_ip_config.py +++ /dev/null @@ -1,136 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2015, Joseph Callen -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -DOCUMENTATION = ''' ---- -module: vmware_vmkernel_ip_config -short_description: Configure the VMkernel IP Address -description: - - Configure the VMkernel IP Address -version_added: 2.0 -author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)" -notes: - - Tested on vSphere 5.5 -requirements: - - "python >= 2.6" - - PyVmomi -options: - hostname: - description: - - The hostname or IP address of the ESXi server - required: True - username: - description: - - The username of the ESXi server - required: True - aliases: ['user', 'admin'] - password: - description: - - The password of the ESXi server - required: True - aliases: ['pass', 'pwd'] - vmk_name: - description: - - VMkernel interface name - required: True - ip_address: - description: - - IP address to assign to VMkernel interface - required: True - subnet_mask: - description: - - Subnet Mask to assign to VMkernel interface - required: True -''' - -EXAMPLES = ''' -# Example command from Ansible Playbook - -- name: Configure IP address on ESX host - local_action: - module: vmware_vmkernel_ip_config - hostname: esxi_hostname - username: esxi_username - password: esxi_password - vmk_name: vmk0 - ip_address: 10.0.0.10 - subnet_mask: 255.255.255.0 -''' - -try: - from pyVmomi import vim, vmodl - HAS_PYVMOMI = True -except ImportError: - HAS_PYVMOMI = False - - -def configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask): - - host_config_manager = host_system.configManager - host_network_system = host_config_manager.networkSystem - - for vnic in host_network_system.networkConfig.vnic: - if vnic.device == vmk_name: - spec = vnic.spec - if spec.ip.ipAddress != ip_address: - spec.ip.dhcp = False - spec.ip.ipAddress = ip_address - spec.ip.subnetMask = subnet_mask - host_network_system.UpdateVirtualNic(vmk_name, spec) - return True - return False - - -def main(): - - argument_spec = vmware_argument_spec() - argument_spec.update(dict(vmk_name=dict(required=True, type='str'), - ip_address=dict(required=True, type='str'), - subnet_mask=dict(required=True, type='str'))) - - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) - - if not HAS_PYVMOMI: - module.fail_json(msg='pyvmomi is required for this module') - - vmk_name = module.params['vmk_name'] - ip_address = module.params['ip_address'] - subnet_mask = module.params['subnet_mask'] - - try: - content = connect_to_api(module, False) - host = get_all_objs(content, [vim.HostSystem]) - if not host: - module.fail_json(msg="Unable to locate Physical Host.") - host_system = host.keys()[0] - changed = configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask) - module.exit_json(changed=changed) - except vmodl.RuntimeFault as runtime_fault: - module.fail_json(msg=runtime_fault.msg) - except vmodl.MethodFault as method_fault: - module.fail_json(msg=method_fault.msg) - except Exception as e: - module.fail_json(msg=str(e)) - -from ansible.module_utils.vmware import * -from ansible.module_utils.basic import * - -if __name__ == '__main__': - main() From 0847bfecd672f6b2e0e4429e998df7c6e7042b1c Mon Sep 17 00:00:00 2001 From: Amanpreet Singh Date: Thu, 27 Aug 2015 02:26:42 +0530 Subject: [PATCH 14/44] Add new module: pagerduty_alert - trigger, acknowledge or resolve pagerduty incidents --- monitoring/pagerduty_alert.py | 157 ++++++++++++++++++++++++++++++++++ 1 file changed, 157 insertions(+) create mode 100644 monitoring/pagerduty_alert.py diff --git a/monitoring/pagerduty_alert.py b/monitoring/pagerduty_alert.py new file mode 100644 
index 00000000000..a2dddb9ea45 --- /dev/null +++ b/monitoring/pagerduty_alert.py @@ -0,0 +1,157 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' + +module: pagerduty_alert +short_description: Trigger, acknowledge or resolve PagerDuty incidents +description: + - This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events +version_added: "1.9" +author: + - "Amanpreet Singh (@aps-sids)" +requirements: + - PagerDuty API access +options: + service_key: + description: + - The GUID of one of your "Generic API" services. + - This is the "service key" listed on a Generic API's service detail page. + required: true + event_type: + description: + - Type of event to be sent. + required: true + choices: + - 'trigger' + - 'acknowledge' + - 'resolve' + desc: + description: + - For C(trigger) I(event_type) - Required. Short description of the problem that led to this trigger. This field (or a truncated version) will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI. The maximum length is 1024 characters. + - For C(acknowledge) or C(resolve) I(event_type) - Text that will appear in the incident's log associated with this event. + required: false + default: Created via Ansible + incident_key: + description: + - Identifies the incident to which this I(event_type) should be applied. + - For C(trigger) I(event_type) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup" problem reports. + - For C(acknowledge) or C(resolve) I(event_type) - This should be the incident_key you received back when the incident was first opened by a trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded. + required: false + client: + description: + - The name of the monitoring client that is triggering this event. + required: false + client_url: + description: + - The URL of the monitoring client that is triggering this event. 
+ required: false +''' + +EXAMPLES = ''' +# Trigger an incident with just the basic options +- pagerduty_alert: + service_key=xxx + event_type=trigger + desc="problem that led to this trigger" + +# Trigger an incident with more options +- pagerduty_alert: + service_key=xxx + event_type=trigger + desc="problem that led to this trigger" + incident_key=somekey + client="Sample Monitoring Service" + client_url=http://service.example.com + +# Acknowledge an incident based on incident_key +- pagerduty_alert: + service_key=xxx + event_type=acknowledge + incident_key=somekey + desc="some text for incident's log" + +# Resolve an incident based on incident_key +- pagerduty_alert: + service_key=xxx + event_type=resolve + incident_key=somekey + desc="some text for incident's log" +''' + + +def send_event(module, service_key, event_type, desc, + incident_key=None, client=None, client_url=None): + url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" + headers = { + "Content-type": "application/json" + } + + data = { + "service_key": service_key, + "event_type": event_type, + "incident_key": incident_key, + "description": desc, + "client": client, + "client_url": client_url + } + + response, info = fetch_url(module, url, method='post', + headers=headers, data=json.dumps(data)) + if info['status'] != 200: + module.fail_json(msg="failed to %s. Reason: %s" % + (event_type, info['msg'])) + json_out = json.loads(response.read()) + return json_out, True + + +def main(): + module = AnsibleModule( + argument_spec=dict( + service_key=dict(required=True), + event_type=dict(required=True, + choices=['trigger', 'acknowledge', 'resolve']), + client=dict(required=False, default=None), + client_url=dict(required=False, default=None), + desc=dict(required=False, default='Created via Ansible'), + incident_key=dict(required=False, default=None) + ) + ) + + service_key = module.params['service_key'] + event_type = module.params['event_type'] + client = module.params['client'] + client_url = module.params['client_url'] + desc = module.params['desc'] + incident_key = module.params['incident_key'] + + if event_type != 'trigger' and incident_key is None: + module.fail_json(msg="incident_key is required for " + "acknowledge or resolve events") + + out, changed = send_event(module, service_key, event_type, desc, + incident_key, client, client_url) + + module.exit_json(msg="success", result=out, changed=changed) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + +if __name__ == '__main__': + main() From a0af060c258e4fa116533765a0b955ac3fa815c6 Mon Sep 17 00:00:00 2001 From: Amanpreet Singh Date: Thu, 27 Aug 2015 18:02:45 +0530 Subject: [PATCH 15/44] Make pagerduty_alert module more inline with ansible modules - use state parameter instead of event_type - add support for check mode --- monitoring/pagerduty_alert.py | 98 +++++++++++++++++++++++++++-------- 1 file changed, 77 insertions(+), 21 deletions(-) diff --git a/monitoring/pagerduty_alert.py b/monitoring/pagerduty_alert.py index a2dddb9ea45..e2d127f0155 100644 --- a/monitoring/pagerduty_alert.py +++ b/monitoring/pagerduty_alert.py @@ -28,30 +28,38 @@ author: requirements: - PagerDuty API access options: + name: + description: + - PagerDuty unique subdomain. + required: true service_key: description: - The GUID of one of your "Generic API" services. - This is the "service key" listed on a Generic API's service detail page. 
required: true - event_type: + state: description: - Type of event to be sent. required: true choices: - - 'trigger' - - 'acknowledge' - - 'resolve' + - 'triggered' + - 'acknowledged' + - 'resolved' + api_key: + description: + - The pagerduty API key (readonly access), generated on the pagerduty site. + required: true desc: description: - - For C(trigger) I(event_type) - Required. Short description of the problem that led to this trigger. This field (or a truncated version) will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI. The maximum length is 1024 characters. - - For C(acknowledge) or C(resolve) I(event_type) - Text that will appear in the incident's log associated with this event. + - For C(triggered) I(state) - Required. Short description of the problem that led to this trigger. This field (or a truncated version) will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI. The maximum length is 1024 characters. + - For C(acknowledged) or C(resolved) I(state) - Text that will appear in the incident's log associated with this event. required: false default: Created via Ansible incident_key: description: - - Identifies the incident to which this I(event_type) should be applied. - - For C(trigger) I(event_type) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup" problem reports. - - For C(acknowledge) or C(resolve) I(event_type) - This should be the incident_key you received back when the incident was first opened by a trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded. + - Identifies the incident to which this I(state) should be applied. + - For C(triggered) I(state) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup" problem reports. + - For C(acknowledged) or C(resolved) I(state) - This should be the incident_key you received back when the incident was first opened by a trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded. 
required: false client: description: @@ -66,14 +74,17 @@ options: EXAMPLES = ''' # Trigger an incident with just the basic options - pagerduty_alert: + name: companyabc service_key=xxx - event_type=trigger + api_key:yourapikey + state=triggered desc="problem that led to this trigger" # Trigger an incident with more options - pagerduty_alert: service_key=xxx - event_type=trigger + api_key=yourapikey + state=triggered desc="problem that led to this trigger" incident_key=somekey client="Sample Monitoring Service" @@ -82,19 +93,47 @@ EXAMPLES = ''' # Acknowledge an incident based on incident_key - pagerduty_alert: service_key=xxx - event_type=acknowledge + api_key=yourapikey + state=acknowledged incident_key=somekey desc="some text for incident's log" # Resolve an incident based on incident_key - pagerduty_alert: service_key=xxx - event_type=resolve + api_key=yourapikey + state=resolved incident_key=somekey desc="some text for incident's log" ''' +def check(module, name, state, service_key, api_key, incident_key=None): + url = "https://%s.pagerduty.com/api/v1/incidents" % name + headers = { + "Content-type": "application/json", + "Authorization": "Token token=%s" % api_key + } + + data = { + "service_key": service_key, + "incident_key": incident_key, + "sort_by": "incident_number:desc" + } + + response, info = fetch_url(module, url, method='get', + headers=headers, data=json.dumps(data)) + + if info['status'] != 200: + module.fail_json(msg="failed to check current incident status." + "Reason: %s" % info['msg']) + json_out = json.loads(response.read())["incidents"][0] + + if state != json_out["status"]: + return json_out, True + return json_out, False + + def send_event(module, service_key, event_type, desc, incident_key=None, client=None, client_url=None): url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" @@ -117,37 +156,54 @@ def send_event(module, service_key, event_type, desc, module.fail_json(msg="failed to %s. 
Reason: %s" % (event_type, info['msg'])) json_out = json.loads(response.read()) - return json_out, True + return json_out def main(): module = AnsibleModule( argument_spec=dict( + name=dict(required=True), service_key=dict(required=True), - event_type=dict(required=True, - choices=['trigger', 'acknowledge', 'resolve']), + api_key=dict(required=True), + state=dict(required=True, + choices=['triggered', 'acknowledged', 'resolved']), client=dict(required=False, default=None), client_url=dict(required=False, default=None), desc=dict(required=False, default='Created via Ansible'), incident_key=dict(required=False, default=None) - ) + ), + supports_check_mode=True ) + name = module.params['name'] service_key = module.params['service_key'] - event_type = module.params['event_type'] + api_key = module.params['api_key'] + state = module.params['state'] client = module.params['client'] client_url = module.params['client_url'] desc = module.params['desc'] incident_key = module.params['incident_key'] + state_event_dict = { + 'triggered': 'trigger', + 'acknowledged': 'acknowledge', + 'resolved': 'resolve' + } + + event_type = state_event_dict[state] + if event_type != 'trigger' and incident_key is None: module.fail_json(msg="incident_key is required for " "acknowledge or resolve events") - out, changed = send_event(module, service_key, event_type, desc, - incident_key, client, client_url) + out, changed = check(module, name, state, + service_key, api_key, incident_key) + + if not module.check_mode and changed is True: + out = send_event(module, service_key, event_type, desc, + incident_key, client, client_url) - module.exit_json(msg="success", result=out, changed=changed) + module.exit_json(result=out, changed=changed) # import module snippets from ansible.module_utils.basic import * From 2647d2b637d8d885485bdd3a7f57cfd4d9da235f Mon Sep 17 00:00:00 2001 From: Chrrrles Paul Date: Thu, 27 Aug 2015 17:44:29 -0500 Subject: [PATCH 16/44] =?UTF-8?q?Revert=20"Revert=20"New=20VMware=20Module?= =?UTF-8?q?=20to=20support=20configuring=20a=20VMware=20vmkernel=20IP?= =?UTF-8?q?=E2=80=A6"?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- cloud/vmware/vmware_vmkernel_ip_config.py | 136 ++++++++++++++++++++++ 1 file changed, 136 insertions(+) create mode 100644 cloud/vmware/vmware_vmkernel_ip_config.py diff --git a/cloud/vmware/vmware_vmkernel_ip_config.py b/cloud/vmware/vmware_vmkernel_ip_config.py new file mode 100644 index 00000000000..c07526f0aeb --- /dev/null +++ b/cloud/vmware/vmware_vmkernel_ip_config.py @@ -0,0 +1,136 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Joseph Callen +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: vmware_vmkernel_ip_config +short_description: Configure the VMkernel IP Address +description: + - Configure the VMkernel IP Address +version_added: 2.0 +author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)" +notes: + - Tested on vSphere 5.5 +requirements: + - "python >= 2.6" + - PyVmomi +options: + hostname: + description: + - The hostname or IP address of the ESXi server + required: True + username: + description: + - The username of the ESXi server + required: True + aliases: ['user', 'admin'] + password: + description: + - The password of the ESXi server + required: True + aliases: ['pass', 'pwd'] + vmk_name: + description: + - VMkernel interface name + required: True + ip_address: + description: + - IP address to assign to VMkernel interface + required: True + subnet_mask: + description: + - Subnet Mask to assign to VMkernel interface + required: True +''' + +EXAMPLES = ''' +# Example command from Ansible Playbook + +- name: Configure IP address on ESX host + local_action: + module: vmware_vmkernel_ip_config + hostname: esxi_hostname + username: esxi_username + password: esxi_password + vmk_name: vmk0 + ip_address: 10.0.0.10 + subnet_mask: 255.255.255.0 +''' + +try: + from pyVmomi import vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +def configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask): + + host_config_manager = host_system.configManager + host_network_system = host_config_manager.networkSystem + + for vnic in host_network_system.networkConfig.vnic: + if vnic.device == vmk_name: + spec = vnic.spec + if spec.ip.ipAddress != ip_address: + spec.ip.dhcp = False + spec.ip.ipAddress = ip_address + spec.ip.subnetMask = subnet_mask + host_network_system.UpdateVirtualNic(vmk_name, spec) + return True + return False + + +def main(): + + argument_spec = vmware_argument_spec() + argument_spec.update(dict(vmk_name=dict(required=True, type='str'), + ip_address=dict(required=True, type='str'), + subnet_mask=dict(required=True, type='str'))) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + + if not HAS_PYVMOMI: + module.fail_json(msg='pyvmomi is required for this module') + + vmk_name = module.params['vmk_name'] + ip_address = module.params['ip_address'] + subnet_mask = module.params['subnet_mask'] + + try: + content = connect_to_api(module, False) + host = get_all_objs(content, [vim.HostSystem]) + if not host: + module.fail_json(msg="Unable to locate Physical Host.") + host_system = host.keys()[0] + changed = configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask) + module.exit_json(changed=changed) + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + except Exception as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.vmware import * +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() From 29c8b50d569b1fbef9710861133975da0edac636 Mon Sep 17 00:00:00 2001 From: Alex Punco Date: Fri, 28 Aug 2015 13:26:21 +0300 Subject: [PATCH 17/44] fix creation containers on btrfs subvolumes --- cloud/lxc/lxc_container.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index 1f82bcb829e..adb9637acf9 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -448,7 +448,7 @@ LXC_BACKING_STORE = { 
'zfs_root' ], 'btrfs': [ - 'lv_name', 'vg_name', 'thinpool', 'zfs_root' + 'lv_name', 'vg_name', 'thinpool', 'zfs_root', 'fs_type', 'fs_size' ], 'loop': [ 'lv_name', 'vg_name', 'thinpool', 'zfs_root' From 009ee165a8970093080391243949ef1b151a6bb2 Mon Sep 17 00:00:00 2001 From: varnav Date: Fri, 28 Aug 2015 18:38:58 +0300 Subject: [PATCH 18/44] Small improvement in documentation --- system/firewalld.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/system/firewalld.py b/system/firewalld.py index 04dd4981584..9a63da3a544 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -52,7 +52,7 @@ options: - 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).' required: false default: system-default(public) - choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block"] + choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block" ] permanent: description: - "Should this configuration be in the running firewalld configuration or persist across reboots." @@ -67,6 +67,7 @@ options: description: - "Should this port accept(enabled) or reject(disabled) connections." required: true + choices: [ "enabled", "disabled" ] timeout: description: - "The amount of time the rule should be in effect for when non-permanent." From 4d35698a304769b5998a2e7d6662d78e083f1c93 Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Thu, 13 Nov 2014 19:38:52 -0500 Subject: [PATCH 19/44] Split out route table and subnet functionality from VPC module. --- cloud/amazon/ec2_vpc_route_table.py | 498 ++++++++++++++++++++++++++++ 1 file changed, 498 insertions(+) create mode 100644 cloud/amazon/ec2_vpc_route_table.py diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py new file mode 100644 index 00000000000..92d938a6ff6 --- /dev/null +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -0,0 +1,498 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: ec2_vpc_route_table +short_description: Configure route tables for AWS virtual private clouds +description: + - Create or removes route tables from AWS virtual private clouds.''' +'''This module has a dependency on python-boto. +version_added: "1.8" +options: + vpc_id: + description: + - "The VPC in which to create the route table." + required: true + route_table_id: + description: + - "The ID of the route table to update or delete." + required: false + default: null + resource_tags: + description: + - 'A dictionary array of resource tags of the form: { tag1: value1,''' +''' tag2: value2 }. Tags in this list are used to uniquely identify route''' +''' tables within a VPC when the route_table_id is not supplied. 
+ required: false + default: null + aliases: [] + version_added: "1.6" + routes: + description: + - List of routes in the route table. Routes are specified''' +''' as dicts containing the keys 'dest' and one of 'gateway_id',''' +''' 'instance_id', 'interface_id', or 'vpc_peering_connection'. + required: true + aliases: [] + subnets: + description: + - An array of subnets to add to this route table. Subnets may either be''' +''' specified by subnet ID or by a CIDR such as '10.0.0.0/24'. + required: true + aliases: [] + wait: + description: + - wait for the VPC to be in state 'available' before returning + required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [] + wait_timeout: + description: + - how long before wait gives up, in seconds + default: 300 + aliases: [] + state: + description: + - Create or terminate the VPC + required: true + default: present + aliases: [] + region: + description: + - region in which the resource exists. + required: false + default: null + aliases: ['aws_region', 'ec2_region'] + aws_secret_key: + description: + - AWS secret key. If not set then the value of the AWS_SECRET_KEY''' +''' environment variable is used. + required: false + default: None + aliases: ['ec2_secret_key', 'secret_key' ] + aws_access_key: + description: + - AWS access key. If not set then the value of the AWS_ACCESS_KEY''' +''' environment variable is used. + required: false + default: None + aliases: ['ec2_access_key', 'access_key' ] + validate_certs: + description: + - When set to "no", SSL certificates will not be validated for boto''' +''' versions >= 2.6.0. + required: false + default: "yes" + choices: ["yes", "no"] + aliases: [] + version_added: "1.5" + +requirements: [ "boto" ] +author: Robert Estelle +''' + +EXAMPLES = ''' +# Note: None of these examples set aws_access_key, aws_secret_key, or region. +# It is assumed that their matching environment variables are set. 
+ +# Basic creation example: +- name: Set up public subnet route table + local_action: + module: ec2_vpc_route_table + vpc_id: vpc-1245678 + region: us-west-1 + resource_tags: + Name: Public + subnets: + - '{{jumpbox_subnet.subnet_id}}' + - '{{frontend_subnet.subnet_id}}' + - '{{vpn_subnet.subnet_id}}' + routes: + - dest: 0.0.0.0/0 + gateway_id: '{{igw.gateway_id}}' + register: public_route_table + +- name: Set up NAT-protected route table + local_action: + module: ec2_vpc_route_table + vpc_id: vpc-1245678 + region: us-west-1 + resource_tags: + - Name: Internal + subnets: + - '{{application_subnet.subnet_id}}' + - '{{database_subnet.subnet_id}}' + - '{{splunk_subnet.subnet_id}}' + routes: + - dest: 0.0.0.0/0 + instance_id: '{{nat.instance_id}}' + register: nat_route_table +''' + + +import sys + +try: + import boto.ec2 + import boto.vpc + from boto.exception import EC2ResponseError +except ImportError: + print "failed=True msg='boto required for this module'" + sys.exit(1) + + +class RouteTableException(Exception): + pass + + +class TagCreationException(RouteTableException): + pass + + +def get_resource_tags(vpc_conn, resource_id): + return {t.name: t.value for t in + vpc_conn.get_all_tags(filters={'resource-id': resource_id})} + + +def dict_diff(old, new): + x = {} + old_keys = set(old.keys()) + new_keys = set(new.keys()) + + for k in old_keys.difference(new_keys): + x[k] = {'old': old[k]} + + for k in new_keys.difference(old_keys): + x[k] = {'new': new[k]} + + for k in new_keys.intersection(old_keys): + if new[k] != old[k]: + x[k] = {'new': new[k], 'old': old[k]} + + return x + + +def tags_match(match_tags, candidate_tags): + return all((k in candidate_tags and candidate_tags[k] == v + for k, v in match_tags.iteritems())) + + +def ensure_tags(vpc_conn, resource_id, tags, add_only, dry_run): + try: + cur_tags = get_resource_tags(vpc_conn, resource_id) + diff = dict_diff(cur_tags, tags) + if not diff: + return {'changed': False, 'tags': cur_tags} + + to_delete = {k: diff[k]['old'] for k in diff if 'new' not in diff[k]} + if to_delete and not add_only: + vpc_conn.delete_tags(resource_id, to_delete, dry_run=dry_run) + + to_add = {k: diff[k]['new'] for k in diff if 'old' not in diff[k]} + if to_add: + vpc_conn.create_tags(resource_id, to_add, dry_run=dry_run) + + latest_tags = get_resource_tags(vpc_conn, resource_id) + return {'changed': True, 'tags': latest_tags} + except EC2ResponseError as e: + raise TagCreationException('Unable to update tags for {0}, error: {1}' + .format(resource_id, e)) + + +def get_route_table_by_id(vpc_conn, vpc_id, route_table_id): + route_tables = vpc_conn.get_all_route_tables( + route_table_ids=[route_table_id], filters={'vpc_id': vpc_id}) + return route_tables[0] if route_tables else None + + +def get_route_table_by_tags(vpc_conn, vpc_id, tags): + route_tables = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc_id}) + for route_table in route_tables: + this_tags = get_resource_tags(vpc_conn, route_table.id) + if tags_match(tags, this_tags): + return route_table + + +def route_spec_matches_route(route_spec, route): + key_attr_map = { + 'destination_cidr_block': 'destination_cidr_block', + 'gateway_id': 'gateway_id', + 'instance_id': 'instance_id', + 'interface_id': 'interface_id', + 'vpc_peering_connection_id': 'vpc_peering_connection_id', + } + for k in key_attr_map.iterkeys(): + if k in route_spec: + if route_spec[k] != getattr(route, k): + return False + return True + + +def rename_key(d, old_key, new_key): + d[new_key] = d[old_key] + del d[old_key] + + +def 
index_of_matching_route(route_spec, routes_to_match): + for i, route in enumerate(routes_to_match): + if route_spec_matches_route(route_spec, route): + return i + + +def ensure_routes(vpc_conn, route_table, route_specs, check_mode): + routes_to_match = list(route_table.routes) + route_specs_to_create = [] + for route_spec in route_specs: + i = index_of_matching_route(route_spec, routes_to_match) + if i is None: + route_specs_to_create.append(route_spec) + else: + del routes_to_match[i] + routes_to_delete = [r for r in routes_to_match + if r.gateway_id != 'local'] + + changed = routes_to_delete or route_specs_to_create + if check_mode and changed: + return {'changed': True} + elif changed: + for route_spec in route_specs_to_create: + vpc_conn.create_route(route_table.id, **route_spec) + + for route in routes_to_delete: + vpc_conn.delete_route(route_table.id, route.destination_cidr_block) + return {'changed': True} + else: + return {'changed': False} + + +def get_subnet_by_cidr(vpc_conn, vpc_id, cidr): + subnets = vpc_conn.get_all_subnets( + filters={'cidr': cidr, 'vpc_id': vpc_id}) + if len(subnets) != 1: + raise RouteTableException( + 'Subnet with CIDR {0} has {1} matches'.format(cidr, len(subnets)) + ) + return subnets[0] + + +def get_subnet_by_id(vpc_conn, vpc_id, subnet_id): + subnets = vpc_conn.get_all_subnets(filters={'subnet-id': subnet_id}) + if len(subnets) != 1: + raise RouteTableException( + 'Subnet with ID {0} has {1} matches'.format(subnet_id, len(subnets)) + ) + return subnets[0] + + +def ensure_subnet_association(vpc_conn, vpc_id, route_table_id, subnet_id, + check_mode): + route_tables = vpc_conn.get_all_route_tables( + filters={'association.subnet_id': subnet_id, 'vpc_id': vpc_id} + ) + for route_table in route_tables: + if route_table.id is None: + continue + for a in route_table.associations: + if a.subnet_id == subnet_id: + if route_table.id == route_table_id: + return {'changed': False, 'association_id': a.id} + else: + if check_mode: + return {'changed': True} + vpc_conn.disassociate_route_table(a.id) + + association_id = vpc_conn.associate_route_table(route_table_id, subnet_id) + return {'changed': True, 'association_id': association_id} + + +def ensure_subnet_associations(vpc_conn, vpc_id, route_table, subnets, + check_mode): + current_association_ids = [a.id for a in route_table.associations] + new_association_ids = [] + changed = False + for subnet in subnets: + result = ensure_subnet_association( + vpc_conn, vpc_id, route_table.id, subnet.id, check_mode) + changed = changed or result['changed'] + if changed and check_mode: + return {'changed': True} + new_association_ids.append(result['association_id']) + + to_delete = [a_id for a_id in current_association_ids + if a_id not in new_association_ids] + + for a_id in to_delete: + if check_mode: + return {'changed': True} + changed = True + vpc_conn.disassociate_route_table(a_id) + + return {'changed': changed} + + +def ensure_route_table_absent(vpc_conn, vpc_id, route_table_id, resource_tags, + check_mode): + if route_table_id: + route_table = get_route_table_by_id(vpc_conn, vpc_id, route_table_id) + elif resource_tags: + route_table = get_route_table_by_tags(vpc_conn, vpc_id, resource_tags) + else: + raise RouteTableException( + 'must provide route_table_id or resource_tags') + + if route_table is None: + return {'changed': False} + + if check_mode: + return {'changed': True} + + vpc_conn.delete_route_table(route_table.id) + return {'changed': True} + + +def ensure_route_table_present(vpc_conn, vpc_id, 
route_table_id, resource_tags, + routes, subnets, check_mode): + changed = False + tags_valid = False + if route_table_id: + route_table = get_route_table_by_id(vpc_conn, vpc_id, route_table_id) + elif resource_tags: + route_table = get_route_table_by_tags(vpc_conn, vpc_id, resource_tags) + tags_valid = route_table is not None + else: + raise RouteTableException( + 'must provide route_table_id or resource_tags') + + if check_mode and route_table is None: + return {'changed': True} + + if route_table is None: + try: + route_table = vpc_conn.create_route_table(vpc_id) + except EC2ResponseError as e: + raise RouteTableException( + 'Unable to create route table {0}, error: {1}' + .format(route_table_id or resource_tags, e) + ) + + if not tags_valid and resource_tags is not None: + result = ensure_tags(vpc_conn, route_table.id, resource_tags, + add_only=True, dry_run=check_mode) + changed = changed or result['changed'] + + if routes is not None: + try: + result = ensure_routes(vpc_conn, route_table, routes, check_mode) + changed = changed or result['changed'] + except EC2ResponseError as e: + raise RouteTableException( + 'Unable to ensure routes for route table {0}, error: {1}' + .format(route_table, e) + ) + + if subnets: + associated_subnets = [] + try: + for subnet_name in subnets: + if ('.' in subnet_name) and ('/' in subnet_name): + subnet = get_subnet_by_cidr(vpc_conn, vpc_id, subnet_name) + else: + subnet = get_subnet_by_id(vpc_conn, vpc_id, subnet_name) + associated_subnets.append(subnet) + except EC2ResponseError as e: + raise RouteTableException( + 'Unable to find subnets for route table {0}, error: {1}' + .format(route_table, e) + ) + + try: + result = ensure_subnet_associations( + vpc_conn, vpc_id, route_table, associated_subnets, check_mode) + changed = changed or result['changed'] + except EC2ResponseError as e: + raise RouteTableException( + 'Unable to associate subnets for route table {0}, error: {1}' + .format(route_table, e) + ) + + return { + 'changed': changed, + 'route_table_id': route_table.id, + } + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update({ + 'vpc_id': {'required': True}, + 'route_table_id': {'required': False}, + 'resource_tags': {'type': 'dict', 'required': False}, + 'routes': {'type': 'list', 'required': False}, + 'subnets': {'type': 'list', 'required': False}, + 'state': {'choices': ['present', 'absent'], 'default': 'present'}, + }) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + if not region: + module.fail_json(msg='Region must be specified') + + try: + vpc_conn = boto.vpc.connect_to_region( + region, + aws_access_key_id=aws_access_key, + aws_secret_access_key=aws_secret_key + ) + except boto.exception.NoAuthHandlerFound as e: + module.fail_json(msg=str(e)) + + vpc_id = module.params.get('vpc_id') + route_table_id = module.params.get('route_table_id') + resource_tags = module.params.get('resource_tags') + + routes = module.params.get('routes') + for route_spec in routes: + rename_key(route_spec, 'dest', 'destination_cidr_block') + + subnets = module.params.get('subnets') + state = module.params.get('state', 'present') + + try: + if state == 'present': + result = ensure_route_table_present( + vpc_conn, vpc_id, route_table_id, resource_tags, + routes, subnets, module.check_mode + ) + elif state == 'absent': + result = ensure_route_table_absent( + vpc_conn, vpc_id, route_table_id, resource_tags, + module.check_mode + 
) + except RouteTableException as e: + module.fail_json(msg=str(e)) + + module.exit_json(**result) + +from ansible.module_utils.basic import * # noqa +from ansible.module_utils.ec2 import * # noqa + +if __name__ == '__main__': + main() From e395bb456ec733be7699002082064310eede9224 Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Thu, 13 Nov 2014 19:57:15 -0500 Subject: [PATCH 20/44] EC2 subnet/route-table: Simplify tag updating. --- cloud/amazon/ec2_vpc_route_table.py | 25 +++---------------------- 1 file changed, 3 insertions(+), 22 deletions(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index 92d938a6ff6..6536ff29f94 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -169,24 +169,6 @@ def get_resource_tags(vpc_conn, resource_id): vpc_conn.get_all_tags(filters={'resource-id': resource_id})} -def dict_diff(old, new): - x = {} - old_keys = set(old.keys()) - new_keys = set(new.keys()) - - for k in old_keys.difference(new_keys): - x[k] = {'old': old[k]} - - for k in new_keys.difference(old_keys): - x[k] = {'new': new[k]} - - for k in new_keys.intersection(old_keys): - if new[k] != old[k]: - x[k] = {'new': new[k], 'old': old[k]} - - return x - - def tags_match(match_tags, candidate_tags): return all((k in candidate_tags and candidate_tags[k] == v for k, v in match_tags.iteritems())) @@ -195,15 +177,14 @@ def tags_match(match_tags, candidate_tags): def ensure_tags(vpc_conn, resource_id, tags, add_only, dry_run): try: cur_tags = get_resource_tags(vpc_conn, resource_id) - diff = dict_diff(cur_tags, tags) - if not diff: + if tags == cur_tags: return {'changed': False, 'tags': cur_tags} - to_delete = {k: diff[k]['old'] for k in diff if 'new' not in diff[k]} + to_delete = {k: cur_tags[k] for k in cur_tags if k not in tags} if to_delete and not add_only: vpc_conn.delete_tags(resource_id, to_delete, dry_run=dry_run) - to_add = {k: diff[k]['new'] for k in diff if 'old' not in diff[k]} + to_add = {k: tags[k] for k in tags if k not in cur_tags} if to_add: vpc_conn.create_tags(resource_id, to_add, dry_run=dry_run) From 60efbe8beccf1768e693788d2698c766e0129450 Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Mon, 1 Dec 2014 13:41:03 -0500 Subject: [PATCH 21/44] ec2_vpc - VPCException -> AnsibleVPCException --- cloud/amazon/ec2_vpc_route_table.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index 6536ff29f94..0f6184c40d4 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -156,11 +156,11 @@ except ImportError: sys.exit(1) -class RouteTableException(Exception): +class AnsibleRouteTableException(Exception): pass -class TagCreationException(RouteTableException): +class AnsibleTagCreationException(AnsibleRouteTableException): pass @@ -191,8 +191,8 @@ def ensure_tags(vpc_conn, resource_id, tags, add_only, dry_run): latest_tags = get_resource_tags(vpc_conn, resource_id) return {'changed': True, 'tags': latest_tags} except EC2ResponseError as e: - raise TagCreationException('Unable to update tags for {0}, error: {1}' - .format(resource_id, e)) + raise AnsibleTagCreationException( + 'Unable to update tags for {0}, error: {1}'.format(resource_id, e)) def get_route_table_by_id(vpc_conn, vpc_id, route_table_id): @@ -265,7 +265,7 @@ def get_subnet_by_cidr(vpc_conn, vpc_id, cidr): subnets = vpc_conn.get_all_subnets( filters={'cidr': cidr, 'vpc_id': vpc_id}) if 
len(subnets) != 1: - raise RouteTableException( + raise AnsibleRouteTableException( 'Subnet with CIDR {0} has {1} matches'.format(cidr, len(subnets)) ) return subnets[0] @@ -274,8 +274,9 @@ def get_subnet_by_cidr(vpc_conn, vpc_id, cidr): def get_subnet_by_id(vpc_conn, vpc_id, subnet_id): subnets = vpc_conn.get_all_subnets(filters={'subnet-id': subnet_id}) if len(subnets) != 1: - raise RouteTableException( - 'Subnet with ID {0} has {1} matches'.format(subnet_id, len(subnets)) + raise AnsibleRouteTableException( + 'Subnet with ID {0} has {1} matches'.format( + subnet_id, len(subnets)) ) return subnets[0] @@ -333,7 +334,7 @@ def ensure_route_table_absent(vpc_conn, vpc_id, route_table_id, resource_tags, elif resource_tags: route_table = get_route_table_by_tags(vpc_conn, vpc_id, resource_tags) else: - raise RouteTableException( + raise AnsibleRouteTableException( 'must provide route_table_id or resource_tags') if route_table is None: @@ -356,7 +357,7 @@ def ensure_route_table_present(vpc_conn, vpc_id, route_table_id, resource_tags, route_table = get_route_table_by_tags(vpc_conn, vpc_id, resource_tags) tags_valid = route_table is not None else: - raise RouteTableException( + raise AnsibleRouteTableException( 'must provide route_table_id or resource_tags') if check_mode and route_table is None: @@ -366,7 +367,7 @@ def ensure_route_table_present(vpc_conn, vpc_id, route_table_id, resource_tags, try: route_table = vpc_conn.create_route_table(vpc_id) except EC2ResponseError as e: - raise RouteTableException( + raise AnsibleRouteTableException( 'Unable to create route table {0}, error: {1}' .format(route_table_id or resource_tags, e) ) @@ -381,7 +382,7 @@ def ensure_route_table_present(vpc_conn, vpc_id, route_table_id, resource_tags, result = ensure_routes(vpc_conn, route_table, routes, check_mode) changed = changed or result['changed'] except EC2ResponseError as e: - raise RouteTableException( + raise AnsibleRouteTableException( 'Unable to ensure routes for route table {0}, error: {1}' .format(route_table, e) ) @@ -396,7 +397,7 @@ def ensure_route_table_present(vpc_conn, vpc_id, route_table_id, resource_tags, subnet = get_subnet_by_id(vpc_conn, vpc_id, subnet_name) associated_subnets.append(subnet) except EC2ResponseError as e: - raise RouteTableException( + raise AnsibleRouteTableException( 'Unable to find subnets for route table {0}, error: {1}' .format(route_table, e) ) @@ -406,7 +407,7 @@ def ensure_route_table_present(vpc_conn, vpc_id, route_table_id, resource_tags, vpc_conn, vpc_id, route_table, associated_subnets, check_mode) changed = changed or result['changed'] except EC2ResponseError as e: - raise RouteTableException( + raise AnsibleRouteTableException( 'Unable to associate subnets for route table {0}, error: {1}' .format(route_table, e) ) @@ -467,7 +468,7 @@ def main(): vpc_conn, vpc_id, route_table_id, resource_tags, module.check_mode ) - except RouteTableException as e: + except AnsibleRouteTableException as e: module.fail_json(msg=str(e)) module.exit_json(**result) From 95006afe8cf244c462c55c649989f85e606bc79b Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Mon, 1 Dec 2014 13:45:50 -0500 Subject: [PATCH 22/44] ec2_vpc - Fail module using fail_json on boto import failure. 
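
A minimal sketch of the guarded-import pattern this change adopts, so a missing
boto is reported through fail_json() instead of a bare print/exit at import time
(simplified; the empty argument spec here is hypothetical, the real module keeps
its full spec and EC2 credential handling):

    # Sketch: defer the boto import failure until main() so it can be
    # reported through the module's normal JSON error path.
    try:
        import boto.vpc  # noqa
        HAS_BOTO = True
    except ImportError:
        HAS_BOTO = False

    def main():
        module = AnsibleModule(argument_spec=dict())  # hypothetical empty spec
        if not HAS_BOTO:
            module.fail_json(msg='boto is required for this module')
        module.exit_json(changed=False)

    from ansible.module_utils.basic import *  # noqa

    if __name__ == '__main__':
        main()
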
--- cloud/amazon/ec2_vpc_route_table.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index 0f6184c40d4..56d3c16c9ec 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -145,15 +145,17 @@ EXAMPLES = ''' ''' -import sys +import sys # noqa try: import boto.ec2 import boto.vpc from boto.exception import EC2ResponseError + HAS_BOTO = True except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) + HAS_BOTO = False + if __name__ != '__main__': + raise class AnsibleRouteTableException(Exception): @@ -432,6 +434,8 @@ def main(): argument_spec=argument_spec, supports_check_mode=True, ) + if not HAS_BOTO: + module.fail_json(msg='boto is required for this module') ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) if not region: From a50f5cac2cefb0151981f78b7dfac5a2d80ca19a Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Mon, 1 Dec 2014 14:28:28 -0500 Subject: [PATCH 23/44] ec2_vpc - More efficient tag search. --- cloud/amazon/ec2_vpc_route_table.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index 56d3c16c9ec..e79b1b10ee4 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -171,11 +171,6 @@ def get_resource_tags(vpc_conn, resource_id): vpc_conn.get_all_tags(filters={'resource-id': resource_id})} -def tags_match(match_tags, candidate_tags): - return all((k in candidate_tags and candidate_tags[k] == v - for k, v in match_tags.iteritems())) - - def ensure_tags(vpc_conn, resource_id, tags, add_only, dry_run): try: cur_tags = get_resource_tags(vpc_conn, resource_id) @@ -204,11 +199,18 @@ def get_route_table_by_id(vpc_conn, vpc_id, route_table_id): def get_route_table_by_tags(vpc_conn, vpc_id, tags): - route_tables = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc_id}) - for route_table in route_tables: - this_tags = get_resource_tags(vpc_conn, route_table.id) - if tags_match(tags, this_tags): - return route_table + filters = {'vpc_id': vpc_id} + filters.update({'tag:{}'.format(t): v + for t, v in tags.iteritems()}) + route_tables = vpc_conn.get_all_route_tables(filters=filters) + + if not route_tables: + return None + elif len(route_tables) == 1: + return route_tables[0] + + raise RouteTableException( + 'Found more than one route table based on the supplied tags, aborting') def route_spec_matches_route(route_spec, route): From 0e635dd0907731ffcd4a6962e56ea7295e2482df Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Mon, 1 Dec 2014 14:50:38 -0500 Subject: [PATCH 24/44] ec2_vpc - Update some documentation strings. --- cloud/amazon/ec2_vpc_route_table.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index e79b1b10ee4..60a87ae2430 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -50,8 +50,8 @@ options: aliases: [] subnets: description: - - An array of subnets to add to this route table. Subnets may either be''' -''' specified by subnet ID or by a CIDR such as '10.0.0.0/24'. + - An array of subnets to add to this route table. Subnets may either''' +''' be specified by subnet ID or by a CIDR such as '10.0.0.0/24'. 
required: true aliases: [] wait: From e3c14c1b021324f84166f8db7286b2447921ddfe Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Mon, 1 Dec 2014 15:00:14 -0500 Subject: [PATCH 25/44] ec2_vpc - Update dict comprehensions and {} formats for python2.6 --- cloud/amazon/ec2_vpc_route_table.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index 60a87ae2430..f12255e7771 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -200,8 +200,8 @@ def get_route_table_by_id(vpc_conn, vpc_id, route_table_id): def get_route_table_by_tags(vpc_conn, vpc_id, tags): filters = {'vpc_id': vpc_id} - filters.update({'tag:{}'.format(t): v - for t, v in tags.iteritems()}) + filters.update(dict((('tag:{0}'.format(t), v) + for t, v in tags.iteritems()))) route_tables = vpc_conn.get_all_route_tables(filters=filters) if not route_tables: From f79aeaee86d9686c09564a47cd4bf8ec9f04087a Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Mon, 1 Dec 2014 15:18:56 -0500 Subject: [PATCH 26/44] ec2_vpc - More dry running in check mode. --- cloud/amazon/ec2_vpc_route_table.py | 33 ++++++++++++----------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index f12255e7771..7f340359077 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -171,7 +171,7 @@ def get_resource_tags(vpc_conn, resource_id): vpc_conn.get_all_tags(filters={'resource-id': resource_id})} -def ensure_tags(vpc_conn, resource_id, tags, add_only, dry_run): +def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode): try: cur_tags = get_resource_tags(vpc_conn, resource_id) if tags == cur_tags: @@ -179,11 +179,11 @@ def ensure_tags(vpc_conn, resource_id, tags, add_only, dry_run): to_delete = {k: cur_tags[k] for k in cur_tags if k not in tags} if to_delete and not add_only: - vpc_conn.delete_tags(resource_id, to_delete, dry_run=dry_run) + vpc_conn.delete_tags(resource_id, to_delete, dry_run=check_mode) to_add = {k: tags[k] for k in tags if k not in cur_tags} if to_add: - vpc_conn.create_tags(resource_id, to_add, dry_run=dry_run) + vpc_conn.create_tags(resource_id, to_add, dry_run=check_mode) latest_tags = get_resource_tags(vpc_conn, resource_id) return {'changed': True, 'tags': latest_tags} @@ -252,17 +252,17 @@ def ensure_routes(vpc_conn, route_table, route_specs, check_mode): if r.gateway_id != 'local'] changed = routes_to_delete or route_specs_to_create - if check_mode and changed: - return {'changed': True} - elif changed: + if changed: for route_spec in route_specs_to_create: - vpc_conn.create_route(route_table.id, **route_spec) + vpc_conn.create_route(route_table.id, + dry_run=check_mode, + **route_spec) for route in routes_to_delete: - vpc_conn.delete_route(route_table.id, route.destination_cidr_block) - return {'changed': True} - else: - return {'changed': False} + vpc_conn.delete_route(route_table.id, + route.destination_cidr_block, + dry_run=check_mode) + return {'changed': changed} def get_subnet_by_cidr(vpc_conn, vpc_id, cidr): @@ -323,10 +323,8 @@ def ensure_subnet_associations(vpc_conn, vpc_id, route_table, subnets, if a_id not in new_association_ids] for a_id in to_delete: - if check_mode: - return {'changed': True} changed = True - vpc_conn.disassociate_route_table(a_id) + vpc_conn.disassociate_route_table(a_id, dry_run=check_mode) return {'changed': changed} @@ -344,10 
+342,7 @@ def ensure_route_table_absent(vpc_conn, vpc_id, route_table_id, resource_tags, if route_table is None: return {'changed': False} - if check_mode: - return {'changed': True} - - vpc_conn.delete_route_table(route_table.id) + vpc_conn.delete_route_table(route_table.id, dry_run=check_mode) return {'changed': True} @@ -378,7 +373,7 @@ def ensure_route_table_present(vpc_conn, vpc_id, route_table_id, resource_tags, if not tags_valid and resource_tags is not None: result = ensure_tags(vpc_conn, route_table.id, resource_tags, - add_only=True, dry_run=check_mode) + add_only=True, check_mode=check_mode) changed = changed or result['changed'] if routes is not None: From f4ce0dbc96b72a24c0527ccf3af37110fbfaf7de Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Mon, 1 Dec 2014 15:56:04 -0500 Subject: [PATCH 27/44] ec2_vpc_route_table - Support route propagation through VGW. Based on work by Bret Martin via pull request #356 --- cloud/amazon/ec2_vpc_route_table.py | 36 +++++++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index 7f340359077..fc736ba451c 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -54,6 +54,11 @@ options: ''' be specified by subnet ID or by a CIDR such as '10.0.0.0/24'. required: true aliases: [] + propagating_vgw_ids: + description: + - Enables route propagation from virtual gateways specified by ID. + required: false + aliases: [] wait: description: - wait for the VPC to be in state 'available' before returning @@ -329,6 +334,24 @@ def ensure_subnet_associations(vpc_conn, vpc_id, route_table, subnets, return {'changed': changed} +def ensure_propagation(vpc_conn, route_table_id, propagating_vgw_ids, + check_mode): + + # NOTE: As of boto==2.15.0, it is not yet possible to query the existing + # propagating gateways. However, EC2 does support this as evidenced by + # the describe-route-tables tool. For now, just enable the given VGWs + # and do not disable any others. 
+ changed = False + for vgw_id in propagating_vgw_ids: + if vgw_id not in original_association_ids: + changed = True + vpc_conn.enable_vgw_route_propagation(route_table_id, + vgw_id, + test_run=check_mode) + + return {'changed': changed} + + def ensure_route_table_absent(vpc_conn, vpc_id, route_table_id, resource_tags, check_mode): if route_table_id: @@ -347,7 +370,8 @@ def ensure_route_table_absent(vpc_conn, vpc_id, route_table_id, resource_tags, def ensure_route_table_present(vpc_conn, vpc_id, route_table_id, resource_tags, - routes, subnets, check_mode): + routes, subnets, propagating_vgw_ids, + check_mode): changed = False tags_valid = False if route_table_id: @@ -371,6 +395,12 @@ def ensure_route_table_present(vpc_conn, vpc_id, route_table_id, resource_tags, .format(route_table_id or resource_tags, e) ) + if propagating_vgw_ids is not None: + result = ensure_propagation(vpc_conn, route_table_id, + propagating_vgw_ids, + check_mode=check_mode) + changed = changed or result['changed'] + if not tags_valid and resource_tags is not None: result = ensure_tags(vpc_conn, route_table.id, resource_tags, add_only=True, check_mode=check_mode) @@ -422,6 +452,7 @@ def main(): argument_spec.update({ 'vpc_id': {'required': True}, 'route_table_id': {'required': False}, + 'propagating_vgw_ids': {'type': 'list', 'required': False}, 'resource_tags': {'type': 'dict', 'required': False}, 'routes': {'type': 'list', 'required': False}, 'subnets': {'type': 'list', 'required': False}, @@ -450,6 +481,7 @@ def main(): vpc_id = module.params.get('vpc_id') route_table_id = module.params.get('route_table_id') resource_tags = module.params.get('resource_tags') + propagating_vgw_ids = module.params.get('propagating_vgw_ids', []) routes = module.params.get('routes') for route_spec in routes: @@ -462,7 +494,7 @@ def main(): if state == 'present': result = ensure_route_table_present( vpc_conn, vpc_id, route_table_id, resource_tags, - routes, subnets, module.check_mode + routes, subnets, propagating_vgw_ids, module.check_mode ) elif state == 'absent': result = ensure_route_table_absent( From f0a4be1b4bce41ece9e4ab033fc2cbebc444f2b8 Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Wed, 3 Dec 2014 13:01:44 -0500 Subject: [PATCH 28/44] ec2_vpc_route_table - Fix unintended tag search regression. 
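
The lookup by tags goes back to a client-side subset match: a route table matches
when it carries at least the requested tags, and any extra tags on the table are
ignored. A standalone sketch of that predicate (same logic as the tags_match
helper restored below):

    # Every requested tag must be present with the same value; extra tags
    # on the candidate are ignored.
    def tags_match(match_tags, candidate_tags):
        return all(k in candidate_tags and candidate_tags[k] == v
                   for k, v in match_tags.items())

    # e.g. a table tagged Name=Public plus env=prod still matches a lookup
    # for just Name=Public.
    assert tags_match({'Name': 'Public'}, {'Name': 'Public', 'env': 'prod'})
    assert not tags_match({'Name': 'Public'}, {'Name': 'Internal'})
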
--- cloud/amazon/ec2_vpc_route_table.py | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index fc736ba451c..b6fda27b703 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -176,6 +176,11 @@ def get_resource_tags(vpc_conn, resource_id): vpc_conn.get_all_tags(filters={'resource-id': resource_id})} +def tags_match(match_tags, candidate_tags): + return all((k in candidate_tags and candidate_tags[k] == v + for k, v in match_tags.iteritems())) + + def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode): try: cur_tags = get_resource_tags(vpc_conn, resource_id) @@ -204,18 +209,11 @@ def get_route_table_by_id(vpc_conn, vpc_id, route_table_id): def get_route_table_by_tags(vpc_conn, vpc_id, tags): - filters = {'vpc_id': vpc_id} - filters.update(dict((('tag:{0}'.format(t), v) - for t, v in tags.iteritems()))) - route_tables = vpc_conn.get_all_route_tables(filters=filters) - - if not route_tables: - return None - elif len(route_tables) == 1: - return route_tables[0] - - raise RouteTableException( - 'Found more than one route table based on the supplied tags, aborting') + route_tables = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc_id}) + for route_table in route_tables: + this_tags = get_resource_tags(vpc_conn, route_table.id) + if tags_match(tags, this_tags): + return route_table def route_spec_matches_route(route_spec, route): From 17ed722d556a4a24a3dadfd37c2833f57287d1c5 Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Thu, 4 Dec 2014 22:10:02 -0500 Subject: [PATCH 29/44] ec2_vpc_route_tables - Remove more dict comprehensions. --- cloud/amazon/ec2_vpc_route_table.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index b6fda27b703..af28ef341cc 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -172,8 +172,8 @@ class AnsibleTagCreationException(AnsibleRouteTableException): def get_resource_tags(vpc_conn, resource_id): - return {t.name: t.value for t in - vpc_conn.get_all_tags(filters={'resource-id': resource_id})} + return dict((t.name, t.value) for t in + vpc_conn.get_all_tags(filters={'resource-id': resource_id})) def tags_match(match_tags, candidate_tags): @@ -187,11 +187,11 @@ def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode): if tags == cur_tags: return {'changed': False, 'tags': cur_tags} - to_delete = {k: cur_tags[k] for k in cur_tags if k not in tags} + to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags) if to_delete and not add_only: vpc_conn.delete_tags(resource_id, to_delete, dry_run=check_mode) - to_add = {k: tags[k] for k in tags if k not in cur_tags} + to_add = dict((k, tags[k]) for k in tags if k not in cur_tags) if to_add: vpc_conn.create_tags(resource_id, to_add, dry_run=check_mode) From 43566b0cafd5f3b6979a4cd182ee3505717a9a71 Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Thu, 4 Dec 2014 22:10:49 -0500 Subject: [PATCH 30/44] ec2_vpc_route_tables - Allow reference to subnets by id, name, or cidr. 
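
Entries in the subnets list may now be a raw subnet ID, a unique Name tag value,
or a CIDR; each entry is classified by its shape before being looked up. A
simplified standalone sketch of that classification (same patterns as the diff
below; classify_subnet_ref is a hypothetical helper name used only for this
illustration):

    import re

    # Subnet IDs and CIDRs are recognised by pattern; anything else is
    # treated as the value of a 'Name' tag.
    CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}\/\d{1,2}$')
    SUBNET_RE = re.compile(r'^subnet-[A-z0-9]+$')

    def classify_subnet_ref(ref):
        if SUBNET_RE.match(ref):
            return 'id'
        if CIDR_RE.match(ref):
            return 'cidr'
        return 'name'

    # Example values only:
    print(classify_subnet_ref('subnet-0a1b2c3d'))   # id
    print(classify_subnet_ref('10.0.0.0/8'))        # cidr
    print(classify_subnet_ref('Database Subnet'))   # name
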
--- cloud/amazon/ec2_vpc_route_table.py | 98 ++++++++++++++++++++--------- 1 file changed, 69 insertions(+), 29 deletions(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index af28ef341cc..491751e23dd 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -51,7 +51,7 @@ options: subnets: description: - An array of subnets to add to this route table. Subnets may either''' -''' be specified by subnet ID or by a CIDR such as '10.0.0.0/24'. +''' be specified by subnet ID, Name tag, or by a CIDR such as '10.0.0.0/24'. required: true aliases: [] propagating_vgw_ids: @@ -141,8 +141,8 @@ EXAMPLES = ''' - Name: Internal subnets: - '{{application_subnet.subnet_id}}' - - '{{database_subnet.subnet_id}}' - - '{{splunk_subnet.subnet_id}}' + - 'Database Subnet' + - '10.0.0.0/8' routes: - dest: 0.0.0.0/0 instance_id: '{{nat.instance_id}}' @@ -151,6 +151,7 @@ EXAMPLES = ''' import sys # noqa +import re try: import boto.ec2 @@ -171,6 +172,70 @@ class AnsibleTagCreationException(AnsibleRouteTableException): pass +class AnsibleSubnetSearchException(AnsibleRouteTableException): + pass + +CIDR_RE = re.compile('^(\d{1,3}\.){3}\d{1,3}\/\d{1,2}$') +SUBNET_RE = re.compile('^subnet-[A-z0-9]+$') +ROUTE_TABLE_RE = re.compile('^rtb-[A-z0-9]+$') + + +def find_subnets(vpc_conn, vpc_id, identified_subnets): + """ + Finds a list of subnets, each identified either by a raw ID, a unique + 'Name' tag, or a CIDR such as 10.0.0.0/8. + + Note that this function is duplicated in other ec2 modules, and should + potentially be moved into potentially be moved into a shared module_utils + """ + subnet_ids = [] + subnet_names = [] + subnet_cidrs = [] + for subnet in (identified_subnets or []): + if re.match(SUBNET_RE, subnet): + subnet_ids.append(subnet) + elif re.match(CIDR_RE, subnet): + subnet_cidrs.append(subnet) + else: + subnet_names.append(subnet) + + subnets_by_id = [] + if subnet_ids: + subnets_by_id = vpc_conn.get_all_subnets( + subnet_ids, filters={'vpc_id': vpc_id}) + + for subnet_id in subnet_ids: + if not any(s.id == subnet_id for s in subnets_by_id): + raise AnsibleSubnetSearchException( + 'Subnet ID "{0}" does not exist'.format(subnet_id)) + + subnets_by_cidr = [] + if subnet_cidrs: + subnets_by_cidr = vpc_conn.get_all_subnets( + filters={'vpc_id': vpc_id, 'cidr': subnet_cidrs}) + + for cidr in subnet_cidrs: + if not any(s.cidr_block == cidr for s in subnets_by_cidr): + raise AnsibleSubnetSearchException( + 'Subnet CIDR "{0}" does not exist'.format(subnet_cidr)) + + subnets_by_name = [] + if subnet_names: + subnets_by_name = vpc_conn.get_all_subnets( + filters={'vpc_id': vpc_id, 'tag:Name': subnet_names}) + + for name in subnet_names: + matching = [s.tags.get('Name') == name for s in subnets_by_name] + if len(matching) == 0: + raise AnsibleSubnetSearchException( + 'Subnet named "{0}" does not exist'.format(name)) + elif len(matching) > 1: + raise AnsibleSubnetSearchException( + 'Multiple subnets named "{0}"'.format(name)) + + return subnets_by_id + subnets_by_cidr + subnets_by_name + + def get_resource_tags(vpc_conn, resource_id): return dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': resource_id})) @@ -268,26 +333,6 @@ def ensure_routes(vpc_conn, route_table, route_specs, check_mode): return {'changed': changed} -def get_subnet_by_cidr(vpc_conn, vpc_id, cidr): - subnets = vpc_conn.get_all_subnets( - filters={'cidr': cidr, 'vpc_id': vpc_id}) - if len(subnets) != 1: - raise AnsibleRouteTableException( - 'Subnet 
with CIDR {0} has {1} matches'.format(cidr, len(subnets)) - ) - return subnets[0] - - -def get_subnet_by_id(vpc_conn, vpc_id, subnet_id): - subnets = vpc_conn.get_all_subnets(filters={'subnet-id': subnet_id}) - if len(subnets) != 1: - raise AnsibleRouteTableException( - 'Subnet with ID {0} has {1} matches'.format( - subnet_id, len(subnets)) - ) - return subnets[0] - - def ensure_subnet_association(vpc_conn, vpc_id, route_table_id, subnet_id, check_mode): route_tables = vpc_conn.get_all_route_tables( @@ -417,12 +462,7 @@ def ensure_route_table_present(vpc_conn, vpc_id, route_table_id, resource_tags, if subnets: associated_subnets = [] try: - for subnet_name in subnets: - if ('.' in subnet_name) and ('/' in subnet_name): - subnet = get_subnet_by_cidr(vpc_conn, vpc_id, subnet_name) - else: - subnet = get_subnet_by_id(vpc_conn, vpc_id, subnet_name) - associated_subnets.append(subnet) + associated_subnets = find_subnets(vpc_conn, vpc_id, subnets) except EC2ResponseError as e: raise AnsibleRouteTableException( 'Unable to find subnets for route table {0}, error: {1}' From c9883db03d668c8b5eeff1204c3602c3006fd905 Mon Sep 17 00:00:00 2001 From: Herby Gillot Date: Thu, 11 Jun 2015 14:01:40 +1000 Subject: [PATCH 31/44] Allow VPC igw to be specified by gateway_id: "igw" --- cloud/amazon/ec2_vpc_route_table.py | 36 ++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index 491751e23dd..dc21d9607e1 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -45,7 +45,9 @@ options: description: - List of routes in the route table. Routes are specified''' ''' as dicts containing the keys 'dest' and one of 'gateway_id',''' -''' 'instance_id', 'interface_id', or 'vpc_peering_connection'. +''' 'instance_id', 'interface_id', or 'vpc_peering_connection'. ''' +''' If 'gateway_id' is specified, you can refer to the VPC's IGW ''' +''' by using the value "igw". required: true aliases: [] subnets: @@ -168,6 +170,10 @@ class AnsibleRouteTableException(Exception): pass +class AnsibleIgwSearchException(AnsibleRouteTableException): + pass + + class AnsibleTagCreationException(AnsibleRouteTableException): pass @@ -236,6 +242,29 @@ def find_subnets(vpc_conn, vpc_id, identified_subnets): return subnets_by_id + subnets_by_cidr + subnets_by_name +def find_igw(vpc_conn, vpc_id): + """ + Finds the Internet gateway for the given VPC ID. + + Raises an AnsibleIgwSearchException if either no IGW can be found, or more + than one found for the given VPC. + + Note that this function is duplicated in other ec2 modules, and should + potentially be moved into potentially be moved into a shared module_utils + """ + igw = vpc_conn.get_all_internet_gateways( + filters={'attachment.vpc-id': vpc_id}) + + if not igw: + return AnsibleIgwSearchException('No IGW found for VPC "{0}"'. + format(vpc_id)) + elif len(igw) == 1: + return igw[0].id + else: + raise AnsibleIgwSearchException('Multiple IGWs found for VPC "{0}"'. 
+ format(vpc_id)) + + def get_resource_tags(vpc_conn, resource_id): return dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': resource_id})) @@ -525,6 +554,11 @@ def main(): for route_spec in routes: rename_key(route_spec, 'dest', 'destination_cidr_block') + if 'gateway_id' in route_spec and route_spec['gateway_id'] and \ + route_spec['gateway_id'].lower() == 'igw': + igw = find_igw(vpc_conn, vpc_id) + route_spec['gateway_id'] = igw + subnets = module.params.get('subnets') state = module.params.get('state', 'present') From 4f2cd7cb6e0ae12a578bc5768846272474c71f46 Mon Sep 17 00:00:00 2001 From: whiter Date: Thu, 11 Jun 2015 14:07:04 +1000 Subject: [PATCH 32/44] Documentation update --- cloud/amazon/ec2_vpc_route_table.py | 87 +++++++---------------------- 1 file changed, 21 insertions(+), 66 deletions(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index dc21d9607e1..677d2ea3383 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -1,121 +1,75 @@ #!/usr/bin/python -# This file is part of Ansible # -# Ansible is free software: you can redistribute it and/or modify +# This is a free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # -# Ansible is distributed in the hope that it will be useful, +# This Ansible library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . +# along with this library. If not, see . DOCUMENTATION = ''' --- module: ec2_vpc_route_table -short_description: Configure route tables for AWS virtual private clouds +short_description: Manage route tables for AWS virtual private clouds description: - - Create or removes route tables from AWS virtual private clouds.''' -'''This module has a dependency on python-boto. -version_added: "1.8" + - Manage route tables for AWS virtual private clouds +version_added: "2.0" +author: Robert Estelle, @erydo options: vpc_id: description: - - "The VPC in which to create the route table." + - VPC ID of the VPC in which to create the route table. required: true route_table_id: description: - - "The ID of the route table to update or delete." + - The ID of the route table to update or delete. required: false default: null resource_tags: description: - - 'A dictionary array of resource tags of the form: { tag1: value1,''' -''' tag2: value2 }. Tags in this list are used to uniquely identify route''' -''' tables within a VPC when the route_table_id is not supplied. + - A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used to uniquely identify route tables within a VPC when the route_table_id is not supplied. required: false default: null - aliases: [] - version_added: "1.6" routes: description: - - List of routes in the route table. Routes are specified''' -''' as dicts containing the keys 'dest' and one of 'gateway_id',''' -''' 'instance_id', 'interface_id', or 'vpc_peering_connection'. ''' -''' If 'gateway_id' is specified, you can refer to the VPC's IGW ''' -''' by using the value "igw". + - List of routes in the route table. 
Routes are specified as dicts containing the keys 'dest' and one of 'gateway_id', 'instance_id', 'interface_id', or 'vpc_peering_connection'. If 'gateway_id' is specified, you can refer to the VPC's IGW by using the value 'igw'. required: true aliases: [] subnets: description: - - An array of subnets to add to this route table. Subnets may either''' -''' be specified by subnet ID, Name tag, or by a CIDR such as '10.0.0.0/24'. + - An array of subnets to add to this route table. Subnets may be specified by either subnet ID, Name tag, or by a CIDR such as '10.0.0.0/24'. required: true - aliases: [] propagating_vgw_ids: description: - - Enables route propagation from virtual gateways specified by ID. + - Enable route propagation from virtual gateways specified by ID. required: false - aliases: [] wait: description: - - wait for the VPC to be in state 'available' before returning + - Wait for the VPC to be in state 'available' before returning. required: false default: "no" choices: [ "yes", "no" ] - aliases: [] wait_timeout: description: - - how long before wait gives up, in seconds + - How long before wait gives up, in seconds. default: 300 - aliases: [] state: description: - - Create or terminate the VPC - required: true - default: present - aliases: [] - region: - description: - - region in which the resource exists. + - Create or destroy the VPC route table required: false - default: null - aliases: ['aws_region', 'ec2_region'] - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY''' -''' environment variable is used. - required: false - default: None - aliases: ['ec2_secret_key', 'secret_key' ] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY''' -''' environment variable is used. - required: false - default: None - aliases: ['ec2_access_key', 'access_key' ] - validate_certs: - description: - - When set to "no", SSL certificates will not be validated for boto''' -''' versions >= 2.6.0. - required: false - default: "yes" - choices: ["yes", "no"] - aliases: [] - version_added: "1.5" - -requirements: [ "boto" ] -author: Robert Estelle + default: present + choices: [ 'present', 'absent' ] +extends_documentation_fragment: aws ''' EXAMPLES = ''' -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. +# Note: These examples do not set authentication details, see the AWS Guide for details. 
# Basic creation example: - name: Set up public subnet route table @@ -583,3 +537,4 @@ from ansible.module_utils.ec2 import * # noqa if __name__ == '__main__': main() + \ No newline at end of file From 3527aec2c5209f10b90827dc7f697cdaffeb2d63 Mon Sep 17 00:00:00 2001 From: whiter Date: Fri, 19 Jun 2015 09:56:50 +1000 Subject: [PATCH 33/44] Changed to use "connect_to_aws" method --- cloud/amazon/ec2_vpc_route_table.py | 54 ++++++++++++++--------------- 1 file changed, 26 insertions(+), 28 deletions(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index 677d2ea3383..b0fa9cbc426 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -20,7 +20,7 @@ short_description: Manage route tables for AWS virtual private clouds description: - Manage route tables for AWS virtual private clouds version_added: "2.0" -author: Robert Estelle, @erydo +author: Robert Estelle (@erydo) options: vpc_id: description: @@ -470,34 +470,32 @@ def ensure_route_table_present(vpc_conn, vpc_id, route_table_id, resource_tags, def main(): argument_spec = ec2_argument_spec() - argument_spec.update({ - 'vpc_id': {'required': True}, - 'route_table_id': {'required': False}, - 'propagating_vgw_ids': {'type': 'list', 'required': False}, - 'resource_tags': {'type': 'dict', 'required': False}, - 'routes': {'type': 'list', 'required': False}, - 'subnets': {'type': 'list', 'required': False}, - 'state': {'choices': ['present', 'absent'], 'default': 'present'}, - }) - module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, + argument_spec.update( + dict( + vpc_id = dict(default=None, required=True), + route_table_id = dict(default=None, required=False), + propagating_vgw_ids = dict(default=None, required=False, type='list'), + resource_tags = dict(default=None, required=False, type='dict'), + routes = dict(default=None, required=False, type='list'), + subnets = dict(default=None, required=False, type='list'), + state = dict(default='present', choices=['present', 'absent']) + ) ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + if not HAS_BOTO: module.fail_json(msg='boto is required for this module') - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - if not region: - module.fail_json(msg='Region must be specified') - - try: - vpc_conn = boto.vpc.connect_to_region( - region, - aws_access_key_id=aws_access_key, - aws_secret_access_key=aws_secret_key - ) - except boto.exception.NoAuthHandlerFound as e: - module.fail_json(msg=str(e)) + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + if region: + try: + connection = connect_to_aws(boto.vpc, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, StandardError), e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") vpc_id = module.params.get('vpc_id') route_table_id = module.params.get('route_table_id') @@ -510,7 +508,7 @@ def main(): if 'gateway_id' in route_spec and route_spec['gateway_id'] and \ route_spec['gateway_id'].lower() == 'igw': - igw = find_igw(vpc_conn, vpc_id) + igw = find_igw(connection, vpc_id) route_spec['gateway_id'] = igw subnets = module.params.get('subnets') @@ -519,12 +517,12 @@ def main(): try: if state == 'present': result = ensure_route_table_present( - vpc_conn, vpc_id, route_table_id, resource_tags, + connection, vpc_id, route_table_id, resource_tags, routes, subnets, propagating_vgw_ids, module.check_mode ) elif state 
== 'absent': result = ensure_route_table_absent( - vpc_conn, vpc_id, route_table_id, resource_tags, + connection, vpc_id, route_table_id, resource_tags, module.check_mode ) except AnsibleRouteTableException as e: From 3e02c0d3d940c3e7bb0e4c0c3128cf8ffacb6dad Mon Sep 17 00:00:00 2001 From: Rob White Date: Tue, 28 Jul 2015 21:39:09 +1000 Subject: [PATCH 34/44] Blank aliases removed --- cloud/amazon/ec2_vpc_route_table.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index b0fa9cbc426..6b3efa3286a 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -40,7 +40,6 @@ options: description: - List of routes in the route table. Routes are specified as dicts containing the keys 'dest' and one of 'gateway_id', 'instance_id', 'interface_id', or 'vpc_peering_connection'. If 'gateway_id' is specified, you can refer to the VPC's IGW by using the value 'igw'. required: true - aliases: [] subnets: description: - An array of subnets to add to this route table. Subnets may be specified by either subnet ID, Name tag, or by a CIDR such as '10.0.0.0/24'. @@ -65,6 +64,7 @@ options: required: false default: present choices: [ 'present', 'absent' ] + extends_documentation_fragment: aws ''' From 546858cec9afbbde1862b9ac4e40da88a1067dc1 Mon Sep 17 00:00:00 2001 From: Bret Martin Date: Mon, 10 Aug 2015 14:30:09 -0400 Subject: [PATCH 35/44] Correct enable_vgw_route_propagation test_run parameter to dry_run --- cloud/amazon/ec2_vpc_route_table.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index 6b3efa3286a..53164e254d9 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -373,7 +373,7 @@ def ensure_propagation(vpc_conn, route_table_id, propagating_vgw_ids, changed = True vpc_conn.enable_vgw_route_propagation(route_table_id, vgw_id, - test_run=check_mode) + dry_run=check_mode) return {'changed': changed} From 954f48f28aed4be47634613ccb0a051ef7ab9874 Mon Sep 17 00:00:00 2001 From: Bret Martin Date: Mon, 10 Aug 2015 14:31:46 -0400 Subject: [PATCH 36/44] Don't check original_association_ids since it is not set, per comment above --- cloud/amazon/ec2_vpc_route_table.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index 53164e254d9..bb530cb0e9e 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -369,11 +369,10 @@ def ensure_propagation(vpc_conn, route_table_id, propagating_vgw_ids, # and do not disable any others. 
changed = False for vgw_id in propagating_vgw_ids: - if vgw_id not in original_association_ids: - changed = True - vpc_conn.enable_vgw_route_propagation(route_table_id, - vgw_id, - dry_run=check_mode) + changed = True + vpc_conn.enable_vgw_route_propagation(route_table_id, + vgw_id, + dry_run=check_mode) return {'changed': changed} From 271cbe833e1ecbc8e43fd295f7d7c140ee41e619 Mon Sep 17 00:00:00 2001 From: Bret Martin Date: Mon, 10 Aug 2015 14:35:25 -0400 Subject: [PATCH 37/44] Call ensure_propagation() with the retrieved route table ID --- cloud/amazon/ec2_vpc_route_table.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index bb530cb0e9e..faafc6955e6 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -421,7 +421,7 @@ def ensure_route_table_present(vpc_conn, vpc_id, route_table_id, resource_tags, ) if propagating_vgw_ids is not None: - result = ensure_propagation(vpc_conn, route_table_id, + result = ensure_propagation(vpc_conn, route_table.id, propagating_vgw_ids, check_mode=check_mode) changed = changed or result['changed'] From 29ce49e84f207892c618d89523ca938e07ec01dd Mon Sep 17 00:00:00 2001 From: Bret Martin Date: Mon, 10 Aug 2015 15:10:19 -0400 Subject: [PATCH 38/44] Don't attempt to delete routes using propagating virtual gateways --- cloud/amazon/ec2_vpc_route_table.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index faafc6955e6..d93effcc550 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -290,7 +290,8 @@ def index_of_matching_route(route_spec, routes_to_match): return i -def ensure_routes(vpc_conn, route_table, route_specs, check_mode): +def ensure_routes(vpc_conn, route_table, route_specs, propagating_vgw_ids, + check_mode): routes_to_match = list(route_table.routes) route_specs_to_create = [] for route_spec in route_specs: @@ -299,8 +300,16 @@ def ensure_routes(vpc_conn, route_table, route_specs, check_mode): route_specs_to_create.append(route_spec) else: del routes_to_match[i] + + # NOTE: As of boto==2.38.0, the origin of a route is not available + # (for example, whether it came from a gateway with route propagation + # enabled). Testing for origin == 'EnableVgwRoutePropagation' is more + # correct than checking whether the route uses a propagating VGW. + # The current logic will leave non-propagated routes using propagating + # VGWs in place. 
routes_to_delete = [r for r in routes_to_match - if r.gateway_id != 'local'] + if r.gateway_id != 'local' + and r.gateway_id not in propagating_vgw_ids] changed = routes_to_delete or route_specs_to_create if changed: @@ -433,7 +442,8 @@ def ensure_route_table_present(vpc_conn, vpc_id, route_table_id, resource_tags, if routes is not None: try: - result = ensure_routes(vpc_conn, route_table, routes, check_mode) + result = ensure_routes(vpc_conn, route_table, routes, + propagating_vgw_ids, check_mode) changed = changed or result['changed'] except EC2ResponseError as e: raise AnsibleRouteTableException( From 96e4194588c088294f1935d7d984065cb3393034 Mon Sep 17 00:00:00 2001 From: Bret Martin Date: Mon, 10 Aug 2015 15:25:13 -0400 Subject: [PATCH 39/44] Don't enable route propagation on a virtual gateway with propagated routes --- cloud/amazon/ec2_vpc_route_table.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index d93effcc550..2328006883c 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -369,17 +369,22 @@ def ensure_subnet_associations(vpc_conn, vpc_id, route_table, subnets, return {'changed': changed} -def ensure_propagation(vpc_conn, route_table_id, propagating_vgw_ids, +def ensure_propagation(vpc_conn, route_table, propagating_vgw_ids, check_mode): - # NOTE: As of boto==2.15.0, it is not yet possible to query the existing - # propagating gateways. However, EC2 does support this as evidenced by - # the describe-route-tables tool. For now, just enable the given VGWs - # and do not disable any others. + # NOTE: As of boto==2.38.0, it is not yet possible to query the existing + # propagating gateways. However, EC2 does support this as shown in its API + # documentation. For now, a reasonable proxy for this is the presence of + # propagated routes using the gateway in the route table. If such a route + # is found, propagation is almost certainly enabled. 
changed = False for vgw_id in propagating_vgw_ids: + for r in list(route_table.routes): + if r.gateway_id == vgw_id: + return {'changed': False} + changed = True - vpc_conn.enable_vgw_route_propagation(route_table_id, + vpc_conn.enable_vgw_route_propagation(route_table.id, vgw_id, dry_run=check_mode) @@ -430,7 +435,7 @@ def ensure_route_table_present(vpc_conn, vpc_id, route_table_id, resource_tags, ) if propagating_vgw_ids is not None: - result = ensure_propagation(vpc_conn, route_table.id, + result = ensure_propagation(vpc_conn, route_table, propagating_vgw_ids, check_mode=check_mode) changed = changed or result['changed'] From a2fb8edb3c67247adad6b7a420929390cb73ece1 Mon Sep 17 00:00:00 2001 From: whiter Date: Sun, 30 Aug 2015 22:25:05 +0200 Subject: [PATCH 40/44] Added option to specify tags or route-table-id, quoted doc strings, added more detail to returned route table object, numerous minor fixes --- cloud/amazon/ec2_vpc_route_table.py | 298 ++++++++++++++++------------ 1 file changed, 172 insertions(+), 126 deletions(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index 2328006883c..a65efaa78fc 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -20,50 +20,47 @@ short_description: Manage route tables for AWS virtual private clouds description: - Manage route tables for AWS virtual private clouds version_added: "2.0" -author: Robert Estelle (@erydo) +author: Robert Estelle (@erydo), Rob White (@wimnat) options: - vpc_id: + lookup: description: - - VPC ID of the VPC in which to create the route table. - required: true - route_table_id: + - "Look up route table by either tags or by route table ID. Non-unique tag lookup will fail. If no tags are specifed then no lookup for an existing route table is performed and a new route table will be created. To change tags of a route table, you must look up by id." + required: false + default: tag + choices: [ 'tag', 'id' ] + propagating_vgw_ids: description: - - The ID of the route table to update or delete. + - "Enable route propagation from virtual gateways specified by ID." required: false - default: null - resource_tags: + route_table_id: description: - - A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used to uniquely identify route tables within a VPC when the route_table_id is not supplied. + - "The ID of the route table to update or delete." required: false default: null routes: description: - - List of routes in the route table. Routes are specified as dicts containing the keys 'dest' and one of 'gateway_id', 'instance_id', 'interface_id', or 'vpc_peering_connection'. If 'gateway_id' is specified, you can refer to the VPC's IGW by using the value 'igw'. + - "List of routes in the route table. Routes are specified as dicts containing the keys 'dest' and one of 'gateway_id', 'instance_id', 'interface_id', or 'vpc_peering_connection'. If 'gateway_id' is specified, you can refer to the VPC's IGW by using the value 'igw'." required: true + state: + description: + - "Create or destroy the VPC route table" + required: false + default: present + choices: [ 'present', 'absent' ] subnets: description: - - An array of subnets to add to this route table. Subnets may be specified by either subnet ID, Name tag, or by a CIDR such as '10.0.0.0/24'. + - "An array of subnets to add to this route table. Subnets may be specified by either subnet ID, Name tag, or by a CIDR such as '10.0.0.0/24'." 
required: true - propagating_vgw_ids: + tags: description: - - Enable route propagation from virtual gateways specified by ID. + - "A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used to uniquely identify route tables within a VPC when the route_table_id is not supplied." required: false - wait: - description: - - Wait for the VPC to be in state 'available' before returning. - required: false - default: "no" - choices: [ "yes", "no" ] - wait_timeout: - description: - - How long before wait gives up, in seconds. - default: 300 - state: + default: null + aliases: [ "resource_tags" ] + vpc_id: description: - - Create or destroy the VPC route table - required: false - default: present - choices: [ 'present', 'absent' ] + - "VPC ID of the VPC in which to create the route table." + required: true extends_documentation_fragment: aws ''' @@ -73,36 +70,35 @@ EXAMPLES = ''' # Basic creation example: - name: Set up public subnet route table - local_action: - module: ec2_vpc_route_table + ec2_vpc_route_table: vpc_id: vpc-1245678 region: us-west-1 - resource_tags: + tags: Name: Public subnets: - - '{{jumpbox_subnet.subnet_id}}' - - '{{frontend_subnet.subnet_id}}' - - '{{vpn_subnet.subnet_id}}' + - "{{ jumpbox_subnet.subnet_id }}" + - "{{ frontend_subnet.subnet_id }}" + - "{{ vpn_subnet.subnet_id }}" routes: - dest: 0.0.0.0/0 - gateway_id: '{{igw.gateway_id}}' + gateway_id: "{{ igw.gateway_id }}" register: public_route_table - name: Set up NAT-protected route table - local_action: - module: ec2_vpc_route_table + ec2_vpc_route_table: vpc_id: vpc-1245678 region: us-west-1 - resource_tags: + tags: - Name: Internal subnets: - - '{{application_subnet.subnet_id}}' + - "{{ application_subnet.subnet_id }}" - 'Database Subnet' - '10.0.0.0/8' routes: - dest: 0.0.0.0/0 - instance_id: '{{nat.instance_id}}' + instance_id: "{{ nat.instance_id }}" register: nat_route_table + ''' @@ -210,12 +206,12 @@ def find_igw(vpc_conn, vpc_id): filters={'attachment.vpc-id': vpc_id}) if not igw: - return AnsibleIgwSearchException('No IGW found for VPC "{0}"'. + raise AnsibleIgwSearchException('No IGW found for VPC {0}'. format(vpc_id)) elif len(igw) == 1: return igw[0].id else: - raise AnsibleIgwSearchException('Multiple IGWs found for VPC "{0}"'. + raise AnsibleIgwSearchException('Multiple IGWs found for VPC {0}'. 
format(vpc_id)) @@ -251,17 +247,29 @@ def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode): def get_route_table_by_id(vpc_conn, vpc_id, route_table_id): - route_tables = vpc_conn.get_all_route_tables( - route_table_ids=[route_table_id], filters={'vpc_id': vpc_id}) - return route_tables[0] if route_tables else None - + route_table = None + route_tables = vpc_conn.get_all_route_tables(route_table_ids=[route_table_id], filters={'vpc_id': vpc_id}) + if route_tables: + route_table = route_tables[0] + + return route_table + def get_route_table_by_tags(vpc_conn, vpc_id, tags): + + count = 0 + route_table = None route_tables = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc_id}) - for route_table in route_tables: - this_tags = get_resource_tags(vpc_conn, route_table.id) + for table in route_tables: + this_tags = get_resource_tags(vpc_conn, table.id) if tags_match(tags, this_tags): - return route_table + route_table = table + count +=1 + + if count > 1: + raise RuntimeError("Tags provided do not identify a unique route table") + else: + return route_table def route_spec_matches_route(route_spec, route): @@ -391,75 +399,132 @@ def ensure_propagation(vpc_conn, route_table, propagating_vgw_ids, return {'changed': changed} -def ensure_route_table_absent(vpc_conn, vpc_id, route_table_id, resource_tags, - check_mode): - if route_table_id: - route_table = get_route_table_by_id(vpc_conn, vpc_id, route_table_id) - elif resource_tags: - route_table = get_route_table_by_tags(vpc_conn, vpc_id, resource_tags) - else: - raise AnsibleRouteTableException( - 'must provide route_table_id or resource_tags') +def ensure_route_table_absent(connection, module): + + lookup = module.params.get('lookup') + route_table_id = module.params.get('route_table_id') + tags = module.params.get('tags') + vpc_id = module.params.get('vpc_id') + check_mode = module.params.get('check_mode') + + if lookup == 'tag': + if tags is not None: + try: + route_table = get_route_table_by_tags(connection, vpc_id, tags) + except EC2ResponseError as e: + module.fail_json(msg=e.message) + except RuntimeError as e: + module.fail_json(msg=e.args[0]) + else: + route_table = None + elif lookup == 'id': + try: + route_table = get_route_table_by_id(connection, vpc_id, route_table_id) + except EC2ResponseError as e: + module.fail_json(msg=e.message) if route_table is None: return {'changed': False} - vpc_conn.delete_route_table(route_table.id, dry_run=check_mode) + try: + connection.delete_route_table(route_table.id, dry_run=check_mode) + except EC2ResponseError as e: + module.fail_json(msg=e.message) + return {'changed': True} -def ensure_route_table_present(vpc_conn, vpc_id, route_table_id, resource_tags, - routes, subnets, propagating_vgw_ids, - check_mode): +def get_route_table_info(route_table): + + # Add any routes to array + routes = [] + for route in route_table.routes: + routes.append(route.__dict__) + + route_table_info = { 'id': route_table.id, + 'routes': routes, + 'tags': route_table.tags, + 'vpc_id': route_table.vpc_id + } + + return route_table_info + +def create_route_spec(connection, routes, vpc_id): + + for route_spec in routes: + rename_key(route_spec, 'dest', 'destination_cidr_block') + + if 'gateway_id' in route_spec and route_spec['gateway_id'] and \ + route_spec['gateway_id'].lower() == 'igw': + igw = find_igw(connection, vpc_id) + route_spec['gateway_id'] = igw + + return routes + +def ensure_route_table_present(connection, module): + + lookup = module.params.get('lookup') + propagating_vgw_ids = 
module.params.get('propagating_vgw_ids', []) + route_table_id = module.params.get('route_table_id') + subnets = module.params.get('subnets') + tags = module.params.get('tags') + vpc_id = module.params.get('vpc_id') + check_mode = module.params.get('check_mode') + try: + routes = create_route_spec(connection, module.params.get('routes'), vpc_id) + except AnsibleIgwSearchException as e: + module.fail_json(msg=e[0]) + changed = False tags_valid = False - if route_table_id: - route_table = get_route_table_by_id(vpc_conn, vpc_id, route_table_id) - elif resource_tags: - route_table = get_route_table_by_tags(vpc_conn, vpc_id, resource_tags) - tags_valid = route_table is not None - else: - raise AnsibleRouteTableException( - 'must provide route_table_id or resource_tags') - - if check_mode and route_table is None: - return {'changed': True} + if lookup == 'tag': + if tags is not None: + try: + route_table = get_route_table_by_tags(connection, vpc_id, tags) + except EC2ResponseError as e: + module.fail_json(msg=e.message) + except RuntimeError as e: + module.fail_json(msg=e.args[0]) + else: + route_table = None + elif lookup == 'id': + try: + route_table = get_route_table_by_id(connection, vpc_id, route_table_id) + except EC2ResponseError as e: + module.fail_json(msg=e.message) + + # If no route table returned then create new route table if route_table is None: + print route_table.keys() + try: + route_table = connection.create_route_table(vpc_id, check_mode) + changed = True + except EC2ResponseError, e: + module.fail_json(msg=e.message) + + if routes is not None: try: - route_table = vpc_conn.create_route_table(vpc_id) + result = ensure_routes(connection, route_table, routes, propagating_vgw_ids, check_mode) + changed = changed or result['changed'] except EC2ResponseError as e: - raise AnsibleRouteTableException( - 'Unable to create route table {0}, error: {1}' - .format(route_table_id or resource_tags, e) - ) + module.fail_json(msg=e.message) if propagating_vgw_ids is not None: - result = ensure_propagation(vpc_conn, route_table, + result = ensure_propagation(vpc_conn, route_table_id, propagating_vgw_ids, check_mode=check_mode) changed = changed or result['changed'] - if not tags_valid and resource_tags is not None: - result = ensure_tags(vpc_conn, route_table.id, resource_tags, + if not tags_valid and tags is not None: + result = ensure_tags(connection, route_table.id, tags, add_only=True, check_mode=check_mode) changed = changed or result['changed'] - if routes is not None: - try: - result = ensure_routes(vpc_conn, route_table, routes, - propagating_vgw_ids, check_mode) - changed = changed or result['changed'] - except EC2ResponseError as e: - raise AnsibleRouteTableException( - 'Unable to ensure routes for route table {0}, error: {1}' - .format(route_table, e) - ) - if subnets: associated_subnets = [] try: - associated_subnets = find_subnets(vpc_conn, vpc_id, subnets) + associated_subnets = find_subnets(connection, vpc_id, subnets) except EC2ResponseError as e: raise AnsibleRouteTableException( 'Unable to find subnets for route table {0}, error: {1}' @@ -467,8 +532,7 @@ def ensure_route_table_present(vpc_conn, vpc_id, route_table_id, resource_tags, ) try: - result = ensure_subnet_associations( - vpc_conn, vpc_id, route_table, associated_subnets, check_mode) + result = ensure_subnet_associations(connection, vpc_id, route_table, associated_subnets, check_mode) changed = changed or result['changed'] except EC2ResponseError as e: raise AnsibleRouteTableException( @@ -476,23 +540,21 @@ def 
ensure_route_table_present(vpc_conn, vpc_id, route_table_id, resource_tags, .format(route_table, e) ) - return { - 'changed': changed, - 'route_table_id': route_table.id, - } + module.exit_json(changed=changed, route_table=get_route_table_info(route_table)) def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict( - vpc_id = dict(default=None, required=True), - route_table_id = dict(default=None, required=False), + lookup = dict(default='tag', required=False, choices=['tag', 'id']), propagating_vgw_ids = dict(default=None, required=False, type='list'), - resource_tags = dict(default=None, required=False, type='dict'), + route_table_id = dict(default=None, required=False), routes = dict(default=None, required=False, type='list'), + state = dict(default='present', choices=['present', 'absent']), subnets = dict(default=None, required=False, type='list'), - state = dict(default='present', choices=['present', 'absent']) + tags = dict(default=None, required=False, type='dict', aliases=['resource_tags']), + vpc_id = dict(default=None, required=True) ) ) @@ -511,34 +573,18 @@ def main(): else: module.fail_json(msg="region must be specified") - vpc_id = module.params.get('vpc_id') + lookup = module.params.get('lookup') route_table_id = module.params.get('route_table_id') - resource_tags = module.params.get('resource_tags') - propagating_vgw_ids = module.params.get('propagating_vgw_ids', []) - - routes = module.params.get('routes') - for route_spec in routes: - rename_key(route_spec, 'dest', 'destination_cidr_block') - - if 'gateway_id' in route_spec and route_spec['gateway_id'] and \ - route_spec['gateway_id'].lower() == 'igw': - igw = find_igw(connection, vpc_id) - route_spec['gateway_id'] = igw - - subnets = module.params.get('subnets') state = module.params.get('state', 'present') + if lookup == 'id' and route_table_id is None: + module.fail_json("You must specify route_table_id if lookup is set to id") + try: if state == 'present': - result = ensure_route_table_present( - connection, vpc_id, route_table_id, resource_tags, - routes, subnets, propagating_vgw_ids, module.check_mode - ) + result = ensure_route_table_present(connection, module) elif state == 'absent': - result = ensure_route_table_absent( - connection, vpc_id, route_table_id, resource_tags, - module.check_mode - ) + result = ensure_route_table_absent(connection, module) except AnsibleRouteTableException as e: module.fail_json(msg=str(e)) @@ -549,4 +595,4 @@ from ansible.module_utils.ec2 import * # noqa if __name__ == '__main__': main() - \ No newline at end of file + From 2dc67f7c6b98d3cde214beba48723a46532404a0 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Mon, 31 Aug 2015 15:53:02 +0200 Subject: [PATCH 41/44] cloudstack: cs_template: add new arg cross_zones --- cloud/cloudstack/cs_template.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/cloud/cloudstack/cs_template.py b/cloud/cloudstack/cs_template.py index d451ece7138..c6c482f9c0f 100644 --- a/cloud/cloudstack/cs_template.py +++ b/cloud/cloudstack/cs_template.py @@ -86,6 +86,12 @@ options: - Only used if C(state) is present. required: false default: false + cross_zones: + description: + - Whether the template should be syned across zones. + - Only used if C(state) is present. + required: false + default: false project: description: - Name of the project the template to be registered in. 
@@ -185,9 +191,8 @@ EXAMPLES = ''' url: "http://packages.shapeblue.com/systemvmtemplate/4.5/systemvm64template-4.5-vmware.ova" hypervisor: VMware format: OVA - zone: tokio-ix + cross_zones: yes os_type: Debian GNU/Linux 7(64-bit) - is_routing: yes # Create a template from a stopped virtual machine's volume - local_action: @@ -456,11 +461,15 @@ class AnsibleCloudStackTemplate(AnsibleCloudStack): args['isrouting'] = self.module.params.get('is_routing') args['sshkeyenabled'] = self.module.params.get('sshkey_enabled') args['hypervisor'] = self.get_hypervisor() - args['zoneid'] = self.get_zone(key='id') args['domainid'] = self.get_domain(key='id') args['account'] = self.get_account(key='name') args['projectid'] = self.get_project(key='id') + if not self.module.params.get('cross_zones'): + args['zoneid'] = self.get_zone(key='id') + else: + args['zoneid'] = -1 + if not self.module.check_mode: res = self.cs.registerTemplate(**args) if 'errortext' in res: @@ -473,11 +482,13 @@ class AnsibleCloudStackTemplate(AnsibleCloudStack): args = {} args['isready'] = self.module.params.get('is_ready') args['templatefilter'] = self.module.params.get('template_filter') - args['zoneid'] = self.get_zone(key='id') args['domainid'] = self.get_domain(key='id') args['account'] = self.get_account(key='name') args['projectid'] = self.get_project(key='id') + if not self.module.params.get('cross_zones'): + args['zoneid'] = self.get_zone(key='id') + # if checksum is set, we only look on that. checksum = self.module.params.get('checksum') if not checksum: @@ -543,6 +554,7 @@ def main(): details = dict(default=None), bits = dict(type='int', choices=[ 32, 64 ], default=64), state = dict(choices=['present', 'absent'], default='present'), + cross_zones = dict(type='bool', choices=BOOLEANS, default=False), zone = dict(default=None), domain = dict(default=None), account = dict(default=None), From c9785a69487c10026a46f58e529d94f3c008b9b7 Mon Sep 17 00:00:00 2001 From: Tim Bielawa Date: Mon, 31 Aug 2015 13:14:05 -0400 Subject: [PATCH 42/44] Fix capitalization in nagios 'services' parameter comment --- monitoring/nagios.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index 16edca2aa6a..ed1da7a1e2e 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -76,7 +76,7 @@ options: servicegroup: version_added: "2.0" description: - - the Servicegroup we want to set downtimes/alerts for. + - The Servicegroup we want to set downtimes/alerts for. B(Required) option when using the C(servicegroup_service_downtime) amd C(servicegroup_host_downtime). command: description: @@ -86,7 +86,7 @@ options: required: true default: null -author: "Tim Bielawa (@tbielawa)" +author: "Tim Bielawa (@tbielawa)" requirements: [ "Nagios" ] ''' From 0c1257b0c17da7f8ba629c35cb90fd2456da3828 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 1 Sep 2015 00:28:27 +0200 Subject: [PATCH 43/44] cloudstack: cs_instance: deploy instance in desired state on state=started/stopped Before this change, an instance must be present for make use of state=stopped/started. Now we are deploying an instance in the desire state if it does not exist. In this case all args needed to deploy the instance must be passed. However the short form for stopping/starting an _existing_ instance still works as before. 
--- cloud/cloudstack/cs_instance.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 4ead1317b2f..6f1339123d8 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -548,7 +548,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): return user_data - def deploy_instance(self): + def deploy_instance(self, start_vm=True): self.result['changed'] = True networkids = self.get_network_ids() if networkids is not None: @@ -573,6 +573,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): args['group'] = self.module.params.get('group') args['keypair'] = self.module.params.get('ssh_key') args['size'] = self.module.params.get('disk_size') + args['startvm'] = start_vm args['rootdisksize'] = self.module.params.get('root_disk_size') args['securitygroupnames'] = ','.join(self.module.params.get('security_groups')) args['affinitygroupnames'] = ','.join(self.module.params.get('affinity_groups')) @@ -700,10 +701,12 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): def stop_instance(self): instance = self.get_instance() + if not instance: - self.module.fail_json(msg="Instance named '%s' not found" % self.module.params.get('name')) + instance = self.deploy_instance(start_vm=False) + return instance - if instance['state'].lower() in ['stopping', 'stopped']: + elif instance['state'].lower() in ['stopping', 'stopped']: return instance if instance['state'].lower() in ['starting', 'running']: @@ -722,10 +725,12 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): def start_instance(self): instance = self.get_instance() + if not instance: - self.module.fail_json(msg="Instance named '%s' not found" % module.params.get('name')) + instance = self.deploy_instance() + return instance - if instance['state'].lower() in ['starting', 'running']: + elif instance['state'].lower() in ['starting', 'running']: return instance if instance['state'].lower() in ['stopped', 'stopping']: @@ -744,10 +749,12 @@ class AnsibleCloudStackInstance(AnsibleCloudStack): def restart_instance(self): instance = self.get_instance() + if not instance: - module.fail_json(msg="Instance named '%s' not found" % self.module.params.get('name')) + instance = self.deploy_instance() + return instance - if instance['state'].lower() in [ 'running', 'starting' ]: + elif instance['state'].lower() in [ 'running', 'starting' ]: self.result['changed'] = True if not self.module.check_mode: instance = self.cs.rebootVirtualMachine(id=instance['id']) From 65d3a3be59f028def687293ddac5aa92eeb705ce Mon Sep 17 00:00:00 2001 From: whiter Date: Wed, 2 Sep 2015 14:34:56 +0100 Subject: [PATCH 44/44] Remove debug print statement. Fixed ensure_propagation call to pass 'route_table' and 'connection'. 
--- cloud/amazon/ec2_vpc_route_table.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index a65efaa78fc..70f53bad26a 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -496,7 +496,6 @@ def ensure_route_table_present(connection, module): # If no route table returned then create new route table if route_table is None: - print route_table.keys() try: route_table = connection.create_route_table(vpc_id, check_mode) changed = True @@ -511,7 +510,7 @@ def ensure_route_table_present(connection, module): module.fail_json(msg=e.message) if propagating_vgw_ids is not None: - result = ensure_propagation(vpc_conn, route_table_id, + result = ensure_propagation(connection, route_table, propagating_vgw_ids, check_mode=check_mode) changed = changed or result['changed']
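With the lookup interface introduced late in this route-table series, an existing table can also be addressed directly by ID, for example to enable VGW route propagation alongside an IGW default route. A sketch only — the table, gateway and VPC IDs below are placeholders, not values from the patches:

- name: Enable VGW route propagation on an existing route table
  ec2_vpc_route_table:
    vpc_id: vpc-1245678
    region: us-west-1
    lookup: id
    route_table_id: rtb-1245678
    propagating_vgw_ids:
      - vgw-1245678
    routes:
      - dest: 0.0.0.0/0
        gateway_id: igw
  register: existing_route_table

Per the propagation changes above, the task leaves propagation untouched when a propagated route via the given VGW is already present, and routes pointing at a propagating VGW are not scheduled for deletion.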